gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
""" Code that calculates clutter by using running stats. """
from copy import deepcopy
from distributed import Client, LocalCluster
from .config import get_field_names
import warnings
import numpy as np
import pyart
try:
from dask import delayed
import dask.array as da
except ImportError:
warnings.warn('Dask is not installed. Radar clutter module'
+ ' needs Dask to be able to run.')
pass
def tall_clutter(files, config,
                 clutter_thresh_min=0.0002,
                 clutter_thresh_max=0.25, radius=1,
                 write_radar=True, out_file=None,
                 use_dask=False):
    """
    Wind Farm Clutter Calculation

    Parameters
    ----------
    files : list
        List of radar files used for the clutter calculation.
    config : str
        String representing the configuration for the radar.
        Such possible configurations are listed in default_config.py

    Other Parameters
    ----------------
    clutter_thresh_min : float
        Threshold value for which, any clutter values above the
        clutter_thres_min will be considered clutter, as long as they
        are also below the clutter_thres_max.
    clutter_thresh_max : float
        Threshold value for which, any clutter values below the
        clutter_thres_max will be considered clutter, as long as they
        are also above the clutter_thres_min.
    radius : int
        Radius of the area surrounding the clutter gate that will
        be also flagged as clutter.
    write_radar : bool
        Whether to or not, to write the clutter radar as a netCDF file.
        Default is True.
    out_file : string
        String of location and filename to write the radar object too,
        if write_radar is True.
    use_dask : bool
        Use dask instead of running stats for calculation. The will reduce
        run time.

    Returns
    -------
    clutter_radar : Radar
        Radar object with the clutter field that was calculated.
        This radar only has the clutter field, but maintains all
        other radar specifications.
    """
    field_names = get_field_names(config)
    refl_field = field_names["reflectivity"]
    vel_field = field_names["velocity"]
    ncp_field = field_names["normalized_coherent_power"]

    def get_reflect_array(file, first_shape):
        """ Retrieves a reflectivity array for a radar volume.

        Gates with low coherent power (NCP < 0.8) or above 2 km are
        masked out; masked gates are filled with NaN so the dask
        nan-reductions ignore them. Unreadable or shape-mismatched
        volumes contribute an all-NaN array instead. """
        try:
            radar = pyart.io.read(file, include_fields=[refl_field,
                                                        ncp_field, vel_field])
            reflect_array = deepcopy(radar.fields[refl_field]['data'])
            ncp = radar.fields[ncp_field]['data']
            height = radar.gate_z["data"]
            up_in_the_air = height > 2000.0
            the_mask = np.logical_or.reduce(
                (ncp < 0.8, reflect_array.mask, up_in_the_air))
            reflect_array = np.ma.masked_where(the_mask, reflect_array)
            del radar
            if reflect_array.shape == first_shape:
                return reflect_array.filled(fill_value=np.nan)
        except (TypeError, OSError):
            print(file + ' is corrupt...skipping!')
        # Shape mismatch or corrupt file: contribute only NaNs.
        return np.nan * np.zeros(first_shape)

    if use_dask is False:
        # Welford running-stats path: only one volume in memory at a time.
        run_stats = _RunningStats()
        first_shape = 0
        for file in files:
            try:
                radar = pyart.io.read(file)
                reflect_array = radar.fields[refl_field]['data']
                if first_shape == 0:
                    first_shape = reflect_array.shape
                    clutter_radar = radar
                # Push each volume exactly once, and only when it matches
                # the reference shape; pushing mismatched arrays would
                # corrupt (or crash) the running statistics.
                if reflect_array.shape == first_shape:
                    run_stats.push(reflect_array)
                del radar
            except (TypeError, OSError):
                print(file + ' is corrupt...skipping!')
                continue
        mean = run_stats.mean()
        stdev = run_stats.standard_deviation()
        clutter_values = stdev / mean
        clutter_values = np.ma.masked_invalid(clutter_values)
    else:
        # Dask path: load all volumes lazily and reduce in parallel.
        cluster = LocalCluster(n_workers=20, processes=True)
        client = Client(cluster)
        # Scan for the first readable volume to define the reference
        # shape and serve as the template radar.
        first_shape = 0
        i = 0
        while first_shape == 0:
            try:
                radar = pyart.io.read(files[i])
                reflect_array = radar.fields[refl_field]['data']
                first_shape = reflect_array.shape
                clutter_radar = radar
            except (TypeError, OSError):
                print(files[i] + ' is corrupt...skipping!')
                i = i + 1
                continue
        arrays = [delayed(get_reflect_array)(file, first_shape)
                  for file in files]
        array = [da.from_delayed(a, shape=first_shape, dtype=float)
                 for a in arrays]
        array = da.stack(array, axis=0)
        print('## Calculating mean in parallel...')
        mean = np.array(da.nanmean(array, axis=0))
        print('## Calculating standard deviation...')
        count = np.array(da.sum(da.isfinite(array), axis=0))
        stdev = np.array(da.nanstd(array, axis=0))
        # Release the local cluster now that the reductions are done.
        client.close()
        cluster.close()
        clutter_values = stdev / mean
        clutter_values = np.ma.masked_invalid(clutter_values)
        # Require at least 20 valid samples per gate for a stable estimate.
        clutter_values = np.ma.masked_where(np.logical_or(
            clutter_values.mask, count < 20), clutter_values)
    # Fill masked gates above the max threshold so they can never be
    # flagged as clutter by the threshold window below.
    clutter_values_no_mask = clutter_values.filled(
        (clutter_thresh_max + 1))
    shape = clutter_values.shape
    mask = np.ma.getmask(clutter_values)
    is_clutters = np.argwhere(
        np.logical_and.reduce((clutter_values_no_mask > clutter_thresh_min,
                               clutter_values_no_mask < clutter_thresh_max,
                               )))
    clutter_array = _clutter_marker(is_clutters, shape, mask, radius)
    clutter_radar.fields.clear()
    clutter_array = clutter_array.filled(0)
    clutter_dict = _clutter_to_dict(clutter_array)
    clutter_value_dict = _clutter_to_dict(clutter_values)
    clutter_value_dict["long_name"] = "Clutter value (std. dev/mean Z)"
    clutter_value_dict["standard_name"] = "clutter_value"
    clutter_radar.add_field('ground_clutter', clutter_dict,
                            replace_existing=True)
    clutter_radar.add_field('clutter_value', clutter_value_dict,
                            replace_existing=True)
    if write_radar is True:
        pyart.io.write_cfradial(out_file, clutter_radar)
    del clutter_radar
    return
# Adapted from http://stackoverflow.com/a/17637351/6392167
class _RunningStats():
""" Calculated Mean, Variance and Standard Deviation, but
uses the Welford algorithm to save memory. """
def __init__(self):
self.n = 0
self.old_m = 0
self.new_m = 0
self.old_s = 0
self.new_s = 0
def clear(self):
""" Clears n variable in stat calculation. """
self.n = 0
def push(self, x):
""" Takes an array and the previous array and calculates mean,
variance and standard deviation, and continues to take multiple
arrays one at a time. """
shape = x.shape
ones_arr = np.ones(shape)
mask = np.ma.getmask(x)
mask_ones = np.ma.array(ones_arr, mask=mask)
add_arr = np.ma.filled(mask_ones, fill_value=0.0)
self.n += add_arr
mask_n = np.ma.array(self.n, mask=mask)
fill_n = np.ma.filled(mask_n, fill_value=1.0)
if self.n.max() == 1.0:
self.old_m = self.new_m = np.ma.filled(x, 0.0)
self.old_s = np.zeros(shape)
else:
self.new_m = np.nansum(np.dstack(
(self.old_m, (x-self.old_m) / fill_n)), 2)
self.new_s = np.nansum(np.dstack(
(self.old_s, (x-self.old_m) * (x-self.new_m))), 2)
self.old_m = self.new_m
self.old_s = self.new_s
def mean(self):
""" Returns mean once all arrays are inputed. """
return self.new_m if np.any(self.n) else 0.0
def variance(self):
""" Returns variance once all arrays are inputed. """
return self.new_s / (self.n-1) if (self.n.max() > 1.0) else 0.0
def standard_deviation(self):
""" Returns standard deviation once all arrays are inputed. """
return np.ma.sqrt(self.variance())
def _clutter_marker(is_clutters, shape, mask, radius):
""" Takes clutter_values(stdev/mean)and the clutter_threshold
and calculates where X-SAPR wind farm clutter is occurring at
the SGP ARM site. """
temp_array = np.zeros(shape)
# Inserting here possible other fields that can help distinguish
# whether a gate is clutter or not.
temp_array = np.pad(temp_array, radius,
mode='constant', constant_values=-999)
is_clutters = is_clutters + radius
x_val, y_val = np.ogrid[-radius:(radius + 1),
-radius:(radius + 1)]
circle = (x_val*x_val) + (y_val*y_val) <= (radius*radius)
for is_clutter in is_clutters:
ray, gate = is_clutter[0], is_clutter[1]
frame = temp_array[ray - radius:ray + radius + 1,
gate - radius:gate + radius + 1]
temp_array[ray - radius:ray + radius + 1,
gate - radius:gate + radius + 1] = np.logical_or(
frame, circle)
temp_array = temp_array[radius:shape[0] + radius,
radius:shape[1] + radius]
clutter_array = np.ma.array(temp_array, mask=mask)
return clutter_array
def _clutter_to_dict(clutter_array):
""" Function that takes the clutter array
and turn it into a dictionary to be used and added
to the pyart radar object. """
clutter_dict = {}
clutter_dict['units'] = '1'
clutter_dict['data'] = clutter_array
clutter_dict['standard_name'] = 'ground_clutter'
clutter_dict['long_name'] = 'Ground Clutter'
clutter_dict['notes'] = '0: No Clutter, 1: Clutter'
return clutter_dict
| |
import sys
sys.path.append('..')
import os
import json
from time import time
import numpy as np
from tqdm import tqdm
from matplotlib import pyplot as plt
from sklearn.externals import joblib
import theano
import theano.tensor as T
from theano.sandbox.cuda.dnn import dnn_conv
from lib import activations
from lib import updates
from lib import inits
from lib.vis import color_grid_vis
from lib.rng import py_rng, np_rng
from lib.ops import batchnorm, conv_cond_concat, deconv, dropout, l2normalize
from lib.metrics import nnc_score, nnd_score
from lib.theano_utils import floatX, sharedX
from lib.data_utils import OneHot, shuffle, iter_data, center_crop, patch
from load import faces
def transform(X):
    """ Center-crop each image to npx x npx, reorder to
    (batch, channel, height, width) and rescale pixels from
    [0, 255] to [-1, 1] for the tanh-output generator. """
    X = [center_crop(x, npx) for x in X]
    return floatX(X).transpose(0, 3, 1, 2)/127.5 - 1.
def inverse_transform(X):
    """ Maps generator output in [-1, 1] back to [0, 1] images laid
    out as (batch, height, width, channels) for visualization. """
    images = X.reshape(-1, nc, npx, npx).transpose(0, 2, 3, 1)
    return (images + 1.) / 2.
# Training hyperparameters and model/architecture sizes.
k = 1             # # of discrim updates for each gen update
l2 = 1e-5         # l2 weight decay
nvis = 196        # # of samples to visualize during training
b1 = 0.5          # momentum term of adam
nc = 3            # # of channels in image
nbatch = 128      # # of examples in batch
npx = 64          # # of pixels width/height of images
nz = 100          # # of dim for Z
ngf = 128         # # of gen filters in first conv layer
ndf = 128         # # of discrim filters in first conv layer
nx = npx*npx*nc   # # of dimensions in X
niter = 25        # # of iter at starting learning rate
niter_decay = 0   # # of iter to linearly decay learning rate to zero
lr = 0.0002       # initial learning rate for adam
ntrain = 350000   # # of examples to train on

# Load the faces dataset streams and pull 10k examples for the
# validation set used by the nearest-neighbour distance scoring.
tr_data, te_data, tr_stream, val_stream, te_stream = faces(ntrain=ntrain)
tr_handle = tr_data.open()
vaX, = tr_data.get_data(tr_handle, slice(0, 10000))
vaX = transform(vaX)

# Output locations for logs, model snapshots and sample image grids.
desc = 'uncond_dcgan'
model_dir = 'models/%s'%desc
samples_dir = 'samples/%s'%desc
if not os.path.exists('logs/'):
    os.makedirs('logs/')
if not os.path.exists(model_dir):
    os.makedirs(model_dir)
if not os.path.exists(samples_dir):
    os.makedirs(samples_dir)
# Activation functions and the binary cross-entropy loss.
relu = activations.Rectify()
sigmoid = activations.Sigmoid()
lrelu = activations.LeakyRectify()
tanh = activations.Tanh()
bce = T.nnet.binary_crossentropy

# Weight initializers: N(0, 0.02) for conv/dense weights,
# N(1, 0.02) for batchnorm gains, constant 0 for batchnorm biases.
gifn = inits.Normal(scale=0.02)
difn = inits.Normal(scale=0.02)
gain_ifn = inits.Normal(loc=1., scale=0.02)
bias_ifn = inits.Constant(c=0.)

# Generator parameters: a dense projection (nz -> ngf*8*4*4) followed
# by three 5x5 deconv layers with batchnorm, then the output deconv.
gw = gifn((nz, ngf*8*4*4), 'gw')
gg = gain_ifn((ngf*8*4*4), 'gg')
gb = bias_ifn((ngf*8*4*4), 'gb')
gw2 = gifn((ngf*8, ngf*4, 5, 5), 'gw2')
gg2 = gain_ifn((ngf*4), 'gg2')
gb2 = bias_ifn((ngf*4), 'gb2')
gw3 = gifn((ngf*4, ngf*2, 5, 5), 'gw3')
gg3 = gain_ifn((ngf*2), 'gg3')
gb3 = bias_ifn((ngf*2), 'gb3')
gw4 = gifn((ngf*2, ngf, 5, 5), 'gw4')
gg4 = gain_ifn((ngf), 'gg4')
gb4 = bias_ifn((ngf), 'gb4')
gwx = gifn((ngf, nc, 5, 5), 'gwx')

# Discriminator parameters: four 5x5 conv layers (batchnorm on all but
# the first) followed by a dense layer producing one logit.
dw = difn((ndf, nc, 5, 5), 'dw')
dw2 = difn((ndf*2, ndf, 5, 5), 'dw2')
dg2 = gain_ifn((ndf*2), 'dg2')
db2 = bias_ifn((ndf*2), 'db2')
dw3 = difn((ndf*4, ndf*2, 5, 5), 'dw3')
dg3 = gain_ifn((ndf*4), 'dg3')
db3 = bias_ifn((ndf*4), 'db3')
dw4 = difn((ndf*8, ndf*4, 5, 5), 'dw4')
dg4 = gain_ifn((ndf*8), 'dg4')
db4 = bias_ifn((ndf*8), 'db4')
dwy = difn((ndf*8*4*4, 1), 'dwy')

gen_params = [gw, gg, gb, gw2, gg2, gb2, gw3, gg3, gb3, gw4, gg4, gb4, gwx]
discrim_params = [dw, dw2, dg2, db2, dw3, dg3, db3, dw4, dg4, db4, dwy]
def gen(Z, w, g, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wx):
    """ Generator network: projects Z to an (ngf*8, 4, 4) feature map,
    then upsamples through three batchnormed fractionally-strided
    convolutions into a tanh-activated image. """
    projected = relu(batchnorm(T.dot(Z, w), g=g, b=b))
    feat = projected.reshape((projected.shape[0], ngf*8, 4, 4))
    feat = relu(batchnorm(deconv(feat, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    feat = relu(batchnorm(deconv(feat, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    feat = relu(batchnorm(deconv(feat, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
    return tanh(deconv(feat, wx, subsample=(2, 2), border_mode=(2, 2)))
def discrim(X, w, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy):
    """ Discriminator network: four strided leaky-ReLU convolutions
    (batchnorm on all but the first) followed by a dense sigmoid
    real/fake probability. """
    feat = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
    feat = lrelu(batchnorm(dnn_conv(feat, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    feat = lrelu(batchnorm(dnn_conv(feat, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    feat = lrelu(batchnorm(dnn_conv(feat, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
    flattened = T.flatten(feat, 2)
    return sigmoid(T.dot(flattened, wy))
# Build the symbolic computation graph.
X = T.tensor4()
Z = T.matrix()
gX = gen(Z, *gen_params)
p_real = discrim(X, *discrim_params)
p_gen = discrim(gX, *discrim_params)
# Standard GAN losses: discriminator labels real as 1 / fake as 0,
# generator is trained to make the discriminator output 1 on fakes.
d_cost_real = bce(p_real, T.ones(p_real.shape)).mean()
d_cost_gen = bce(p_gen, T.zeros(p_gen.shape)).mean()
g_cost_d = bce(p_gen, T.ones(p_gen.shape)).mean()
d_cost = d_cost_real + d_cost_gen
g_cost = g_cost_d
cost = [g_cost, d_cost, g_cost_d, d_cost_real, d_cost_gen]
# Adam updaters sharing one learning-rate variable so it can be decayed.
lrt = sharedX(lr)
d_updater = updates.Adam(lr=lrt, b1=b1, regularizer=updates.Regularizer(l2=l2))
g_updater = updates.Adam(lr=lrt, b1=b1, regularizer=updates.Regularizer(l2=l2))
d_updates = d_updater(discrim_params, d_cost)
g_updates = g_updater(gen_params, g_cost)
updates = d_updates + g_updates  # NOTE: rebinds (shadows) the imported `updates` module
print 'COMPILING'
t = time()
_train_g = theano.function([X, Z], cost, updates=g_updates)
_train_d = theano.function([X, Z], cost, updates=d_updates)
_gen = theano.function([Z], gX)
print '%.2f seconds to compile theano functions'%(time()-t)
# Fixed visualization data: a grid of real examples and a fixed Z
# batch reused every epoch so samples are comparable across epochs.
vis_idxs = py_rng.sample(np.arange(len(vaX)), nvis)
vaX_vis = inverse_transform(vaX[vis_idxs])
color_grid_vis(vaX_vis, (14, 14), 'samples/%s_etl_test.png'%desc)
sample_zmb = floatX(np_rng.uniform(-1., 1., size=(nvis, nz)))
def gen_samples(n, nbatch=128):
    """ Draws n images from the generator in minibatches of nbatch
    and returns them stacked along the first axis. """
    batches = []
    generated = 0
    for _ in range(n // nbatch):
        noise = floatX(np_rng.uniform(-1., 1., size=(nbatch, nz)))
        batch_out = _gen(noise)
        batches.append(batch_out)
        generated += len(batch_out)
    # One final (possibly empty) batch covers the remainder.
    noise = floatX(np_rng.uniform(-1., 1., size=(n - generated, nz)))
    batches.append(_gen(noise))
    return np.concatenate(batches, axis=0)
# One ndjson log record per epoch with these fields.
f_log = open('logs/%s.ndjson'%desc, 'wb')
log_fields = [
    'n_epochs',
    'n_updates',
    'n_examples',
    'n_seconds',
    '1k_va_nnd',
    '10k_va_nnd',
    '100k_va_nnd',
    'g_cost',
    'd_cost',
]
# Flatten validation images for nearest-neighbour distance scoring.
vaX = vaX.reshape(len(vaX), -1)
print desc.upper()
n_updates = 0
n_check = 0
n_epochs = 0
n_updates = 0
n_examples = 0
t = time()
for epoch in range(niter):
    for imb, in tqdm(tr_stream.get_epoch_iterator(), total=ntrain/nbatch):
        imb = transform(imb)
        zmb = floatX(np_rng.uniform(-1., 1., size=(len(imb), nz)))
        # Alternate updates: one generator step per k discriminator steps.
        if n_updates % (k+1) == 0:
            cost = _train_g(imb, zmb)
        else:
            cost = _train_d(imb, zmb)
        n_updates += 1
        n_examples += len(imb)
    # Costs from the last minibatch of the epoch.
    g_cost = float(cost[0])
    d_cost = float(cost[1])
    # Score generated samples against validation data (NND metric).
    gX = gen_samples(100000)
    gX = gX.reshape(len(gX), -1)
    va_nnd_1k = nnd_score(gX[:1000], vaX, metric='euclidean')
    va_nnd_10k = nnd_score(gX[:10000], vaX, metric='euclidean')
    va_nnd_100k = nnd_score(gX[:100000], vaX, metric='euclidean')
    log = [n_epochs, n_updates, n_examples, time()-t, va_nnd_1k, va_nnd_10k, va_nnd_100k, g_cost, d_cost]
    print '%.0f %.2f %.2f %.2f %.4f %.4f'%(epoch, va_nnd_1k, va_nnd_10k, va_nnd_100k, g_cost, d_cost)
    f_log.write(json.dumps(dict(zip(log_fields, log)))+'\n')
    f_log.flush()
    # Save a grid of samples from the fixed Z batch for this epoch.
    samples = np.asarray(_gen(sample_zmb))
    color_grid_vis(inverse_transform(samples), (14, 14), 'samples/%s/%d.png'%(desc, n_epochs))
    n_epochs += 1
    # Linear learning-rate decay after the initial niter epochs
    # (inactive in this run since niter_decay == 0).
    if n_epochs > niter:
        lrt.set_value(floatX(lrt.get_value() - lr/niter_decay))
    # Snapshot parameters at selected epochs.
    if n_epochs in [1, 2, 3, 4, 5, 10, 15, 20, 25]:
        joblib.dump([p.get_value() for p in gen_params], 'models/%s/%d_gen_params.jl'%(desc, n_epochs))
        joblib.dump([p.get_value() for p in discrim_params], 'models/%s/%d_discrim_params.jl'%(desc, n_epochs))
| |
"""Hypergeometric and Meijer G-functions"""
from __future__ import print_function, division
from sympy.core import S, I, pi, oo, ilcm, Mod
from sympy.core.function import Function, Derivative, ArgumentIndexError
from sympy.core.containers import Tuple
from sympy.core.compatibility import reduce, range
from sympy.core.mul import Mul
from sympy.core.symbol import Dummy
from sympy.functions import (sqrt, exp, log, sin, cos, asin, atan,
sinh, cosh, asinh, acosh, atanh, acoth)
class TupleArg(Tuple):
    def limit(self, x, xlim, dir='+'):
        """ Compute the limit x->xlim componentwise, returning a new
        TupleArg of the per-component limits. """
        from sympy.series.limits import limit
        return TupleArg(*(limit(component, x, xlim, dir)
                          for component in self.args))
# TODO should __new__ accept **options?
# TODO should constructors check whether parameters are sensible?
def _prep_tuple(v):
    """
    Turn an iterable argument V into a Tuple and unpolarify, since both
    hypergeometric and meijer g-functions are unbranched in their parameters.

    Examples
    ========

    >>> from sympy.functions.special.hyper import _prep_tuple
    >>> _prep_tuple([1, 2, 3])
    (1, 2, 3)
    >>> _prep_tuple((4, 5))
    (4, 5)
    >>> _prep_tuple((7, 8, 9))
    (7, 8, 9)
    """
    from sympy.simplify.simplify import unpolarify
    return TupleArg(*(unpolarify(p) for p in v))
class TupleParametersBase(Function):
    """ Base class that takes care of differentiation, when some of
    the arguments are actually tuples. """
    # This is not deduced automatically since there are Tuples as arguments.
    is_commutative = True

    def _eval_derivative(self, s):
        # Chain rule over the tuple parameters: sum fdiff wrt each
        # parameter times that parameter's derivative, then add the
        # derivative wrt the main argument (args[2]).
        try:
            res = 0
            if self.args[0].has(s) or self.args[1].has(s):
                for i, p in enumerate(self._diffargs):
                    m = self._diffargs[i].diff(s)
                    if m != 0:
                        # fdiff((1, i)) is the subclass hook for
                        # differentiation wrt the i-th tuple parameter.
                        res += self.fdiff((1, i))*m
            return res + self.fdiff(3)*self.args[2].diff(s)
        except (ArgumentIndexError, NotImplementedError):
            # Parameter derivative not expressible: leave unevaluated.
            return Derivative(self, s)
class hyper(TupleParametersBase):
    r"""
    The (generalized) hypergeometric function is defined by a series where
    the ratios of successive terms are a rational function of the summation
    index. When convergent, it is continued analytically to the largest
    possible domain.

    The hypergeometric function depends on two vectors of parameters, called
    the numerator parameters :math:`a_p`, and the denominator parameters
    :math:`b_q`. It also has an argument :math:`z`. The series definition is

    .. math ::
        {}_pF_q\left(\begin{matrix} a_1, \dots, a_p \\ b_1, \dots, b_q \end{matrix}
                     \middle| z \right)
        = \sum_{n=0}^\infty \frac{(a_1)_n \dots (a_p)_n}{(b_1)_n \dots (b_q)_n}
                            \frac{z^n}{n!},

    where :math:`(a)_n = (a)(a+1)\dots(a+n-1)` denotes the rising factorial.

    If one of the :math:`b_q` is a non-positive integer then the series is
    undefined unless one of the `a_p` is a larger (i.e. smaller in
    magnitude) non-positive integer. If none of the :math:`b_q` is a
    non-positive integer and one of the :math:`a_p` is a non-positive
    integer, then the series reduces to a polynomial. To simplify the
    following discussion, we assume that none of the :math:`a_p` or
    :math:`b_q` is a non-positive integer. For more details, see the
    references.

    The series converges for all :math:`z` if :math:`p \le q`, and thus
    defines an entire single-valued function in this case. If :math:`p =
    q+1` the series converges for :math:`|z| < 1`, and can be continued
    analytically into a half-plane. If :math:`p > q+1` the series is
    divergent for all :math:`z`.

    Note: The hypergeometric function constructor currently does *not* check
    if the parameters actually yield a well-defined function.

    Examples
    ========

    The parameters :math:`a_p` and :math:`b_q` can be passed as arbitrary
    iterables, for example:

    >>> from sympy.functions import hyper
    >>> from sympy.abc import x, n, a
    >>> hyper((1, 2, 3), [3, 4], x)
    hyper((1, 2, 3), (3, 4), x)

    There is also pretty printing (it looks better using unicode):

    >>> from sympy import pprint
    >>> pprint(hyper((1, 2, 3), [3, 4], x), use_unicode=False)
      _
     |_  /1, 2, 3 |  \
     |   |        | x|
    3  2 \  3, 4  |  /

    The parameters must always be iterables, even if they are vectors of
    length one or zero:

    >>> hyper((1, ), [], x)
    hyper((1,), (), x)

    But of course they may be variables (but if they depend on x then you
    should not expect much implemented functionality):

    >>> hyper((n, a), (n**2,), x)
    hyper((n, a), (n**2,), x)

    The hypergeometric function generalizes many named special functions.
    The function hyperexpand() tries to express a hypergeometric function
    using named special functions.
    For example:

    >>> from sympy import hyperexpand
    >>> hyperexpand(hyper([], [], x))
    exp(x)

    You can also use expand_func:

    >>> from sympy import expand_func
    >>> expand_func(x*hyper([1, 1], [2], -x))
    log(x + 1)

    More examples:

    >>> from sympy import S
    >>> hyperexpand(hyper([], [S(1)/2], -x**2/4))
    cos(x)
    >>> hyperexpand(x*hyper([S(1)/2, S(1)/2], [S(3)/2], x**2))
    asin(x)

    We can also sometimes hyperexpand parametric functions:

    >>> from sympy.abc import a
    >>> hyperexpand(hyper([-a], [], x))
    (-x + 1)**a

    See Also
    ========

    sympy.simplify.hyperexpand
    sympy.functions.special.gamma_functions.gamma
    meijerg

    References
    ==========

    .. [1] Luke, Y. L. (1969), The Special Functions and Their Approximations,
           Volume 1
    .. [2] http://en.wikipedia.org/wiki/Generalized_hypergeometric_function
    """

    def __new__(cls, ap, bq, z):
        # TODO should we check convergence conditions?
        return Function.__new__(cls, _prep_tuple(ap), _prep_tuple(bq), z)

    @classmethod
    def eval(cls, ap, bq, z):
        """ Unpolarify the argument when p <= q (the series is then
        entire, so branch information in z is irrelevant). """
        from sympy import unpolarify
        if len(ap) <= len(bq):
            nz = unpolarify(z)
            if z != nz:
                return hyper(ap, bq, nz)

    def fdiff(self, argindex=3):
        """ Derivative wrt the argument:
        d/dz pFq(a; b; z) = (prod(a)/prod(b)) * pFq(a+1; b+1; z). """
        if argindex != 3:
            raise ArgumentIndexError(self, argindex)
        nap = Tuple(*[a + 1 for a in self.ap])
        nbq = Tuple(*[b + 1 for b in self.bq])
        fac = Mul(*self.ap)/Mul(*self.bq)
        return fac*hyper(nap, nbq, self.argument)

    def _eval_expand_func(self, **hints):
        from sympy import gamma, hyperexpand
        # 2F1 evaluated at z = 1 has a closed form in gamma functions;
        # everything else is delegated to hyperexpand.
        if len(self.ap) == 2 and len(self.bq) == 1 and self.argument == 1:
            a, b = self.ap
            c = self.bq[0]
            return gamma(c)*gamma(c - a - b)/gamma(c - a)/gamma(c - b)
        return hyperexpand(self)

    def _eval_rewrite_as_Sum(self, ap, bq, z):
        """ Rewrite as the defining power series, guarded by the
        convergence statement. """
        from sympy.functions import factorial, RisingFactorial, Piecewise
        from sympy import Sum
        n = Dummy("n", integer=True)
        rfap = Tuple(*[RisingFactorial(a, n) for a in ap])
        rfbq = Tuple(*[RisingFactorial(b, n) for b in bq])
        coeff = Mul(*rfap) / Mul(*rfbq)
        return Piecewise((Sum(coeff * z**n / factorial(n), (n, 0, oo)),
                          self.convergence_statement), (self, True))

    @property
    def argument(self):
        """ Argument of the hypergeometric function. """
        return self.args[2]

    @property
    def ap(self):
        """ Numerator parameters of the hypergeometric function. """
        return Tuple(*self.args[0])

    @property
    def bq(self):
        """ Denominator parameters of the hypergeometric function. """
        return Tuple(*self.args[1])

    @property
    def _diffargs(self):
        return self.ap + self.bq

    @property
    def eta(self):
        """ A quantity related to the convergence of the series. """
        return sum(self.ap) - sum(self.bq)

    @property
    def radius_of_convergence(self):
        """
        Compute the radius of convergence of the defining series.

        Note that even if this is not oo, the function may still be evaluated
        outside of the radius of convergence by analytic continuation. But if
        this is zero, then the function is not actually defined anywhere else.

        >>> from sympy.functions import hyper
        >>> from sympy.abc import z
        >>> hyper((1, 2), [3], z).radius_of_convergence
        1
        >>> hyper((1, 2, 3), [4], z).radius_of_convergence
        0
        >>> hyper((1, 2), (3, 4), z).radius_of_convergence
        oo
        """
        if any(a.is_integer and (a <= 0) == True for a in self.ap + self.bq):
            # Non-positive integer parameters: try to cancel each such
            # denominator parameter against a "larger" numerator one;
            # if that fails, the series is undefined (radius 0).
            aints = [a for a in self.ap if a.is_Integer and (a <= 0) == True]
            bints = [a for a in self.bq if a.is_Integer and (a <= 0) == True]
            if len(aints) < len(bints):
                return S(0)
            popped = False
            for b in bints:
                cancelled = False
                while aints:
                    a = aints.pop()
                    if a >= b:
                        cancelled = True
                        break
                    popped = True
                if not cancelled:
                    return S(0)
            if aints or popped:
                # There are still non-positive numerator parameters.
                # This is a polynomial.
                return oo
        if len(self.ap) == len(self.bq) + 1:
            return S(1)
        elif len(self.ap) <= len(self.bq):
            return oo
        else:
            return S(0)

    @property
    def convergence_statement(self):
        """ Return a condition on z under which the series converges. """
        from sympy import And, Or, re, Ne, oo
        R = self.radius_of_convergence
        if R == 0:
            return False
        if R == oo:
            return True
        # The special functions and their approximations, page 44
        e = self.eta
        z = self.argument
        c1 = And(re(e) < 0, abs(z) <= 1)
        c2 = And(0 <= re(e), re(e) < 1, abs(z) <= 1, Ne(z, 1))
        c3 = And(re(e) >= 1, abs(z) < 1)
        return Or(c1, c2, c3)

    def _eval_simplify(self, ratio, measure):
        from sympy.simplify.hyperexpand import hyperexpand
        return hyperexpand(self)

    def _sage_(self):
        # Conversion hook used by Sage's sympy interface.
        import sage.all as sage
        ap = [arg._sage_() for arg in self.args[0]]
        bq = [arg._sage_() for arg in self.args[1]]
        return sage.hypergeometric(ap, bq, self.argument._sage_())
class meijerg(TupleParametersBase):
r"""
The Meijer G-function is defined by a Mellin-Barnes type integral that
resembles an inverse Mellin transform. It generalizes the hypergeometric
functions.
The Meijer G-function depends on four sets of parameters. There are
"*numerator parameters*"
:math:`a_1, \dots, a_n` and :math:`a_{n+1}, \dots, a_p`, and there are
"*denominator parameters*"
:math:`b_1, \dots, b_m` and :math:`b_{m+1}, \dots, b_q`.
Confusingly, it is traditionally denoted as follows (note the position
of `m`, `n`, `p`, `q`, and how they relate to the lengths of the four
parameter vectors):
.. math ::
G_{p,q}^{m,n} \left(\begin{matrix}a_1, \dots, a_n & a_{n+1}, \dots, a_p \\
b_1, \dots, b_m & b_{m+1}, \dots, b_q
\end{matrix} \middle| z \right).
However, in sympy the four parameter vectors are always available
separately (see examples), so that there is no need to keep track of the
decorating sub- and super-scripts on the G symbol.
The G function is defined as the following integral:
.. math ::
\frac{1}{2 \pi i} \int_L \frac{\prod_{j=1}^m \Gamma(b_j - s)
\prod_{j=1}^n \Gamma(1 - a_j + s)}{\prod_{j=m+1}^q \Gamma(1- b_j +s)
\prod_{j=n+1}^p \Gamma(a_j - s)} z^s \mathrm{d}s,
where :math:`\Gamma(z)` is the gamma function. There are three possible
contours which we will not describe in detail here (see the references).
If the integral converges along more than one of them the definitions
agree. The contours all separate the poles of :math:`\Gamma(1-a_j+s)`
from the poles of :math:`\Gamma(b_k-s)`, so in particular the G function
is undefined if :math:`a_j - b_k \in \mathbb{Z}_{>0}` for some
:math:`j \le n` and :math:`k \le m`.
The conditions under which one of the contours yields a convergent integral
are complicated and we do not state them here, see the references.
Note: Currently the Meijer G-function constructor does *not* check any
convergence conditions.
Examples
========
You can pass the parameters either as four separate vectors:
>>> from sympy.functions import meijerg
>>> from sympy.abc import x, a
>>> from sympy.core.containers import Tuple
>>> from sympy import pprint
>>> pprint(meijerg((1, 2), (a, 4), (5,), [], x), use_unicode=False)
__1, 2 /1, 2 a, 4 | \
/__ | | x|
\_|4, 1 \ 5 | /
or as two nested vectors:
>>> pprint(meijerg([(1, 2), (3, 4)], ([5], Tuple()), x), use_unicode=False)
__1, 2 /1, 2 3, 4 | \
/__ | | x|
\_|4, 1 \ 5 | /
As with the hypergeometric function, the parameters may be passed as
arbitrary iterables. Vectors of length zero and one also have to be
passed as iterables. The parameters need not be constants, but if they
depend on the argument then not much implemented functionality should be
expected.
All the subvectors of parameters are available:
>>> from sympy import pprint
>>> g = meijerg([1], [2], [3], [4], x)
>>> pprint(g, use_unicode=False)
__1, 1 /1 2 | \
/__ | | x|
\_|2, 2 \3 4 | /
>>> g.an
(1,)
>>> g.ap
(1, 2)
>>> g.aother
(2,)
>>> g.bm
(3,)
>>> g.bq
(3, 4)
>>> g.bother
(4,)
The Meijer G-function generalizes the hypergeometric functions.
In some cases it can be expressed in terms of hypergeometric functions,
using Slater's theorem. For example:
>>> from sympy import hyperexpand
>>> from sympy.abc import a, b, c
>>> hyperexpand(meijerg([a], [], [c], [b], x), allow_hyper=True)
x**c*gamma(-a + c + 1)*hyper((-a + c + 1,),
(-b + c + 1,), -x)/gamma(-b + c + 1)
Thus the Meijer G-function also subsumes many named functions as special
cases. You can use expand_func or hyperexpand to (try to) rewrite a
Meijer G-function in terms of named special functions. For example:
>>> from sympy import expand_func, S
>>> expand_func(meijerg([[],[]], [[0],[]], -x))
exp(x)
>>> hyperexpand(meijerg([[],[]], [[S(1)/2],[0]], (x/2)**2))
sin(x)/sqrt(pi)
See Also
========
hyper
sympy.simplify.hyperexpand
References
==========
.. [1] Luke, Y. L. (1969), The Special Functions and Their Approximations,
Volume 1
.. [2] http://en.wikipedia.org/wiki/Meijer_G-function
"""
def __new__(cls, *args):
if len(args) == 5:
args = [(args[0], args[1]), (args[2], args[3]), args[4]]
if len(args) != 3:
raise TypeError("args must eiter be as, as', bs, bs', z or "
"as, bs, z")
def tr(p):
if len(p) != 2:
raise TypeError("wrong argument")
return TupleArg(_prep_tuple(p[0]), _prep_tuple(p[1]))
# TODO should we check convergence conditions?
return Function.__new__(cls, tr(args[0]), tr(args[1]), args[2])
def fdiff(self, argindex=3):
if argindex != 3:
return self._diff_wrt_parameter(argindex[1])
if len(self.an) >= 1:
a = list(self.an)
a[0] -= 1
G = meijerg(a, self.aother, self.bm, self.bother, self.argument)
return 1/self.argument * ((self.an[0] - 1)*self + G)
elif len(self.bm) >= 1:
b = list(self.bm)
b[0] += 1
G = meijerg(self.an, self.aother, b, self.bother, self.argument)
return 1/self.argument * (self.bm[0]*self - G)
else:
return S.Zero
def _diff_wrt_parameter(self, idx):
# Differentiation wrt a parameter can only be done in very special
# cases. In particular, if we want to differentiate with respect to
# `a`, all other gamma factors have to reduce to rational functions.
#
# Let MT denote mellin transform. Suppose T(-s) is the gamma factor
# appearing in the definition of G. Then
#
# MT(log(z)G(z)) = d/ds T(s) = d/da T(s) + ...
#
# Thus d/da G(z) = log(z)G(z) - ...
# The ... can be evaluated as a G function under the above conditions,
# the formula being most easily derived by using
#
# d Gamma(s + n) Gamma(s + n) / 1 1 1 \
# -- ------------ = ------------ | - + ---- + ... + --------- |
# ds Gamma(s) Gamma(s) \ s s + 1 s + n - 1 /
#
# which follows from the difference equation of the digamma function.
# (There is a similar equation for -n instead of +n).
# We first figure out how to pair the parameters.
an = list(self.an)
ap = list(self.aother)
bm = list(self.bm)
bq = list(self.bother)
if idx < len(an):
an.pop(idx)
else:
idx -= len(an)
if idx < len(ap):
ap.pop(idx)
else:
idx -= len(ap)
if idx < len(bm):
bm.pop(idx)
else:
bq.pop(idx - len(bm))
pairs1 = []
pairs2 = []
for l1, l2, pairs in [(an, bq, pairs1), (ap, bm, pairs2)]:
while l1:
x = l1.pop()
found = None
for i, y in enumerate(l2):
if not Mod((x - y).simplify(), 1):
found = i
break
if found is None:
raise NotImplementedError('Derivative not expressible '
'as G-function?')
y = l2[i]
l2.pop(i)
pairs.append((x, y))
# Now build the result.
res = log(self.argument)*self
for a, b in pairs1:
sign = 1
n = a - b
base = b
if n < 0:
sign = -1
n = b - a
base = a
for k in range(n):
res -= sign*meijerg(self.an + (base + k + 1,), self.aother,
self.bm, self.bother + (base + k + 0,),
self.argument)
for a, b in pairs2:
sign = 1
n = b - a
base = a
if n < 0:
sign = -1
n = a - b
base = b
for k in range(n):
res -= sign*meijerg(self.an, self.aother + (base + k + 1,),
self.bm + (base + k + 0,), self.bother,
self.argument)
return res
def get_period(self):
"""
Return a number P such that G(x*exp(I*P)) == G(x).
>>> from sympy.functions.special.hyper import meijerg
>>> from sympy.abc import z
>>> from sympy import pi, S
>>> meijerg([1], [], [], [], z).get_period()
2*pi
>>> meijerg([pi], [], [], [], z).get_period()
oo
>>> meijerg([1, 2], [], [], [], z).get_period()
oo
>>> meijerg([1,1], [2], [1, S(1)/2, S(1)/3], [1], z).get_period()
12*pi
"""
# This follows from slater's theorem.
def compute(l):
# first check that no two differ by an integer
for i, b in enumerate(l):
if not b.is_Rational:
return oo
for j in range(i + 1, len(l)):
if not Mod((b - l[j]).simplify(), 1):
return oo
return reduce(ilcm, (x.q for x in l), 1)
beta = compute(self.bm)
alpha = compute(self.an)
p, q = len(self.ap), len(self.bq)
if p == q:
if beta == oo or alpha == oo:
return oo
return 2*pi*ilcm(alpha, beta)
elif p < q:
return 2*pi*beta
else:
return 2*pi*alpha
def _eval_expand_func(self, **hints):
from sympy import hyperexpand
return hyperexpand(self)
    def _eval_evalf(self, prec):
        # The default code is insufficient for polar arguments.
        # mpmath provides an optional argument "r", which evaluates
        # G(z**(1/r)). I am not sure what its intended use is, but we hijack it
        # here in the following way: to evaluate at a number z of |argument|
        # less than (say) n*pi, we put r=1/n, compute z' = root(z, n)
        # (carefully so as not to loose the branch information), and evaluate
        # G(z'**(1/r)) = G(z'**n) = G(z).
        from sympy.functions import exp_polar, ceiling
        from sympy import Expr
        import mpmath
        z = self.argument
        znum = self.argument._eval_evalf(prec)
        if znum.has(exp_polar):
            # Split off the branch factor: znum = coeff * exp_polar(I*branch).
            znum, branch = znum.as_coeff_mul(exp_polar)
            if len(branch) != 1:
                # More than one exp_polar factor -- give up; returning None
                # leaves the expression unevaluated.
                return
            branch = branch[0].args[0]/I
        else:
            branch = S(0)
        # Choose n large enough that |branch/n| < pi, so taking the n-th
        # root below stays on the principal sheet.
        n = ceiling(abs(branch/S.Pi)) + 1
        znum = znum**(S(1)/n)*exp(I*branch / n)
        # Convert all args to mpf or mpc
        try:
            [z, r, ap, bq] = [arg._to_mpmath(prec)
                    for arg in [znum, 1/n, self.args[0], self.args[1]]]
        except ValueError:
            # Some argument is not numeric at this precision.
            return
        with mpmath.workprec(prec):
            v = mpmath.meijerg(ap, bq, z, r)
        return Expr._from_mpmath(v, prec)
def integrand(self, s):
""" Get the defining integrand D(s). """
from sympy import gamma
return self.argument**s \
* Mul(*(gamma(b - s) for b in self.bm)) \
* Mul(*(gamma(1 - a + s) for a in self.an)) \
/ Mul(*(gamma(1 - b + s) for b in self.bother)) \
/ Mul(*(gamma(a - s) for a in self.aother))
@property
def argument(self):
""" Argument of the Meijer G-function. """
return self.args[2]
@property
def an(self):
""" First set of numerator parameters. """
return Tuple(*self.args[0][0])
@property
def ap(self):
""" Combined numerator parameters. """
return Tuple(*(self.args[0][0] + self.args[0][1]))
@property
def aother(self):
""" Second set of numerator parameters. """
return Tuple(*self.args[0][1])
@property
def bm(self):
""" First set of denominator parameters. """
return Tuple(*self.args[1][0])
@property
def bq(self):
""" Combined denominator parameters. """
return Tuple(*(self.args[1][0] + self.args[1][1]))
@property
def bother(self):
""" Second set of denominator parameters. """
return Tuple(*self.args[1][1])
@property
def _diffargs(self):
return self.ap + self.bq
@property
def nu(self):
""" A quantity related to the convergence region of the integral,
c.f. references. """
return sum(self.bq) - sum(self.ap)
@property
def delta(self):
""" A quantity related to the convergence region of the integral,
c.f. references. """
return len(self.bm) + len(self.an) - S(len(self.ap) + len(self.bq))/2
class HyperRep(Function):
    """
    A base class for "hyper representation functions".
    This is used exclusively in hyperexpand(), but fits more logically here.
    pFq is branched at 1 if p == q+1. For use with slater-expansion, we want
    define an "analytic continuation" to all polar numbers, which is
    continuous on circles and on the ray t*exp_polar(I*pi). Moreover, we want
    a "nice" expression for the various cases.
    This base class contains the core logic, concrete derived classes only
    supply the actual functions.
    """
    @classmethod
    def eval(cls, *args):
        from sympy import unpolarify
        # Unpolarify all parameters but keep the argument (last element)
        # polar: the branch information lives in the argument.
        newargs = tuple(map(unpolarify, args[:-1])) + args[-1:]
        if args != newargs:
            return cls(*newargs)
    @classmethod
    def _expr_small(cls, x):
        """ An expression for F(x) which holds for |x| < 1. """
        raise NotImplementedError
    @classmethod
    def _expr_small_minus(cls, x):
        """ An expression for F(-x) which holds for |x| < 1. """
        raise NotImplementedError
    @classmethod
    def _expr_big(cls, x, n):
        """ An expression for F(exp_polar(2*I*pi*n)*x), |x| > 1. """
        raise NotImplementedError
    @classmethod
    def _expr_big_minus(cls, x, n):
        """ An expression for F(exp_polar(2*I*pi*n + pi*I)*x), |x| > 1. """
        raise NotImplementedError
    def _eval_rewrite_as_nonrep(self, *args):
        from sympy import Piecewise
        # Split the polar argument as x*exp_polar(2*pi*I*n); n may be a
        # half-integer, which signals a point on the exp_polar(I*pi) ray.
        x, n = self.args[-1].extract_branch_factor(allow_half=True)
        minus = False
        newargs = self.args[:-1] + (x,)
        if not n.is_Integer:
            # Half-integer branch factor: use the F(-x) ("minus")
            # expressions with the integer part of n.
            minus = True
            n -= S(1)/2
        newerargs = newargs + (n,)
        if minus:
            small = self._expr_small_minus(*newargs)
            big = self._expr_big_minus(*newerargs)
        else:
            small = self._expr_small(*newargs)
            big = self._expr_big(*newerargs)
        if big == small:
            return small
        # Pick the expansion valid on the actual side of the unit circle.
        return Piecewise((big, abs(x) > 1), (small, True))
    def _eval_rewrite_as_nonrepsmall(self, *args):
        # Like _eval_rewrite_as_nonrep, but unconditionally use the
        # |x| < 1 expressions.
        x, n = self.args[-1].extract_branch_factor(allow_half=True)
        args = self.args[:-1] + (x,)
        if not n.is_Integer:
            return self._expr_small_minus(*args)
        return self._expr_small(*args)
class HyperRep_power1(HyperRep):
    """Representative for hyper([-a], [], z) == (1 - z)**a."""
    @classmethod
    def _expr_small(cls, a, z):
        # Principal expression, valid for |z| < 1.
        return (1 - z)**a
    @classmethod
    def _expr_small_minus(cls, a, z):
        # Same with z -> -z.
        return (1 + z)**a
    @classmethod
    def _expr_big(cls, a, z, n):
        # For integer a there is no branch point, so the small expression
        # holds on every sheet.
        if not a.is_integer:
            return (z - 1)**a*exp((2*n - 1)*pi*I*a)
        return cls._expr_small(a, z)
    @classmethod
    def _expr_big_minus(cls, a, z, n):
        if not a.is_integer:
            return (1 + z)**a*exp(2*n*pi*I*a)
        return cls._expr_small_minus(a, z)
class HyperRep_power2(HyperRep):
    """Representative for hyper([a, a - 1/2], [2*a], z)."""
    @classmethod
    def _expr_small(cls, a, z):
        return 2**(2*a - 1)*(1 + sqrt(1 - z))**(1 - 2*a)
    @classmethod
    def _expr_small_minus(cls, a, z):
        return 2**(2*a - 1)*(1 + sqrt(1 + z))**(1 - 2*a)
    @classmethod
    def _expr_big(cls, a, z, n):
        # On odd sheets flip the sign in front of the square root and step
        # down to the even sheet below.
        if n.is_odd:
            sign, n = 1, n - 1
        else:
            sign = -1
        return 2**(2*a - 1)*(1 + sign*I*sqrt(z - 1))**(1 - 2*a) \
            *exp(-2*n*pi*I*a)
    @classmethod
    def _expr_big_minus(cls, a, z, n):
        sign = -1 if n.is_odd else 1
        return sign*2**(2*a - 1)*(sqrt(1 + z) + sign)**(1 - 2*a)*exp(-2*pi*I*a*n)
class HyperRep_log1(HyperRep):
    """Represent -z*hyper([1, 1], [2], z) == log(1 - z)."""
    @classmethod
    def _expr_small(cls, z):
        return log(1 - z)
    @classmethod
    def _expr_small_minus(cls, z):
        return log(1 + z)
    @classmethod
    def _expr_big(cls, z, n):
        # Each crossing of the branch cut adds 2*pi*I; the principal big
        # expansion already carries -pi*I.
        return (2*n - 1)*pi*I + log(z - 1)
    @classmethod
    def _expr_big_minus(cls, z, n):
        return 2*n*pi*I + log(1 + z)
class HyperRep_atanh(HyperRep):
    """ Represent hyper([1/2, 1], [3/2], z) == atanh(sqrt(z))/sqrt(z). """
    @classmethod
    def _expr_small(cls, x):
        return atanh(sqrt(x))/sqrt(x)
    # BUG FIX: the three methods below were missing @classmethod. The
    # HyperRep base class invokes them as cls._expr_small_minus(x) etc., so
    # without the decorator the first argument would bind to the `cls`
    # parameter and the call would raise TypeError.
    @classmethod
    def _expr_small_minus(cls, x):
        return atan(sqrt(x))/sqrt(x)
    @classmethod
    def _expr_big(cls, x, n):
        if n.is_even:
            return (acoth(sqrt(x)) + I*pi/2)/sqrt(x)
        else:
            return (acoth(sqrt(x)) - I*pi/2)/sqrt(x)
    @classmethod
    def _expr_big_minus(cls, x, n):
        if n.is_even:
            return atan(sqrt(x))/sqrt(x)
        else:
            return (atan(sqrt(x)) - pi)/sqrt(x)
class HyperRep_asin1(HyperRep):
    """Represent hyper([1/2, 1/2], [3/2], z) == asin(sqrt(z))/sqrt(z)."""
    @classmethod
    def _expr_small(cls, z):
        root = sqrt(z)
        return asin(root)/root
    @classmethod
    def _expr_small_minus(cls, z):
        root = sqrt(z)
        return asinh(root)/root
    @classmethod
    def _expr_big(cls, z, n):
        root = sqrt(z)
        return S(-1)**n*((S(1)/2 - n)*pi/root + I*acosh(root)/root)
    @classmethod
    def _expr_big_minus(cls, z, n):
        root = sqrt(z)
        return S(-1)**n*(asinh(root)/root + n*pi*I/root)
class HyperRep_asin2(HyperRep):
    """Represent hyper([1, 1], [3/2], z) == asin(sqrt(z))/sqrt(z)/sqrt(1-z)."""
    # TODO this can be nicer
    # Built by dividing the asin1 representative by the power1
    # representative with exponent 1/2, case by case.
    @classmethod
    def _expr_small(cls, z):
        numerator = HyperRep_asin1._expr_small(z)
        denominator = HyperRep_power1._expr_small(S(1)/2, z)
        return numerator/denominator
    @classmethod
    def _expr_small_minus(cls, z):
        numerator = HyperRep_asin1._expr_small_minus(z)
        denominator = HyperRep_power1._expr_small_minus(S(1)/2, z)
        return numerator/denominator
    @classmethod
    def _expr_big(cls, z, n):
        numerator = HyperRep_asin1._expr_big(z, n)
        denominator = HyperRep_power1._expr_big(S(1)/2, z, n)
        return numerator/denominator
    @classmethod
    def _expr_big_minus(cls, z, n):
        numerator = HyperRep_asin1._expr_big_minus(z, n)
        denominator = HyperRep_power1._expr_big_minus(S(1)/2, z, n)
        return numerator/denominator
class HyperRep_sqrts1(HyperRep):
    """Representative for hyper([-a, 1/2 - a], [1/2], z)."""
    @classmethod
    def _expr_small(cls, a, z):
        return ((1 - sqrt(z))**(2*a) + (1 + sqrt(z))**(2*a))/2
    @classmethod
    def _expr_small_minus(cls, a, z):
        return (1 + z)**a*cos(2*a*atan(sqrt(z)))
    @classmethod
    def _expr_big(cls, a, z, n):
        if n.is_even:
            return ((sqrt(z) + 1)**(2*a)*exp(2*pi*I*n*a) +
                    (sqrt(z) - 1)**(2*a)*exp(2*pi*I*(n - 1)*a))/2
        # Odd sheet: same formula expressed through the even sheet below.
        m = n - 1
        return ((sqrt(z) - 1)**(2*a)*exp(2*pi*I*a*(m + 1)) +
                (sqrt(z) + 1)**(2*a)*exp(2*pi*I*a*m))/2
    @classmethod
    def _expr_big_minus(cls, a, z, n):
        # On odd sheets the cosine argument is shifted by -2*pi*a.
        phase = 2*a*atan(sqrt(z))
        if not n.is_even:
            phase -= 2*pi*a
        return (1 + z)**a*exp(2*pi*I*n*a)*cos(phase)
class HyperRep_sqrts2(HyperRep):
    """ Return a representative for
    sqrt(z)/2*[(1-sqrt(z))**2a - (1 + sqrt(z))**2a]
    == -2*z/(2*a+1) d/dz hyper([-a - 1/2, -a], [1/2], z)"""
    @classmethod
    def _expr_small(cls, a, z):
        return sqrt(z)*((1 - sqrt(z))**(2*a) - (1 + sqrt(z))**(2*a))/2
    @classmethod
    def _expr_small_minus(cls, a, z):
        return sqrt(z)*(1 + z)**a*sin(2*a*atan(sqrt(z)))
    @classmethod
    def _expr_big(cls, a, z, n):
        if n.is_even:
            return sqrt(z)/2*((sqrt(z) - 1)**(2*a)*exp(2*pi*I*a*(n - 1)) -
                              (sqrt(z) + 1)**(2*a)*exp(2*pi*I*a*n))
        else:
            n -= 1
            return sqrt(z)/2*((sqrt(z) - 1)**(2*a)*exp(2*pi*I*a*(n + 1)) -
                              (sqrt(z) + 1)**(2*a)*exp(2*pi*I*a*n))
    # BUG FIX: @classmethod was missing here. The HyperRep base class calls
    # cls._expr_big_minus(a, z, n); without the decorator `a` would bind to
    # the `cls` parameter and the call would raise TypeError.
    @classmethod
    def _expr_big_minus(cls, a, z, n):
        if n.is_even:
            return (1 + z)**a*exp(2*pi*I*n*a)*sqrt(z)*sin(2*a*atan(sqrt(z)))
        else:
            return (1 + z)**a*exp(2*pi*I*n*a)*sqrt(z) \
                *sin(2*a*atan(sqrt(z)) - 2*pi*a)
class HyperRep_log2(HyperRep):
    """ Represent log(1/2 + sqrt(1 - z)/2) == -z/4*hyper([3/2, 1, 1], [2, 2], z) """
    @classmethod
    def _expr_small(cls, z):
        return log(S(1)/2 + sqrt(1 - z)/2)
    @classmethod
    def _expr_small_minus(cls, z):
        return log(S(1)/2 + sqrt(1 + z)/2)
    @classmethod
    def _expr_big(cls, z, n):
        if n.is_even:
            return (n - S(1)/2)*pi*I + log(sqrt(z)/2) + I*asin(1/sqrt(z))
        else:
            return (n - S(1)/2)*pi*I + log(sqrt(z)/2) - I*asin(1/sqrt(z))
    # BUG FIX: @classmethod was missing here. The HyperRep base class calls
    # cls._expr_big_minus(z, n); without the decorator `z` would bind to the
    # `cls` parameter and the call would raise TypeError.
    @classmethod
    def _expr_big_minus(cls, z, n):
        if n.is_even:
            return pi*I*n + log(S(1)/2 + sqrt(1 + z)/2)
        else:
            return pi*I*n + log(sqrt(1 + z)/2 - S(1)/2)
class HyperRep_cosasin(HyperRep):
    """Represent hyper([a, -a], [1/2], z) == cos(2*a*asin(sqrt(z)))."""
    # Note there are many alternative expressions, e.g. as powers of a sum
    # of square roots.
    @classmethod
    def _expr_small(cls, a, z):
        root = sqrt(z)
        return cos(2*a*asin(root))
    @classmethod
    def _expr_small_minus(cls, a, z):
        root = sqrt(z)
        return cosh(2*a*asinh(root))
    @classmethod
    def _expr_big(cls, a, z, n):
        root = sqrt(z)
        return cosh(2*a*acosh(root) + a*pi*I*(2*n - 1))
    @classmethod
    def _expr_big_minus(cls, a, z, n):
        root = sqrt(z)
        return cosh(2*a*asinh(root) + 2*a*pi*I*n)
class HyperRep_sinasin(HyperRep):
    """Represent 2*a*z*hyper([1 - a, 1 + a], [3/2], z)
    == sqrt(z)/sqrt(1-z)*sin(2*a*asin(sqrt(z)))"""
    @classmethod
    def _expr_small(cls, a, z):
        root = sqrt(z)
        return root/sqrt(1 - z)*sin(2*a*asin(root))
    @classmethod
    def _expr_small_minus(cls, a, z):
        root = sqrt(z)
        return -root/sqrt(1 + z)*sinh(2*a*asinh(root))
    @classmethod
    def _expr_big(cls, a, z, n):
        return -1/sqrt(1 - 1/z)*sinh(2*a*acosh(sqrt(z)) + a*pi*I*(2*n - 1))
    @classmethod
    def _expr_big_minus(cls, a, z, n):
        return -1/sqrt(1 + 1/z)*sinh(2*a*asinh(sqrt(z)) + 2*a*pi*I*n)
# --- concatenation artifact neutralized; an unrelated module follows below ---
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import os
from envisage.ui.tasks.task_extension import TaskExtension
from envisage.ui.tasks.task_factory import TaskFactory
from pyface.tasks.action.schema import SMenu, SGroup
from pyface.tasks.action.schema_addition import SchemaAddition
from traits.api import Any
from pychron.core.helpers.filetools import glob_list_directory
from pychron.envisage.tasks.base_task_plugin import BaseTaskPlugin
from pychron.envisage.tasks.list_actions import SpectrometerScriptAction, HopsAction
from pychron.paths import paths
from pychron.spectrometer.base_spectrometer_manager import BaseSpectrometerManager
from pychron.spectrometer.ion_optics.ion_optics_manager import IonOpticsManager
from pychron.spectrometer.readout_view import ReadoutView
from pychron.spectrometer.scan_manager import ScanManager
from pychron.spectrometer.tasks.spectrometer_actions import (
PopulateMFTableAction,
SendConfigAction,
ViewReadoutAction,
EditGainsAction,
ReloadMFTableAction,
MagnetFieldTableAction,
MagnetFieldTableHistoryAction,
ToggleSpectrometerTask,
PeakCenterAction,
DefinePeakCenterAction,
CoincidenceScanAction,
SpectrometerParametersAction,
)
from pychron.spectrometer.tasks.spectrometer_preferences import (
SpectrometerPreferencesPane,
)
from pychron.spectrometer.tasks.spectrometer_task import SpectrometerTask
class BaseSpectrometerPlugin(BaseTaskPlugin):
    """Envisage plugin base class wiring a mass spectrometer into the
    application: managers, tasks, services, preferences, and menu actions.

    Subclasses supply a concrete ``spectrometer_manager_klass`` (and
    typically set ``manager_name``/``task_klass``) to provide an actual
    spectrometer implementation.
    """
    # lazily-built manager instance (see _spectrometer_manager_default)
    spectrometer_manager = Any
    # subclasses must set this to the concrete manager class
    spectrometer_manager_klass = None
    task_klass = SpectrometerTask
    manager_name = ""
    scan_manager = Any
    ion_optics_manager = Any
    def start(self):
        """Start the plugin and, if available, the spectrometer hardware."""
        super(BaseSpectrometerPlugin, self).start()
        if self.spectrometer_manager:
            self.spectrometer_manager.spectrometer.start()
    # ===============================================================================
    # tests
    # ===============================================================================
    def test_communication(self):
        # Delegates to the manager; full connection test (default force).
        manager = self.spectrometer_manager
        return manager.test_connection()
    def test_intensity(self):
        # Only attempt an intensity read if the (non-forced) connection
        # test succeeds; ret is presumably a (bool, ...) tuple -- TODO confirm.
        manager = self.spectrometer_manager
        ret = manager.test_connection(force=False)
        if ret and ret[0]:
            ret = manager.test_intensity()
        return ret
    # def _inspector_task_factory(self):
    # from pychron.spectrometer.tasks.inspector.scan_inspector_task import ScanInspectorTask
    #
    # t = ScanInspectorTask()
    # return t
    def _mass_cal_task_factory(self):
        """Build the mass-calibration task bound to this plugin's manager."""
        from pychron.spectrometer.tasks.mass_cal.mass_calibration_task import (
            MassCalibrationTask,
        )
        t = MassCalibrationTask(spectrometer_manager=self.spectrometer_manager)
        return t
    def _task_factory(self):
        """Build the main spectrometer task."""
        t = self.task_klass(
            manager=self.spectrometer_manager,
            scan_manager=self.scan_manager,
            application=self.application,
        )
        return t
    # Factories used by the service offers below.
    def _factory_spectrometer(self):
        return self.spectrometer_manager
    def _factory_ion_optics(self):
        return self.ion_optics_manager
    def _factory_scan_manager(self):
        return self.scan_manager
    def _tasks_default(self):
        """Register the spectrometer and mass-calibration tasks."""
        ts = [
            TaskFactory(
                id="pychron.spectrometer",
                task_group="hardware",
                factory=self._task_factory,
                accelerator="Ctrl+'",
                name="Spectrometer",
                image="spectrum_emission",
            ),
            TaskFactory(
                id="pychron.mass_calibration",
                factory=self._mass_cal_task_factory,
                name="Mass Calibration",
                accelerator="Ctrl+Shift+M",
            ),
            # TaskFactory(id='pychron.spectrometer.scan_inspector',
            # factory=self._inspector_task_factory,
            # name='Scan Inspector')
        ]
        return ts
    def _service_offers_default(self):
        """Expose the managers and readout view as application services."""
        so = self.service_offer_factory(
            protocol=BaseSpectrometerManager, factory=self._factory_spectrometer
        )
        so1 = self.service_offer_factory(
            protocol=IonOpticsManager, factory=self._factory_ion_optics
        )
        so2 = self.service_offer_factory(
            protocol=ScanManager, factory=self._factory_scan_manager
        )
        so3 = self.service_offer_factory(
            protocol=ReadoutView, factory=self._readout_view_factory
        )
        return [so, so1, so2, so3]
    def _preferences_default(self):
        # Root id under which this plugin's preferences are stored.
        return self._preferences_factory("spectrometer")
    def _preferences_panes_default(self):
        return [SpectrometerPreferencesPane]
    def _readout_view_factory(self):
        v = ReadoutView(spectrometer=self.spectrometer_manager.spectrometer)
        return v
    def _managers_default(self):
        """Register the spectrometer manager with the application."""
        return [
            dict(
                name=self.manager_name,
                plugin_name=self.name,
                manager=self.spectrometer_manager,
            )
        ]
    def _spectrometer_manager_default(self):
        # spectrometer_manager_klass must have been set by the subclass.
        sm = self.spectrometer_manager_klass(application=self.application)
        return sm
    def _ion_optics_manager_default(self):
        im = IonOpticsManager(
            application=self.application,
            spectrometer=self.spectrometer_manager.spectrometer,
        )
        return im
    def _scan_manager_default(self):
        sm = ScanManager(
            application=self.application,
            spectrometer=self.spectrometer_manager.spectrometer,
            ion_optics_manager=self.ion_optics_manager,
        )
        return sm
    def _hops_ext(self):
        """Build a menu group with one action per hops *.yaml file found in
        paths.hops_dir; returns a TaskExtension (empty if no files)."""
        def hop_action(name):
            # Closure binds `name` per file so each menu item targets its
            # own hop definition.
            def func():
                return HopsAction(name=name, hop_name=name)
            return func
        actions = []
        for f in glob_list_directory(
            paths.hops_dir, extension=".yaml", remove_extension=True
        ):
            actions.append(
                SchemaAddition(
                    id="procedure.{}".format(f),
                    factory=hop_action(f),
                    path="MenuBar/procedures.menu/hops.group",
                )
            )
        if actions:
            # m = SchemaAddition(id='procedures.menu',
            # before='window.menu',
            # after='tools.menu',
            # factory=lambda: SMenu(name='Procedures', id='procedures.menu'),
            # path='MenuBar')
            # Insert the containing group first so the per-file actions
            # have a place to land.
            g = SchemaAddition(
                id="hops.group",
                factory=lambda: SGroup(name="Hops", id="hops.group"),
                path="MenuBar/procedures.menu",
            )
            actions.insert(0, g)
            # actions.insert(0, m)
        ext = TaskExtension(actions=actions)
        return ext
    def _scripts_ext(self):
        """Build a menu group with one action per spectrometer script *.py
        file found in paths.spectrometer_scripts_dir."""
        def script_action(name):
            def func():
                p = os.path.join(paths.spectrometer_scripts_dir, "{}.py".format(name))
                return SpectrometerScriptAction(name=name, script_path=p)
            return func
        actions = []
        for f in glob_list_directory(
            paths.spectrometer_scripts_dir, extension=".py", remove_extension=True
        ):
            actions.append(
                SchemaAddition(
                    id="spectrometer_script.{}".format(f),
                    factory=script_action(f),
                    path="MenuBar/procedures.menu/spectrometer_script.group",
                )
            )
        if actions:
            # m = SchemaAddition(id='procedures.menu',
            # before='window.menu',
            # after='tools.menu',
            # factory=lambda: SMenu(name='Procedures', id='procedures.menu'),
            # path='MenuBar')
            g = SchemaAddition(
                id="spectrometer_script.group",
                factory=lambda: SGroup(
                    name="Spectrometer", id="spectrometer_script.group"
                ),
                path="MenuBar/procedures.menu",
            )
            actions.insert(0, g)
            # actions.insert(0, m)
        ext = TaskExtension(actions=actions)
        return ext
    def _task_extensions_default(self):
        """Assemble all menu/action extensions: hops, scripts, the global
        spectrometer menu, and task-specific actions."""
        ext = []
        hopext = self._hops_ext()
        if hopext:
            ext.append(hopext)
        scriptext = self._scripts_ext()
        if scriptext:
            ext.append(scriptext)
        # Global (no task_id) additions: the two menus and the always-on
        # spectrometer actions.
        ta1 = TaskExtension(
            actions=[
                SchemaAddition(
                    id="spectrometer.menu",
                    factory=lambda: SMenu(id="spectrometer.menu", name="Spectrometer"),
                    path="MenuBar",
                    before="window.menu",
                    after="tools.menu",
                ),
                SchemaAddition(
                    id="procedures.menu",
                    before="window.menu",
                    after="spectrometer.menu",
                    factory=lambda: SMenu(name="Procedures", id="procedures.menu"),
                    path="MenuBar",
                ),
                SchemaAddition(
                    id="update_mftable",
                    path="MenuBar/spectrometer.menu",
                    factory=PopulateMFTableAction,
                ),
                SchemaAddition(
                    id="send_config",
                    factory=SendConfigAction,
                    path="MenuBar/spectrometer.menu",
                ),
                SchemaAddition(
                    id="view_readout",
                    factory=ViewReadoutAction,
                    path="MenuBar/spectrometer.menu",
                ),
                SchemaAddition(
                    id="edit_gains",
                    factory=EditGainsAction,
                    path="MenuBar/spectrometer.menu",
                ),
                # NOTE(review): 'relood_table' looks like a typo for
                # 'reload_table', but schema-addition ids may be referenced
                # elsewhere -- confirm before renaming.
                SchemaAddition(
                    id="relood_table",
                    factory=ReloadMFTableAction,
                    path="MenuBar/spectrometer.menu",
                ),
                SchemaAddition(
                    id="mftable",
                    factory=MagnetFieldTableAction,
                    path="MenuBar/spectrometer.menu",
                ),
                SchemaAddition(
                    id="mftable_history",
                    factory=MagnetFieldTableHistoryAction,
                    path="MenuBar/spectrometer.menu",
                ),
            ]
        )
        # Actions only visible in the scan-inspector task.
        si = TaskExtension(
            task_id="pychron.spectrometer.scan_inspector",
            actions=[
                SchemaAddition(
                    id="toggle_spectrometer_task",
                    factory=ToggleSpectrometerTask,
                    path="MenuBar/spectrometer.menu",
                )
            ],
        )
        # Actions only visible in the main spectrometer task.
        sp = TaskExtension(
            task_id="pychron.spectrometer",
            actions=[
                SchemaAddition(
                    id="toggle_spectrometer_task",
                    factory=ToggleSpectrometerTask,
                    path="MenuBar/spectrometer.menu",
                ),
                SchemaAddition(
                    id="peak_center",
                    factory=PeakCenterAction,
                    path="MenuBar/spectrometer.menu",
                ),
                SchemaAddition(
                    id="define_peak_center",
                    factory=DefinePeakCenterAction,
                    path="MenuBar/spectrometer.menu",
                ),
                SchemaAddition(
                    id="coincidence",
                    factory=CoincidenceScanAction,
                    path="MenuBar/spectrometer.menu",
                ),
                SchemaAddition(
                    id="parameters",
                    factory=SpectrometerParametersAction,
                    path="MenuBar/spectrometer.menu",
                ),
            ],
        )
        ext.extend((ta1, si, sp))
        return ext
# ============= EOF =============================================
# --- concatenation artifact neutralized; an unrelated module follows below ---
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApplicationSecurityGroupsOperations:
"""ApplicationSecurityGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Wire up the pipeline client and the (de)serializers; see the
        # class docstring for parameter descriptions. Instances are created
        # by the generated service client, not directly by users.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        application_security_group_name: str,
        **kwargs: Any
    ) -> None:
        # Issue the raw DELETE request that starts the long-running
        # operation; begin_delete wraps this call with polling.
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202 = accepted for async deletion, 204 = already gone.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        application_security_group_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified application security group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_security_group_name: The name of the application security group.
        :type application_security_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # When resuming from a continuation token the initial DELETE has
        # already been sent, so it is skipped here.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                application_security_group_name=application_security_group_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Poll via the Location header ('final-state-via': 'location').
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
    async def get(
        self,
        resource_group_name: str,
        application_security_group_name: str,
        **kwargs: Any
    ) -> "_models.ApplicationSecurityGroup":
        """Gets information about the specified application security group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_security_group_name: The name of the application security group.
        :type application_security_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationSecurityGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_11_01.models.ApplicationSecurityGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 is the only success status for this GET.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        application_security_group_name: str,
        parameters: "_models.ApplicationSecurityGroup",
        **kwargs: Any
    ) -> "_models.ApplicationSecurityGroup":
        # Issue the raw PUT request that starts the long-running
        # create-or-update; begin_create_or_update wraps it with polling.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ApplicationSecurityGroup')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 (updated) and 201 (created) both carry the resource body.
        if response.status_code == 200:
            deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'} # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        application_security_group_name: str,
        parameters: "_models.ApplicationSecurityGroup",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ApplicationSecurityGroup"]:
        """Creates or updates an application security group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_security_group_name: The name of the application security group.
        :type application_security_group_name: str
        :param parameters: Parameters supplied to the create or update ApplicationSecurityGroup
         operation.
        :type parameters: ~azure.mgmt.network.v2020_11_01.models.ApplicationSecurityGroup
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ApplicationSecurityGroup or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_11_01.models.ApplicationSecurityGroup]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO-specific options so they are not forwarded to the initial request.
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh start: issue the initial PUT and keep the *raw* pipeline
            # response (cls=lambda x,y,z: x) so the poller can drive the LRO.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                application_security_group_name=application_security_group_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These were consumed by the initial call; the polling method must not see them.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into the model (or the caller's cls).
            deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # URL arguments the ARM poller needs to resolve the final GET.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume an in-flight operation from a saved continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'}  # type: ignore
    async def update_tags(
        self,
        resource_group_name: str,
        application_security_group_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> "_models.ApplicationSecurityGroup":
        """Updates an application security group's tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param application_security_group_name: The name of the application security group.
        :type application_security_group_name: str
        :param parameters: Parameters supplied to update application security group tags.
        :type parameters: ~azure.mgmt.network.v2020_11_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ApplicationSecurityGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_11_01.models.ApplicationSecurityGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroup"]
        # Map well-known HTTP failures to typed exceptions; callers may extend via kwargs.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update_tags.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'applicationSecurityGroupName': self._serialize.url("application_security_group_name", application_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # PATCH carries only the tags object; other resource properties are untouched.
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ApplicationSecurityGroup', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName}'}  # type: ignore
    def list_all(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.ApplicationSecurityGroupListResult"]:
        """Gets all application security groups in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ApplicationSecurityGroupListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.ApplicationSecurityGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET for the first page (templated URL) or a follow-up
            # page (the service-provided, already-complete next_link).
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Turn one page response into (next page link, iterable of elements).
            deserialized = self._deserialize('ApplicationSecurityGroupListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a page and fail fast on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationSecurityGroups'}  # type: ignore
    def list(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ApplicationSecurityGroupListResult"]:
        """Gets all the application security groups in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ApplicationSecurityGroupListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.ApplicationSecurityGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationSecurityGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page uses the templated URL; later pages reuse next_link verbatim.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # One page in, (next link, async iterable of elements) out.
            deserialized = self._deserialize('ApplicationSecurityGroupListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationSecurityGroups'}  # type: ignore
| |
import pymongo
import datetime
import difflib
# Shared MongoDB connection (default host/port) and the collection handles
# every search helper below reads from.
MongoClient = pymongo.MongoClient
client = MongoClient()
db = client.wes_api
events = db.events
usdan_menus = db.usdan_menus
summerfields_menu = db.summerfields_menu
late_night_menu = db.late_night_menu
red_and_black_menu = db.red_and_black_menu
weswings_menu = db.weswings_menu
s_and_c_menu = db.s_and_c_menu
film_series = db.film_series
directory = db.directory
"""
GENERAL METHODS
"""
def get_status():
    """Return every document in the status collection as a list."""
    status_cursor = db.status.find()
    return [doc for doc in status_cursor]
"""
EVENTS SEARCH
"""
def limit_results(numResults, results):
    """
    Return at most numResults items from a cursor as a list.
    """
    total = results.count()
    if total <= numResults:
        return list(results)
    return list(results[0:numResults])
def get_events(numEvents, source=None):
    """
    Returns numEvents MAX latest events from the DB
    """
    # Optionally restrict to a single source feed.
    if source:
        results = db.events.find({'source': source})
    else:
        results = db.events.find()
    if results.count() == 0:
        print "SEARCH: Found no events"
        return None
    # Newest first, then cap at the requested count.
    sorted_results = results.sort('time', pymongo.DESCENDING)
    return limit_results(numEvents, sorted_results)
def get_events_today():
    """Return every event in today's [midnight, midnight+1day) window, or None."""
    now = datetime.datetime.today()
    today = datetime.datetime(now.year, now.month, now.day)
    tomorrow = today + datetime.timedelta(days=1)
    search_results = events.find({"time": {"$gte": today, "$lt": tomorrow}})
    if search_results.count() == 0:
        print "SEARCH: Found no events for today."
        return None
    return list(search_results)
def day_floor(date):
    """Truncate a datetime to midnight at the start of its day."""
    floored = datetime.datetime(year=date.year, month=date.month, day=date.day)
    return floored
def closest_results(query_string, results, key):
    """
    Rank results by textual similarity between query_string and each
    record's `key` field, best match first.

    :param query_string: text the user searched for
    :param results: iterable of dict-like records
    :param key: field of each record compared against the query
    :return: list of records whose similarity ratio is >= MIN_SCORE,
             ordered from closest to weakest match
    """
    # Minimum similarity ratio (0..1) required to keep a result.
    MIN_SCORE = 0.5
    query = query_string.lower()
    scored = [(difflib.SequenceMatcher(None, query, i[key].lower()).ratio(), i)
              for i in results]
    # Sort on the ratio alone and put the best matches FIRST.  The old
    # plain ascending tuple sort (a) handed callers the *worst* acceptable
    # matches once they truncated to the first N, and (b) fell back to
    # comparing the dict payloads whenever two ratios tied.
    scored.sort(key=lambda pair: pair[0], reverse=True)
    # Drop anything below the similarity floor, keeping only the records.
    return [record for score, record in scored if score >= MIN_SCORE]
def search_events(numResults, title_query, location_query,
                  time_from, time_until, category_query, source):
    """
    To minimize total search space, search hierarchically.
    Using difflib SequenceMatcher to find best match for title,
    location, and category. Since the amount of event
    data (for now) is relatively small, people can just
    do whatever more-serious searching they want on the
    front end once they have the data.
    Order of search:
    -source
    -category_query
    -time_from and time_until
    -location_query
    -title_query
    Then restrict results to numResults.
    """
    # Source Filter -- the only filter that runs in Mongo itself.
    if source:
        search_results = db.events.find({'source': source})
    else:
        search_results = db.events.find()
    if not search_results:
        print "SEARCH: NO events found in search events"
        return None
    # Category Filter (fuzzy match in Python)
    if category_query:
        search_results_2 = closest_results(
            category_query, search_results, "category")
    else:
        search_results_2 = list(search_results)
    # Time Filter (by day)
    # So need the time floor
    if time_from and not time_until:
        search_results_3 = [
            i for i in search_results_2 if day_floor(i['time']) >= time_from]
    # grab from beginning to time_until
    elif not time_from and time_until:
        search_results_3 = [
            i for i in search_results_2 if day_floor(i['time']) <= time_until]
    # grab time_from to time_until
    elif time_from and time_until:
        search_results_3 = [i for i in search_results_2 if day_floor(
            i['time']) >= time_from and day_floor(i['time']) <= time_until]
    else:
        search_results_3 = search_results_2
    # Location Filter (fuzzy)
    if location_query:
        search_results_4 = closest_results(
            location_query, search_results_3, "location")
        # [i if i['location'].lower() == lower_loc for i in search_results_3]
    else:
        search_results_4 = search_results_3
    # Title Filter (fuzzy)
    if title_query:
        search_results_5 = closest_results(
            title_query, search_results_4, "name")
        # [i if i['location'].lower() == lower_loc for i in search_results_3]
    else:
        search_results_5 = search_results_4
    # Limit results to the first numResults survivors.
    if len(search_results_5) > numResults:
        return list(search_results_5[0:numResults])
    return list(search_results_5)
"""
MENUS SEARCH
"""
def get_menu_usdan():
    """Return today's Usdan menu entry, or an empty list when there is none."""
    now = datetime.datetime.today()
    start_of_day = datetime.datetime(now.year, now.month, now.day)
    end_of_day = start_of_day + datetime.timedelta(days=1)
    menu = get_menu_usdan_search(1, start_of_day, end_of_day)
    if not menu:
        return []
    return list(menu)
def get_menu_usdan_search(numResults, time_from=None, time_until=None):
    """Return up to numResults Usdan menus inside the optional time window, newest first."""
    # grab time_from to present
    if time_from and not time_until:
        usdan_results = usdan_menus.find({"time": {"$gte": time_from}})
    # grab from beginning to time_until
    elif not time_from and time_until:
        usdan_results = usdan_menus.find({"time": {"$lte": time_until}})
    # grab time_from to time_until
    elif time_from and time_until:
        usdan_results = usdan_menus.find(
            {"time": {"$lt": time_until, "$gte": time_from}})
    # grab all
    else:
        usdan_results = usdan_menus.find()
    if usdan_results.count() == 0:
        print "SEARCH: Found no usdan meals"
        return None
    sorted_results = usdan_results.sort('time', pymongo.DESCENDING)
    return limit_results(numResults, sorted_results)
def get_menu_static(target):
    """
    Not worried about time here since these menus
    don't change on a daily basis.
    """
    # Map the public menu name to its backing collection.
    dbs = {"summerfields": summerfields_menu,
           "redandblack": red_and_black_menu,
           "weswings": weswings_menu,
           "sandc": s_and_c_menu,
           "latenight": late_night_menu}
    target_db = dbs.get(target)
    if not target_db:
        print "SEARCH: Found no such static menu:", target
        return None
    results = target_db.find()
    if results.count() == 0:
        print "SEARCH: Found no static meals for:", target
        return None
    return list(results)
"""
FILM SERIES SEARCH
"""
def get_film_series_all():
    """Return every film series entry sorted by screening time, or None."""
    search_results = film_series.find()
    if search_results.count() == 0:
        print "SEARCH: Found no film series at all...?"
        return None
    return list(search_results.sort('data.time', pymongo.ASCENDING))
def get_film_series_today():
    """Return film screenings inside today's [midnight, midnight+1day) window, or None."""
    now = datetime.datetime.today()
    today = datetime.datetime(now.year, now.month, now.day)
    tomorrow = today + datetime.timedelta(days=1)
    search_results = film_series.find(
        {"data.time": {"$gte": today, "$lt": tomorrow}})
    if search_results.count() == 0:
        print "SEARCH: Found no film for today."
        return None
    return list(search_results)
"""
DIRECTORY SEARCH
"""
def get_directory():
search_results = directory.find()
if search_results.count() == 0:
print "SEARCH: Found no directory results."
return None
print search_results[0]
return list(search_results)
| |
import importlib
import os
from pathlib import Path
import pytest
from fastapi.testclient import TestClient
# Expected OpenAPI document for the SQL-databases tutorial app; compared
# verbatim against GET /openapi.json in test_openapi_schema below.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {"title": "FastAPI", "version": "0.1.0"},
    "paths": {
        "/users/": {
            "get": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "title": "Response Read Users Users Get",
                                    "type": "array",
                                    "items": {"$ref": "#/components/schemas/User"},
                                }
                            }
                        },
                    },
                    "422": {
                        "description": "Validation Error",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/HTTPValidationError"
                                }
                            }
                        },
                    },
                },
                "summary": "Read Users",
                "operationId": "read_users_users__get",
                "parameters": [
                    {
                        "required": False,
                        "schema": {"title": "Skip", "type": "integer", "default": 0},
                        "name": "skip",
                        "in": "query",
                    },
                    {
                        "required": False,
                        "schema": {"title": "Limit", "type": "integer", "default": 100},
                        "name": "limit",
                        "in": "query",
                    },
                ],
            },
            "post": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {
                            "application/json": {
                                "schema": {"$ref": "#/components/schemas/User"}
                            }
                        },
                    },
                    "422": {
                        "description": "Validation Error",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/HTTPValidationError"
                                }
                            }
                        },
                    },
                },
                "summary": "Create User",
                "operationId": "create_user_users__post",
                "requestBody": {
                    "content": {
                        "application/json": {
                            "schema": {"$ref": "#/components/schemas/UserCreate"}
                        }
                    },
                    "required": True,
                },
            },
        },
        "/users/{user_id}": {
            "get": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {
                            "application/json": {
                                "schema": {"$ref": "#/components/schemas/User"}
                            }
                        },
                    },
                    "422": {
                        "description": "Validation Error",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/HTTPValidationError"
                                }
                            }
                        },
                    },
                },
                "summary": "Read User",
                "operationId": "read_user_users__user_id__get",
                "parameters": [
                    {
                        "required": True,
                        "schema": {"title": "User Id", "type": "integer"},
                        "name": "user_id",
                        "in": "path",
                    }
                ],
            }
        },
        "/users/{user_id}/items/": {
            "post": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {
                            "application/json": {
                                "schema": {"$ref": "#/components/schemas/Item"}
                            }
                        },
                    },
                    "422": {
                        "description": "Validation Error",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/HTTPValidationError"
                                }
                            }
                        },
                    },
                },
                "summary": "Create Item For User",
                "operationId": "create_item_for_user_users__user_id__items__post",
                "parameters": [
                    {
                        "required": True,
                        "schema": {"title": "User Id", "type": "integer"},
                        "name": "user_id",
                        "in": "path",
                    }
                ],
                "requestBody": {
                    "content": {
                        "application/json": {
                            "schema": {"$ref": "#/components/schemas/ItemCreate"}
                        }
                    },
                    "required": True,
                },
            }
        },
        "/items/": {
            "get": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "title": "Response Read Items Items Get",
                                    "type": "array",
                                    "items": {"$ref": "#/components/schemas/Item"},
                                }
                            }
                        },
                    },
                    "422": {
                        "description": "Validation Error",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/HTTPValidationError"
                                }
                            }
                        },
                    },
                },
                "summary": "Read Items",
                "operationId": "read_items_items__get",
                "parameters": [
                    {
                        "required": False,
                        "schema": {"title": "Skip", "type": "integer", "default": 0},
                        "name": "skip",
                        "in": "query",
                    },
                    {
                        "required": False,
                        "schema": {"title": "Limit", "type": "integer", "default": 100},
                        "name": "limit",
                        "in": "query",
                    },
                ],
            }
        },
    },
    "components": {
        "schemas": {
            "ItemCreate": {
                "title": "ItemCreate",
                "required": ["title"],
                "type": "object",
                "properties": {
                    "title": {"title": "Title", "type": "string"},
                    "description": {"title": "Description", "type": "string"},
                },
            },
            "Item": {
                "title": "Item",
                "required": ["title", "id", "owner_id"],
                "type": "object",
                "properties": {
                    "title": {"title": "Title", "type": "string"},
                    "description": {"title": "Description", "type": "string"},
                    "id": {"title": "Id", "type": "integer"},
                    "owner_id": {"title": "Owner Id", "type": "integer"},
                },
            },
            "User": {
                "title": "User",
                "required": ["email", "id", "is_active"],
                "type": "object",
                "properties": {
                    "email": {"title": "Email", "type": "string"},
                    "id": {"title": "Id", "type": "integer"},
                    "is_active": {"title": "Is Active", "type": "boolean"},
                    "items": {
                        "title": "Items",
                        "type": "array",
                        "items": {"$ref": "#/components/schemas/Item"},
                        "default": [],
                    },
                },
            },
            "UserCreate": {
                "title": "UserCreate",
                "required": ["email", "password"],
                "type": "object",
                "properties": {
                    "email": {"title": "Email", "type": "string"},
                    "password": {"title": "Password", "type": "string"},
                },
            },
            "ValidationError": {
                "title": "ValidationError",
                "required": ["loc", "msg", "type"],
                "type": "object",
                "properties": {
                    "loc": {
                        "title": "Location",
                        "type": "array",
                        "items": {"type": "string"},
                    },
                    "msg": {"title": "Message", "type": "string"},
                    "type": {"title": "Error Type", "type": "string"},
                },
            },
            "HTTPValidationError": {
                "title": "HTTPValidationError",
                "type": "object",
                "properties": {
                    "detail": {
                        "title": "Detail",
                        "type": "array",
                        "items": {"$ref": "#/components/schemas/ValidationError"},
                    }
                },
            },
        }
    },
}
@pytest.fixture(scope="module")
def client(tmp_path_factory: pytest.TempPathFactory):
    """Yield a TestClient for the SQL tutorial app, with its SQLite file
    created inside a private temporary working directory."""
    tmp_path = tmp_path_factory.mktemp("data")
    cwd = os.getcwd()
    # The app creates ./sql_app.db relative to the cwd, so run from tmp_path.
    os.chdir(tmp_path)
    test_db = Path("./sql_app.db")
    if test_db.is_file():  # pragma: nocover
        test_db.unlink()
    # Import while creating the client to create the DB after starting the test session
    from docs_src.sql_databases.sql_app import main
    # Ensure import side effects are re-executed
    importlib.reload(main)
    with TestClient(main.app) as c:
        yield c
    # Teardown: drop the DB file, then restore the original cwd.
    if test_db.is_file():  # pragma: nocover
        test_db.unlink()
    os.chdir(cwd)
def test_openapi_schema(client):
    """The generated OpenAPI document matches the expected schema exactly."""
    resp = client.get("/openapi.json")
    assert resp.status_code == 200, resp.text
    assert resp.json() == openapi_schema
def test_create_user(client):
    """Creating a user succeeds once; the same email again is rejected."""
    payload = {"email": "johndoe@example.com", "password": "secret"}
    created = client.post("/users/", json=payload)
    assert created.status_code == 200, created.text
    body = created.json()
    assert payload["email"] == body["email"]
    assert "id" in body
    # A duplicate email must produce a 400.
    duplicate = client.post("/users/", json=payload)
    assert duplicate.status_code == 400, duplicate.text
def test_get_user(client):
    """GET /users/1 returns a user object carrying email and id."""
    resp = client.get("/users/1")
    assert resp.status_code == 200, resp.text
    body = resp.json()
    assert "email" in body
    assert "id" in body
def test_inexistent_user(client):
    """An unknown user id yields a 404."""
    resp = client.get("/users/999")
    assert resp.status_code == 404, resp.text
def test_get_users(client):
    """GET /users/ lists users; the first entry has email and id."""
    resp = client.get("/users/")
    assert resp.status_code == 200, resp.text
    first = resp.json()[0]
    assert "email" in first
    assert "id" in first
def test_create_item(client):
    """Creating an item for user 1 returns it and attaches it to the user.

    The original test repeated the owner-verification block twice,
    verbatim -- a copy-paste duplicate that added two extra requests
    without testing anything new; it is performed once here.
    """
    item = {"title": "Foo", "description": "Something that fights"}
    response = client.post("/users/1/items/", json=item)
    assert response.status_code == 200, response.text
    item_data = response.json()
    assert item["title"] == item_data["title"]
    assert item["description"] == item_data["description"]
    assert "id" in item_data
    assert "owner_id" in item_data
    # Fetch the owner and confirm the new item is attached to it.
    response = client.get("/users/1")
    assert response.status_code == 200, response.text
    user_data = response.json()
    item_to_check = [it for it in user_data["items"] if it["id"] == item_data["id"]][0]
    assert item_to_check["title"] == item["title"]
    assert item_to_check["description"] == item["description"]
def test_read_items(client):
    """GET /items/ returns a non-empty list of item objects."""
    resp = client.get("/items/")
    assert resp.status_code == 200, resp.text
    items = resp.json()
    assert items
    head = items[0]
    assert "title" in head
    assert "description" in head
| |
"""
Unit tests for the sumatra.projects module
"""
from __future__ import with_statement
from __future__ import unicode_literals
from builtins import object
import datetime
import shutil
import os
import sys
import tempfile
import unittest
from future.utils import with_metaclass
import sumatra.projects
from sumatra.projects import Project, load_project
from sumatra.core import SingletonType
class MockDiffFormatter(object):
    """Stand-in diff formatter that always renders an empty diff."""

    def __init__(self, diff):
        # The diff argument is accepted and discarded.
        pass

    def format(self, mode):
        return ""
# Patch the module under test so project code obtains the mock formatter class.
sumatra.projects.get_diff_formatter = lambda: MockDiffFormatter
class MockRepository(with_metaclass(SingletonType, object)):
    # Singleton stand-in for a VCS repository: SingletonType makes every
    # "instantiation" hand back the same object.
    url = "http://svn.example.com"
    vcs_type = 'git'
    use_version_cmd = ''
    upstream = ''
    path_of_working_copy = None  # set per-test in TestProject.setUp
    def __deepcopy__(self, memo):
        # A singleton must survive deepcopy as the very same object.
        return self
    def __eq__(self, other):
        return self.url == other.url
    def __ne__(self, other):
        return not self.__eq__(other)
    def __getstate__(self):
        # Serialize to nothing.
        return {}
    def get_working_copy(self):
        return MockWorkingCopy(self.path_of_working_copy)
    def __hash__(self):
        # Constant hash keeps instances usable as dict/set members alongside __eq__.
        return 0
class MockWorkingCopy(object):
    """Fake VCS working copy rooted at *path*; always clean and at a fixed version."""
    repository = MockRepository()

    def __init__(self, path):
        self.path = path

    def has_changed(self):
        # The mock checkout never has local modifications.
        return False

    def use_latest_version(self):
        pass

    def current_version(self):
        return 999

    def use_version(self, v):
        pass

    def contains(self, path):
        # Pretend every file lives inside this working copy.
        return True

    def get_username(self):
        return "The Knights Who Say Ni"

    def reset(self):
        pass
class MockExecutable(object):
    """Fake interpreter description mirroring the Python running the tests."""
    name = "Python"
    path = sys.executable  # "/usr/local/bin/python"
    version = sys.version
    requires_script = True
    options = ''

    def write_parameters(self, params, filename):
        # Parameter files are never actually written by the mock.
        pass

    def __getstate__(self):
        # Serialize to nothing.
        return {}
class MockLaunchMode(object):
    """Fake launch mode: reports no platform info and always 'succeeds'."""
    working_directory = '/foo/bar'

    def get_platform_information(self):
        return []

    def pre_run(self, prog):
        pass

    def run(self, prog, script, params, append_label):
        # Every simulated run succeeds.
        return True

    def __getstate__(self):
        return {}
class MockSet(object):
    """Minimal tag-set stub: iterates one fixed element and records mutations."""

    def __iter__(self):
        return iter(['foo'])

    def add(self, x):
        # Remember the last element added so tests can assert on it.
        self.added = x

    def remove(self, x):
        # Remember the last element removed likewise.
        self.removed = x
class MockRecord(object):
    """In-memory Record double carrying every attribute Project code touches."""

    def __init__(self, label):
        # Identity and provenance.
        self.label = label
        self.tags = MockSet()
        self.parameters = {}
        self.repository = MockRepository()
        self.input_data = []
        self.script_arguments = "-q"
        self.executable = MockExecutable()
        self.main_file = "admin.php"
        self.version = "42"
        self.launch_mode = MockLaunchMode()
        # Run outcome and bookkeeping.
        self.outcome = ""
        self.timestamp = datetime.datetime(2042, 1, 23)
        self.user = 'user'
        self.duration = 2.3
        self.datastore = MockDatastore()
        self.input_datastore = MockDatastore()
        self.platforms = []
        self.dependencies = []
        self.reason = 'Because'
        self.repeats = None
        self.diff = ''
        self.command_line = '/path/to/program main.script'
        self.stdout_stderr = ''
        self.output_data = []

    def difference(r1, r2, igm, igf):
        # NOTE: r1 plays the role of self; records never differ in the mock.
        return ""

    def add_tag(self, tag):
        self.tags.add(tag)
class MockDatastore(object):
    """Fake datastore pinned to a fixed root path."""

    def __init__(self):
        self.root = '/tmp/foo/bar'

    def __getstate__(self):
        # Serialize to nothing.
        return {}
class MockRecordStore(object):
    """Record-store double: fabricates records on demand and tracks deletions."""

    def save(self, project_name, record):
        pass

    def get(self, project_name, label):
        # The sentinel label blows up like a real store; every other query
        # yields a record whose label is the query string doubled.
        if label == "none_existent":
            raise Exception()
        return MockRecord(label=label * 2)

    def list(self, project_name, tags=None):
        return [self.get(project_name, name)
                for name in ('foo_label', 'bar_label')]

    def delete(self, project_name, label):
        # Remember what was deleted so tests can assert on it.
        self.deleted = label

    def delete_by_tag(self, project_name, tag):
        # Distinctive, checkable return value: the tag reversed.
        return tag[::-1]

    def most_recent(self, project):
        return "last"

    def __getstate__(self):
        return {}

    def backup(self):
        pass

    def labels(self, project_name, tags=None):
        return [record.label for record in self.list(project_name)]
class TestProject(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp(prefix='sumatra-test-')
self.cwd_before_test = os.getcwd()
os.chdir(self.dir)
MockRepository().path_of_working_copy = self.dir # set path of Singleton
def tearDown(self):
os.chdir(self.cwd_before_test)
shutil.rmtree(self.dir)
def write_test_script(self, filename):
with open(filename, "w") as f:
f.write("a=2\n")
write_test_script.__test__ = False # stop nose treating this as a test
def test__init__with_minimal_arguments(self):
Project("test_project", record_store=MockRecordStore())
def test__creating_a_second_project_in_the_same_dir_should_raise_an_exception(self):
Project("test_project1", record_store=MockRecordStore())
self.assertRaises(Exception, Project, "test_project2")
def test__info(self):
proj = Project("test_project", record_store=MockRecordStore())
proj.info()
def test_new_record_with_minimal_args_should_set_defaults(self):
self.write_test_script("test.py")
proj = Project("test_project",
record_store=MockRecordStore(),
default_main_file="test.py",
default_executable=MockExecutable(),
default_launch_mode=MockLaunchMode(),
default_repository=MockRepository())
rec = proj.new_record()
self.assertEqual(rec.repository, proj.default_repository)
self.assertEqual(rec.main_file, "test.py")
def test_new_record_with_uuid_label_generator_should_generate_unique_id(self):
self.write_test_script("test.py")
proj = Project("test_project",
record_store=MockRecordStore(),
default_main_file="test.py",
default_executable=MockExecutable(),
default_launch_mode=MockLaunchMode(),
default_repository=MockRepository(),
label_generator='uuid')
rec1 = proj.new_record()
rec2 = proj.new_record()
self.assertNotEqual(rec1.label, rec2.label)
def test__update_code(self):
proj = Project("test_project",
record_store=MockRecordStore(),
default_repository=MockRepository())
wc = proj.default_repository.get_working_copy()
proj.update_code(wc, version=9369835)
def test_launch(self):
self.write_test_script("test.py")
proj = Project("test_project",
default_executable=MockExecutable(),
default_repository=MockRepository(),
default_launch_mode=MockLaunchMode(),
record_store=MockRecordStore())
proj.launch(main_file="test.py")
def test_format_records(self):
self.write_test_script("test.py")
proj = Project("test_project",
record_store=MockRecordStore(),
default_main_file="test.py",
default_executable=MockExecutable(),
default_launch_mode=MockLaunchMode(),
default_repository=MockRepository(),
label_generator='uuid')
rec1 = proj.new_record()
rec2 = proj.new_record()
self.assertEqual(proj.format_records('text'), 'foo_labelfoo_label\nbar_labelbar_label')
self.assertEqual(proj.format_records('html'), '<ul>\n<li>foo_labelfoo_label</li>\n<li>bar_labelbar_label</li>\n</ul>')
# TODO: Find a good way to check the output of the following formatters
# (currently we only check that we can call them without errors).
proj.format_records('latex', 'long')
proj.format_records('shell')
proj.format_records('json')
    def test__get_record__calls_get_on_the_record_store(self):
        # The mock store's get() returns a record whose label is the
        # requested label doubled, proving the call was forwarded.
        proj = Project("test_project",
                       record_store=MockRecordStore())
        self.assertEqual(proj.get_record("foo").label, "foofoo")
    def test__delete_record__calls_delete_on_the_record_store(self):
        proj = Project("test_project",
                       record_store=MockRecordStore())
        proj.delete_record("foo")
        # The mock store records the label it was asked to delete.
        self.assertEqual(proj.record_store.deleted, "foo")
    def test__delete_by_tag__calls_delete_by_tag_on_the_record_store(self):
        proj = Project("test_project",
                       record_store=MockRecordStore())
        # The mock reverses the tag, proving it received it unchanged.
        self.assertEqual(proj.delete_by_tag("foo"), "oof")
    def test__add_comment__should_set_the_outcome_attribute_of_the_record(self):
        # Smoke test: should complete without raising.
        proj = Project("test_project",
                       record_store=MockRecordStore())
        proj.add_comment("foo", "comment goes here")
    def test__add_tag__should_call_add_on_the_tags_attibute_of_the_record(self):
        proj = Project("test_project",
                       record_store=MockRecordStore())
        proj.add_tag("foo", "new_tag")
    def test__remove_tag__should_call_remove_on_the_tags_attibute_of_the_record(self):
        proj = Project("test_project",
                       record_store=MockRecordStore())
        proj.remove_tag("foo", "new_tag")
    def test__show_diff(self):
        # Smoke test: should complete without raising.
        proj = Project("test_project",
                       record_store=MockRecordStore())
        proj.show_diff("foo", "bar")
    # NOTE(review): this method lacks the "test_" prefix, so unittest never
    # collects it. Possibly deliberate (disabled pending ticket:36) --
    # confirm before renaming it to re-enable.
    def delete_record__should_update_most_recent(self):
        """see ticket:36."""
        proj = Project("test_project",
                       record_store=MockRecordStore())
        proj.add_record(MockRecord("record1"))
        self.assertEqual(proj._most_recent, "record1")
        proj.add_record(MockRecord("record2"))
        self.assertEqual(proj._most_recent, "record2")
        proj.delete_record("record2")
        self.assertEqual(proj._most_recent, "last") # should really be "record1", but we are not testing RecordStore here
def test__backup(self):
def fake_copytree(source, target):
pass
orig_copytree = shutil.copytree
shutil.copytree = fake_copytree
proj = Project("test_project",
record_store=MockRecordStore())
backup_dir = proj.backup()
shutil.copytree = orig_copytree
assert "backup" in backup_dir
def test__repeat(self):
orig_gwc = sumatra.projects.get_working_copy
sumatra.projects.get_working_copy = lambda: MockWorkingCopy(self.dir)
orig_launch = Project.launch
Project.launch = lambda self, **kwargs: "new_record"
proj = Project("test_project",
record_store=MockRecordStore())
proj.add_record(MockRecord("record1"))
proj.add_record(MockRecord("record2"))
self.assertEqual(proj.repeat("record1")[0], "new_record")
sumatra.projects.get_working_copy = orig_gwc
Project.launch = orig_launch
class TestModuleFunctions(unittest.TestCase):
    """Tests for module-level helpers such as load_project()."""

    def tearDown(self):
        # Remove any project artefacts created in the working directory.
        if os.path.exists(".smt"):
            shutil.rmtree(".smt")
        if os.path.exists("Data"):
            os.rmdir("Data")

    def test__load_project__should_return_Project(self):
        created = Project("test_project", record_store=MockRecordStore())
        assert os.path.exists(".smt/project")
        loaded = load_project()
        self.assertEqual(created.name, loaded.name)

    def test__load_project_should_raise_exception_if_no_project_in_current_dir(self):
        self.assertRaises(Exception, load_project)
if __name__ == '__main__':
    # Allow the test module to be run directly as a script.
    unittest.main()
| |
import json
import os
import random
import string
import uuid

import nova_api
def cinder_request(self,
                   url_detail,
                   request_type='get',
                   request_name=None,
                   data=None,
                   locust_name=None):
    """Issue an authenticated request against the cinder (volumev2) endpoint.

    ``url_detail`` is joined onto the service endpoint URL, ``request_type``
    names the HTTP method on ``self.client``, ``data`` (when truthy) is
    JSON-encoded into the request body, and ``locust_name`` groups requests
    in the locust statistics.  The raw response object is returned and its
    status/content are echoed through ``self.output``.

    NOTE(review): ``request_name`` is accepted but never used in this body --
    confirm whether it was intended as a fallback for ``locust_name``.
    """
    url = self.get_endpoint('volumev2')
    if url_detail:
        url = os.path.join(url, url_detail)
    kwargs = {
        'headers': {'X-Auth-Project-Id': self.keystone_tenant,
                    'X-Auth-Token': self.auth_token,
                    'Content-Type': 'application/json',
                    'Accept': 'application/json'},
        'name': locust_name,
    }
    if data:
        kwargs['data'] = json.dumps(data)
    response = getattr(self.client, request_type)(url, **kwargs)
    self.output(url)
    self.output("Response status code: %s" % response.status_code)
    self.output("Response content: %s" % response.content)
    return response
def cinder_get_volume_id(self):
    """Return the id of a randomly chosen existing volume."""
    listing = cinder_request(self, 'volumes', 'get')
    volumes = json.loads(listing.content)['volumes']
    return random.choice([volume['id'] for volume in volumes])
def cinder_get_snapshot_id(self):
    """Return the id of a randomly chosen existing snapshot."""
    listing = cinder_request(self, 'snapshots', 'get')
    snapshots = json.loads(listing.content)['snapshots']
    return random.choice([snapshot['id'] for snapshot in snapshots])
def cinder_get_image_id(self):
    """Return the id of a randomly chosen available image (queried via nova)."""
    listing = nova_api.nova_request(self, 'images', 'get')
    images = json.loads(listing.content)['images']
    return random.choice([image['id'] for image in images])
def cinder_get_server_id(self):
    """Return the id of a randomly chosen server (queried via nova)."""
    listing = nova_api.nova_request(self, 'servers', 'get')
    servers = json.loads(listing.content)['servers']
    return random.choice([server['id'] for server in servers])
def list_volumes(self):
    """GET volumes -- summary listing of all volumes."""
    return cinder_request(self, 'volumes', 'get', 'cinder_list_volumes')
def list_volumes_detail(self):
    """GET volumes/detail -- detailed listing of all volumes."""
    return cinder_request(self, 'volumes/detail', 'get',
                          'cinder_list_volumes_detail')
def list_volume_detail(self, volume_id=None):
    """GET volumes/<id> -- detail of one volume (random volume if id omitted)."""
    volume_id = volume_id or cinder_get_volume_id(self)
    return cinder_request(self, 'volumes/%s' % volume_id, 'get',
                          'cinder_list_volume_detail',
                          locust_name='volumes/[id]')
def list_volume_types(self):
    """GET types -- available volume types."""
    return cinder_request(self, 'types', 'get', 'cinder_list_volume_types')
def list_snapshots(self):
    """GET snapshots -- summary listing of all snapshots."""
    return cinder_request(self, 'snapshots', 'get', 'cinder_list_snapshots')
def list_snapshots_detail(self):
    """GET snapshots/detail -- detailed listing of all snapshots."""
    return cinder_request(self, 'snapshots/detail', 'get',
                          'cinder_list_snapshots_detail')
def list_snapshot_detail(self, snapshot_id=None):
    """GET snapshots/<id> -- detail of one snapshot (random if id omitted)."""
    snapshot_id = snapshot_id or cinder_get_snapshot_id(self)
    return cinder_request(self, 'snapshots/%s' % snapshot_id, 'get',
                          'cinder_list_snapshot_detail',
                          locust_name='snapshots/[id]')
def list_images(self):
    """GET images -- summary listing of all images."""
    return cinder_request(self, 'images', 'get', 'cinder_list_images')
def list_images_detail(self):
    """GET images/detail -- detailed listing of all images."""
    return cinder_request(self, 'images/detail', 'get',
                          'cinder_list_images_detail')
def list_image_detail(self, image_id=None):
    """GET images/<id> -- detail of one image (random image if id omitted)."""
    image_id = image_id or cinder_get_image_id(self)
    return cinder_request(self, 'images/%s' % image_id, 'get',
                          'cinder_list_image_detail',
                          locust_name='images/[id]')
def list_image_metadata(self, image_id=None):
    """GET images/<id>/metadata -- metadata of one image."""
    image_id = image_id or cinder_get_image_id(self)
    return cinder_request(self, 'images/%s/metadata' % image_id, 'get',
                          'cinder_list_image_metadata',
                          locust_name='images/[id]/metadata')
def update_image_metadata(self, image_id=None, metadata=None):
    """POST images/<id>/metadata -- merge new metadata into an image."""
    image_id = image_id or cinder_get_image_id(self)
    metadata = metadata or cinder_get_test_metadata(self)
    return cinder_request(self, 'images/%s/metadata' % image_id, 'post',
                          'cinder_update_image_metadata',
                          {"metadata": metadata},
                          locust_name='images/[id]/metadata')
def overwrite_image_metadata(self, image_id=None, metadata=None):
    """PUT images/<id>/metadata -- replace an image's metadata wholesale."""
    image_id = image_id or cinder_get_image_id(self)
    metadata = metadata or cinder_get_test_metadata(self)
    return cinder_request(self, 'images/%s/metadata' % image_id, 'put',
                          'cinder_overwrite_image_metadata',
                          {"metadata": metadata},
                          locust_name='images/[id]/metadata')
def create_volume(self,
                  volume_id=None,
                  snapshot_id=None,
                  image_id=None,
                  description=None,
                  size=1,
                  name=None,
                  bootable=False,
                  metadata={}
                  ):
    """POST volumes -- create a volume.

    A volume can be created blank, from another volume (``volume_id``),
    from a snapshot (``snapshot_id``) or from an image (``image_id``).
    A unique name is generated when ``name`` is not supplied.  Returns
    the raw response object.

    NOTE(review): the mutable default ``metadata={}`` is harmless here only
    because it is never mutated; switch to ``None`` + fallback if that ever
    changes.
    """
    # BUG FIX: uuid was used here without being imported anywhere in the
    # module; a function-local import keeps this fix self-contained.
    import uuid
    if not name:
        name = "volume-%s" % uuid.uuid4()
    data = {
        "volume": {
            "source_volid": volume_id,
            "snapshot_id": snapshot_id,
            "description": description,
            "size": size,
            "name": name,
            "imageRef": image_id,
            "bootable": bootable,
            "metadata": metadata
        }
    }
    response = cinder_request(self,
                              'volumes',
                              'post',
                              'cinder_create_volume',
                              data)
    return response
def delete_volume(self, volume_id):
    """DELETE volumes/<id> -- remove the given volume."""
    cinder_request(self, 'volumes/%s' % volume_id, 'delete',
                   'cinder_delete_volume',
                   locust_name='volumes/[id]')
def create_snapshot(self,
                    volume_id=None,
                    name=None,
                    force=False,
                    description=None):
    """POST snapshots -- snapshot a volume (random volume if id omitted).

    ``force`` allows snapshotting an attached volume.  A unique name is
    generated when ``name`` is not supplied.  Returns the response.
    """
    # BUG FIX: uuid was used without being imported anywhere in the module.
    import uuid
    if not name:
        name = "snapshot-%s" % uuid.uuid4()
    if not volume_id:
        # BUG FIX: previously called the undefined name ``get_volume_id``
        # (NameError at runtime); the helper is ``cinder_get_volume_id``.
        volume_id = cinder_get_volume_id(self)
    data = {"snapshot": {
        "name": name,
        "description": description,
        "volume_id": volume_id,
        "force": force
        }
    }
    response = cinder_request(self,
                              'snapshots',
                              'post',
                              'cinder_create_snapshot',
                              data)
    return response
def delete_snapshot(self, snapshot_id):
    """DELETE snapshots/<id> -- remove the given snapshot."""
    cinder_request(self,
                   'snapshots/%s' % snapshot_id,
                   'delete',
                   'cinder_delete_snapshot',
                   # Fixed copy-paste: stats were grouped under 'volumes/[id]'.
                   locust_name='snapshots/[id]')
def resize_server(self, server_id, flavor_id=None):
    """POST servers/<id>/action (resize) -- resize a server to a new flavor.

    Now returns the response, for consistency with
    confirm_resize_server/revert_resize_server (previously returned None).
    """
    data = {
        "resize": {
            "flavorRef": flavor_id
        }
    }
    return cinder_request(self,
                          'servers/%s/action' % server_id,
                          'post',
                          'cinder_resize_server',
                          data,
                          locust_name='servers/[resize]/[id]')
def confirm_resize_server(self, server_id):
    """POST servers/<id>/action (confirmResize) -- commit a pending resize."""
    return cinder_request(self, 'servers/%s/action' % server_id, 'post',
                          'cinder_confirm_resize_server',
                          {"confirmResize": None},
                          locust_name='servers/[confirm_resize]/[id]')
def revert_resize_server(self, server_id):
    """POST servers/<id>/action (revertResize) -- roll back a pending resize."""
    data = {"revertResize": None}
    return cinder_request(self,
                          'servers/%s/action' % server_id,
                          'post',
                          # Fixed copy-paste: label was 'cinder_resize_server'.
                          'cinder_revert_resize_server',
                          data,
                          locust_name='servers/[revert_resize]/[id]')
def suspend_server(self, server_id):
    """POST servers/<id>/action (suspend) -- suspend a running server."""
    return cinder_request(self, 'servers/%s/action' % server_id, 'post',
                          'cinder_suspend_server',
                          {"suspend": None},
                          locust_name='servers/[suspend]/[id]')
def resume_server(self, server_id):
    """POST servers/<id>/action (resume) -- resume a suspended server."""
    return cinder_request(self, 'servers/%s/action' % server_id, 'post',
                          'cinder_resume_server',
                          {"resume": None},
                          locust_name='servers/[resume]/[id]')
def update_server_metadata(self, server_id=None, metadata=None):
    """POST servers/<id>/metadata -- merge new metadata into a server."""
    server_id = server_id or cinder_get_server_id(self)
    metadata = metadata or cinder_get_test_metadata(self)
    return cinder_request(self, 'servers/%s/metadata' % server_id, 'post',
                          'cinder_update_server_metadata',
                          {"metadata": metadata},
                          locust_name='servers/[id]/metadata')
def overwrite_server_metadata(self, server_id=None, metadata=None):
    """PUT servers/<id>/metadata -- replace a server's metadata wholesale."""
    server_id = server_id or cinder_get_server_id(self)
    metadata = metadata or cinder_get_test_metadata(self)
    return cinder_request(self, 'servers/%s/metadata' % server_id, 'put',
                          'cinder_overwrite_server_metadata',
                          {"metadata": metadata},
                          locust_name='servers/[id]/metadata')
def list_flavors(self):
    """GET flavors -- available flavors."""
    return cinder_request(self, 'flavors', 'get', 'cinder_list_flavors')
def create_flavor(self, name=None,
                  ram=128,
                  vcpus=1,
                  disk=0,
                  id='auto',
                  is_public=False):
    """POST flavors -- create a flavor.

    NOTE(review): the ``id`` parameter shadows the builtin; kept as-is
    because the keyword name is part of the public signature.
    """
    flavor_spec = {
        "name": name,
        "ram": ram,
        "vcpus": vcpus,
        "disk": disk,
        "id": id,
        "os-flavor-access:is_public": is_public
    }
    return cinder_request(self, 'flavors', 'post', 'cinder_create_flavor',
                          {"flavor": flavor_spec})
def create_floating_ip(self, pool=None):
    """POST os-floating-ips -- allocate a floating IP (from ``pool`` if given)."""
    data = {'pool': pool} if pool else {}
    return cinder_request(self, 'os-floating-ips', 'post',
                          'cinder_create_floating_ip', data)
def delete_floating_ip(self, floating_ip_id=None):
    """DELETE os-floating-ips/<id> -- release a floating IP (random if omitted)."""
    floating_ip_id = floating_ip_id or cinder_get_floating_ip_id(self)
    return cinder_request(self, 'os-floating-ips/%s' % floating_ip_id, 'delete',
                          'cinder_delete_floating_ip',
                          locust_name='os-floating-ips/[floating-ip-id]')
def list_floating_ips(self):
    """GET os-floating-ips -- list allocated floating IPs."""
    return cinder_request(self, 'os-floating-ips', 'get',
                          'cinder_list_floating_ips')
def assign_floating_ip(self,
                       server_id=None,
                       floating_ip=None,
                       pool=None):
    """POST servers/<id>/action (addFloatingIp) -- attach an IP to a server."""
    server_id = server_id or cinder_get_server_id(self)
    floating_ip = floating_ip or cinder_get_floating_ip(self)
    action = {"address": floating_ip}
    if pool:
        action['pool'] = pool
    return cinder_request(self, 'servers/%s/action' % server_id, 'post',
                          'cinder_assign_floating_ip',
                          {"addFloatingIp": action},
                          locust_name='servers/[server_id]/[assign-floating-ip]')
| |
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, normal_init
from mmcv.ops import DeformConv2d
from mmdet.core import multi_apply
from ..builder import HEADS, build_loss
from .corner_head import CornerHead
@HEADS.register_module()
class CentripetalHead(CornerHead):
    """Head of CentripetalNet: Pursuing High-quality Keypoint Pairs for Object
    Detection.
    CentripetalHead inherits from :class:`CornerHead`. It removes the
    embedding branch and adds guiding shift and centripetal shift branches.
    More details can be found in the `paper
    <https://arxiv.org/abs/2003.09119>`_ .
    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        num_feat_levels (int): Levels of feature from the previous module. 2
            for HourglassNet-104 and 1 for HourglassNet-52. HourglassNet-104
            outputs the final feature and intermediate supervision feature and
            HourglassNet-52 only outputs the final feature. Default: 2.
        corner_emb_channels (int): Channel of embedding vector. Default: 1.
        train_cfg (dict | None): Training config. Useless in CornerHead,
            but we keep this variable for SingleStageDetector. Default: None.
        test_cfg (dict | None): Testing config of CornerHead. Default: None.
        loss_heatmap (dict | None): Config of corner heatmap loss. Default:
            GaussianFocalLoss.
        loss_embedding (dict | None): Config of corner embedding loss. Default:
            AssociativeEmbeddingLoss.
        loss_offset (dict | None): Config of corner offset loss. Default:
            SmoothL1Loss.
        loss_guiding_shift (dict): Config of guiding shift loss. Default:
            SmoothL1Loss.
        loss_centripetal_shift (dict): Config of centripetal shift loss.
            Default: SmoothL1Loss.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None
    """
    def __init__(self,
                 *args,
                 centripetal_shift_channels=2,
                 guiding_shift_channels=2,
                 feat_adaption_conv_kernel=3,
                 loss_guiding_shift=dict(
                     type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
                 loss_centripetal_shift=dict(
                     type='SmoothL1Loss', beta=1.0, loss_weight=1),
                 init_cfg=None,
                 **kwargs):
        # Weights are set up in init_weights() instead; a user-supplied
        # init_cfg would interfere with that scheme.
        assert init_cfg is None, 'To prevent abnormal initialization ' \
                                 'behavior, init_cfg is not allowed to be set'
        # Both shift branches predict 2-channel (x, y) vectors only.
        assert centripetal_shift_channels == 2, (
            'CentripetalHead only support centripetal_shift_channels == 2')
        self.centripetal_shift_channels = centripetal_shift_channels
        assert guiding_shift_channels == 2, (
            'CentripetalHead only support guiding_shift_channels == 2')
        self.guiding_shift_channels = guiding_shift_channels
        self.feat_adaption_conv_kernel = feat_adaption_conv_kernel
        # CornerHead.__init__ calls _init_layers(), which in turn builds
        # the centripetal layers below.
        super(CentripetalHead, self).__init__(
            *args, init_cfg=init_cfg, **kwargs)
        self.loss_guiding_shift = build_loss(loss_guiding_shift)
        self.loss_centripetal_shift = build_loss(loss_centripetal_shift)
    def _init_centripetal_layers(self):
        """Initialize centripetal layers.
        Including feature adaption deform convs (feat_adaption), deform offset
        prediction convs (dcn_off), guiding shift (guiding_shift) and
        centripetal shift ( centripetal_shift). Each branch has two parts:
        prefix `tl_` for top-left and `br_` for bottom-right.
        """
        self.tl_feat_adaption = nn.ModuleList()
        self.br_feat_adaption = nn.ModuleList()
        self.tl_dcn_offset = nn.ModuleList()
        self.br_dcn_offset = nn.ModuleList()
        self.tl_guiding_shift = nn.ModuleList()
        self.br_guiding_shift = nn.ModuleList()
        self.tl_centripetal_shift = nn.ModuleList()
        self.br_centripetal_shift = nn.ModuleList()
        # One copy of every branch per feature level.
        for _ in range(self.num_feat_levels):
            self.tl_feat_adaption.append(
                DeformConv2d(self.in_channels, self.in_channels,
                             self.feat_adaption_conv_kernel, 1, 1))
            self.br_feat_adaption.append(
                DeformConv2d(self.in_channels, self.in_channels,
                             self.feat_adaption_conv_kernel, 1, 1))
            self.tl_guiding_shift.append(
                self._make_layers(
                    out_channels=self.guiding_shift_channels,
                    in_channels=self.in_channels))
            self.br_guiding_shift.append(
                self._make_layers(
                    out_channels=self.guiding_shift_channels,
                    in_channels=self.in_channels))
            # The dcn_offset convs map the guiding shift to one (x, y)
            # offset per kernel sampling location of the deform conv,
            # hence kernel**2 * 2 output channels.
            self.tl_dcn_offset.append(
                ConvModule(
                    self.guiding_shift_channels,
                    self.feat_adaption_conv_kernel**2 *
                    self.guiding_shift_channels,
                    1,
                    bias=False,
                    act_cfg=None))
            self.br_dcn_offset.append(
                ConvModule(
                    self.guiding_shift_channels,
                    self.feat_adaption_conv_kernel**2 *
                    self.guiding_shift_channels,
                    1,
                    bias=False,
                    act_cfg=None))
            self.tl_centripetal_shift.append(
                self._make_layers(
                    out_channels=self.centripetal_shift_channels,
                    in_channels=self.in_channels))
            self.br_centripetal_shift.append(
                self._make_layers(
                    out_channels=self.centripetal_shift_channels,
                    in_channels=self.in_channels))
    def _init_layers(self):
        """Initialize layers for CentripetalHead.
        Including two parts: CornerHead layers and CentripetalHead layers
        """
        super()._init_layers()  # using _init_layers in CornerHead
        self._init_centripetal_layers()
    def init_weights(self):
        """Initialize weights: CornerHead scheme plus the centripetal layers."""
        super(CentripetalHead, self).init_weights()
        for i in range(self.num_feat_levels):
            normal_init(self.tl_feat_adaption[i], std=0.01)
            normal_init(self.br_feat_adaption[i], std=0.01)
            normal_init(self.tl_dcn_offset[i].conv, std=0.1)
            normal_init(self.br_dcn_offset[i].conv, std=0.1)
            # Re-initialize every conv in the shift branches with its
            # default reset_parameters scheme (results discarded).
            _ = [x.conv.reset_parameters() for x in self.tl_guiding_shift[i]]
            _ = [x.conv.reset_parameters() for x in self.br_guiding_shift[i]]
            _ = [
                x.conv.reset_parameters() for x in self.tl_centripetal_shift[i]
            ]
            _ = [
                x.conv.reset_parameters() for x in self.br_centripetal_shift[i]
            ]
    def forward_single(self, x, lvl_ind):
        """Forward feature of a single level.
        Args:
            x (Tensor): Feature of a single level.
            lvl_ind (int): Level index of current feature.
        Returns:
            tuple[Tensor]: A tuple of CentripetalHead's output for current
            feature level. Containing the following Tensors:
                - tl_heat (Tensor): Predicted top-left corner heatmap.
                - br_heat (Tensor): Predicted bottom-right corner heatmap.
                - tl_off (Tensor): Predicted top-left offset heatmap.
                - br_off (Tensor): Predicted bottom-right offset heatmap.
                - tl_guiding_shift (Tensor): Predicted top-left guiding shift
                  heatmap.
                - br_guiding_shift (Tensor): Predicted bottom-right guiding
                  shift heatmap.
                - tl_centripetal_shift (Tensor): Predicted top-left centripetal
                  shift heatmap.
                - br_centripetal_shift (Tensor): Predicted bottom-right
                  centripetal shift heatmap.
        """
        # CornerHead supplies heatmaps/offsets plus the pooled corner
        # features; embeddings (the two ignored slots) are unused here.
        tl_heat, br_heat, _, _, tl_off, br_off, tl_pool, br_pool = super(
        ).forward_single(
            x, lvl_ind, return_pool=True)
        tl_guiding_shift = self.tl_guiding_shift[lvl_ind](tl_pool)
        br_guiding_shift = self.br_guiding_shift[lvl_ind](br_pool)
        # detach(): the DCN offset path must not backpropagate into the
        # guiding-shift branch.
        tl_dcn_offset = self.tl_dcn_offset[lvl_ind](tl_guiding_shift.detach())
        br_dcn_offset = self.br_dcn_offset[lvl_ind](br_guiding_shift.detach())
        tl_feat_adaption = self.tl_feat_adaption[lvl_ind](tl_pool,
                                                          tl_dcn_offset)
        br_feat_adaption = self.br_feat_adaption[lvl_ind](br_pool,
                                                          br_dcn_offset)
        tl_centripetal_shift = self.tl_centripetal_shift[lvl_ind](
            tl_feat_adaption)
        br_centripetal_shift = self.br_centripetal_shift[lvl_ind](
            br_feat_adaption)
        result_list = [
            tl_heat, br_heat, tl_off, br_off, tl_guiding_shift,
            br_guiding_shift, tl_centripetal_shift, br_centripetal_shift
        ]
        return result_list
    def loss(self,
             tl_heats,
             br_heats,
             tl_offs,
             br_offs,
             tl_guiding_shifts,
             br_guiding_shifts,
             tl_centripetal_shifts,
             br_centripetal_shifts,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.
        Args:
            tl_heats (list[Tensor]): Top-left corner heatmaps for each level
                with shape (N, num_classes, H, W).
            br_heats (list[Tensor]): Bottom-right corner heatmaps for each
                level with shape (N, num_classes, H, W).
            tl_offs (list[Tensor]): Top-left corner offsets for each level
                with shape (N, corner_offset_channels, H, W).
            br_offs (list[Tensor]): Bottom-right corner offsets for each level
                with shape (N, corner_offset_channels, H, W).
            tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each
                level with shape (N, guiding_shift_channels, H, W).
            br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for
                each level with shape (N, guiding_shift_channels, H, W).
            tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts
                for each level with shape (N, centripetal_shift_channels, H,
                W).
            br_centripetal_shifts (list[Tensor]): Bottom-right centripetal
                shifts for each level with shape (N,
                centripetal_shift_channels, H, W).
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [left, top, right, bottom] format.
            gt_labels (list[Tensor]): Class indices corresponding to each box.
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
                boxes can be ignored when computing the loss.
        Returns:
            dict[str, Tensor]: A dictionary of loss components. Containing the
            following losses:
                - det_loss (list[Tensor]): Corner keypoint losses of all
                  feature levels.
                - off_loss (list[Tensor]): Corner offset losses of all feature
                  levels.
                - guiding_loss (list[Tensor]): Guiding shift losses of all
                  feature levels.
                - centripetal_loss (list[Tensor]): Centripetal shift losses of
                  all feature levels.
        """
        targets = self.get_targets(
            gt_bboxes,
            gt_labels,
            tl_heats[-1].shape,
            img_metas[0]['pad_shape'],
            with_corner_emb=self.with_corner_emb,
            with_guiding_shift=True,
            with_centripetal_shift=True)
        # Targets are computed once and shared by every feature level.
        mlvl_targets = [targets for _ in range(self.num_feat_levels)]
        [det_losses, off_losses, guiding_losses, centripetal_losses
         ] = multi_apply(self.loss_single, tl_heats, br_heats, tl_offs,
                         br_offs, tl_guiding_shifts, br_guiding_shifts,
                         tl_centripetal_shifts, br_centripetal_shifts,
                         mlvl_targets)
        loss_dict = dict(
            det_loss=det_losses,
            off_loss=off_losses,
            guiding_loss=guiding_losses,
            centripetal_loss=centripetal_losses)
        return loss_dict
    def loss_single(self, tl_hmp, br_hmp, tl_off, br_off, tl_guiding_shift,
                    br_guiding_shift, tl_centripetal_shift,
                    br_centripetal_shift, targets):
        """Compute losses for single level.
        Args:
            tl_hmp (Tensor): Top-left corner heatmap for current level with
                shape (N, num_classes, H, W).
            br_hmp (Tensor): Bottom-right corner heatmap for current level with
                shape (N, num_classes, H, W).
            tl_off (Tensor): Top-left corner offset for current level with
                shape (N, corner_offset_channels, H, W).
            br_off (Tensor): Bottom-right corner offset for current level with
                shape (N, corner_offset_channels, H, W).
            tl_guiding_shift (Tensor): Top-left guiding shift for current level
                with shape (N, guiding_shift_channels, H, W).
            br_guiding_shift (Tensor): Bottom-right guiding shift for current
                level with shape (N, guiding_shift_channels, H, W).
            tl_centripetal_shift (Tensor): Top-left centripetal shift for
                current level with shape (N, centripetal_shift_channels, H, W).
            br_centripetal_shift (Tensor): Bottom-right centripetal shift for
                current level with shape (N, centripetal_shift_channels, H, W).
            targets (dict): Corner target generated by `get_targets`.
        Returns:
            tuple[torch.Tensor]: Losses of the head's different branches
            containing the following losses:
                - det_loss (Tensor): Corner keypoint loss.
                - off_loss (Tensor): Corner offset loss.
                - guiding_loss (Tensor): Guiding shift loss.
                - centripetal_loss (Tensor): Centripetal shift loss.
        """
        # Reuse CornerHead's loss for heatmap + offset; the embedding
        # inputs/targets are disabled (None) since this head has no
        # embedding branch.
        targets['corner_embedding'] = None
        det_loss, _, _, off_loss = super().loss_single(tl_hmp, br_hmp, None,
                                                       None, tl_off, br_off,
                                                       targets)
        gt_tl_guiding_shift = targets['topleft_guiding_shift']
        gt_br_guiding_shift = targets['bottomright_guiding_shift']
        gt_tl_centripetal_shift = targets['topleft_centripetal_shift']
        gt_br_centripetal_shift = targets['bottomright_centripetal_shift']
        gt_tl_heatmap = targets['topleft_heatmap']
        gt_br_heatmap = targets['bottomright_heatmap']
        # We only compute the offset loss at the real corner position.
        # The value of real corner would be 1 in heatmap ground truth.
        # The mask is computed in class agnostic mode and its shape is
        # batch * 1 * width * height.
        tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
            gt_tl_heatmap)
        br_mask = gt_br_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
            gt_br_heatmap)
        # Guiding shift loss
        tl_guiding_loss = self.loss_guiding_shift(
            tl_guiding_shift,
            gt_tl_guiding_shift,
            tl_mask,
            avg_factor=tl_mask.sum())
        br_guiding_loss = self.loss_guiding_shift(
            br_guiding_shift,
            gt_br_guiding_shift,
            br_mask,
            avg_factor=br_mask.sum())
        guiding_loss = (tl_guiding_loss + br_guiding_loss) / 2.0
        # Centripetal shift loss
        tl_centripetal_loss = self.loss_centripetal_shift(
            tl_centripetal_shift,
            gt_tl_centripetal_shift,
            tl_mask,
            avg_factor=tl_mask.sum())
        br_centripetal_loss = self.loss_centripetal_shift(
            br_centripetal_shift,
            gt_br_centripetal_shift,
            br_mask,
            avg_factor=br_mask.sum())
        centripetal_loss = (tl_centripetal_loss + br_centripetal_loss) / 2.0
        return det_loss, off_loss, guiding_loss, centripetal_loss
    def get_bboxes(self,
                   tl_heats,
                   br_heats,
                   tl_offs,
                   br_offs,
                   tl_guiding_shifts,
                   br_guiding_shifts,
                   tl_centripetal_shifts,
                   br_centripetal_shifts,
                   img_metas,
                   rescale=False,
                   with_nms=True):
        """Transform network output for a batch into bbox predictions.
        Args:
            tl_heats (list[Tensor]): Top-left corner heatmaps for each level
                with shape (N, num_classes, H, W).
            br_heats (list[Tensor]): Bottom-right corner heatmaps for each
                level with shape (N, num_classes, H, W).
            tl_offs (list[Tensor]): Top-left corner offsets for each level
                with shape (N, corner_offset_channels, H, W).
            br_offs (list[Tensor]): Bottom-right corner offsets for each level
                with shape (N, corner_offset_channels, H, W).
            tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each
                level with shape (N, guiding_shift_channels, H, W). Useless in
                this function, we keep this arg because it's the raw output
                from CentripetalHead.
            br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for
                each level with shape (N, guiding_shift_channels, H, W).
                Useless in this function, we keep this arg because it's the
                raw output from CentripetalHead.
            tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts
                for each level with shape (N, centripetal_shift_channels, H,
                W).
            br_centripetal_shifts (list[Tensor]): Bottom-right centripetal
                shifts for each level with shape (N,
                centripetal_shift_channels, H, W).
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            rescale (bool): If True, return boxes in original image space.
                Default: False.
            with_nms (bool): If True, do nms before return boxes.
                Default: True.
        """
        assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)
        result_list = []
        # Only the final feature level ([-1]) is decoded, one image at a time.
        for img_id in range(len(img_metas)):
            result_list.append(
                self._get_bboxes_single(
                    tl_heats[-1][img_id:img_id + 1, :],
                    br_heats[-1][img_id:img_id + 1, :],
                    tl_offs[-1][img_id:img_id + 1, :],
                    br_offs[-1][img_id:img_id + 1, :],
                    img_metas[img_id],
                    tl_emb=None,
                    br_emb=None,
                    tl_centripetal_shift=tl_centripetal_shifts[-1][
                        img_id:img_id + 1, :],
                    br_centripetal_shift=br_centripetal_shifts[-1][
                        img_id:img_id + 1, :],
                    rescale=rescale,
                    with_nms=with_nms))
        return result_list
| |
from __future__ import absolute_import
from __future__ import print_function
# Emitter expects events obeying the following grammar:
# stream ::= STREAM-START document* STREAM-END
# document ::= DOCUMENT-START node DOCUMENT-END
# node ::= SCALAR | sequence | mapping
# sequence ::= SEQUENCE-START node* SEQUENCE-END
# mapping ::= MAPPING-START (node node)* MAPPING-END
__all__ = ['Emitter', 'EmitterError']
from .error import YAMLError
from .events import *
from .compat import utf8, text_type, PY2, nprint, dbg, DBG_EVENT
class EmitterError(YAMLError):
    """Raised when the emitter receives an event stream that violates the
    expected emitter grammar (see the grammar comment at the top of this
    module)."""
    pass
class ScalarAnalysis(object):
    """Value object recording which output styles a scalar value permits."""
    def __init__(self, scalar, empty, multiline,
                 allow_flow_plain, allow_block_plain,
                 allow_single_quoted, allow_double_quoted,
                 allow_block):
        # Simply record every analysis result on the instance.
        self.scalar = scalar
        self.empty = empty
        self.multiline = multiline
        self.allow_block = allow_block
        self.allow_block_plain = allow_block_plain
        self.allow_flow_plain = allow_flow_plain
        self.allow_single_quoted = allow_single_quoted
        self.allow_double_quoted = allow_double_quoted
class Emitter(object):
DEFAULT_TAG_PREFIXES = {
u'!': u'!',
u'tag:yaml.org,2002:': u'!!',
}
def __init__(self, stream, canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None):
# The stream should have the methods `write` and possibly `flush`.
self.stream = stream
# Encoding can be overriden by STREAM-START.
self.encoding = None
# Emitter is a state machine with a stack of states to handle nested
# structures.
self.states = []
self.state = self.expect_stream_start
# Current event and the event queue.
self.events = []
self.event = None
# The current indentation level and the stack of previous indents.
self.indents = []
self.indent = None
# Flow level.
self.flow_level = 0
# Contexts.
self.root_context = False
self.sequence_context = False
self.mapping_context = False
self.simple_key_context = False
# Characteristics of the last emitted character:
# - current position.
# - is it a whitespace?
# - is it an indention character
# (indentation space, '-', '?', or ':')?
self.line = 0
self.column = 0
self.whitespace = True
self.indention = True
# Whether the document requires an explicit document indicator
self.open_ended = False
# Formatting details.
self.canonical = canonical
self.allow_unicode = allow_unicode
self.best_indent = 2
if indent and 1 < indent < 10:
self.best_indent = indent
self.best_width = 80
if width and width > self.best_indent*2:
self.best_width = width
self.best_line_break = u'\n'
if line_break in [u'\r', u'\n', u'\r\n']:
self.best_line_break = line_break
# Tag prefixes.
self.tag_prefixes = None
# Prepared anchor and tag.
self.prepared_anchor = None
self.prepared_tag = None
# Scalar analysis and style.
self.analysis = None
self.style = None
def dispose(self):
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
def emit(self, event):
if dbg(DBG_EVENT):
nprint(event)
self.events.append(event)
while not self.need_more_events():
self.event = self.events.pop(0)
self.state()
self.event = None
# In some cases, we wait for a few next events before emitting.
def need_more_events(self):
if not self.events:
return True
event = self.events[0]
if isinstance(event, DocumentStartEvent):
return self.need_events(1)
elif isinstance(event, SequenceStartEvent):
return self.need_events(2)
elif isinstance(event, MappingStartEvent):
return self.need_events(3)
else:
return False
def need_events(self, count):
level = 0
for event in self.events[1:]:
if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
level += 1
elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
level -= 1
elif isinstance(event, StreamEndEvent):
level = -1
if level < 0:
return False
return (len(self.events) < count+1)
def increase_indent(self, flow=False, indentless=False):
self.indents.append(self.indent)
if self.indent is None:
if flow:
self.indent = self.best_indent
else:
self.indent = 0
elif not indentless:
self.indent += self.best_indent
# States.
# Stream handlers.
def expect_stream_start(self):
if isinstance(self.event, StreamStartEvent):
if PY2:
if self.event.encoding \
and not getattr(self.stream, 'encoding', None):
self.encoding = self.event.encoding
else:
if self.event.encoding \
and not hasattr(self.stream, 'encoding'):
self.encoding = self.event.encoding
self.write_stream_start()
self.state = self.expect_first_document_start
else:
raise EmitterError("expected StreamStartEvent, but got %s" %
self.event)
def expect_nothing(self):
raise EmitterError("expected nothing, but got %s" % self.event)
# Document handlers.
def expect_first_document_start(self):
return self.expect_document_start(first=True)
def expect_document_start(self, first=False):
    """Write directives and the document start marker, or end the stream.

    *first* is True for the first document, which may omit the explicit
    '---' marker when no directives are required.
    """
    if isinstance(self.event, DocumentStartEvent):
        # Directives after an open-ended document need a '...' terminator.
        if (self.event.version or self.event.tags) and self.open_ended:
            self.write_indicator(u'...', True)
            self.write_indent()
        if self.event.version:
            version_text = self.prepare_version(self.event.version)
            self.write_version_directive(version_text)
        self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
        if self.event.tags:
            # Emit %TAG directives in sorted handle order for determinism.
            handles = sorted(self.event.tags.keys())
            for handle in handles:
                prefix = self.event.tags[handle]
                self.tag_prefixes[prefix] = handle
                handle_text = self.prepare_tag_handle(handle)
                prefix_text = self.prepare_tag_prefix(prefix)
                self.write_tag_directive(handle_text, prefix_text)
        # '---' can be skipped only for an implicit first document with
        # no directives and a non-empty root node.
        implicit = (first and not self.event.explicit
                    and not self.canonical
                    and not self.event.version and not self.event.tags
                    and not self.check_empty_document())
        if not implicit:
            self.write_indent()
            self.write_indicator(u'---', True)
            if self.canonical:
                self.write_indent()
        self.state = self.expect_document_root
    elif isinstance(self.event, StreamEndEvent):
        if self.open_ended:
            self.write_indicator(u'...', True)
            self.write_indent()
        self.write_stream_end()
        self.state = self.expect_nothing
    else:
        raise EmitterError("expected DocumentStartEvent, but got %s" %
                           self.event)
def expect_document_end(self):
    """Close the current document, writing '...' when the end is explicit."""
    if isinstance(self.event, DocumentEndEvent):
        self.write_indent()
        if self.event.explicit:
            self.write_indicator(u'...', True)
            self.write_indent()
        self.flush_stream()
        self.state = self.expect_document_start
    else:
        raise EmitterError("expected DocumentEndEvent, but got %s" %
                           self.event)
def expect_document_root(self):
    """Emit the document's root node, then return to the document-end state."""
    self.states.append(self.expect_document_end)
    self.expect_node(root=True)
# Node handlers.
def expect_node(self, root=False, sequence=False, mapping=False,
                simple_key=False):
    """Dispatch on the current event to emit one node (alias, scalar or
    collection).

    The boolean flags record the syntactic context of the node; they are
    consulted by the scalar/style choosing helpers.
    """
    self.root_context = root
    self.sequence_context = sequence
    self.mapping_context = mapping
    self.simple_key_context = simple_key
    if isinstance(self.event, AliasEvent):
        self.expect_alias()
    elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
        self.process_anchor(u'&')
        self.process_tag()
        if isinstance(self.event, ScalarEvent):
            self.expect_scalar()
        elif isinstance(self.event, SequenceStartEvent):
            if self.event.comment:
                self.write_post_comment(self.event)
            # print('seq event', self.event)
            # Flow style is forced inside a flow context, in canonical
            # mode, or when the sequence is empty.
            if self.flow_level or self.canonical or self.event.flow_style \
                    or self.check_empty_sequence():
                self.expect_flow_sequence()
            else:
                self.expect_block_sequence()
        elif isinstance(self.event, MappingStartEvent):
            if self.event.comment:
                self.write_post_comment(self.event)
            if self.event.comment and self.event.comment[1]:
                self.write_pre_comment(self.event)
            if self.flow_level or self.canonical or self.event.flow_style \
                    or self.check_empty_mapping():
                self.expect_flow_mapping()
            else:
                self.expect_block_mapping()
    else:
        raise EmitterError("expected NodeEvent, but got %s" % self.event)
def expect_alias(self):
    """Write an alias ('*anchor') and pop back to the previous state."""
    if self.event.anchor is None:
        raise EmitterError("anchor is not specified for alias")
    self.process_anchor(u'*')
    self.state = self.states.pop()
def expect_scalar(self):
    """Write a scalar node, temporarily using flow indentation."""
    self.increase_indent(flow=True)
    self.process_scalar()
    self.indent = self.indents.pop()
    self.state = self.states.pop()
# Flow sequence handlers.
def expect_flow_sequence(self):
    """Open a flow sequence ('[') and expect its first item."""
    self.write_indicator(u'[', True, whitespace=True)
    self.flow_level += 1
    self.increase_indent(flow=True)
    self.state = self.expect_first_flow_sequence_item
def expect_first_flow_sequence_item(self):
    """Emit the first flow-sequence item, or close an empty sequence."""
    if isinstance(self.event, SequenceEndEvent):
        self.indent = self.indents.pop()
        self.flow_level -= 1
        self.write_indicator(u']', False)
        self.state = self.states.pop()
    else:
        # Break the line when past the preferred width (or in canonical mode).
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        self.states.append(self.expect_flow_sequence_item)
        self.expect_node(sequence=True)
def expect_flow_sequence_item(self):
    """Emit a subsequent flow-sequence item (',' separated), or close with ']'."""
    if isinstance(self.event, SequenceEndEvent):
        self.indent = self.indents.pop()
        self.flow_level -= 1
        # Canonical output keeps a trailing comma before the closing bracket.
        if self.canonical:
            self.write_indicator(u',', False)
            self.write_indent()
        self.write_indicator(u']', False)
        self.state = self.states.pop()
    else:
        self.write_indicator(u',', False)
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        self.states.append(self.expect_flow_sequence_item)
        self.expect_node(sequence=True)
# Flow mapping handlers.
def expect_flow_mapping(self):
    """Open a flow mapping ('{') and expect its first key."""
    self.write_indicator(u'{', True, whitespace=True)
    self.flow_level += 1
    self.increase_indent(flow=True)
    self.state = self.expect_first_flow_mapping_key
def expect_first_flow_mapping_key(self):
    """Emit the first flow-mapping key, or close an empty mapping."""
    if isinstance(self.event, MappingEndEvent):
        self.indent = self.indents.pop()
        self.flow_level -= 1
        self.write_indicator(u'}', False)
        self.state = self.states.pop()
    else:
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        # A short, single-line key can be written without the '?' indicator.
        if not self.canonical and self.check_simple_key():
            self.states.append(self.expect_flow_mapping_simple_value)
            self.expect_node(mapping=True, simple_key=True)
        else:
            self.write_indicator(u'?', True)
            self.states.append(self.expect_flow_mapping_value)
            self.expect_node(mapping=True)
def expect_flow_mapping_key(self):
    """Emit a subsequent flow-mapping key (',' separated), or close with '}'."""
    if isinstance(self.event, MappingEndEvent):
        # if self.event.comment and self.event.comment[1]:
        #     self.write_pre_comment(self.event)
        self.indent = self.indents.pop()
        self.flow_level -= 1
        # Canonical output keeps a trailing comma before the closing brace.
        if self.canonical:
            self.write_indicator(u',', False)
            self.write_indent()
        self.write_indicator(u'}', False)
        self.state = self.states.pop()
    else:
        self.write_indicator(u',', False)
        if self.canonical or self.column > self.best_width:
            self.write_indent()
        # A short, single-line key can be written without the '?' indicator.
        if not self.canonical and self.check_simple_key():
            self.states.append(self.expect_flow_mapping_simple_value)
            self.expect_node(mapping=True, simple_key=True)
        else:
            self.write_indicator(u'?', True)
            self.states.append(self.expect_flow_mapping_value)
            self.expect_node(mapping=True)
def expect_flow_mapping_simple_value(self):
    """Emit the value of a simple flow-mapping key (':' directly after key)."""
    self.write_indicator(u':', False)
    self.states.append(self.expect_flow_mapping_key)
    self.expect_node(mapping=True)
def expect_flow_mapping_value(self):
    """Emit the value of an explicit ('?') flow-mapping key."""
    if self.canonical or self.column > self.best_width:
        self.write_indent()
    self.write_indicator(u':', True)
    self.states.append(self.expect_flow_mapping_key)
    self.expect_node(mapping=True)
# Block sequence handlers.
def expect_block_sequence(self):
    """Open a block sequence; nested directly in a mapping it is indentless."""
    indentless = (self.mapping_context and not self.indention)
    self.increase_indent(flow=False, indentless=indentless)
    self.state = self.expect_first_block_sequence_item
def expect_first_block_sequence_item(self):
    """Emit the first block-sequence item (SequenceEnd is allowed only later)."""
    return self.expect_block_sequence_item(first=True)
def expect_block_sequence_item(self, first=False):
    """Emit one '- ' block-sequence entry, or pop indent on SequenceEnd."""
    if not first and isinstance(self.event, SequenceEndEvent):
        if self.event.comment and self.event.comment[1]:
            # final comments from a doc
            self.write_pre_comment(self.event)
        self.indent = self.indents.pop()
        self.state = self.states.pop()
    else:
        self.write_indent()
        if self.event.comment and self.event.comment[1]:
            self.write_pre_comment(self.event)
            self.write_indent()
        self.write_indicator(u'-', True, indention=True)
        self.states.append(self.expect_block_sequence_item)
        self.expect_node(sequence=True)
# Block mapping handlers.
def expect_block_mapping(self):
    """Open a block mapping at the next indentation level."""
    self.increase_indent(flow=False)
    self.state = self.expect_first_block_mapping_key
def expect_first_block_mapping_key(self):
    """Emit the first block-mapping key (MappingEnd is allowed only later)."""
    return self.expect_block_mapping_key(first=True)
def expect_block_mapping_key(self, first=False):
    """Emit a block-mapping key, or pop indent on MappingEnd."""
    if not first and isinstance(self.event, MappingEndEvent):
        if self.event.comment and self.event.comment[1]:
            # final comments from a doc
            self.write_pre_comment(self.event)
        self.indent = self.indents.pop()
        self.state = self.states.pop()
    else:
        if self.event.comment and self.event.comment[1]:
            # final comments from a doc
            self.write_pre_comment(self.event)
        self.write_indent()
        if self.check_simple_key():
            # Even a simple key gets '?' when the user explicitly asked
            # for the explicit-key style.
            if self.event.style == '?':
                self.write_indicator(u'?', True, indention=True)
            self.states.append(self.expect_block_mapping_simple_value)
            self.expect_node(mapping=True, simple_key=True)
        else:
            self.write_indicator(u'?', True, indention=True)
            self.states.append(self.expect_block_mapping_value)
            self.expect_node(mapping=True)
def expect_block_mapping_simple_value(self):
    """Emit the value of a simple block-mapping key.

    When the key used the explicit '?' style, the ':' is written by
    expect_block_mapping_value-style indentation instead.
    """
    if getattr(self.event, 'style', None) != '?':
        self.write_indicator(u':', False)
    self.states.append(self.expect_block_mapping_key)
    self.expect_node(mapping=True)
def expect_block_mapping_value(self):
    """Emit the value of an explicit ('?') block-mapping key on its own line."""
    self.write_indent()
    self.write_indicator(u':', True, indention=True)
    self.states.append(self.expect_block_mapping_key)
    self.expect_node(mapping=True)
# Checkers.
def check_empty_sequence(self):
    """Return True when the upcoming sequence is empty (start directly
    followed by its end in the look-ahead queue)."""
    return (isinstance(self.event, SequenceStartEvent) and self.events
            and isinstance(self.events[0], SequenceEndEvent))
def check_empty_mapping(self):
    """Return True when the upcoming mapping is empty (start directly
    followed by its end in the look-ahead queue)."""
    return (isinstance(self.event, MappingStartEvent) and self.events
            and isinstance(self.events[0], MappingEndEvent))
def check_empty_document(self):
    """Return True when the next document holds only an empty plain scalar."""
    if not isinstance(self.event, DocumentStartEvent) or not self.events:
        return False
    event = self.events[0]
    return (isinstance(event, ScalarEvent) and event.anchor is None
            and event.tag is None and event.implicit and
            event.value == u'')
def check_simple_key(self):
    """Return True when the current node may be written as a simple key.

    A simple key must fit on one line and be reasonably short (< 128
    characters including anchor/tag); the prepared anchor/tag/analysis
    are cached on self for reuse by the processors.
    """
    length = 0
    if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
        if self.prepared_anchor is None:
            self.prepared_anchor = self.prepare_anchor(self.event.anchor)
        length += len(self.prepared_anchor)
    if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
            and self.event.tag is not None:
        if self.prepared_tag is None:
            self.prepared_tag = self.prepare_tag(self.event.tag)
        length += len(self.prepared_tag)
    if isinstance(self.event, ScalarEvent):
        if self.analysis is None:
            self.analysis = self.analyze_scalar(self.event.value)
        length += len(self.analysis.scalar)
    # Aliases, short single-line scalars and empty collections qualify.
    return (length < 128 and (
        isinstance(self.event, AliasEvent)
        or (isinstance(self.event, ScalarEvent)
            and not self.analysis.empty and not self.analysis.multiline)
        or self.check_empty_sequence() or self.check_empty_mapping()))
# Anchor, Tag, and Scalar processors.
def process_anchor(self, indicator):
    """Write '&anchor' or '*anchor' (per *indicator*) for the current event.

    Uses and then clears the cached prepared anchor from check_simple_key.
    """
    if self.event.anchor is None:
        self.prepared_anchor = None
        return
    if self.prepared_anchor is None:
        self.prepared_anchor = self.prepare_anchor(self.event.anchor)
    if self.prepared_anchor:
        self.write_indicator(indicator+self.prepared_anchor, True)
    self.prepared_anchor = None
def process_tag(self):
    """Write the node's tag when it cannot be left implicit.

    For scalars the decision also depends on the chosen scalar style:
    a plain scalar may omit the tag only when implicit[0] holds, any
    other style only when implicit[1] holds.
    """
    tag = self.event.tag
    if isinstance(self.event, ScalarEvent):
        if self.style is None:
            self.style = self.choose_scalar_style()
        if ((not self.canonical or tag is None) and
            ((self.style == '' and self.event.implicit[0])
                or (self.style != '' and self.event.implicit[1]))):
            self.prepared_tag = None
            return
        # A scalar that would be resolved differently needs '!' to pin
        # the non-specific tag.
        if self.event.implicit[0] and tag is None:
            tag = u'!'
            self.prepared_tag = None
    else:
        if (not self.canonical or tag is None) and self.event.implicit:
            self.prepared_tag = None
            return
    if tag is None:
        raise EmitterError("tag is not specified")
    if self.prepared_tag is None:
        self.prepared_tag = self.prepare_tag(tag)
    if self.prepared_tag:
        self.write_indicator(self.prepared_tag, True)
    self.prepared_tag = None
def choose_scalar_style(self):
    """Pick the output style for the current scalar.

    Returns '' (plain), '\\'' (single), '"' (double), '|' or '>',
    honouring the requested style when the analyzed content allows it;
    double-quoted is the universal fallback.
    """
    if self.analysis is None:
        self.analysis = self.analyze_scalar(self.event.value)
    if self.event.style == '"' or self.canonical:
        return '"'
    # Plain style: only when resolvable implicitly and allowed in this context.
    if (not self.event.style or self.event.style == '?') and \
            self.event.implicit[0]:
        if (not (self.simple_key_context and
                 (self.analysis.empty or self.analysis.multiline))
            and (self.flow_level and self.analysis.allow_flow_plain
                 or (not self.flow_level and
                     self.analysis.allow_block_plain))):
            return ''
    # Block styles are impossible inside flow context or a simple key.
    if self.event.style and self.event.style in '|>':
        if (not self.flow_level and not self.simple_key_context
                and self.analysis.allow_block):
            return self.event.style
    if not self.event.style or self.event.style == '\'':
        if (self.analysis.allow_single_quoted and
                not (self.simple_key_context and self.analysis.multiline)):
            return '\''
    return '"'
def process_scalar(self):
    """Write the current scalar in the style chosen by choose_scalar_style."""
    if self.analysis is None:
        self.analysis = self.analyze_scalar(self.event.value)
    if self.style is None:
        self.style = self.choose_scalar_style()
    # Scalars used as simple keys must stay on one line: no splitting.
    split = (not self.simple_key_context)
    # if self.analysis.multiline and split    \
    #         and (not self.style or self.style in '\'\"'):
    #     self.write_indent()
    if self.style == '"':
        self.write_double_quoted(self.analysis.scalar, split)
    elif self.style == '\'':
        self.write_single_quoted(self.analysis.scalar, split)
    elif self.style == '>':
        self.write_folded(self.analysis.scalar)
    elif self.style == '|':
        self.write_literal(self.analysis.scalar)
    else:
        self.write_plain(self.analysis.scalar, split)
    # Drop the per-scalar caches so the next node starts fresh.
    self.analysis = None
    self.style = None
    if self.event.comment:
        self.write_post_comment(self.event)
# Analyzers.
def prepare_version(self, version):
    """Render a (major, minor) version pair as the %YAML directive text."""
    major, minor = version
    if major == 1:
        return u'%d.%d' % (major, minor)
    # This emitter only knows how to write YAML 1.x documents.
    raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
def prepare_tag_handle(self, handle):
    """Validate a tag handle ('!', '!!' or '!name!') and return it unchanged."""
    if not handle:
        raise EmitterError("tag handle must not be empty")
    if handle[0] != u'!' or handle[-1] != u'!':
        raise EmitterError("tag handle must start and end with '!': %r"
                           % (utf8(handle)))
    # Everything between the bangs must be ASCII alphanumeric, '-' or '_'.
    for ch in handle[1:-1]:
        valid = (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z'
                 or u'a' <= ch <= u'z' or ch in u'-_')
        if not valid:
            raise EmitterError("invalid character %r in the tag handle: %r"
                               % (utf8(ch), utf8(handle)))
    return handle
def prepare_tag_prefix(self, prefix):
    """Return *prefix* with characters outside the allowed set %-escaped.

    A leading '!' is always kept verbatim; other characters must be URI
    characters, otherwise each UTF-8 byte is written as %XX.
    """
    if not prefix:
        raise EmitterError("tag prefix must not be empty")
    chunks = []
    start = end = 0
    if prefix[0] == u'!':
        end = 1
    while end < len(prefix):
        ch = prefix[end]
        if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
                or ch in u'-;/?!:@&=+$,_.~*\'()[]':
            end += 1
        else:
            # Flush the allowed run, then escape this character's bytes.
            if start < end:
                chunks.append(prefix[start:end])
            start = end = end+1
            data = utf8(ch)
            for ch in data:
                chunks.append(u'%%%02X' % ord(ch))
    if start < end:
        chunks.append(prefix[start:end])
    return u''.join(chunks)
def prepare_tag(self, tag):
    """Return the emitted form of *tag*: 'handle!suffix' or verbatim '!<...>'.

    Chooses the longest registered prefix that matches; the remaining
    suffix has disallowed characters %-escaped per UTF-8 byte.
    """
    if not tag:
        raise EmitterError("tag must not be empty")
    # The non-specific tag '!' is written as-is.
    if tag == u'!':
        return tag
    handle = None
    suffix = tag
    prefixes = sorted(self.tag_prefixes.keys())
    for prefix in prefixes:
        if tag.startswith(prefix) \
                and (prefix == u'!' or len(prefix) < len(tag)):
            handle = self.tag_prefixes[prefix]
            suffix = tag[len(prefix):]
    chunks = []
    start = end = 0
    while end < len(suffix):
        ch = suffix[end]
        if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
                or ch in u'-;/?:@&=+$,_.~*\'()[]' \
                or (ch == u'!' and handle != u'!'):
            end += 1
        else:
            if start < end:
                chunks.append(suffix[start:end])
            start = end = end+1
            data = utf8(ch)
            for ch in data:
                chunks.append(u'%%%02X' % ord(ch))
    if start < end:
        chunks.append(suffix[start:end])
    suffix_text = u''.join(chunks)
    if handle:
        return u'%s%s' % (handle, suffix_text)
    else:
        # No matching prefix: emit the verbatim tag form.
        return u'!<%s>' % suffix_text
def prepare_anchor(self, anchor):
    """Validate an anchor name (ASCII alphanumerics, '-' and '_') and return it."""
    if not anchor:
        raise EmitterError("anchor must not be empty")
    for ch in anchor:
        ok = (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z'
              or u'a' <= ch <= u'z' or ch in u'-_')
        if not ok:
            raise EmitterError("invalid character %r in the anchor: %r"
                               % (utf8(ch), utf8(anchor)))
    return anchor
def analyze_scalar(self, scalar):
    """Analyze *scalar* and return a ScalarAnalysis describing which
    output styles (plain/single/double/block) may represent it."""
    # Empty scalar is a special case.
    if not scalar:
        return ScalarAnalysis(
            scalar=scalar, empty=True, multiline=False,
            allow_flow_plain=False, allow_block_plain=True,
            allow_single_quoted=True, allow_double_quoted=True,
            allow_block=False)
    # Indicators and special characters.
    block_indicators = False
    flow_indicators = False
    line_breaks = False
    special_characters = False
    # Important whitespace combinations.
    leading_space = False
    leading_break = False
    trailing_space = False
    trailing_break = False
    break_space = False
    space_break = False
    # Check document indicators.
    if scalar.startswith(u'---') or scalar.startswith(u'...'):
        block_indicators = True
        flow_indicators = True
    # First character or preceded by a whitespace.
    preceeded_by_whitespace = True
    # Last character or followed by a whitespace.
    followed_by_whitespace = (len(scalar) == 1 or
                              scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
    # The previous character is a space.
    previous_space = False
    # The previous character is a break.
    previous_break = False
    index = 0
    while index < len(scalar):
        ch = scalar[index]
        # Check for indicators.
        if index == 0:
            # Leading indicators are special characters.
            if ch in u'#,[]{}&*!|>\'\"%@`':
                flow_indicators = True
                block_indicators = True
            if ch in u'?:':
                flow_indicators = True
                if followed_by_whitespace:
                    block_indicators = True
            if ch == u'-' and followed_by_whitespace:
                flow_indicators = True
                block_indicators = True
        else:
            # Some indicators cannot appear within a scalar as well.
            if ch in u',?[]{}':
                flow_indicators = True
            if ch == u':':
                flow_indicators = True
                if followed_by_whitespace:
                    block_indicators = True
            if ch == u'#' and preceeded_by_whitespace:
                flow_indicators = True
                block_indicators = True
        # Check for line breaks, special, and unicode characters.
        if ch in u'\n\x85\u2028\u2029':
            line_breaks = True
        if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
            if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
                    or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
                # NOTE(review): unicode_characters is set but never read
                # afterwards — this flag appears to be dead.
                unicode_characters = True
                if not self.allow_unicode:
                    special_characters = True
            else:
                special_characters = True
        # Detect important whitespace combinations.
        if ch == u' ':
            if index == 0:
                leading_space = True
            if index == len(scalar)-1:
                trailing_space = True
            if previous_break:
                break_space = True
            previous_space = True
            previous_break = False
        elif ch in u'\n\x85\u2028\u2029':
            if index == 0:
                leading_break = True
            if index == len(scalar)-1:
                trailing_break = True
            if previous_space:
                space_break = True
            previous_space = False
            previous_break = True
        else:
            previous_space = False
            previous_break = False
        # Prepare for the next character.
        index += 1
        preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
        followed_by_whitespace = (
            index+1 >= len(scalar) or
            scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
    # Let's decide what styles are allowed.
    allow_flow_plain = True
    allow_block_plain = True
    allow_single_quoted = True
    allow_double_quoted = True
    allow_block = True
    # Leading and trailing whitespaces are bad for plain scalars.
    if (leading_space or leading_break
            or trailing_space or trailing_break):
        allow_flow_plain = allow_block_plain = False
    # We do not permit trailing spaces for block scalars.
    if trailing_space:
        allow_block = False
    # Spaces at the beginning of a new line are only acceptable for block
    # scalars.
    if break_space:
        allow_flow_plain = allow_block_plain = allow_single_quoted = False
    # Spaces followed by breaks, as well as special character are only
    # allowed for double quoted scalars.
    if space_break or special_characters:
        allow_flow_plain = allow_block_plain = \
            allow_single_quoted = allow_block = False
    # Although the plain scalar writer supports breaks, we never emit
    # multiline plain scalars.
    if line_breaks:
        allow_flow_plain = allow_block_plain = False
    # Flow indicators are forbidden for flow plain scalars.
    if flow_indicators:
        allow_flow_plain = False
    # Block indicators are forbidden for block plain scalars.
    if block_indicators:
        allow_block_plain = False
    return ScalarAnalysis(scalar=scalar,
                          empty=False, multiline=line_breaks,
                          allow_flow_plain=allow_flow_plain,
                          allow_block_plain=allow_block_plain,
                          allow_single_quoted=allow_single_quoted,
                          allow_double_quoted=allow_double_quoted,
                          allow_block=allow_block)
# Writers.
def flush_stream(self):
    """Flush the output stream when it exposes a flush() method."""
    stream = self.stream
    if hasattr(stream, 'flush'):
        stream.flush()
def write_stream_start(self):
    # Write BOM if needed.
    # Only UTF-16 output requires an explicit byte-order mark.
    if self.encoding and self.encoding.startswith('utf-16'):
        self.stream.write(u'\uFEFF'.encode(self.encoding))
def write_stream_end(self):
    """Finish the stream; nothing to write, just flush pending output."""
    self.flush_stream()
def write_indicator(self, indicator, need_whitespace,
                    whitespace=False, indention=False):
    """Write a syntactic indicator, inserting a separating space if needed.

    *whitespace*/*indention* record whether the cursor is left after
    whitespace / still at a valid indentation point.
    """
    if self.whitespace or not need_whitespace:
        data = indicator
    else:
        data = u' '+indicator
    self.whitespace = whitespace
    self.indention = self.indention and indention
    self.column += len(data)
    # Any output after '...' means the document is no longer open-ended.
    self.open_ended = False
    if self.encoding:
        data = data.encode(self.encoding)
    self.stream.write(data)
def write_indent(self):
    """Move to the current indentation level, breaking the line if required."""
    indent = self.indent or 0
    if not self.indention or self.column > indent \
            or (self.column == indent and not self.whitespace):
        self.write_line_break()
    if self.column < indent:
        # Pad with spaces up to the indentation column.
        self.whitespace = True
        data = u' '*(indent-self.column)
        self.column = indent
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
def write_line_break(self, data=None):
    """Write a line break (*data*, or the configured best_line_break)."""
    if data is None:
        data = self.best_line_break
    # After a break the cursor sits at column 0, on fresh whitespace.
    self.whitespace = True
    self.indention = True
    self.line += 1
    self.column = 0
    if self.encoding:
        data = data.encode(self.encoding)
    self.stream.write(data)
def write_version_directive(self, version_text):
    """Write a '%YAML <version>' directive line."""
    data = u'%%YAML %s' % version_text
    if self.encoding:
        data = data.encode(self.encoding)
    self.stream.write(data)
    self.write_line_break()
def write_tag_directive(self, handle_text, prefix_text):
    """Write a '%TAG <handle> <prefix>' directive line."""
    data = u'%%TAG %s %s' % (handle_text, prefix_text)
    if self.encoding:
        data = data.encode(self.encoding)
    self.stream.write(data)
    self.write_line_break()
# Scalar streams.
def write_single_quoted(self, text, split=True):
    """Write *text* as a single-quoted scalar, folding long lines at spaces.

    Runs of spaces, runs of breaks and plain text are flushed separately;
    a single quote inside the text is escaped by doubling it.
    """
    self.write_indicator(u'\'', True)
    spaces = False
    breaks = False
    start = end = 0
    while end <= len(text):
        ch = None
        if end < len(text):
            ch = text[end]
        if spaces:
            # End of a run of spaces: fold the line here when too wide,
            # otherwise write the spaces verbatim.
            if ch is None or ch != u' ':
                if start+1 == end and self.column > self.best_width and split \
                        and start != 0 and end != len(text):
                    self.write_indent()
                else:
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                start = end
        elif breaks:
            # End of a run of breaks: an extra break is written because
            # folding eats one newline.
            if ch is None or ch not in u'\n\x85\u2028\u2029':
                if text[start] == u'\n':
                    self.write_line_break()
                for br in text[start:end]:
                    if br == u'\n':
                        self.write_line_break()
                    else:
                        self.write_line_break(br)
                self.write_indent()
                start = end
        else:
            # Plain segment: flush it before a space/break/quote boundary.
            if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
                if start < end:
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                    start = end
        if ch == u'\'':
            # Escape the quote by doubling it.
            data = u'\'\''
            self.column += 2
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
            start = end + 1
        if ch is not None:
            spaces = (ch == u' ')
            breaks = (ch in u'\n\x85\u2028\u2029')
        end += 1
    self.write_indicator(u'\'', False)
# Characters that must be written as short backslash escapes inside
# double-quoted scalars (the YAML escape set); anything else outside the
# printable range falls back to \xXX/\uXXXX/\UXXXXXXXX in
# write_double_quoted.
ESCAPE_REPLACEMENTS = {
    u'\0':      u'0',
    u'\x07':    u'a',
    u'\x08':    u'b',
    u'\x09':    u't',
    u'\x0A':    u'n',
    u'\x0B':    u'v',
    u'\x0C':    u'f',
    u'\x0D':    u'r',
    u'\x1B':    u'e',
    u'\"':      u'\"',
    u'\\':      u'\\',
    u'\x85':    u'N',
    u'\xA0':    u'_',
    u'\u2028':  u'L',
    u'\u2029':  u'P',
}
def write_double_quoted(self, text, split=True):
    """Write *text* as a double-quoted scalar, escaping as needed.

    Unprintable/disallowed characters are escaped via
    ESCAPE_REPLACEMENTS or \\xXX/\\uXXXX/\\UXXXXXXXX; long lines are
    split with a trailing '\\' continuation when *split* is True.
    """
    self.write_indicator(u'"', True)
    start = end = 0
    while end <= len(text):
        ch = None
        if end < len(text):
            ch = text[end]
        # Characters needing an escape end the current verbatim run.
        if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
                or not (u'\x20' <= ch <= u'\x7E'
                        or (self.allow_unicode
                            and (u'\xA0' <= ch <= u'\uD7FF'
                                 or u'\uE000' <= ch <= u'\uFFFD'))):
            if start < end:
                data = text[start:end]
                self.column += len(data)
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                start = end
            if ch is not None:
                if ch in self.ESCAPE_REPLACEMENTS:
                    data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
                elif ch <= u'\xFF':
                    data = u'\\x%02X' % ord(ch)
                elif ch <= u'\uFFFF':
                    data = u'\\u%04X' % ord(ch)
                else:
                    data = u'\\U%08X' % ord(ch)
                self.column += len(data)
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                start = end+1
        # Fold an over-long line with a '\' continuation; a following
        # leading space must itself be escaped so it survives folding.
        if 0 < end < len(text)-1 and (ch == u' ' or start >= end)   \
                and self.column+(end-start) > self.best_width and split:
            data = text[start:end]+u'\\'
            if start < end:
                start = end
            self.column += len(data)
            if self.encoding:
                data = data.encode(self.encoding)
            self.stream.write(data)
            self.write_indent()
            self.whitespace = False
            self.indention = False
            if text[start] == u' ':
                data = u'\\'
                self.column += len(data)
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
        end += 1
    self.write_indicator(u'"', False)
def determine_block_hints(self, text):
    """Compute the indentation/chomping hints for a block scalar header."""
    if not text:
        return u''
    hints = u''
    # A leading space or break forces an explicit indentation indicator.
    if text[0] in u' \n\x85\u2028\u2029':
        hints += text_type(self.best_indent)
    if text[-1] not in u'\n\x85\u2028\u2029':
        # No trailing break: request strip chomping.
        hints += u'-'
    elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
        # Multiple trailing breaks (or lone break): request keep chomping.
        hints += u'+'
    return hints
def write_folded(self, text):
    """Write *text* as a folded ('>') block scalar."""
    hints = self.determine_block_hints(text)
    self.write_indicator(u'>'+hints, True)
    # Keep chomping ('+') leaves the document open-ended.
    if hints[-1:] == u'+':
        self.open_ended = True
    self.write_line_break()
    leading_space = True
    spaces = False
    breaks = True
    start = end = 0
    while end <= len(text):
        ch = None
        if end < len(text):
            ch = text[end]
        if breaks:
            # Flush a run of breaks; an extra break compensates folding.
            if ch is None or ch not in u'\n\x85\u2028\u2029':
                if not leading_space and ch is not None and ch != u' '  \
                        and text[start] == u'\n':
                    self.write_line_break()
                leading_space = (ch == u' ')
                for br in text[start:end]:
                    if br == u'\n':
                        self.write_line_break()
                    else:
                        self.write_line_break(br)
                if ch is not None:
                    self.write_indent()
                start = end
        elif spaces:
            # Flush a run of spaces, folding the line when too wide.
            if ch != u' ':
                if start+1 == end and self.column > self.best_width:
                    self.write_indent()
                else:
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                start = end
        else:
            # Flush a plain text segment at a space/break boundary.
            if ch is None or ch in u' \n\x85\u2028\u2029':
                data = text[start:end]
                self.column += len(data)
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                if ch is None:
                    self.write_line_break()
                start = end
        if ch is not None:
            breaks = (ch in u'\n\x85\u2028\u2029')
            spaces = (ch == u' ')
        end += 1
def write_literal(self, text):
    """Write *text* as a literal ('|') block scalar, preserving all breaks."""
    hints = self.determine_block_hints(text)
    self.write_indicator(u'|'+hints, True)
    # Keep chomping ('+') leaves the document open-ended.
    if hints[-1:] == u'+':
        self.open_ended = True
    self.write_line_break()
    breaks = True
    start = end = 0
    while end <= len(text):
        ch = None
        if end < len(text):
            ch = text[end]
        if breaks:
            # Flush a run of line breaks verbatim.
            if ch is None or ch not in u'\n\x85\u2028\u2029':
                for br in text[start:end]:
                    if br == u'\n':
                        self.write_line_break()
                    else:
                        self.write_line_break(br)
                if ch is not None:
                    self.write_indent()
                start = end
        else:
            # Flush a text line up to the next break (no folding in '|').
            if ch is None or ch in u'\n\x85\u2028\u2029':
                data = text[start:end]
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                if ch is None:
                    self.write_line_break()
                start = end
        if ch is not None:
            breaks = (ch in u'\n\x85\u2028\u2029')
        end += 1
def write_plain(self, text, split=True):
    """Write *text* as a plain (unquoted) scalar, folding at spaces.

    An empty text writes nothing; a separating space is emitted first
    when the cursor is not already on whitespace.
    """
    if self.root_context:
        # A plain scalar at the document root may need '...' later.
        self.open_ended = True
    if not text:
        return
    if not self.whitespace:
        data = u' '
        self.column += len(data)
        if self.encoding:
            data = data.encode(self.encoding)
        self.stream.write(data)
    self.whitespace = False
    self.indention = False
    spaces = False
    breaks = False
    start = end = 0
    while end <= len(text):
        ch = None
        if end < len(text):
            ch = text[end]
        if spaces:
            # Flush a run of spaces, folding the line when too wide.
            if ch != u' ':
                if start+1 == end and self.column > self.best_width \
                        and split:
                    self.write_indent()
                    self.whitespace = False
                    self.indention = False
                else:
                    data = text[start:end]
                    self.column += len(data)
                    if self.encoding:
                        data = data.encode(self.encoding)
                    self.stream.write(data)
                start = end
        elif breaks:
            # Flush a run of breaks; an extra break compensates folding.
            if ch not in u'\n\x85\u2028\u2029':
                if text[start] == u'\n':
                    self.write_line_break()
                for br in text[start:end]:
                    if br == u'\n':
                        self.write_line_break()
                    else:
                        self.write_line_break(br)
                self.write_indent()
                self.whitespace = False
                self.indention = False
                start = end
        else:
            # Flush a plain text segment at a space/break boundary.
            if ch is None or ch in u' \n\x85\u2028\u2029':
                data = text[start:end]
                self.column += len(data)
                if self.encoding:
                    data = data.encode(self.encoding)
                self.stream.write(data)
                start = end
        if ch is not None:
            spaces = (ch == u' ')
            breaks = (ch in u'\n\x85\u2028\u2029')
        end += 1
def write_comment(self, comment):
    """Write an end-of-line comment at (or after) its original column.

    The comment is placed at its recorded start column when that lies
    beyond the current cursor; otherwise it is pushed to one space past
    the current column.
    """
    value = comment.value
    # Strip a single trailing newline; write_line_break() re-adds it.
    if value[-1] == '\n':
        value = value[:-1]
    try:
        # get original column position
        col = comment.start_mark.column
        if col < self.column + 1:
            # BUG FIX: this was a bare `ValueError` expression (a no-op),
            # so the fallback below could never trigger and the comment
            # could be written with no padding (or even mis-positioned).
            raise ValueError
    except ValueError:
        col = self.column + 1
    # print('post_comment', self.line, self.column, value)
    self.stream.write(' ' * (col - self.column) + value)
    self.write_line_break()
def write_pre_comment(self, event):
    """Write the comments that precede *event*, each on its own line.

    Comments attached to a MappingStartEvent are marked ``pre_done`` so
    they are emitted only once even if the event is visited again.
    """
    comments = event.comment[1]
    try:
        for comment in comments:
            if isinstance(event, MappingStartEvent) and \
                    getattr(comment, 'pre_done', None):
                continue
            if self.column != 0:
                self.write_line_break()
            self.write_comment(comment)
            if isinstance(event, MappingStartEvent):
                comment.pre_done = True
    except TypeError:
        # comment[1] was not iterable. The original code printed a debug
        # line to stdout here; that library-level print has been removed —
        # the re-raised TypeError carries the same information.
        raise
def write_post_comment(self, event):
    """Write the comment that follows *event*, if any."""
    # CONSISTENCY FIX: guard on the event that was passed in. The original
    # read self.event.comment[0] but then wrote event.comment[0], which is
    # wrong whenever a caller passes an event other than self.event.
    if event.comment[0] is None:
        return
    comment = event.comment[0]
    self.write_comment(comment)
| |
"""Support for NuHeat thermostats."""
from datetime import datetime, timedelta
import logging
import time
from nuheat.config import SCHEDULE_HOLD, SCHEDULE_RUN, SCHEDULE_TEMPORARY_HOLD
from nuheat.util import (
celsius_to_nuheat,
fahrenheit_to_nuheat,
nuheat_to_celsius,
nuheat_to_fahrenheit,
)
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.helpers import event as event_helper
from homeassistant.util import Throttle
from .const import (
DOMAIN,
MANUFACTURER,
NUHEAT_API_STATE_SHIFT_DELAY,
NUHEAT_DATETIME_FORMAT,
NUHEAT_KEY_HOLD_SET_POINT_DATE_TIME,
NUHEAT_KEY_SCHEDULE_MODE,
NUHEAT_KEY_SET_POINT_TEMP,
TEMP_HOLD_TIME_SEC,
)
_LOGGER = logging.getLogger(__name__)

# Minimum interval between polls of the NuHeat API (used with @Throttle).
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)

# The device does not have an off function.
# To turn it off set to min_temp and PRESET_PERMANENT_HOLD
OPERATION_LIST = [HVAC_MODE_AUTO, HVAC_MODE_HEAT]

# Human-readable preset names exposed to Home Assistant.
PRESET_RUN = "Run Schedule"
PRESET_TEMPORARY_HOLD = "Temporary Hold"
PRESET_PERMANENT_HOLD = "Permanent Hold"

PRESET_MODES = [PRESET_RUN, PRESET_TEMPORARY_HOLD, PRESET_PERMANENT_HOLD]

# Map HA preset names onto nuheat library schedule-mode constants.
PRESET_MODE_TO_SCHEDULE_MODE_MAP = {
    PRESET_RUN: SCHEDULE_RUN,
    PRESET_TEMPORARY_HOLD: SCHEDULE_TEMPORARY_HOLD,
    PRESET_PERMANENT_HOLD: SCHEDULE_HOLD,
}

# Inverse mapping, used when reporting the current preset.
SCHEDULE_MODE_TO_PRESET_MODE_MAP = {
    value: key for key, value in PRESET_MODE_TO_SCHEDULE_MODE_MAP.items()
}

SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the NuHeat thermostat(s)."""
    api, serial_number = hass.data[DOMAIN][config_entry.entry_id]
    temperature_unit = hass.config.units.temperature_unit
    # get_thermostat performs blocking network I/O: run it in the executor.
    thermostat = await hass.async_add_executor_job(api.get_thermostat, serial_number)
    entity = NuHeatThermostat(thermostat, temperature_unit)
    # No longer need a service as set_hvac_mode to auto does this
    # since climate 1.0 has been implemented
    async_add_entities([entity], True)
class NuHeatThermostat(ClimateEntity):
"""Representation of a NuHeat Thermostat."""
def __init__(self, thermostat, temperature_unit):
    """Initialize the thermostat."""
    # nuheat library thermostat object (holds current readings and
    # performs API calls through its properties).
    self._thermostat = thermostat
    # "C" or "F", taken from the Home Assistant unit system.
    self._temperature_unit = temperature_unit
    # Cached schedule mode and set point, refreshed on update.
    self._schedule_mode = None
    self._target_temperature = None
    # presumably used to bypass the update Throttle after a write —
    # the consumer is outside this view; confirm against the full file.
    self._force_update = False
@property
def name(self):
    """Return the name of the thermostat (the room it is installed in)."""
    return self._thermostat.room
@property
def supported_features(self):
    """Return the list of supported features."""
    return SUPPORT_FLAGS
@property
def temperature_unit(self):
    """Return the unit of measurement."""
    celsius = self._temperature_unit == "C"
    return TEMP_CELSIUS if celsius else TEMP_FAHRENHEIT
@property
def current_temperature(self):
    """Return the current temperature in the configured unit."""
    stat = self._thermostat
    return stat.celsius if self._temperature_unit == "C" else stat.fahrenheit
@property
def unique_id(self):
    """Return the unique id (the device serial number)."""
    return self._thermostat.serial_number
@property
def available(self):
    """Return True when the thermostat is reachable (online)."""
    return self._thermostat.online
def set_hvac_mode(self, hvac_mode):
    """Set the system mode."""
    # Map HA modes onto NuHeat schedule modes; other modes are ignored.
    if hvac_mode == HVAC_MODE_HEAT:
        self._set_schedule_mode(SCHEDULE_HOLD)
    elif hvac_mode == HVAC_MODE_AUTO:
        self._set_schedule_mode(SCHEDULE_RUN)
@property
def hvac_mode(self):
    """Return current setting heat or auto."""
    # Any hold (temporary or permanent) is reported as HEAT.
    if self._schedule_mode in (SCHEDULE_TEMPORARY_HOLD, SCHEDULE_HOLD):
        return HVAC_MODE_HEAT
    return HVAC_MODE_AUTO
@property
def hvac_action(self):
    """Return current operation heat or idle."""
    return CURRENT_HVAC_HEAT if self._thermostat.heating else CURRENT_HVAC_IDLE
@property
def min_temp(self):
    """Return the minimum supported temperature for the thermostat."""
    if self._temperature_unit == "C":
        return self._thermostat.min_celsius
    return self._thermostat.min_fahrenheit
@property
def max_temp(self):
    """Return the maximum supported temperature for the thermostat."""
    if self._temperature_unit == "C":
        return self._thermostat.max_celsius
    return self._thermostat.max_fahrenheit
@property
def target_temperature(self):
"""Return the currently programmed temperature."""
if self._temperature_unit == "C":
return nuheat_to_celsius(self._target_temperature)
return nuheat_to_fahrenheit(self._target_temperature)
@property
def preset_mode(self):
"""Return current preset mode."""
return SCHEDULE_MODE_TO_PRESET_MODE_MAP.get(self._schedule_mode, PRESET_RUN)
@property
def preset_modes(self):
"""Return available preset modes."""
return PRESET_MODES
@property
def hvac_modes(self):
"""Return list of possible operation modes."""
return OPERATION_LIST
def set_preset_mode(self, preset_mode):
"""Update the hold mode of the thermostat."""
self._set_schedule_mode(
PRESET_MODE_TO_SCHEDULE_MODE_MAP.get(preset_mode, SCHEDULE_RUN)
)
def _set_schedule_mode(self, schedule_mode):
"""Set a schedule mode."""
self._schedule_mode = schedule_mode
# Changing the property here does the actual set
self._thermostat.schedule_mode = schedule_mode
self._schedule_update()
def set_temperature(self, **kwargs):
"""Set a new target temperature."""
self._set_temperature_and_mode(
kwargs.get(ATTR_TEMPERATURE), hvac_mode=kwargs.get(ATTR_HVAC_MODE)
)
def _set_temperature_and_mode(self, temperature, hvac_mode=None, preset_mode=None):
"""Set temperature and hvac mode at the same time."""
if self._temperature_unit == "C":
target_temperature = celsius_to_nuheat(temperature)
else:
target_temperature = fahrenheit_to_nuheat(temperature)
# If they set a temperature without changing the mode
# to heat, we behave like the device does locally
# and set a temp hold.
target_schedule_mode = SCHEDULE_TEMPORARY_HOLD
if preset_mode:
target_schedule_mode = PRESET_MODE_TO_SCHEDULE_MODE_MAP.get(
preset_mode, SCHEDULE_RUN
)
elif self._schedule_mode == SCHEDULE_HOLD or (
hvac_mode and hvac_mode == HVAC_MODE_HEAT
):
target_schedule_mode = SCHEDULE_HOLD
_LOGGER.debug(
"Setting NuHeat thermostat temperature to %s %s and schedule mode: %s",
temperature,
self.temperature_unit,
target_schedule_mode,
)
target_temperature = max(
min(self._thermostat.max_temperature, target_temperature),
self._thermostat.min_temperature,
)
request = {
NUHEAT_KEY_SET_POINT_TEMP: target_temperature,
NUHEAT_KEY_SCHEDULE_MODE: target_schedule_mode,
}
if target_schedule_mode == SCHEDULE_TEMPORARY_HOLD:
request[NUHEAT_KEY_HOLD_SET_POINT_DATE_TIME] = datetime.fromtimestamp(
time.time() + TEMP_HOLD_TIME_SEC
).strftime(NUHEAT_DATETIME_FORMAT)
self._thermostat.set_data(request)
self._schedule_mode = target_schedule_mode
self._target_temperature = target_temperature
self._schedule_update()
def _schedule_update(self):
if not self.hass:
return
# Update the new state
self.schedule_update_ha_state(False)
# nuheat has a delay switching state
# so we schedule a poll of the api
# in the future to make sure the change actually
# took effect
event_helper.call_later(
self.hass, NUHEAT_API_STATE_SHIFT_DELAY, self._schedule_force_refresh
)
def _schedule_force_refresh(self, _):
self._force_update = True
self.schedule_update_ha_state(True)
def update(self):
"""Get the latest state from the thermostat."""
if self._force_update:
self._throttled_update(no_throttle=True)
self._force_update = False
else:
self._throttled_update()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def _throttled_update(self, **kwargs):
"""Get the latest state from the thermostat with a throttle."""
self._thermostat.get_data()
self._schedule_mode = self._thermostat.schedule_mode
self._target_temperature = self._thermostat.target_temperature
@property
def device_info(self):
"""Return the device_info of the device."""
return {
"identifiers": {(DOMAIN, self._thermostat.serial_number)},
"name": self._thermostat.room,
"model": "nVent Signature",
"manufacturer": MANUFACTURER,
}
| |
# Copyright 2018 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest
import time
class TestFSx(BaseTest):
    """Tests for the ``fsx`` resource, its filters and actions.

    All AWS traffic is replayed from recorded flight data.  Fixed:
    ``assertTrue(len(x), 1)`` treats the second argument as a message and
    ignores it — replaced with ``assertEqual`` throughout.
    """
    def test_fsx_resource(self):
        """A tag filter selects matching file systems."""
        session_factory = self.replay_flight_data('test_fsx_resource')
        p = self.load_policy(
            {
                'name': 'test-fsx',
                'resource': 'fsx',
                'filters': [
                    {
                        'tag:Name': 'test'
                    }
                ]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertTrue(len(resources))
    def test_fsx_tag_resource(self):
        """The tag action adds the requested tag to the file system."""
        session_factory = self.replay_flight_data('test_fsx_tag_resource')
        p = self.load_policy(
            {
                'name': 'test-fsx',
                'resource': 'fsx',
                'filters': [
                    {
                        'tag:Name': 'test'
                    }
                ],
                'actions': [
                    {
                        'type': 'tag',
                        'key': 'test',
                        'value': 'test-value'
                    }
                ]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertTrue(len(resources))
        client = session_factory().client('fsx')
        tags = client.list_tags_for_resource(ResourceARN=resources[0]['ResourceARN'])
        self.assertTrue([t for t in tags['Tags'] if t['Key'] == 'test'])
    def test_fsx_remove_tag_resource(self):
        """remove-tag strips the listed tags, leaving only Name."""
        session_factory = self.replay_flight_data('test_fsx_remove_tag_resource')
        p = self.load_policy(
            {
                'name': 'test-fsx',
                'resource': 'fsx',
                'filters': [
                    {
                        'tag:Name': 'test'
                    }
                ],
                'actions': [
                    {
                        'type': 'remove-tag',
                        'tags': [
                            'maid_status',
                            'test'
                        ],
                    }
                ]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertTrue(len(resources))
        client = session_factory().client('fsx')
        tags = client.list_tags_for_resource(ResourceARN=resources[0]['ResourceARN'])
        self.assertFalse([t for t in tags['Tags'] if t['Key'] != 'Name'])
    def test_fsx_mark_for_op_resource(self):
        """mark-for-op records a maid_status tag on the file system."""
        session_factory = self.replay_flight_data('test_fsx_mark_for_op_resource')
        p = self.load_policy(
            {
                'name': 'test-fsx',
                'resource': 'fsx',
                'filters': [
                    {
                        'tag:Name': 'test'
                    }
                ],
                'actions': [
                    {
                        'type': 'mark-for-op',
                        'op': 'tag'
                    }
                ]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertTrue(len(resources))
        client = session_factory().client('fsx')
        tags = client.list_tags_for_resource(ResourceARN=resources[0]['ResourceARN'])
        self.assertTrue([t for t in tags['Tags'] if t['Key'] == 'maid_status'])
    def test_fsx_update_configuration(self):
        """The update action changes AutomaticBackupRetentionDays."""
        session_factory = self.replay_flight_data('test_fsx_update_configuration')
        p = self.load_policy(
            {
                'name': 'test-update-fsx-configuration',
                'resource': 'fsx',
                'filters': [
                    {
                        'WindowsConfiguration.AutomaticBackupRetentionDays': 1
                    }
                ],
                'actions': [
                    {
                        'type': 'update',
                        'WindowsConfiguration': {
                            'AutomaticBackupRetentionDays': 3
                        }
                    }
                ]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        client = session_factory().client('fsx')
        new_resources = client.describe_file_systems()['FileSystems']
        # Check the freshly described state; the original assertion
        # re-checked ``resources``, which was already verified above.
        self.assertEqual(len(new_resources), 1)
        self.assertEqual(
            new_resources[0]['FileSystemId'],
            resources[0]['FileSystemId']
        )
        self.assertEqual(
            new_resources[0]['WindowsConfiguration']['AutomaticBackupRetentionDays'], 3)
    def test_fsx_create_bad_backup(self):
        """A failed backup request yields no USER_INITIATED backups."""
        session_factory = self.replay_flight_data('test_fsx_create_backup_with_errors')
        p = self.load_policy(
            {
                'name': 'test-update-fsx-configuration',
                'resource': 'fsx',
                'filters': [
                    {
                        'FileSystemId': 'fs-0bc98cbfb6b356896'
                    }
                ],
                'actions': [
                    {
                        'type': 'backup',
                        'tags': {
                            'test-tag': 'backup-tag'
                        }
                    }
                ]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        client = session_factory().client('fsx')
        backups = client.describe_backups(
            Filters=[
                {
                    'Name': 'file-system-id',
                    'Values': ['fs-0bc98cbfb6b356896']
                },
                {
                    'Name': 'backup-type',
                    'Values': ['USER_INITIATED']
                }
            ]
        )
        self.assertEqual(len(backups['Backups']), 0)
    def test_fsx_create_backup(self):
        """backup with copy-tags merges resource tags with action tags."""
        session_factory = self.replay_flight_data('test_fsx_create_backup')
        p = self.load_policy(
            {
                'name': 'test-update-fsx-configuration',
                'resource': 'fsx',
                'filters': [
                    {
                        'FileSystemId': 'fs-002ccbccdcf032728'
                    }
                ],
                'actions': [
                    {
                        'type': 'backup',
                        'copy-tags': True,
                        'tags': {
                            'test-tag': 'backup-tag'
                        }
                    }
                ]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        client = session_factory().client('fsx')
        if self.recording:
            time.sleep(500)
        backups = client.describe_backups(
            Filters=[
                {
                    'Name': 'file-system-id',
                    'Values': ['fs-002ccbccdcf032728']
                },
                {
                    'Name': 'backup-type',
                    'Values': ['USER_INITIATED']
                }
            ]
        )
        self.assertEqual(len(backups['Backups']), 1)
        # Copy before appending so the resource's own tag list is not mutated.
        expected_tags = list(resources[0]['Tags'])
        expected_tags.append({'Key': 'test-tag', 'Value': 'backup-tag'})
        expected_tag_map = {t['Key']: t['Value'] for t in expected_tags}
        final_tag_map = {t['Key']: t['Value'] for t in backups['Backups'][0]['Tags']}
        self.assertEqual(expected_tag_map, final_tag_map)
    def test_fsx_create_backup_without_copy_tags(self):
        """backup without copy-tags applies only the action's own tags."""
        session_factory = self.replay_flight_data('test_fsx_create_backup_without_copy_tags')
        p = self.load_policy(
            {
                'name': 'test-update-fsx-configuration',
                'resource': 'fsx',
                'filters': [
                    {
                        'FileSystemId': 'fs-002ccbccdcf032728'
                    }
                ],
                'actions': [
                    {
                        'type': 'backup',
                        'copy-tags': False,
                        'tags': {
                            'test-tag': 'backup-tag'
                        }
                    }
                ]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        if self.recording:
            time.sleep(500)
        client = session_factory().client('fsx')
        backups = client.describe_backups(
            Filters=[
                {
                    'Name': 'file-system-id',
                    'Values': ['fs-002ccbccdcf032728']
                },
                {
                    'Name': 'backup-type',
                    'Values': ['USER_INITIATED']
                }
            ]
        )
        self.assertEqual(len(backups['Backups']), 1)
        expected_tags = [{'Key': 'test-tag', 'Value': 'backup-tag'}]
        self.assertEqual(expected_tags, backups['Backups'][0]['Tags'])
    def test_fsx_delete_file_system_skip_snapshot(self):
        """delete with skip-snapshot removes the FS without a final backup."""
        session_factory = self.replay_flight_data('test_fsx_delete_file_system_skip_snapshot')
        p = self.load_policy(
            {
                'name': 'fsx-delete-file-system',
                'resource': 'fsx',
                'filters': [
                    {
                        'type': 'value',
                        'key': 'Lifecycle',
                        'value': 'AVAILABLE'
                    }
                ],
                'actions': [
                    {
                        'type': 'delete',
                        'skip-snapshot': True
                    }
                ]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        client = session_factory().client('fsx')
        fs = client.describe_file_systems(
            FileSystemIds=[resources[0]['FileSystemId']])['FileSystems']
        self.assertEqual(len(fs), 1)
        self.assertEqual(fs[0]['Lifecycle'], 'DELETING')
        backups = client.describe_backups(
            Filters=[
                {
                    'Name': 'file-system-id',
                    'Values': [fs[0]['FileSystemId']]
                },
                {
                    'Name': 'backup-type',
                    'Values': ['USER_INITIATED']
                }
            ]
        )['Backups']
        self.assertEqual(len(backups), 0)
    def test_fsx_delete_file_system(self):
        """delete without skip-snapshot takes a final tagged backup."""
        session_factory = self.replay_flight_data('test_fsx_delete_file_system')
        p = self.load_policy(
            {
                'name': 'fsx-delete-file-system',
                'resource': 'fsx',
                'filters': [
                    {
                        'type': 'value',
                        'key': 'Lifecycle',
                        'value': 'AVAILABLE'
                    }
                ],
                'actions': [
                    {
                        'type': 'delete',
                        'tags': {
                            'DeletedBy': 'CloudCustodian'
                        },
                        'skip-snapshot': False
                    }
                ]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        client = session_factory().client('fsx')
        fs = client.describe_file_systems(
            FileSystemIds=[resources[0]['FileSystemId']])['FileSystems']
        self.assertEqual(len(fs), 1)
        self.assertEqual(fs[0]['Lifecycle'], 'DELETING')
        backups = client.describe_backups(
            Filters=[
                {
                    'Name': 'file-system-id',
                    'Values': [fs[0]['FileSystemId']]
                },
                {
                    'Name': 'backup-type',
                    'Values': ['USER_INITIATED']
                }
            ]
        )['Backups']
        self.assertEqual(len(backups), 1)
    def test_fsx_delete_file_system_with_error(self):
        """delete on a CREATING file system fails and leaves it untouched."""
        session_factory = self.replay_flight_data('test_fsx_delete_file_system_with_error')
        p = self.load_policy(
            {
                'name': 'fsx-delete-file-system',
                'resource': 'fsx',
                'filters': [
                    {
                        'type': 'value',
                        'key': 'Lifecycle',
                        'value': 'CREATING'
                    }
                ],
                'actions': [
                    {'type': 'delete'}
                ]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        client = session_factory().client('fsx')
        fs = client.describe_file_systems(
            FileSystemIds=[resources[0]['FileSystemId']])['FileSystems']
        self.assertEqual(len(fs), 1)
        self.assertNotEqual(fs[0]['Lifecycle'], 'DELETING')
class TestFSxBackup(BaseTest):
    """Tests for the ``fsx-backup`` resource and its actions.

    Fixed: ``assertTrue(len(x), 1)`` treats the second argument as a
    message and ignores it — replaced with ``assertEqual`` throughout.
    """
    def test_fsx_backup_delete(self):
        """The delete action removes the targeted backup."""
        session_factory = self.replay_flight_data('test_fsx_backup_delete')
        backup_id = 'backup-0d1fb25003287b260'
        p = self.load_policy(
            {
                'name': 'fsx-backup-resource',
                'resource': 'fsx-backup',
                'filters': [
                    {'BackupId': backup_id}
                ],
                'actions': [
                    {'type': 'delete'}
                ]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertTrue(resources)
        client = session_factory().client('fsx')
        backups = client.describe_backups(
            Filters=[
                {
                    'Name': 'file-system-id',
                    'Values': ['fs-002ccbccdcf032728']
                }
            ]
        )['Backups']
        results = [b for b in backups if b['BackupId'] == backup_id]
        self.assertFalse(results)
    def test_fsx_backup_tag(self):
        """The tag action adds a tag to an untagged backup."""
        session_factory = self.replay_flight_data('test_fsx_backup_tag')
        backup_id = 'backup-0b644cd380298f720'
        p = self.load_policy(
            {
                'name': 'fsx-backup-resource-tag',
                'resource': 'fsx-backup',
                'filters': [
                    {'BackupId': backup_id},
                    {'Tags': []}
                ],
                'actions': [
                    {'type': 'tag', 'tags': {'tag-test': 'tag-test'}}
                ]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        client = session_factory().client('fsx')
        backups = client.describe_backups(
            Filters=[
                {
                    'Name': 'file-system-id',
                    'Values': ['fs-002ccbccdcf032728']
                }
            ]
        )['Backups']
        tags = None
        for b in backups:
            if b['BackupId'] == backup_id:
                self.assertEqual(len(b['Tags']), 1)
                tags = b['Tags']
        self.assertTrue(tags)
        self.assertEqual(tags[0]['Key'], 'tag-test')
        self.assertEqual(tags[0]['Value'], 'tag-test')
    def test_fsx_backup_mark_for_op(self):
        """mark-for-op records a maid_status tag on the backup."""
        session_factory = self.replay_flight_data('test_fsx_backup_mark_for_op')
        backup_id = 'backup-09d3dfca849cfc629'
        p = self.load_policy(
            {
                'name': 'fsx-backup-resource-mark-for-op',
                'resource': 'fsx-backup',
                'filters': [
                    {'BackupId': backup_id},
                    {'Tags': []}
                ],
                'actions': [
                    {'type': 'mark-for-op', 'op': 'delete'}
                ]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        client = session_factory().client('fsx')
        backups = client.describe_backups(
            Filters=[
                {
                    'Name': 'file-system-id',
                    'Values': ['fs-002ccbccdcf032728']
                }
            ]
        )['Backups']
        tags = None
        for b in backups:
            if b['BackupId'] == backup_id:
                self.assertEqual(len(b['Tags']), 1)
                tags = [t for t in b['Tags'] if t['Key'] == 'maid_status']
        self.assertTrue(tags)
    def test_fsx_backup_remove_tag(self):
        """remove-tag strips the listed tag from the backup."""
        session_factory = self.replay_flight_data('test_fsx_backup_remove_tag')
        backup_id = 'backup-05c81253149962783'
        p = self.load_policy(
            {
                'name': 'fsx-backup-resource-remove-tag',
                'resource': 'fsx-backup',
                'filters': [
                    {'BackupId': backup_id},
                    {'tag:test-tag': 'backup-tag'},
                ],
                'actions': [
                    {'type': 'remove-tag', 'tags': ['test-tag']}
                ]
            },
            session_factory=session_factory
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        client = session_factory().client('fsx')
        backups = client.describe_backups(
            Filters=[
                {
                    'Name': 'file-system-id',
                    'Values': ['fs-002ccbccdcf032728']
                }
            ]
        )['Backups']
        # The backup must still exist with all of its tags removed
        # (the original used a ``tags = [1]`` sentinel for the same check).
        matched = [b for b in backups if b['BackupId'] == backup_id]
        self.assertTrue(matched)
        self.assertEqual(matched[0]['Tags'], [])
    def test_kms_key_filter(self):
        """The kms-key filter matches file systems by key alias."""
        session_factory = self.replay_flight_data("test_fsx_kms_key_filter")
        p = self.load_policy(
            {
                "name": "fsx-kms-key-filters",
                "resource": "fsx",
                "filters": [
                    {
                        "type": "kms-key",
                        "key": "c7n:AliasName",
                        "value": "^(alias/aws/fsx)",
                        "op": "regex"
                    }
                ]
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(len(resources[0]['c7n:matched-kms-key']), 1)
    def test_kms_key_filter_fsx_backup(self):
        """The kms-key filter matches backups by key alias."""
        session_factory = self.replay_flight_data("test_kms_key_filter_fsx_backup")
        p = self.load_policy(
            {
                "name": "kms_key_filter_fsx_backup",
                "resource": "fsx-backup",
                "filters": [
                    {
                        "type": "kms-key",
                        "key": "c7n:AliasName",
                        "value": "^(alias/aws/fsx)",
                        "op": "regex"
                    }
                ]
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 3)
        for r in resources:
            self.assertEqual(len(r['c7n:matched-kms-key']), 1)
| |
import json
import inspect
try:
from itertools import imap as map # PY2
except ImportError:
pass
import tornado.web
from jsonschema import validate, ValidationError
from tornado_json.utils import is_method
from tornado_json.constants import HTTP_METHODS
from tornado_json.requesthandlers import APIHandler
def _validate_example(rh, method, example_type):
"""Validates example against schema
:returns: Formatted example if example exists and validates, otherwise None
:raises ValidationError: If example does not validate against the schema
"""
example = getattr(method, example_type + "_example")
schema = getattr(method, example_type + "_schema")
if example is None:
return None
try:
validate(example, schema)
except ValidationError as e:
raise ValidationError(
"{}_example for {}.{} could not be validated.\n{}".format(
example_type, rh.__name__, method.__name__, str(e)
)
)
return json.dumps(example, indent=4, sort_keys=True)
def _get_rh_methods(rh):
    """Yield ``(name, method)`` for each HTTP method of ``rh`` that was
    decorated with schema.validate (i.e. carries an ``input_schema``)."""
    for name, attr in vars(rh).items():
        is_validated_handler = (
            name in HTTP_METHODS
            and is_method(attr)
            and hasattr(attr, "input_schema")
        )
        if is_validated_handler:
            yield (name, attr)
def _get_tuple_from_route(route):
"""Return (pattern, handler_class) tuple from ``route``
:type route: tuple|tornado.web.URLSpec
:rtype: tuple
:raises TypeError: If ``route`` is not a tuple or URLSpec
"""
if isinstance(route, tuple):
assert len(route) >= 2
pattern, handler_class = route[:2]
elif isinstance(route, tornado.web.URLSpec):
pattern, handler_class = route.regex.pattern, route.handler_class
else:
raise TypeError("Unknown route type '{}'"
.format(type(route).__name__))
return pattern, handler_class
def _escape_markdown_literals(string):
"""Escape any markdown literals in ``string`` by prepending with \\
:type string: str
:rtype: str
"""
literals = list("\\`*_{}[]()<>#+-.!:|")
escape = lambda c: '\\' + c if c in literals else c
return "".join(map(escape, string))
def _cleandoc(doc):
"""Remove uniform indents from ``doc`` lines that are not empty
:returns: Cleaned ``doc``
"""
indent_length = lambda s: len(s) - len(s.lstrip(" "))
not_empty = lambda s: s != ""
lines = doc.split("\n")
indent = min(map(indent_length, filter(not_empty, lines)))
return "\n".join(s[indent:] for s in lines)
def _add_indent(string, indent):
"""Add indent of ``indent`` spaces to ``string.split("\n")[1:]``
Useful for formatting in strings to already indented blocks
"""
lines = string.split("\n")
first, lines = lines[0], lines[1:]
lines = ["{indent}{s}".format(indent=" " * indent, s=s)
for s in lines]
lines = [first] + lines
return "\n".join(lines)
def _get_example_doc(rh, method, type):
    """Return a markdown section for the method's input/output example,
    or an empty string when no example is attached."""
    assert type in ("input", "output")
    example = _validate_example(rh, method, type)
    if not example:
        return ""
    template = """
    **{type} Example**
    ```json
    {example}
    ```
    """
    filled = template.format(
        type=type.capitalize(),
        example=_add_indent(example, 4)
    )
    return _cleandoc(filled)
def _get_input_example(rh, method):
    """Return the markdown-formatted input example for ``method``."""
    return _get_example_doc(rh, method, "input")
def _get_output_example(rh, method):
    """Return the markdown-formatted output example for ``method``."""
    return _get_example_doc(rh, method, "output")
def _get_schema_doc(schema, type):
    """Return a markdown section showing ``schema`` pretty-printed as JSON."""
    rendered = json.dumps(schema, indent=4, sort_keys=True)
    body = """
    **{type} Schema**
    ```json
    {schema}
    ```
    """.format(
        type=type.capitalize(),
        schema=_add_indent(rendered, 4)
    )
    return _cleandoc(body)
def _get_input_schema_doc(method):
    """Return the markdown-formatted input schema section for ``method``."""
    return _get_schema_doc(method.input_schema, "input")
def _get_output_schema_doc(method):
    """Return the markdown-formatted output schema section for ``method``."""
    return _get_schema_doc(method.output_schema, "output")
def _get_notes(method):
    """Return the method docstring as a markdown **Notes** section, or
    ``None`` when the method has no docstring."""
    doc = inspect.getdoc(method)
    if doc is None:
        return None
    template = """
    **Notes**
    {}
    """
    return _cleandoc(template.format(_add_indent(doc, 4)))
def _get_method_doc(rh, method_name, method):
    """Assemble the complete markdown documentation for one HTTP method:
    schemas, examples and docstring notes."""
    sections = dict(
        method_name=method_name.upper(),
        input_schema=_get_input_schema_doc(method),
        output_schema=_get_output_schema_doc(method),
        notes=_get_notes(method) or "",
        input_example=_get_input_example(rh, method),
        output_example=_get_output_example(rh, method),
    )
    res = """## {method_name}
    {input_schema}
    {input_example}
    {output_schema}
    {output_example}
    {notes}
    """.format(**sections)
    # Trailing whitespace left by empty sections is stripped per line.
    stripped = "\n".join(line.rstrip() for line in res.splitlines())
    return _cleandoc(stripped)
def _get_rh_doc(rh):
    """Join the documentation of every schema-validated method of ``rh``."""
    return "\n\n".join(_get_method_doc(rh, name, method)
                       for name, method in _get_rh_methods(rh))
def _get_content_type(rh):
# XXX: Content-type is hard-coded but ideally should be retrieved;
# the hard part is, we don't know what it is without initializing
# an instance, so just leave as-is for now
return "Content-Type: application/json"
def _get_route_doc(url, rh):
    """Return the full markdown documentation for one route: escaped
    pattern heading, content type and per-method docs."""
    pattern = _escape_markdown_literals(url)
    content_type = _get_content_type(rh)
    handler_doc = _add_indent(_get_rh_doc(rh), 4)
    body = """
    # {route_pattern}
    {content_type}
    {rh_doc}
    """.format(route_pattern=pattern, content_type=content_type,
               rh_doc=handler_doc)
    return _cleandoc(body)
def _write_docs_to_file(documentation):
# Documentation is written to the root folder
with open("API_Documentation.md", "w+") as f:
f.write(documentation)
def get_api_docs(routes):
    """
    Generate GitHub Markdown formatted API documentation from the schemas
    and docstrings of the RequestHandlers behind ``routes``.

    :type routes: [(url, RequestHandler), ...]
    :param routes: List of routes (this is ideally all possible routes of
        the app)
    :rtype: str
    :returns: generated GFM-formatted documentation
    """
    pairs = sorted((_get_tuple_from_route(r) for r in routes),
                   key=lambda a: a[0])
    # Only APIHandler subclasses carry validated schemas worth documenting.
    docs = [_get_route_doc(url, rh)
            for url, rh in pairs
            if issubclass(rh, APIHandler)]
    header = (
        "**This documentation is automatically generated.**\n\n" +
        "**Output schemas only represent `data` and not the full output; " +
        "see output examples and the JSend specification.**\n"
    )
    return header + "\n<br>\n<br>\n".join(docs)
def api_doc_gen(routes):
    """Generate API documentation for ``routes`` and write it to file."""
    _write_docs_to_file(get_api_docs(routes))
| |
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseBadRequest,\
HttpResponseForbidden
from bootcamp.feeds.models import Feed
from bootcamp.activities.models import Activity
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.template.loader import render_to_string
from django.template.context_processors import csrf
import json
from django.contrib.auth.decorators import login_required
from bootcamp.decorators import ajax_required
FEEDS_NUM_PAGES = 10
@login_required
def feeds(request):
    """Render the first page of the activity feed.

    ``from_feed`` is the id of the newest feed currently shown, or -1 when
    there are no feeds yet.  (Leftover debug ``print`` statements removed.)
    """
    all_feeds = Feed.get_feeds()
    paginator = Paginator(all_feeds, FEEDS_NUM_PAGES)
    feeds = paginator.page(1)
    from_feed = -1
    if feeds:
        from_feed = feeds[0].id
    return render(request, 'feeds/feeds.html', {
        'feeds': feeds,
        'from_feed': from_feed,
        'page': 1,
    })
def feed(request, pk):
    """Render the detail page for a single feed item (404 if missing)."""
    # Local renamed so it no longer shadows this view function's name.
    instance = get_object_or_404(Feed, pk=pk)
    return render(request, 'feeds/feed.html', {'feed': instance})
@login_required
@ajax_required
def load(request):
    """AJAX endpoint: return rendered HTML for one page of feeds.

    GET parameters: ``from_feed`` (anchor id for pagination), ``page``,
    and ``feed_source`` — any value other than 'all' restricts the
    result to that user's feeds.
    """
    from_feed = request.GET.get('from_feed')
    page = request.GET.get('page')
    feed_source = request.GET.get('feed_source')
    all_feeds = Feed.get_feeds(from_feed)
    if feed_source != 'all':
        all_feeds = all_feeds.filter(user__id=feed_source)
    paginator = Paginator(all_feeds, FEEDS_NUM_PAGES)
    try:
        feeds = paginator.page(page)
    except PageNotAnInteger:
        # A malformed page number is a client error.
        return HttpResponseBadRequest()
    except EmptyPage:
        feeds = []
    html = u''
    # NOTE: ``unicode`` (and the print syntax above) make this module
    # Python 2 only.
    csrf_token = unicode(csrf(request)['csrf_token'])
    for feed in feeds:
        html = u'{0}{1}'.format(html,
                                render_to_string('feeds/partial_feed.html',
                                                 {
                                                     'feed': feed,
                                                     'user': request.user,
                                                     'csrf_token': csrf_token
                                                 }))
    return HttpResponse(html)
def _html_feeds(last_feed, user, csrf_token, feed_source='all'):
    """Render every feed newer than ``last_feed`` into one HTML fragment.

    ``feed_source`` other than 'all' restricts the result to the feeds of
    a single user (by user id).
    """
    feeds = Feed.get_feeds_after(last_feed)
    if feed_source != 'all':
        feeds = feeds.filter(user__id=feed_source)
    # Collect per-feed fragments and join once; repeated in-loop string
    # concatenation is quadratic.
    fragments = [render_to_string('feeds/partial_feed.html',
                                  {
                                      'feed': feed,
                                      'user': user,
                                      'csrf_token': csrf_token
                                  })
                 for feed in feeds]
    return u''.join(fragments)
@login_required
@ajax_required
def load_new(request):
    """AJAX endpoint: return rendered HTML for feeds newer than
    the ``last_feed`` GET parameter."""
    last_feed = request.GET.get('last_feed')
    user = request.user
    # ``unicode`` makes this module Python 2 only.
    csrf_token = unicode(csrf(request)['csrf_token'])
    html = _html_feeds(last_feed, user, csrf_token)
    return HttpResponse(html)
@login_required
@ajax_required
def check(request):
    """AJAX endpoint: return the number of feeds newer than ``last_feed``
    as a plain-text response."""
    last_feed = request.GET.get('last_feed')
    feed_source = request.GET.get('feed_source')
    new_feeds = Feed.get_feeds_after(last_feed)
    if feed_source != 'all':
        new_feeds = new_feeds.filter(user__id=feed_source)
    return HttpResponse(new_feeds.count())
@login_required
@ajax_required
def post(request):
    """AJAX endpoint: create a feed post and return the refreshed list.

    Empty/whitespace-only posts are silently ignored; the body is
    truncated to 255 characters (presumably the model field limit —
    confirm against the Feed model).
    """
    last_feed = request.POST.get('last_feed')
    user = request.user
    csrf_token = unicode(csrf(request)['csrf_token'])
    feed = Feed()
    feed.user = user
    post = request.POST['post']
    post = post.strip()
    if len(post) > 0:
        feed.post = post[:255]
        feed.save()
    # The refreshed HTML is returned even when nothing was saved.
    html = _html_feeds(last_feed, user, csrf_token)
    return HttpResponse(html)
@login_required
@ajax_required
def like(request):
    """AJAX endpoint: toggle the current user's like on a feed and return
    the updated like count."""
    feed_id = request.POST['feed']
    feed = Feed.objects.get(pk=feed_id)
    user = request.user
    like = Activity.objects.filter(activity_type=Activity.LIKE, feed=feed_id,
                                   user=user)
    if like:
        # Already liked: withdraw the notification and remove the like.
        user.profile.unotify_liked(feed)
        like.delete()
    else:
        # NOTE(review): the raw id is assigned to ``feed`` here, which
        # assumes Activity.feed is a plain integer field rather than a
        # ForeignKey — confirm against the Activity model.
        like = Activity(activity_type=Activity.LIKE, feed=feed_id, user=user)
        like.save()
        user.profile.notify_liked(feed)
    return HttpResponse(feed.calculate_likes())
@login_required
@ajax_required
def comment(request):
    """AJAX endpoint: POST adds a comment to a feed; GET returns the
    feed's rendered comment list.

    Empty comments are ignored; the body is truncated to 255 characters.
    """
    if request.method == 'POST':
        feed_id = request.POST['feed']
        feed = Feed.objects.get(pk=feed_id)
        post = request.POST['post']
        post = post.strip()
        if len(post) > 0:
            post = post[:255]
            user = request.user
            feed.comment(user=user, post=post)
            # Notify the feed owner and other participants in the thread.
            user.profile.notify_commented(feed)
            user.profile.notify_also_commented(feed)
        return render(request, 'feeds/partial_feed_comments.html',
                      {'feed': feed})
    else:
        feed_id = request.GET.get('feed')
        feed = Feed.objects.get(pk=feed_id)
        return render(request, 'feeds/partial_feed_comments.html',
                      {'feed': feed})
@login_required
@ajax_required
def update(request):
    """AJAX endpoint: return like/comment counts for the feeds whose ids
    fall between ``last_feed`` and ``first_feed``, as a JSON object keyed
    by feed pk."""
    first_feed = request.GET.get('first_feed')
    last_feed = request.GET.get('last_feed')
    feed_source = request.GET.get('feed_source')
    # id__range is inclusive on both ends.
    feeds = Feed.get_feeds().filter(id__range=(last_feed, first_feed))
    if feed_source != 'all':
        feeds = feeds.filter(user__id=feed_source)
    dump = {}
    for feed in feeds:
        # ``likes``/``comments`` look like denormalized counters on Feed —
        # confirm against the model.
        dump[feed.pk] = {'likes': feed.likes, 'comments': feed.comments}
    data = json.dumps(dump)
    return HttpResponse(data, content_type='application/json')
@login_required
@ajax_required
def track_comments(request):
    """AJAX endpoint: return the rendered comment list for one feed."""
    requested = Feed.objects.get(pk=request.GET.get('feed'))
    return render(request, 'feeds/partial_feed_comments.html', {'feed': requested})
@login_required
@ajax_required
def remove(request):
    """AJAX endpoint: delete one of the current user's feeds along with
    its likes; refreshes the parent's comment count if it was a reply."""
    try:
        feed_id = request.POST.get('feed')
        feed = Feed.objects.get(pk=feed_id)
        if feed.user == request.user:
            likes = feed.get_likes()
            parent = feed.parent
            for like in likes:
                like.delete()
            feed.delete()
            if parent:
                # Keep the parent's cached comment count in sync.
                parent.calculate_comments()
            return HttpResponse()
        else:
            # Only the author may remove a feed.
            return HttpResponseForbidden()
    except Exception, e:
        # NOTE(review): Python 2 except syntax; this catches *every* error
        # as a 400 (not just a missing Feed) and ``e`` is unused —
        # consider narrowing to Feed.DoesNotExist.
        return HttpResponseBadRequest()
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for shape inference helper classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class DimensionTest(test_util.TensorFlowTestCase):
  """Unit tests for tensor_shape.Dimension arithmetic, merging,
  comparison and conversion."""
  def testDimension(self):
    """Known dimensions support arithmetic, merging and ordering."""
    dim = tensor_shape.Dimension(12)
    self.assertEqual(12, dim.value)
    self.assertEqual(12, int(dim))
    self.assertEqual(dim, tensor_shape.Dimension(12))
    self.assertEqual(tensor_shape.Dimension(15),
                     dim + tensor_shape.Dimension(3))
    self.assertEqual(tensor_shape.Dimension(15), dim + 3)
    self.assertEqual(tensor_shape.Dimension(24),
                     dim * tensor_shape.Dimension(2))
    self.assertEqual(tensor_shape.Dimension(24), dim * 2)
    self.assertEqual(
        tensor_shape.Dimension(6), dim // tensor_shape.Dimension(2))
    self.assertEqual(tensor_shape.Dimension(6), dim // 2)
    self.assertEqual(tensor_shape.Dimension(12),
                     dim.merge_with(tensor_shape.Dimension(12)))
    self.assertEqual(tensor_shape.Dimension(12), dim.merge_with(12))
    self.assertLess(tensor_shape.Dimension(12), tensor_shape.Dimension(13))
    self.assertGreater(tensor_shape.Dimension(13), tensor_shape.Dimension(12))
    self.assertLessEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(12))
    self.assertLessEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(13))
    self.assertGreater(tensor_shape.Dimension(13), tensor_shape.Dimension(12))
    self.assertGreaterEqual(tensor_shape.Dimension(12),
                            tensor_shape.Dimension(12))
    self.assertGreaterEqual(tensor_shape.Dimension(13),
                            tensor_shape.Dimension(12))
    # Merging two different known dimensions must fail.
    with self.assertRaises(ValueError):
      dim.merge_with(tensor_shape.Dimension(13))
  def testUnknownDimension(self):
    """Unknown dimensions propagate through arithmetic and merging;
    comparisons between unknowns return None."""
    dim = tensor_shape.Dimension(None)
    self.assertIs(None, dim.value)
    self.assertEqual(dim.value, tensor_shape.Dimension(None).value)
    self.assertEqual(tensor_shape.Dimension(None).value,
                     (dim + tensor_shape.Dimension(None)).value)
    self.assertEqual(tensor_shape.Dimension(None).value,
                     (dim * tensor_shape.Dimension(None)).value)
    self.assertEqual(
        tensor_shape.Dimension(None).value,
        (dim // tensor_shape.Dimension(None)).value)
    self.assertEqual(tensor_shape.Dimension(None).value,
                     dim.merge_with(tensor_shape.Dimension(None)).value)
    self.assertIs(None,
                  tensor_shape.Dimension(None) < tensor_shape.Dimension(None))
    self.assertIs(None,
                  tensor_shape.Dimension(None) <= tensor_shape.Dimension(None))
    self.assertIs(None,
                  tensor_shape.Dimension(None) > tensor_shape.Dimension(None))
    self.assertIs(None,
                  tensor_shape.Dimension(None) >= tensor_shape.Dimension(None))
  def testKnownAndUnknownDimensions(self):
    """Mixing known and unknown: arithmetic yields unknown, merging keeps
    the known value, comparisons return None."""
    known = tensor_shape.Dimension(12)
    unknown = tensor_shape.Dimension(None)
    self.assertEqual(
        tensor_shape.Dimension(None).value, (known + unknown).value)
    self.assertEqual(
        tensor_shape.Dimension(None).value, (unknown + known).value)
    self.assertEqual(
        tensor_shape.Dimension(None).value, (known * unknown).value)
    self.assertEqual(
        tensor_shape.Dimension(None).value, (unknown * known).value)
    self.assertEqual(
        tensor_shape.Dimension(None).value, (known // unknown).value)
    self.assertEqual(
        tensor_shape.Dimension(None).value, (unknown // known).value)
    self.assertEqual(
        tensor_shape.Dimension(12), known.merge_with(unknown))
    self.assertEqual(
        tensor_shape.Dimension(12), unknown.merge_with(known))
    self.assertIs(None,
                  tensor_shape.Dimension(12) < tensor_shape.Dimension(None))
    self.assertIs(None,
                  tensor_shape.Dimension(12) <= tensor_shape.Dimension(None))
    self.assertIs(None,
                  tensor_shape.Dimension(12) > tensor_shape.Dimension(None))
    self.assertIs(None,
                  tensor_shape.Dimension(12) >= tensor_shape.Dimension(None))
    self.assertIs(None,
                  tensor_shape.Dimension(None) < tensor_shape.Dimension(12))
    self.assertIs(None,
                  tensor_shape.Dimension(None) <= tensor_shape.Dimension(12))
    self.assertIs(None,
                  tensor_shape.Dimension(None) > tensor_shape.Dimension(12))
    self.assertIs(None,
                  tensor_shape.Dimension(None) >= tensor_shape.Dimension(12))
  def testAsDimension(self):
    """as_dimension() accepts Dimension instances, ints and None."""
    self.assertEqual(tensor_shape.Dimension(12),
                     tensor_shape.as_dimension(tensor_shape.Dimension(12)))
    self.assertEqual(tensor_shape.Dimension(12), tensor_shape.as_dimension(12))
    self.assertEqual(
        tensor_shape.Dimension(None).value,
        tensor_shape.as_dimension(tensor_shape.Dimension(None)).value)
    self.assertEqual(tensor_shape.Dimension(None).value,
                     tensor_shape.as_dimension(None).value)
  def testEquality(self):
    """== is three-valued: True, False, or None if either side is unknown."""
    self.assertTrue(tensor_shape.Dimension(12) == tensor_shape.Dimension(12))
    self.assertFalse(tensor_shape.Dimension(12) == tensor_shape.Dimension(13))
    self.assertIs(None,
                  tensor_shape.Dimension(12) == tensor_shape.Dimension(None))
    self.assertIs(None,
                  tensor_shape.Dimension(None) == tensor_shape.Dimension(12))
    self.assertIs(None,
                  tensor_shape.Dimension(None) == tensor_shape.Dimension(None))
  def testInequality(self):
    """!= mirrors the three-valued behavior of ==."""
    self.assertTrue(tensor_shape.Dimension(12) != tensor_shape.Dimension(13))
    self.assertFalse(tensor_shape.Dimension(12) != tensor_shape.Dimension(12))
    self.assertIs(None,
                  tensor_shape.Dimension(12) != tensor_shape.Dimension(None))
    self.assertIs(None,
                  tensor_shape.Dimension(None) != tensor_shape.Dimension(12))
    self.assertIs(None,
                  tensor_shape.Dimension(None) != tensor_shape.Dimension(None))
class ShapeTest(test_util.TensorFlowTestCase):
  """Tests for TensorShape: construction, merging, slicing, and printing."""

  def testUnknownShape(self):
    """A shape built from None has no rank, no dims, and is falsy."""
    s = tensor_shape.TensorShape(None)
    with self.assertRaises(ValueError):
      s.assert_is_fully_defined()
    self.assertIs(None, s.ndims)
    with self.assertRaises(ValueError):
      len(s)
    self.assertFalse(s)
    self.assertIs(None, s.dims)

  def testFullyDefinedShape(self):
    """All dimensions known: rank, indexing, and as_list() all work."""
    dims = [tensor_shape.Dimension(d) for d in (3, 4, 7)]
    s = tensor_shape.TensorShape(dims)
    s.assert_is_fully_defined()
    s.assert_has_rank(3)
    self.assertEqual(3, s.ndims)
    self.assertEqual(3, len(s))
    self.assertTrue(s)
    self.assertEqual(dims, s.dims)
    for i, expected in enumerate((3, 4, 7)):
      self.assertEqual(tensor_shape.Dimension(expected), s[i])
    self.assertEqual([3, 4, 7], s.as_list())
    s.assert_is_compatible_with([3, 4, 7])
    s.assert_same_rank([6, 3, 7])

  def testPartiallyDefinedShape(self):
    """Known rank with one unknown dim: rank checks pass, full check fails."""
    s = tensor_shape.TensorShape([tensor_shape.Dimension(3),
                                  tensor_shape.Dimension(None),
                                  tensor_shape.Dimension(7)])
    with self.assertRaises(ValueError):
      s.assert_is_fully_defined()
    s.assert_has_rank(3)
    self.assertEqual(3, s.ndims)
    self.assertEqual(3, len(s))
    self.assertTrue(s)
    self.assertEqual(tensor_shape.Dimension(3), s[0])
    self.assertEqual(None, s[1].value)
    self.assertEqual(tensor_shape.Dimension(7), s[2])
    s.assert_same_rank([6, 3, 7])

  def testMergeFullShapes(self):
    """Merging identical full shapes succeeds; conflicting dims raise."""
    merged = tensor_shape.TensorShape([3, 4, 7]).merge_with(
        tensor_shape.TensorShape([3, 4, 7]))
    self.assertEqual([3, 4, 7], merged.as_list())
    with self.assertRaises(ValueError):
      tensor_shape.TensorShape([3, 4, 7]).merge_with(
          tensor_shape.TensorShape([6, 3, 7]))

  def testMergePartialShapes(self):
    """Unknown dims are filled in from the other operand during merge."""
    s1 = tensor_shape.TensorShape([tensor_shape.Dimension(3),
                                   tensor_shape.Dimension(None),
                                   tensor_shape.Dimension(7)])
    s2 = tensor_shape.TensorShape([tensor_shape.Dimension(None),
                                   tensor_shape.Dimension(4),
                                   tensor_shape.Dimension(7)])
    self.assertEqual([3, 4, 7], s1.merge_with(s2).as_list())

  def testMergeFullAndUnknownShape(self):
    """Merging with a rankless shape leaves the known shape intact."""
    full = tensor_shape.TensorShape([3, 4, 7])
    self.assertEqual(
        [3, 4, 7],
        full.merge_with(tensor_shape.TensorShape(None)).as_list())

  def testSlice(self):
    """Indexing and slicing work on both known and unknown shapes."""
    known = tensor_shape.TensorShape([0, 1, 2, 3, 4])
    self.assertEqual(tensor_shape.Dimension(2), known[2])
    tensor_shape.TensorShape([1, 2, 3]).assert_is_compatible_with(known[1:4])
    unknown = tensor_shape.TensorShape(None)
    # Indexing an unknown shape yields an unknown dimension.
    self.assertEqual(None, unknown[2].value)
    tensor_shape.TensorShape(
        [None, None, None]).assert_is_compatible_with(unknown[1:4])

  def testConcatenate(self):
    """Concatenation of known/unknown shapes stays compatible with [1,2,3,4]."""
    expected = tensor_shape.TensorShape([1, 2, 3, 4])
    for lhs, rhs in [([1, 2], [3, 4]),
                     ([1, 2], None),
                     (None, [3, 4]),
                     (None, None)]:
      expected.assert_is_compatible_with(
          tensor_shape.TensorShape(lhs).concatenate(
              tensor_shape.TensorShape(rhs)))
    # A bare Dimension can also be appended directly.
    tensor_shape.TensorShape([1, 2, 3]).assert_is_compatible_with(
        tensor_shape.TensorShape([1, 2]).concatenate(
            tensor_shape.Dimension(3)))

  def testHelpers(self):
    """scalar()/vector()/matrix() build shapes of rank 0, 1, and 2."""
    tensor_shape.TensorShape([]).assert_is_compatible_with(
        tensor_shape.scalar())
    tensor_shape.TensorShape([37]).assert_is_compatible_with(
        tensor_shape.vector(37))
    tensor_shape.TensorShape([94, 43]).assert_is_compatible_with(
        tensor_shape.matrix(94, 43))

  def testTruedivFails(self):
    """Floor division of unknowns is unknown; true division is unsupported."""
    unknown = tensor_shape.Dimension(None)
    self.assertEqual((unknown // unknown).value, None)
    with self.assertRaisesRegexp(TypeError, r"unsupported operand type"):
      unknown / unknown  # pylint: disable=pointless-statement

  def testConvertFromProto(self):
    """TensorShape and as_shape() accept TensorShapeProto; -1 means unknown."""

    def make_tensor_shape_proto(shape):
      return tensor_shape_pb2.TensorShapeProto(
          dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=x) for x in shape])

    for dims in ([], [1, 37, 42]):
      proto = make_tensor_shape_proto(dims)
      self.assertEqual(tensor_shape.TensorShape(dims),
                       tensor_shape.TensorShape(proto))
      self.assertEqual(tensor_shape.TensorShape(dims),
                       tensor_shape.as_shape(proto))
    partial_proto_shape = tensor_shape.as_shape(
        make_tensor_shape_proto([-1, 37, 42]))
    partial_shape = tensor_shape.TensorShape([None, 37, 42])
    # Shapes with unknown dims compare as not-equal, yet remain compatible.
    self.assertNotEqual(partial_proto_shape, partial_shape)
    self.assertEqual(None, partial_proto_shape[0].value)
    self.assertEqual(37, partial_proto_shape[1].value)
    self.assertEqual(42, partial_proto_shape[2].value)
    self.assertTrue(partial_shape.is_compatible_with(partial_proto_shape))

  def testStr(self):
    """str() renders unknown dims as '?' and a rankless shape as <unknown>."""
    self.assertEqual("<unknown>", str(tensor_shape.unknown_shape()))
    for ndims, rendered in ((1, "(?,)"), (2, "(?, ?)"), (3, "(?, ?, ?)")):
      self.assertEqual(rendered,
                       str(tensor_shape.unknown_shape(ndims=ndims)))
    self.assertEqual("()", str(tensor_shape.scalar()))
    self.assertEqual("(7,)", str(tensor_shape.vector(7)))
    self.assertEqual("(3, 8)", str(tensor_shape.matrix(3, 8)))
    self.assertEqual("(4, 5, 2)", str(tensor_shape.TensorShape([4, 5, 2])))
    self.assertEqual("(32, ?, 1, 9)",
                     str(tensor_shape.TensorShape([32, None, 1, 9])))

  def testAsProto(self):
    """Only a completely rankless shape serializes with unknown_rank set."""
    self.assertTrue(tensor_shape.unknown_shape().as_proto().unknown_rank)
    for shape in (tensor_shape.unknown_shape(ndims=3),
                  tensor_shape.TensorShape([1, 2, 3]),
                  tensor_shape.TensorShape([1, None, 3])):
      self.assertFalse(shape.as_proto().unknown_rank)
# Run the test suite via TensorFlow's googletest wrapper when executed
# directly as a script.
if __name__ == "__main__":
  googletest.main()
| |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_core_dependencies.py.template`!!!
CORE_SOURCE_FILES = [
'src/core/lib/profiling/basic_timers.c',
'src/core/lib/profiling/stap_timers.c',
'src/core/lib/support/alloc.c',
'src/core/lib/support/arena.c',
'src/core/lib/support/atm.c',
'src/core/lib/support/avl.c',
'src/core/lib/support/backoff.c',
'src/core/lib/support/cmdline.c',
'src/core/lib/support/cpu_iphone.c',
'src/core/lib/support/cpu_linux.c',
'src/core/lib/support/cpu_posix.c',
'src/core/lib/support/cpu_windows.c',
'src/core/lib/support/env_linux.c',
'src/core/lib/support/env_posix.c',
'src/core/lib/support/env_windows.c',
'src/core/lib/support/histogram.c',
'src/core/lib/support/host_port.c',
'src/core/lib/support/log.c',
'src/core/lib/support/log_android.c',
'src/core/lib/support/log_linux.c',
'src/core/lib/support/log_posix.c',
'src/core/lib/support/log_windows.c',
'src/core/lib/support/mpscq.c',
'src/core/lib/support/murmur_hash.c',
'src/core/lib/support/stack_lockfree.c',
'src/core/lib/support/string.c',
'src/core/lib/support/string_posix.c',
'src/core/lib/support/string_util_windows.c',
'src/core/lib/support/string_windows.c',
'src/core/lib/support/subprocess_posix.c',
'src/core/lib/support/subprocess_windows.c',
'src/core/lib/support/sync.c',
'src/core/lib/support/sync_posix.c',
'src/core/lib/support/sync_windows.c',
'src/core/lib/support/thd.c',
'src/core/lib/support/thd_posix.c',
'src/core/lib/support/thd_windows.c',
'src/core/lib/support/time.c',
'src/core/lib/support/time_posix.c',
'src/core/lib/support/time_precise.c',
'src/core/lib/support/time_windows.c',
'src/core/lib/support/tls_pthread.c',
'src/core/lib/support/tmpfile_msys.c',
'src/core/lib/support/tmpfile_posix.c',
'src/core/lib/support/tmpfile_windows.c',
'src/core/lib/support/wrap_memcpy.c',
'src/core/lib/surface/init.c',
'src/core/lib/channel/channel_args.c',
'src/core/lib/channel/channel_stack.c',
'src/core/lib/channel/channel_stack_builder.c',
'src/core/lib/channel/connected_channel.c',
'src/core/lib/channel/handshaker.c',
'src/core/lib/channel/handshaker_factory.c',
'src/core/lib/channel/handshaker_registry.c',
'src/core/lib/compression/compression.c',
'src/core/lib/compression/message_compress.c',
'src/core/lib/debug/trace.c',
'src/core/lib/http/format_request.c',
'src/core/lib/http/httpcli.c',
'src/core/lib/http/parser.c',
'src/core/lib/iomgr/closure.c',
'src/core/lib/iomgr/combiner.c',
'src/core/lib/iomgr/endpoint.c',
'src/core/lib/iomgr/endpoint_pair_posix.c',
'src/core/lib/iomgr/endpoint_pair_uv.c',
'src/core/lib/iomgr/endpoint_pair_windows.c',
'src/core/lib/iomgr/error.c',
'src/core/lib/iomgr/ev_epoll_linux.c',
'src/core/lib/iomgr/ev_poll_posix.c',
'src/core/lib/iomgr/ev_posix.c',
'src/core/lib/iomgr/exec_ctx.c',
'src/core/lib/iomgr/executor.c',
'src/core/lib/iomgr/iocp_windows.c',
'src/core/lib/iomgr/iomgr.c',
'src/core/lib/iomgr/iomgr_posix.c',
'src/core/lib/iomgr/iomgr_uv.c',
'src/core/lib/iomgr/iomgr_windows.c',
'src/core/lib/iomgr/load_file.c',
'src/core/lib/iomgr/lockfree_event.c',
'src/core/lib/iomgr/network_status_tracker.c',
'src/core/lib/iomgr/polling_entity.c',
'src/core/lib/iomgr/pollset_set_uv.c',
'src/core/lib/iomgr/pollset_set_windows.c',
'src/core/lib/iomgr/pollset_uv.c',
'src/core/lib/iomgr/pollset_windows.c',
'src/core/lib/iomgr/resolve_address_posix.c',
'src/core/lib/iomgr/resolve_address_uv.c',
'src/core/lib/iomgr/resolve_address_windows.c',
'src/core/lib/iomgr/resource_quota.c',
'src/core/lib/iomgr/sockaddr_utils.c',
'src/core/lib/iomgr/socket_factory_posix.c',
'src/core/lib/iomgr/socket_mutator.c',
'src/core/lib/iomgr/socket_utils_common_posix.c',
'src/core/lib/iomgr/socket_utils_linux.c',
'src/core/lib/iomgr/socket_utils_posix.c',
'src/core/lib/iomgr/socket_utils_uv.c',
'src/core/lib/iomgr/socket_utils_windows.c',
'src/core/lib/iomgr/socket_windows.c',
'src/core/lib/iomgr/tcp_client_posix.c',
'src/core/lib/iomgr/tcp_client_uv.c',
'src/core/lib/iomgr/tcp_client_windows.c',
'src/core/lib/iomgr/tcp_posix.c',
'src/core/lib/iomgr/tcp_server_posix.c',
'src/core/lib/iomgr/tcp_server_utils_posix_common.c',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.c',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.c',
'src/core/lib/iomgr/tcp_server_uv.c',
'src/core/lib/iomgr/tcp_server_windows.c',
'src/core/lib/iomgr/tcp_uv.c',
'src/core/lib/iomgr/tcp_windows.c',
'src/core/lib/iomgr/time_averaged_stats.c',
'src/core/lib/iomgr/timer_generic.c',
'src/core/lib/iomgr/timer_heap.c',
'src/core/lib/iomgr/timer_uv.c',
'src/core/lib/iomgr/udp_server.c',
'src/core/lib/iomgr/unix_sockets_posix.c',
'src/core/lib/iomgr/unix_sockets_posix_noop.c',
'src/core/lib/iomgr/wakeup_fd_cv.c',
'src/core/lib/iomgr/wakeup_fd_eventfd.c',
'src/core/lib/iomgr/wakeup_fd_nospecial.c',
'src/core/lib/iomgr/wakeup_fd_pipe.c',
'src/core/lib/iomgr/wakeup_fd_posix.c',
'src/core/lib/iomgr/workqueue_uv.c',
'src/core/lib/iomgr/workqueue_windows.c',
'src/core/lib/json/json.c',
'src/core/lib/json/json_reader.c',
'src/core/lib/json/json_string.c',
'src/core/lib/json/json_writer.c',
'src/core/lib/slice/b64.c',
'src/core/lib/slice/percent_encoding.c',
'src/core/lib/slice/slice.c',
'src/core/lib/slice/slice_buffer.c',
'src/core/lib/slice/slice_hash_table.c',
'src/core/lib/slice/slice_intern.c',
'src/core/lib/slice/slice_string_helpers.c',
'src/core/lib/surface/alarm.c',
'src/core/lib/surface/api_trace.c',
'src/core/lib/surface/byte_buffer.c',
'src/core/lib/surface/byte_buffer_reader.c',
'src/core/lib/surface/call.c',
'src/core/lib/surface/call_details.c',
'src/core/lib/surface/call_log_batch.c',
'src/core/lib/surface/channel.c',
'src/core/lib/surface/channel_init.c',
'src/core/lib/surface/channel_ping.c',
'src/core/lib/surface/channel_stack_type.c',
'src/core/lib/surface/completion_queue.c',
'src/core/lib/surface/completion_queue_factory.c',
'src/core/lib/surface/event_string.c',
'src/core/lib/surface/lame_client.c',
'src/core/lib/surface/metadata_array.c',
'src/core/lib/surface/server.c',
'src/core/lib/surface/validate_metadata.c',
'src/core/lib/surface/version.c',
'src/core/lib/transport/bdp_estimator.c',
'src/core/lib/transport/byte_stream.c',
'src/core/lib/transport/connectivity_state.c',
'src/core/lib/transport/error_utils.c',
'src/core/lib/transport/metadata.c',
'src/core/lib/transport/metadata_batch.c',
'src/core/lib/transport/pid_controller.c',
'src/core/lib/transport/service_config.c',
'src/core/lib/transport/static_metadata.c',
'src/core/lib/transport/status_conversion.c',
'src/core/lib/transport/timeout_encoding.c',
'src/core/lib/transport/transport.c',
'src/core/lib/transport/transport_op_string.c',
'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.c',
'src/core/ext/transport/chttp2/transport/bin_decoder.c',
'src/core/ext/transport/chttp2/transport/bin_encoder.c',
'src/core/ext/transport/chttp2/transport/chttp2_plugin.c',
'src/core/ext/transport/chttp2/transport/chttp2_transport.c',
'src/core/ext/transport/chttp2/transport/frame_data.c',
'src/core/ext/transport/chttp2/transport/frame_goaway.c',
'src/core/ext/transport/chttp2/transport/frame_ping.c',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.c',
'src/core/ext/transport/chttp2/transport/frame_settings.c',
'src/core/ext/transport/chttp2/transport/frame_window_update.c',
'src/core/ext/transport/chttp2/transport/hpack_encoder.c',
'src/core/ext/transport/chttp2/transport/hpack_parser.c',
'src/core/ext/transport/chttp2/transport/hpack_table.c',
'src/core/ext/transport/chttp2/transport/http2_settings.c',
'src/core/ext/transport/chttp2/transport/huffsyms.c',
'src/core/ext/transport/chttp2/transport/incoming_metadata.c',
'src/core/ext/transport/chttp2/transport/parsing.c',
'src/core/ext/transport/chttp2/transport/stream_lists.c',
'src/core/ext/transport/chttp2/transport/stream_map.c',
'src/core/ext/transport/chttp2/transport/varint.c',
'src/core/ext/transport/chttp2/transport/writing.c',
'src/core/ext/transport/chttp2/alpn/alpn.c',
'src/core/ext/filters/http/client/http_client_filter.c',
'src/core/ext/filters/http/http_filters_plugin.c',
'src/core/ext/filters/http/message_compress/message_compress_filter.c',
'src/core/ext/filters/http/server/http_server_filter.c',
'src/core/lib/http/httpcli_security_connector.c',
'src/core/lib/security/context/security_context.c',
'src/core/lib/security/credentials/composite/composite_credentials.c',
'src/core/lib/security/credentials/credentials.c',
'src/core/lib/security/credentials/credentials_metadata.c',
'src/core/lib/security/credentials/fake/fake_credentials.c',
'src/core/lib/security/credentials/google_default/credentials_generic.c',
'src/core/lib/security/credentials/google_default/google_default_credentials.c',
'src/core/lib/security/credentials/iam/iam_credentials.c',
'src/core/lib/security/credentials/jwt/json_token.c',
'src/core/lib/security/credentials/jwt/jwt_credentials.c',
'src/core/lib/security/credentials/jwt/jwt_verifier.c',
'src/core/lib/security/credentials/oauth2/oauth2_credentials.c',
'src/core/lib/security/credentials/plugin/plugin_credentials.c',
'src/core/lib/security/credentials/ssl/ssl_credentials.c',
'src/core/lib/security/transport/client_auth_filter.c',
'src/core/lib/security/transport/lb_targets_info.c',
'src/core/lib/security/transport/secure_endpoint.c',
'src/core/lib/security/transport/security_connector.c',
'src/core/lib/security/transport/security_handshaker.c',
'src/core/lib/security/transport/server_auth_filter.c',
'src/core/lib/security/transport/tsi_error.c',
'src/core/lib/security/util/json_util.c',
'src/core/lib/surface/init_secure.c',
'src/core/tsi/fake_transport_security.c',
'src/core/tsi/ssl_transport_security.c',
'src/core/tsi/transport_security.c',
'src/core/ext/transport/chttp2/server/chttp2_server.c',
'src/core/ext/transport/chttp2/client/secure/secure_channel_create.c',
'src/core/ext/filters/client_channel/channel_connectivity.c',
'src/core/ext/filters/client_channel/client_channel.c',
'src/core/ext/filters/client_channel/client_channel_factory.c',
'src/core/ext/filters/client_channel/client_channel_plugin.c',
'src/core/ext/filters/client_channel/connector.c',
'src/core/ext/filters/client_channel/http_connect_handshaker.c',
'src/core/ext/filters/client_channel/http_proxy.c',
'src/core/ext/filters/client_channel/lb_policy.c',
'src/core/ext/filters/client_channel/lb_policy_factory.c',
'src/core/ext/filters/client_channel/lb_policy_registry.c',
'src/core/ext/filters/client_channel/parse_address.c',
'src/core/ext/filters/client_channel/proxy_mapper.c',
'src/core/ext/filters/client_channel/proxy_mapper_registry.c',
'src/core/ext/filters/client_channel/resolver.c',
'src/core/ext/filters/client_channel/resolver_factory.c',
'src/core/ext/filters/client_channel/resolver_registry.c',
'src/core/ext/filters/client_channel/retry_throttle.c',
'src/core/ext/filters/client_channel/subchannel.c',
'src/core/ext/filters/client_channel/subchannel_index.c',
'src/core/ext/filters/client_channel/uri_parser.c',
'src/core/ext/filters/deadline/deadline_filter.c',
'src/core/ext/transport/chttp2/client/chttp2_connector.c',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2.c',
'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.c',
'src/core/ext/transport/chttp2/client/insecure/channel_create.c',
'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.c',
'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
'third_party/nanopb/pb_common.c',
'third_party/nanopb/pb_decode.c',
'third_party/nanopb/pb_encode.c',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.c',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.c',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.c',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.c',
'src/core/ext/filters/load_reporting/load_reporting.c',
'src/core/ext/filters/load_reporting/load_reporting_filter.c',
'src/core/ext/census/base_resources.c',
'src/core/ext/census/context.c',
'src/core/ext/census/gen/census.pb.c',
'src/core/ext/census/gen/trace_context.pb.c',
'src/core/ext/census/grpc_context.c',
'src/core/ext/census/grpc_filter.c',
'src/core/ext/census/grpc_plugin.c',
'src/core/ext/census/initialize.c',
'src/core/ext/census/mlog.c',
'src/core/ext/census/operation.c',
'src/core/ext/census/placeholders.c',
'src/core/ext/census/resource.c',
'src/core/ext/census/trace_context.c',
'src/core/ext/census/tracing.c',
'src/core/ext/filters/max_age/max_age_filter.c',
'src/core/ext/filters/message_size/message_size_filter.c',
'src/core/plugin_registry/grpc_plugin_registry.c',
'src/boringssl/err_data.c',
'third_party/boringssl/crypto/aes/aes.c',
'third_party/boringssl/crypto/aes/mode_wrappers.c',
'third_party/boringssl/crypto/asn1/a_bitstr.c',
'third_party/boringssl/crypto/asn1/a_bool.c',
'third_party/boringssl/crypto/asn1/a_d2i_fp.c',
'third_party/boringssl/crypto/asn1/a_dup.c',
'third_party/boringssl/crypto/asn1/a_enum.c',
'third_party/boringssl/crypto/asn1/a_gentm.c',
'third_party/boringssl/crypto/asn1/a_i2d_fp.c',
'third_party/boringssl/crypto/asn1/a_int.c',
'third_party/boringssl/crypto/asn1/a_mbstr.c',
'third_party/boringssl/crypto/asn1/a_object.c',
'third_party/boringssl/crypto/asn1/a_octet.c',
'third_party/boringssl/crypto/asn1/a_print.c',
'third_party/boringssl/crypto/asn1/a_strnid.c',
'third_party/boringssl/crypto/asn1/a_time.c',
'third_party/boringssl/crypto/asn1/a_type.c',
'third_party/boringssl/crypto/asn1/a_utctm.c',
'third_party/boringssl/crypto/asn1/a_utf8.c',
'third_party/boringssl/crypto/asn1/asn1_lib.c',
'third_party/boringssl/crypto/asn1/asn1_par.c',
'third_party/boringssl/crypto/asn1/asn_pack.c',
'third_party/boringssl/crypto/asn1/f_enum.c',
'third_party/boringssl/crypto/asn1/f_int.c',
'third_party/boringssl/crypto/asn1/f_string.c',
'third_party/boringssl/crypto/asn1/t_bitst.c',
'third_party/boringssl/crypto/asn1/tasn_dec.c',
'third_party/boringssl/crypto/asn1/tasn_enc.c',
'third_party/boringssl/crypto/asn1/tasn_fre.c',
'third_party/boringssl/crypto/asn1/tasn_new.c',
'third_party/boringssl/crypto/asn1/tasn_typ.c',
'third_party/boringssl/crypto/asn1/tasn_utl.c',
'third_party/boringssl/crypto/asn1/x_bignum.c',
'third_party/boringssl/crypto/asn1/x_long.c',
'third_party/boringssl/crypto/base64/base64.c',
'third_party/boringssl/crypto/bio/bio.c',
'third_party/boringssl/crypto/bio/bio_mem.c',
'third_party/boringssl/crypto/bio/buffer.c',
'third_party/boringssl/crypto/bio/connect.c',
'third_party/boringssl/crypto/bio/fd.c',
'third_party/boringssl/crypto/bio/file.c',
'third_party/boringssl/crypto/bio/hexdump.c',
'third_party/boringssl/crypto/bio/pair.c',
'third_party/boringssl/crypto/bio/printf.c',
'third_party/boringssl/crypto/bio/socket.c',
'third_party/boringssl/crypto/bio/socket_helper.c',
'third_party/boringssl/crypto/bn/add.c',
'third_party/boringssl/crypto/bn/asm/x86_64-gcc.c',
'third_party/boringssl/crypto/bn/bn.c',
'third_party/boringssl/crypto/bn/bn_asn1.c',
'third_party/boringssl/crypto/bn/cmp.c',
'third_party/boringssl/crypto/bn/convert.c',
'third_party/boringssl/crypto/bn/ctx.c',
'third_party/boringssl/crypto/bn/div.c',
'third_party/boringssl/crypto/bn/exponentiation.c',
'third_party/boringssl/crypto/bn/gcd.c',
'third_party/boringssl/crypto/bn/generic.c',
'third_party/boringssl/crypto/bn/kronecker.c',
'third_party/boringssl/crypto/bn/montgomery.c',
'third_party/boringssl/crypto/bn/montgomery_inv.c',
'third_party/boringssl/crypto/bn/mul.c',
'third_party/boringssl/crypto/bn/prime.c',
'third_party/boringssl/crypto/bn/random.c',
'third_party/boringssl/crypto/bn/rsaz_exp.c',
'third_party/boringssl/crypto/bn/shift.c',
'third_party/boringssl/crypto/bn/sqrt.c',
'third_party/boringssl/crypto/buf/buf.c',
'third_party/boringssl/crypto/bytestring/asn1_compat.c',
'third_party/boringssl/crypto/bytestring/ber.c',
'third_party/boringssl/crypto/bytestring/cbb.c',
'third_party/boringssl/crypto/bytestring/cbs.c',
'third_party/boringssl/crypto/chacha/chacha.c',
'third_party/boringssl/crypto/cipher/aead.c',
'third_party/boringssl/crypto/cipher/cipher.c',
'third_party/boringssl/crypto/cipher/derive_key.c',
'third_party/boringssl/crypto/cipher/e_aes.c',
'third_party/boringssl/crypto/cipher/e_chacha20poly1305.c',
'third_party/boringssl/crypto/cipher/e_des.c',
'third_party/boringssl/crypto/cipher/e_null.c',
'third_party/boringssl/crypto/cipher/e_rc2.c',
'third_party/boringssl/crypto/cipher/e_rc4.c',
'third_party/boringssl/crypto/cipher/e_ssl3.c',
'third_party/boringssl/crypto/cipher/e_tls.c',
'third_party/boringssl/crypto/cipher/tls_cbc.c',
'third_party/boringssl/crypto/cmac/cmac.c',
'third_party/boringssl/crypto/conf/conf.c',
'third_party/boringssl/crypto/cpu-aarch64-linux.c',
'third_party/boringssl/crypto/cpu-arm-linux.c',
'third_party/boringssl/crypto/cpu-arm.c',
'third_party/boringssl/crypto/cpu-intel.c',
'third_party/boringssl/crypto/cpu-ppc64le.c',
'third_party/boringssl/crypto/crypto.c',
'third_party/boringssl/crypto/curve25519/curve25519.c',
'third_party/boringssl/crypto/curve25519/spake25519.c',
'third_party/boringssl/crypto/curve25519/x25519-x86_64.c',
'third_party/boringssl/crypto/des/des.c',
'third_party/boringssl/crypto/dh/check.c',
'third_party/boringssl/crypto/dh/dh.c',
'third_party/boringssl/crypto/dh/dh_asn1.c',
'third_party/boringssl/crypto/dh/params.c',
'third_party/boringssl/crypto/digest/digest.c',
'third_party/boringssl/crypto/digest/digests.c',
'third_party/boringssl/crypto/dsa/dsa.c',
'third_party/boringssl/crypto/dsa/dsa_asn1.c',
'third_party/boringssl/crypto/ec/ec.c',
'third_party/boringssl/crypto/ec/ec_asn1.c',
'third_party/boringssl/crypto/ec/ec_key.c',
'third_party/boringssl/crypto/ec/ec_montgomery.c',
'third_party/boringssl/crypto/ec/oct.c',
'third_party/boringssl/crypto/ec/p224-64.c',
'third_party/boringssl/crypto/ec/p256-64.c',
'third_party/boringssl/crypto/ec/p256-x86_64.c',
'third_party/boringssl/crypto/ec/simple.c',
'third_party/boringssl/crypto/ec/util-64.c',
'third_party/boringssl/crypto/ec/wnaf.c',
'third_party/boringssl/crypto/ecdh/ecdh.c',
'third_party/boringssl/crypto/ecdsa/ecdsa.c',
'third_party/boringssl/crypto/ecdsa/ecdsa_asn1.c',
'third_party/boringssl/crypto/engine/engine.c',
'third_party/boringssl/crypto/err/err.c',
'third_party/boringssl/crypto/evp/digestsign.c',
'third_party/boringssl/crypto/evp/evp.c',
'third_party/boringssl/crypto/evp/evp_asn1.c',
'third_party/boringssl/crypto/evp/evp_ctx.c',
'third_party/boringssl/crypto/evp/p_dsa_asn1.c',
'third_party/boringssl/crypto/evp/p_ec.c',
'third_party/boringssl/crypto/evp/p_ec_asn1.c',
'third_party/boringssl/crypto/evp/p_rsa.c',
'third_party/boringssl/crypto/evp/p_rsa_asn1.c',
'third_party/boringssl/crypto/evp/pbkdf.c',
'third_party/boringssl/crypto/evp/print.c',
'third_party/boringssl/crypto/evp/sign.c',
'third_party/boringssl/crypto/ex_data.c',
'third_party/boringssl/crypto/hkdf/hkdf.c',
'third_party/boringssl/crypto/hmac/hmac.c',
'third_party/boringssl/crypto/lhash/lhash.c',
'third_party/boringssl/crypto/md4/md4.c',
'third_party/boringssl/crypto/md5/md5.c',
'third_party/boringssl/crypto/mem.c',
'third_party/boringssl/crypto/modes/cbc.c',
'third_party/boringssl/crypto/modes/cfb.c',
'third_party/boringssl/crypto/modes/ctr.c',
'third_party/boringssl/crypto/modes/gcm.c',
'third_party/boringssl/crypto/modes/ofb.c',
'third_party/boringssl/crypto/newhope/error_correction.c',
'third_party/boringssl/crypto/newhope/newhope.c',
'third_party/boringssl/crypto/newhope/ntt.c',
'third_party/boringssl/crypto/newhope/poly.c',
'third_party/boringssl/crypto/newhope/precomp.c',
'third_party/boringssl/crypto/newhope/reduce.c',
'third_party/boringssl/crypto/obj/obj.c',
'third_party/boringssl/crypto/obj/obj_xref.c',
'third_party/boringssl/crypto/pem/pem_all.c',
'third_party/boringssl/crypto/pem/pem_info.c',
'third_party/boringssl/crypto/pem/pem_lib.c',
'third_party/boringssl/crypto/pem/pem_oth.c',
'third_party/boringssl/crypto/pem/pem_pk8.c',
'third_party/boringssl/crypto/pem/pem_pkey.c',
'third_party/boringssl/crypto/pem/pem_x509.c',
'third_party/boringssl/crypto/pem/pem_xaux.c',
'third_party/boringssl/crypto/pkcs8/p5_pbe.c',
'third_party/boringssl/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl/crypto/pkcs8/p8_pkey.c',
'third_party/boringssl/crypto/pkcs8/pkcs8.c',
'third_party/boringssl/crypto/poly1305/poly1305.c',
'third_party/boringssl/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl/crypto/poly1305/poly1305_vec.c',
'third_party/boringssl/crypto/rand/deterministic.c',
'third_party/boringssl/crypto/rand/rand.c',
'third_party/boringssl/crypto/rand/urandom.c',
'third_party/boringssl/crypto/rand/windows.c',
'third_party/boringssl/crypto/rc4/rc4.c',
'third_party/boringssl/crypto/refcount_c11.c',
'third_party/boringssl/crypto/refcount_lock.c',
'third_party/boringssl/crypto/rsa/blinding.c',
'third_party/boringssl/crypto/rsa/padding.c',
'third_party/boringssl/crypto/rsa/rsa.c',
'third_party/boringssl/crypto/rsa/rsa_asn1.c',
'third_party/boringssl/crypto/rsa/rsa_impl.c',
'third_party/boringssl/crypto/sha/sha1.c',
'third_party/boringssl/crypto/sha/sha256.c',
'third_party/boringssl/crypto/sha/sha512.c',
'third_party/boringssl/crypto/stack/stack.c',
'third_party/boringssl/crypto/thread.c',
'third_party/boringssl/crypto/thread_none.c',
'third_party/boringssl/crypto/thread_pthread.c',
'third_party/boringssl/crypto/thread_win.c',
'third_party/boringssl/crypto/time_support.c',
'third_party/boringssl/crypto/x509/a_digest.c',
'third_party/boringssl/crypto/x509/a_sign.c',
'third_party/boringssl/crypto/x509/a_strex.c',
'third_party/boringssl/crypto/x509/a_verify.c',
'third_party/boringssl/crypto/x509/algorithm.c',
'third_party/boringssl/crypto/x509/asn1_gen.c',
'third_party/boringssl/crypto/x509/by_dir.c',
'third_party/boringssl/crypto/x509/by_file.c',
'third_party/boringssl/crypto/x509/i2d_pr.c',
'third_party/boringssl/crypto/x509/pkcs7.c',
'third_party/boringssl/crypto/x509/rsa_pss.c',
'third_party/boringssl/crypto/x509/t_crl.c',
'third_party/boringssl/crypto/x509/t_req.c',
'third_party/boringssl/crypto/x509/t_x509.c',
'third_party/boringssl/crypto/x509/t_x509a.c',
'third_party/boringssl/crypto/x509/x509.c',
'third_party/boringssl/crypto/x509/x509_att.c',
'third_party/boringssl/crypto/x509/x509_cmp.c',
'third_party/boringssl/crypto/x509/x509_d2.c',
'third_party/boringssl/crypto/x509/x509_def.c',
'third_party/boringssl/crypto/x509/x509_ext.c',
'third_party/boringssl/crypto/x509/x509_lu.c',
'third_party/boringssl/crypto/x509/x509_obj.c',
'third_party/boringssl/crypto/x509/x509_r2x.c',
'third_party/boringssl/crypto/x509/x509_req.c',
'third_party/boringssl/crypto/x509/x509_set.c',
'third_party/boringssl/crypto/x509/x509_trs.c',
'third_party/boringssl/crypto/x509/x509_txt.c',
'third_party/boringssl/crypto/x509/x509_v3.c',
'third_party/boringssl/crypto/x509/x509_vfy.c',
'third_party/boringssl/crypto/x509/x509_vpm.c',
'third_party/boringssl/crypto/x509/x509cset.c',
'third_party/boringssl/crypto/x509/x509name.c',
'third_party/boringssl/crypto/x509/x509rset.c',
'third_party/boringssl/crypto/x509/x509spki.c',
'third_party/boringssl/crypto/x509/x509type.c',
'third_party/boringssl/crypto/x509/x_algor.c',
'third_party/boringssl/crypto/x509/x_all.c',
'third_party/boringssl/crypto/x509/x_attrib.c',
'third_party/boringssl/crypto/x509/x_crl.c',
'third_party/boringssl/crypto/x509/x_exten.c',
'third_party/boringssl/crypto/x509/x_info.c',
'third_party/boringssl/crypto/x509/x_name.c',
'third_party/boringssl/crypto/x509/x_pkey.c',
'third_party/boringssl/crypto/x509/x_pubkey.c',
'third_party/boringssl/crypto/x509/x_req.c',
'third_party/boringssl/crypto/x509/x_sig.c',
'third_party/boringssl/crypto/x509/x_spki.c',
'third_party/boringssl/crypto/x509/x_val.c',
'third_party/boringssl/crypto/x509/x_x509.c',
'third_party/boringssl/crypto/x509/x_x509a.c',
'third_party/boringssl/crypto/x509v3/pcy_cache.c',
'third_party/boringssl/crypto/x509v3/pcy_data.c',
'third_party/boringssl/crypto/x509v3/pcy_lib.c',
'third_party/boringssl/crypto/x509v3/pcy_map.c',
'third_party/boringssl/crypto/x509v3/pcy_node.c',
'third_party/boringssl/crypto/x509v3/pcy_tree.c',
'third_party/boringssl/crypto/x509v3/v3_akey.c',
'third_party/boringssl/crypto/x509v3/v3_akeya.c',
'third_party/boringssl/crypto/x509v3/v3_alt.c',
'third_party/boringssl/crypto/x509v3/v3_bcons.c',
'third_party/boringssl/crypto/x509v3/v3_bitst.c',
'third_party/boringssl/crypto/x509v3/v3_conf.c',
'third_party/boringssl/crypto/x509v3/v3_cpols.c',
'third_party/boringssl/crypto/x509v3/v3_crld.c',
'third_party/boringssl/crypto/x509v3/v3_enum.c',
'third_party/boringssl/crypto/x509v3/v3_extku.c',
'third_party/boringssl/crypto/x509v3/v3_genn.c',
'third_party/boringssl/crypto/x509v3/v3_ia5.c',
'third_party/boringssl/crypto/x509v3/v3_info.c',
'third_party/boringssl/crypto/x509v3/v3_int.c',
'third_party/boringssl/crypto/x509v3/v3_lib.c',
'third_party/boringssl/crypto/x509v3/v3_ncons.c',
'third_party/boringssl/crypto/x509v3/v3_pci.c',
'third_party/boringssl/crypto/x509v3/v3_pcia.c',
'third_party/boringssl/crypto/x509v3/v3_pcons.c',
'third_party/boringssl/crypto/x509v3/v3_pku.c',
'third_party/boringssl/crypto/x509v3/v3_pmaps.c',
'third_party/boringssl/crypto/x509v3/v3_prn.c',
'third_party/boringssl/crypto/x509v3/v3_purp.c',
'third_party/boringssl/crypto/x509v3/v3_skey.c',
'third_party/boringssl/crypto/x509v3/v3_sxnet.c',
'third_party/boringssl/crypto/x509v3/v3_utl.c',
'third_party/boringssl/ssl/custom_extensions.c',
'third_party/boringssl/ssl/d1_both.c',
'third_party/boringssl/ssl/d1_lib.c',
'third_party/boringssl/ssl/d1_pkt.c',
'third_party/boringssl/ssl/d1_srtp.c',
'third_party/boringssl/ssl/dtls_method.c',
'third_party/boringssl/ssl/dtls_record.c',
'third_party/boringssl/ssl/handshake_client.c',
'third_party/boringssl/ssl/handshake_server.c',
'third_party/boringssl/ssl/s3_both.c',
'third_party/boringssl/ssl/s3_enc.c',
'third_party/boringssl/ssl/s3_lib.c',
'third_party/boringssl/ssl/s3_pkt.c',
'third_party/boringssl/ssl/ssl_aead_ctx.c',
'third_party/boringssl/ssl/ssl_asn1.c',
'third_party/boringssl/ssl/ssl_buffer.c',
'third_party/boringssl/ssl/ssl_cert.c',
'third_party/boringssl/ssl/ssl_cipher.c',
'third_party/boringssl/ssl/ssl_ecdh.c',
'third_party/boringssl/ssl/ssl_file.c',
'third_party/boringssl/ssl/ssl_lib.c',
'third_party/boringssl/ssl/ssl_rsa.c',
'third_party/boringssl/ssl/ssl_session.c',
'third_party/boringssl/ssl/ssl_stat.c',
'third_party/boringssl/ssl/t1_enc.c',
'third_party/boringssl/ssl/t1_lib.c',
'third_party/boringssl/ssl/tls13_both.c',
'third_party/boringssl/ssl/tls13_client.c',
'third_party/boringssl/ssl/tls13_enc.c',
'third_party/boringssl/ssl/tls13_server.c',
'third_party/boringssl/ssl/tls_method.c',
'third_party/boringssl/ssl/tls_record.c',
'third_party/zlib/adler32.c',
'third_party/zlib/compress.c',
'third_party/zlib/crc32.c',
'third_party/zlib/deflate.c',
'third_party/zlib/gzclose.c',
'third_party/zlib/gzlib.c',
'third_party/zlib/gzread.c',
'third_party/zlib/gzwrite.c',
'third_party/zlib/infback.c',
'third_party/zlib/inffast.c',
'third_party/zlib/inflate.c',
'third_party/zlib/inftrees.c',
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
'third_party/cares/cares/ares__close_sockets.c',
'third_party/cares/cares/ares__get_hostent.c',
'third_party/cares/cares/ares__read_line.c',
'third_party/cares/cares/ares__timeval.c',
'third_party/cares/cares/ares_cancel.c',
'third_party/cares/cares/ares_create_query.c',
'third_party/cares/cares/ares_data.c',
'third_party/cares/cares/ares_destroy.c',
'third_party/cares/cares/ares_expand_name.c',
'third_party/cares/cares/ares_expand_string.c',
'third_party/cares/cares/ares_fds.c',
'third_party/cares/cares/ares_free_hostent.c',
'third_party/cares/cares/ares_free_string.c',
'third_party/cares/cares/ares_getenv.c',
'third_party/cares/cares/ares_gethostbyaddr.c',
'third_party/cares/cares/ares_gethostbyname.c',
'third_party/cares/cares/ares_getnameinfo.c',
'third_party/cares/cares/ares_getopt.c',
'third_party/cares/cares/ares_getsock.c',
'third_party/cares/cares/ares_init.c',
'third_party/cares/cares/ares_library_init.c',
'third_party/cares/cares/ares_llist.c',
'third_party/cares/cares/ares_mkquery.c',
'third_party/cares/cares/ares_nowarn.c',
'third_party/cares/cares/ares_options.c',
'third_party/cares/cares/ares_parse_a_reply.c',
'third_party/cares/cares/ares_parse_aaaa_reply.c',
'third_party/cares/cares/ares_parse_mx_reply.c',
'third_party/cares/cares/ares_parse_naptr_reply.c',
'third_party/cares/cares/ares_parse_ns_reply.c',
'third_party/cares/cares/ares_parse_ptr_reply.c',
'third_party/cares/cares/ares_parse_soa_reply.c',
'third_party/cares/cares/ares_parse_srv_reply.c',
'third_party/cares/cares/ares_parse_txt_reply.c',
'third_party/cares/cares/ares_platform.c',
'third_party/cares/cares/ares_process.c',
'third_party/cares/cares/ares_query.c',
'third_party/cares/cares/ares_search.c',
'third_party/cares/cares/ares_send.c',
'third_party/cares/cares/ares_strcasecmp.c',
'third_party/cares/cares/ares_strdup.c',
'third_party/cares/cares/ares_strerror.c',
'third_party/cares/cares/ares_timeout.c',
'third_party/cares/cares/ares_version.c',
'third_party/cares/cares/ares_writev.c',
'third_party/cares/cares/bitncmp.c',
'third_party/cares/cares/inet_net_pton.c',
'third_party/cares/cares/inet_ntop.c',
'third_party/cares/cares/windows_port.c',
]
| |
##########################################################################
#
# Copyright (c) 2013-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
from Qt import QtWidgets
## A simple PlugValueWidget which just displays the name of the plug,
# with the popup action menu for the plug.
#
# Supported plug metadata :
#
# - "labelPlugValueWidget:renameable"
class LabelPlugValueWidget( GafferUI.PlugValueWidget ) :
    """A simple PlugValueWidget which just displays the name of the plug,
    with the popup action menu for the plug.

    Supported plug metadata :

    - "labelPlugValueWidget:renameable" : when truthy, double-clicking the
      label switches to an editable NameWidget for renaming the plug.
    """

    def __init__( self, plug, horizontalAlignment=GafferUI.Label.HorizontalAlignment.Left, verticalAlignment=GafferUI.Label.VerticalAlignment.Center, **kw ) :

        GafferUI.PlugValueWidget.__init__( self, QtWidgets.QWidget(), plug, **kw )

        layout = QtWidgets.QHBoxLayout()
        layout.setContentsMargins( 0, 0, 0, 0 )
        layout.setSizeConstraint( QtWidgets.QLayout.SetMinAndMaxSize )
        self._qtWidget().setLayout( layout )

        self.__label = GafferUI.NameLabel(
            plug,
            horizontalAlignment = horizontalAlignment,
            verticalAlignment = verticalAlignment,
            formatter = self.__formatter,
        )
        self.__label._qtWidget().setObjectName( "gafferPlugLabel" )
        layout.addWidget( self.__label._qtWidget() )

        # Created lazily by __labelDoubleClicked(), only when renaming is enabled.
        self.__editableLabel = None

        # Connecting at group 0 so we're called before the slots
        # connected by the NameLabel class.
        self.__label.dragBeginSignal().connect( 0, Gaffer.WeakMethod( self.__dragBegin ), scoped = False )
        self.__label.dragEndSignal().connect( 0, Gaffer.WeakMethod( self.__dragEnd ), scoped = False )

        Gaffer.Metadata.plugValueChangedSignal().connect( Gaffer.WeakMethod( self.__plugMetadataChanged ), scoped = False )

        self._addPopupMenu( self.__label )

        self.setPlug( plug )

    def label( self ) :
        """Returns the internal NameLabel widget."""

        return self.__label

    def setPlug( self, plug ) :

        GafferUI.PlugValueWidget.setPlug( self, plug )

        self.__label.setGraphComponent( plug )
        if self.__editableLabel is not None :
            self.__editableLabel.setGraphComponent( plug )

        self.__updateDoubleClickConnection()

    def setHighlighted( self, highlighted ) :

        GafferUI.PlugValueWidget.setHighlighted( self, highlighted )

        self.__label.setHighlighted( highlighted )

    def getToolTip( self ) :

        result = GafferUI.PlugValueWidget.getToolTip( self )

        if self.getPlug() is not None :
            if result :
                result += "\n"
            result += "## Actions\n\n"
            result += "- Left drag to connect\n"
            if hasattr( self.getPlug(), "getValue" ) :
                result += "- Shift+left or middle drag to transfer value"

        return result

    def _updateFromPlug( self ) :

        plug = self.getPlug()

        valueChanged = plug.getInput() is not None
        if not valueChanged and isinstance( plug, Gaffer.ValuePlug ) :
            with self.getContext() :
                if Gaffer.NodeAlgo.hasUserDefault( plug ) :
                    # NOTE : was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit. Narrowed to Exception.
                    try :
                        valueChanged = not Gaffer.NodeAlgo.isSetToUserDefault( plug )
                    except Exception :
                        # An error here should not cause the UI to break, especially
                        # since the corresponding value widget could be indicating
                        # the error itself.
                        valueChanged = True
                else :
                    try :
                        valueChanged = not plug.isSetToDefault()
                    except Exception :
                        # An error here should not cause the UI to break, especially
                        # since the corresponding value widget could be indicating
                        # the error itself.
                        valueChanged = True

        self.__setValueChanged( valueChanged )

    # Sets whether or not the label will be rendered in a ValueChanged state.
    def __setValueChanged( self, valueChanged ) :

        if valueChanged == self.__getValueChanged() :
            return

        self.__label._qtWidget().setProperty( "gafferValueChanged", GafferUI._Variant.toVariant( valueChanged ) )
        self.__label._repolish()

    def __getValueChanged( self ) :

        if "gafferValueChanged" not in self.__label._qtWidget().dynamicPropertyNames() :
            return False

        return GafferUI._Variant.fromVariant( self.__label._qtWidget().property( "gafferValueChanged" ) )

    def __dragBegin( self, widget, event ) :

        # Initiate a drag containing the value of the plug
        # for shift-left drag or a middle drag. Initiate a
        # drag containing the plug for a straight left-drag.

        shift = event.modifiers & event.Modifiers.Shift
        left = event.buttons == event.Buttons.Left
        middle = event.buttons == event.Buttons.Middle
        if ( shift and left ) or middle :
            if not hasattr( self.getPlug(), "getValue" ) :
                return None
            GafferUI.Pointer.setCurrent( "values" )
            with self.getContext() :
                return self.getPlug().getValue()
        elif left :
            GafferUI.Pointer.setCurrent( "plug" )
            return self.getPlug()

    def __dragEnd( self, widget, event ) :

        GafferUI.Pointer.setCurrent( None )

    def __updateDoubleClickConnection( self ) :

        self.__labelDoubleClickConnection = None

        if self.getPlug() is None or not Gaffer.Metadata.value( self.getPlug(), "labelPlugValueWidget:renameable" ) :
            return

        self.__labelDoubleClickConnection = self.__label.buttonDoubleClickSignal().connect( Gaffer.WeakMethod( self.__labelDoubleClicked ) )

    def __labelDoubleClicked( self, label, event ) :

        assert( label is self.__label )

        if Gaffer.MetadataAlgo.readOnly( self.getPlug() ) :
            return

        if self.__editableLabel is None :
            self.__editableLabel = GafferUI.NameWidget( self.getPlug() )
            self.__editableLabel._qtWidget().setMinimumSize( self.label()._qtWidget().minimumSize() )
            self.__editableLabel._qtWidget().setMaximumSize( self.label()._qtWidget().maximumSize() )
            # Connect at group 0 so we're called before the NameWidget's own slots.
            self.__labelEditingFinishedConnection = self.__editableLabel.editingFinishedSignal().connect( 0, Gaffer.WeakMethod( self.__labelEditingFinished ) )
            self._qtWidget().layout().insertWidget( 0, self.__editableLabel._qtWidget() )

        self.__label.setVisible( False )
        self.__editableLabel.setVisible( True )
        self.__editableLabel.setSelection( 0, len( self.__editableLabel.getText() ) )
        self.__editableLabel.grabFocus()

    def __labelEditingFinished( self, nameWidget ) :

        with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
            # Do what the NameWidget would have done for us anyway, so we
            # can group it with the metadata deregistration in the undo queue.
            self.getPlug().setName( nameWidget.getText() )
            # Remove any metadata label which would mask the name - if a user
            # has gone to the trouble of setting a sensible name, then it should
            # take precedence.
            Gaffer.Metadata.deregisterValue( self.getPlug(), "label" )

        self.__label.setVisible( True )
        self.__editableLabel.setVisible( False )

        # Return True so that the NameWidget's handler isn't run, since we
        # did all the work ourselves.
        return True

    def __plugMetadataChanged( self, nodeTypeId, plugPath, key, plug ) :

        if self.getPlug() is None :
            return

        if key=="label" and Gaffer.MetadataAlgo.affectedByChange( self.getPlug(), nodeTypeId, plugPath, plug ) :
            # The NameLabel doesn't know that our formatter is sensitive
            # to the metadata, so give it a little kick.
            self.__label.setFormatter( self.__formatter )

    @staticmethod
    def __formatter( graphComponents ) :
        """Formats the label text : a "label" metadata value on the final
        graph component wins over the default name-based formatting."""

        if graphComponents :
            label = Gaffer.Metadata.value( graphComponents[-1], "label" )
            if label is not None :
                return label

        return GafferUI.NameLabel.defaultFormatter( graphComponents )
| |
import pygame
import random
import sys

pygame.init()

# --- Window / gameplay constants -------------------------------------------
version = 1.0
display_width = 800
display_heigth = 600  # NOTE(review): misspelt name ("heigth") kept — used throughout the file
resources_path = 'resources/'

# Colour palette (RGB tuples)
black = (0,0,0)
white = (255,255,255)
red = (200,0,0)
green = (0,200,0)
blue = (0,0,200)
bright_red = (255,0,0)
bright_green = (0,255,0)
bright_blue = (0,0,255)
purple = (148,0,211)
yellow = (255,255,0)

# Default car dimensions / speed; overwritten when a car is picked in the menu.
car_width = 56
car_height = 80
car_speed = 5
base_shoot_cd = 30  # frames between rocket shots

# Global UI state flags
pause = False
sound_playing = False
difficulty_adjusted = False

# --- Display, assets and sounds --------------------------------------------
gameDisplay = pygame.display.set_mode((display_width,display_heigth))
pygame.display.set_caption('NEW GTA TOTALLY NOT OVERHYPED KAPPA GAME nomansbuy 2.0')
clock = pygame.time.Clock()

carImg1 = pygame.image.load(resources_path + 'car.png')
carImg2 = pygame.image.load(resources_path + 'BestGameCarLmao.png')
carImg3 = pygame.image.load(resources_path + 'dildowcar.png')
carImg4 = pygame.image.load(resources_path + 'lambo.png')
icon = pygame.image.load(resources_path + 'bin.ico')
img_bg2 = pygame.image.load(resources_path + 'sanicpepe.png')
rocket = pygame.image.load(resources_path + 'rocket.png')
selected_car = carImg1  # the car sprite currently chosen in the intro menu

crash_sound = pygame.mixer.Sound(resources_path + 'Lol U Died.wav')
pause_sound = pygame.mixer.Sound(resources_path + 'GetUrAssBackThere.wav')
intro_sound = pygame.mixer.Sound(resources_path + 'DialUp Internet.wav')
intro_sound2 = pygame.mixer.Sound(resources_path + 'Oh Hello There.wav')
ding_sound = pygame.mixer.Sound(resources_path + 'Ding.wav')
explosion_sound = pygame.mixer.Sound(resources_path + 'Explosion.wav')
explosion_sound2 = pygame.mixer.Sound(resources_path + 'Object Crash Sound.wav')
splat_sound = pygame.mixer.Sound(resources_path + 'Splat.wav')
# pygame.mixer.music.load returns None; the name is rebound on music changes.
play_music = pygame.mixer.music.load(resources_path + 'despacito.wav')
pygame.display.set_icon(icon)
def draw_score(pretext, count, location):
    """Render a small score line (e.g. "Juked 5 nuts") at *location*."""
    score_font = pygame.font.SysFont(None, 24)
    label = pretext + str(count) + " nuts"
    surface = score_font.render(label, True, black)
    gameDisplay.blit(surface, location)
def things(x, y, w, h, color):
    """Draw a falling obstacle as a filled rectangle."""
    rect = [x, y, w, h]
    pygame.draw.rect(gameDisplay, color, rect)
def car(x,y):
    """Blit the currently selected car sprite at (x, y)."""
    gameDisplay.blit(selected_car, (x,y))
def text_objects(text, font):
    """Render *text* in black with *font*; return (surface, bounding rect)."""
    surface = font.render(text, True, black)
    rect = surface.get_rect()
    return surface, rect
def title_display(text, font, x, y):
    """Draw *text* centred on the point (x, y)."""
    surface, rect = text_objects(text, font)
    rect.center = (x, y)
    gameDisplay.blit(surface, rect)
def text_display(text, font, location):
    """Draw *text* with its top-left corner at *location* (an (x, y) tuple)."""
    surface, _unused_rect = text_objects(text, font)
    # blit accepts a plain position tuple as the destination.
    gameDisplay.blit(surface, location)
def difficulty_settings(score):
    """Paint the background for the current score band, and bump difficulty
    (car speed + background music) exactly once each time a 10-dodge
    threshold (10/20/30) is crossed.
    """
    global play_music
    global car_speed
    global difficulty_adjusted
    # Background colour per score band; past 30 a background image is added.
    if score < 10:
        gameDisplay.fill(white)
    elif score < 20:
        gameDisplay.fill(green)
    elif score < 30:
        gameDisplay.fill(blue)
    else:
        gameDisplay.fill(yellow)
        gameDisplay.blit(img_bg2, (0,0))
    # One-shot difficulty bumps: difficulty_adjusted guards against the
    # speed increase / music swap firing every frame while score stays equal
    # to the threshold value.
    if not difficulty_adjusted:
        if score == 10:
            car_speed += 3
            pygame.mixer.music.stop()
            play_music = pygame.mixer.music.load(resources_path + 'despacito2.wav')
            pygame.mixer.music.play(-1)
            difficulty_adjusted = True
        elif score == 20:
            car_speed += 3
            pygame.mixer.music.stop()
            play_music = pygame.mixer.music.load(resources_path + 'despacito3.wav')
            pygame.mixer.music.play(-1)
            difficulty_adjusted = True
        elif score == 30:
            car_speed += 3
            pygame.mixer.music.stop()
            play_music = pygame.mixer.music.load(resources_path + 'BITCHES AINT SHIT.wav')
            pygame.mixer.music.play(-1)
            difficulty_adjusted = True
    # Re-arm the guard one point before the next threshold is reached.
    if score == 19 or score == 29:
        difficulty_adjusted = False
def crash():
    """Game-over screen.

    Stops the music, plays the crash sound, reloads the default track for the
    next run, then loops forever showing restart/quit buttons. Only exits via
    the buttons (game_loop / quitgame) or the window close event.
    """
    global play_music
    pygame.mixer.music.stop()
    pygame.mixer.Sound.play(crash_sound)
    play_music = pygame.mixer.music.load(resources_path + 'despacito.wav')
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quitgame()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_p:
                    # clears the global pause flag; has no visible effect here
                    unpause()
        title_display("You ded boi", pygame.font.Font('freesansbold.ttf', 100), display_width/2, display_heigth/2)
        button("Cum agen", 150, 450, 100, 50, green, bright_green, game_loop)
        button("Rope self", 550, 450, 100, 50, red, bright_red, quitgame)
        pygame.display.update()
        clock.tick(15)
def quitgame():
    """Shut pygame down and terminate the process."""
    pygame.quit()
    sys.exit()
def button(msg, x, y, w, h, c, c_hover, action=None):
    """Draw a clickable rectangle with centred text.

    Uses the hover colour while the mouse is inside the rectangle and calls
    *action* when the left button is held there.
    """
    mouse = pygame.mouse.get_pos()
    pressed = pygame.mouse.get_pressed()
    hovered = x < mouse[0] < x + w and y < mouse[1] < y + h
    if hovered:
        pygame.draw.rect(gameDisplay, c_hover, (x, y, w, h))
        if pressed[0] == 1 and action is not None:
            action()
    else:
        pygame.draw.rect(gameDisplay, c, (x, y, w, h))
    title_display(msg, pygame.font.Font('freesansbold.ttf', 20), x + w/2, y + h/2)
def car_button(car_img, x, y, cw, ch, cs):
    """Draw a selectable car thumbnail in the intro menu.

    Clicking the thumbnail makes *car_img* the active car and installs its
    width/height/speed into the globals; the active car is highlighted.
    """
    global selected_car, car_width, car_height, car_speed
    mouse = pygame.mouse.get_pos()
    pressed = pygame.mouse.get_pressed()
    w, h = 60, 80
    hovered = x < mouse[0] < x + w and y < mouse[1] < y + h
    if hovered and pressed[0] == 1:
        selected_car = car_img
        car_width = cw
        car_height = ch
        car_speed = cs
    frame_colour = bright_green if selected_car == car_img else blue
    pygame.draw.rect(gameDisplay, frame_colour, (x, y, w, h))
    gameDisplay.blit(pygame.transform.scale(car_img, (cw, 80)), (x + w/2 - cw/2, y))
def unpause():
    """Clear the global pause flag (lets the paused() loop exit)."""
    global pause
    pause = False
def paused():
    """Pause screen: loops while the global `pause` flag is set, then
    restarts the music on resume."""
    pygame.mixer.music.stop()
    pygame.mixer.Sound.play(pause_sound)
    while pause:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quitgame()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_p:
                    unpause()
        gameDisplay.fill(white)
        title_display("paused ay bruv", pygame.font.Font('freesansbold.ttf', 80), display_width/2, display_heigth/2)
        button("LEZ GO", 150, 450, 100, 50, green, bright_green, unpause)
        button("End lyf", 550, 450, 100, 50, red, bright_red, quitgame)
        pygame.display.update()
        clock.tick(15)
    pygame.mixer.music.play(-1)
def game_intro():
    """Title/menu screen: car selection plus start/quit buttons.

    Loops until the player starts the game (button or Enter/Space) or quits.
    """
    pygame.mixer.music.stop()
    pygame.mixer.Sound.play(random.choice([intro_sound, intro_sound2]))
    intro = True
    while intro:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quitgame()
            # BUG FIX: the original compared event.type against the key codes
            # pygame.K_KP_ENTER / pygame.K_SPACE. event.type holds event-type
            # constants (KEYDOWN, QUIT, ...), not key codes, so the keyboard
            # shortcut never worked. Check KEYDOWN and then event.key.
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_KP_ENTER or event.key == pygame.K_SPACE:
                    intro = False
                    game_loop()
        gameDisplay.fill(white)
        text_display('Version: ' + str(version), pygame.font.Font('freesansbold.ttf', 15), (0,0))
        title_display("ayy lmaooo sim2k17", pygame.font.Font('freesansbold.ttf', 80), display_width/2, display_heigth/3)
        car_button(carImg1, display_width/4, 300, 56, 80, 5)
        car_button(carImg2, display_width/4+100, 300, 56, 80, 10)
        car_button(carImg3, display_width/4+200, 300, 56, 80, 5)
        car_button(carImg4, display_width/4+300, 300, 10, 150, 1)
        button("GO~!", 150, 450, 100, 50, green, bright_green, game_loop)
        button("Fkoff", 550, 450, 100, 50, red, bright_red, quitgame)
        pygame.display.update()
        clock.tick(15)
def game_loop():
    """Main gameplay loop.

    Moves the player's car left/right, drops obstacles, handles the rocket
    weapon of carImg3, and tracks dodged/destroyed counts until a crash.
    """
    global pause, car_speed
    pygame.mixer.music.play(-1)
    x = display_width * 0.5
    y = display_heigth - car_height - 20
    x_change = 0
    # Obstacle state: spawned well above the screen, drops at thing_speed.
    thing_x = random.randrange(0, display_width)
    thing_y = -600
    thing_speed = 5
    thing_w = 100
    thing_h = 100
    # Rocket state: (-1, -1) means "no rocket in flight / off-screen".
    shoot_cd = 0
    rocket_x = -1
    rocket_y = -1
    # Re-derive speed from the selected car (menu may have changed it).
    if selected_car == carImg1 or selected_car == carImg3:
        car_speed = 5
    elif selected_car == carImg2:
        car_speed = 10
    else:
        car_speed = 1
    dodged = 0
    destroyed = 0
    # Most-recent keypress wins: newest direction is inserted at index 0.
    moves = list()
    gameExit = False
    while not gameExit:
        # --- input handling ---
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quitgame()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    gameExit = True
                    game_intro()
                if event.key == pygame.K_LEFT:
                    moves.insert(0, 'left')
                if event.key == pygame.K_RIGHT:
                    moves.insert(0, 'right')
                if event.key == pygame.K_p:
                    pause = True
                    paused()
                if event.key == pygame.K_SPACE or event.key == pygame.K_UP:
                    # Only the rocket car can shoot, and only off cooldown.
                    if selected_car == carImg3 and shoot_cd == 0:
                        rocket_x = x + car_width/2
                        rocket_y = y
                        shoot_cd = base_shoot_cd
            if event.type == pygame.KEYUP:
                # BUG FIX: guard removal — a KEYUP without a matching KEYDOWN
                # (key held down before the loop started) used to raise
                # ValueError from moves.index().
                if event.key == pygame.K_LEFT and 'left' in moves:
                    moves.remove('left')
                if event.key == pygame.K_RIGHT and 'right' in moves:
                    moves.remove('right')
        if len(moves) > 0:
            if moves[0] == "left":
                x_change = -car_speed
            elif moves[0] == "right":
                x_change = car_speed
        else:
            x_change = 0
        # --- movement ---
        x += x_change
        thing_y += thing_speed
        rocket_y -= 20
        # --- drawing (difficulty_settings also paints the background) ---
        difficulty_settings(dodged)
        things(thing_x, thing_y, thing_w, thing_h, black)
        car(x,y)
        gameDisplay.blit(rocket, (rocket_x, rocket_y))
        draw_score("Juked ", dodged, (0,0))
        if selected_car == carImg3:
            draw_score("Rekt ", destroyed, (0, 30))
        # --- collisions ---
        if x > display_width - car_width or x < 0:  # car hitting border
            crash()
        if thing_y < rocket_y < thing_y + thing_h and thing_x < rocket_x < thing_x + thing_w:  # rocket hit
            pygame.mixer.Sound.play(splat_sound)
            # BUG FIX: thing_x / thing_y assignments were swapped here
            # (thing_x got "-600 - thing_y" and thing_y got a random column).
            # Respawn above the screen at a random horizontal position, matching
            # the dodge branch below.
            thing_y = -600 - thing_h
            thing_x = random.randrange(0, display_width)
            # Park the rocket off-screen so a single shot can't hit again.
            rocket_x = -1
            rocket_y = -1
            destroyed += 1
            dodged += 1
        if thing_y > display_heigth:  # obstacle fell past the bottom: dodged
            pygame.mixer.Sound.play(ding_sound)
            thing_y = 0 - thing_h
            thing_x = random.randrange(0, display_width)
            dodged += 1
            thing_speed += 1
        if y < thing_y + thing_h and y + car_height > thing_y:  # car/obstacle overlap
            if thing_x < x < thing_x + thing_w or thing_x < x+car_width < thing_x + thing_w:
                pygame.mixer.Sound.play(random.choice([explosion_sound, explosion_sound2]))
                crash()
        if shoot_cd > 0:
            shoot_cd -= 1
        pygame.display.update()
        clock.tick(60)
if __name__ == '__main__':
    # Entry point: show the intro menu; quitgame() ends the process afterwards.
    game_intro()
    quitgame()
| |
import os
import sys
import gzip
import shutil
import base64
import logging
import optparse
import mimetypes
import subprocess
from tempfile import mkdtemp, mkstemp
try:
import cssutils
except ImportError:
cssutils = None
try:
from urllib.parse import urlparse
except ImportError:
#python 2
from urlparse import urlparse
from pyramid.static import resolve_asset_spec
from pkg_resources import (get_distribution, resource_listdir, resource_isdir,
resource_filename)
_PY3 = sys.version_info[0] == 3
def includeme(config):
    """Pyramid include hook: registers the ``add_cdn_view`` directive."""
    config.add_directive('add_cdn_view', add_cdn_view)
def add_cdn_view(config, name, path, encodings=()):
    """Add a view used to render static assets.

    This calls ``config.add_static_view`` underneath the hood.

    If name is not an absolute URL, ``add_static_view`` is called directly with
    ``name`` and ``path`` unchanged.

    If ``name`` is an absolute URL, the project name and version from ``path``
    are added to it before calling ``add_static_view``. This url scheme matches
    the paths to which the resources are extracted to by ``extract_cmd``.

    For example, if ``name`` is ``http://cdn.example.com/path``, ``path`` is
    ``mypackage`` and the current version of ``mypackage`` is ``1.2.3`` then
    ``add_static_view`` is called with this url:

        http://cdn.example.com/path/mypackage/1.2.3

    Note that `path` is the path to the resource within the package.
    """
    package, filename = resolve_asset_spec(path, config.package_name)
    if package is None:
        raise ValueError("Package relative paths are required")
    path = '%s:%s' % (package, filename)
    if urlparse(name).scheme:
        # Name is an absolute url to CDN
        while name.endswith('/'):
            name = name[:-1]
        dist = get_distribution(package)
        # One static view per encoding, plus one for the unencoded files.
        for enc in [None] + list(encodings):
            parts = [name, dist.project_name, dist.version]
            p = path
            if enc:
                # Encoded copies live under an extra "<enc>/" path segment,
                # both in the URL and in the package-relative asset spec.
                parts.append(enc)
                pack, p = p.split(':', 1)
                p = ':'.join([pack, '/'.join([enc, p])])
            parts.append(filename)
            n = '/'.join(parts)
            config.add_static_view(name=n, path=p)
    else:
        if encodings:
            raise NotImplementedError('Refusing to guess what a static filesystem view with encodings mean (for now)')
        config.add_static_view(name=name, path=path)
def extract_cmd(resources=None, target=None, yui_compressor=False,
                ignore_stamps=False, encodings=None, args=sys.argv):
    """Export resources from the command line.

    Parses command-line options (the keyword arguments provide the defaults),
    validates them, and delegates the actual work to ``extract``.
    """
    parser = optparse.OptionParser(usage="usage: %prog [options]")
    res_help = "Resource to dump (may be repeated)."
    if resources is not None:
        res_help += '\nDefaults are: %s' % ', '.join(resources)
    parser.add_option("--resource",
                      dest="resources",
                      action='append',
                      help=res_help)
    parser.add_option("--yui-compressor", dest="yui_compressor",
                      action="store_true",
                      help=("Compress the files with the yui-compressor "
                            "(must be on the path)"))
    parser.add_option("--no-yui-compressor", dest="yui_compressor",
                      action="store_false",
                      help="Do not compress the files with yui-compressor")
    parser.add_option("--cssutils-minify", dest="cssutils_minify",
                      action="store_true",
                      help=("Use the python cssutils package to minify the"
                            "CSS"))
    parser.add_option("--cssutils-resolve-imports",
                      dest="cssutils_resolve_imports",
                      action="store_true",
                      help=("Use the python cssutils package to resolve"
                            "@import statements in the CSS"))
    parser.add_option("--target", dest="target",
                      help=("Where to put the resources (can be the name of a "
                            "local directory, or a url on S3 "
                            "(eg: s3://bucket_name/path) you will need boto "
                            "available to push the files"))
    parser.add_option("--encoding", dest="encodings",
                      action="append",
                      help=("This option exists to support serving compressed "
                            "resources over HTTP. For each --encoding a "
                            "compressed copy of the file will be uploaded "
                            "to the target with the relevant 'Content-Encoding' "
                            "header. The local-filesystem does not support this. "
                            "The path of the encoded file upload is prefixed by "
                            "the encoding."))
    parser.add_option("--ignore-stamps", dest="ignore_stamps",
                      action="store_true",
                      help=("Stamp files are placed in the target to optimize "
                            "repeated uploads. If these files are found the "
                            "resource upload is skipped. Use this option to "
                            "ignore these files and always updload"))
    parser.add_option("--aws-access-key", dest="aws_access_key",
                      help="AWS access key")
    parser.add_option("--aws-secret-key", dest="aws_secret_key",
                      help="AWS secret key")
    parser.add_option("--loglevel", dest="loglevel",
                      help="The logging level to use.",
                      default='WARN')
    parser.set_defaults(
        yui_compressor=yui_compressor,
        target=target,
        cssutils_resolve_imports=None,
        cssutils_minify=None,
        ignore_stamps=ignore_stamps)
    options, args = parser.parse_args(args)
    if not options.encodings:
        # set our default
        options.encodings = encodings
    if not options.resources:
        # set our default
        options.resources = resources
    loglevel = getattr(logging, options.loglevel)
    logging.basicConfig(level=loglevel)
    if not options.target:
        raise AssertionError("Target is required")
    if not options.resources:
        raise AssertionError("Resources are required")
    # Only forward options the caller actually set, so extract()'s own
    # defaults apply otherwise.
    kw = {}
    for opt in ['aws_access_key',
                'aws_secret_key',
                'encodings',
                'cssutils_minify',
                'cssutils_resolve_imports']:
        v = getattr(options, opt, None)
        if v is not None:
            kw[opt] = v
    # args still contains the program name (sys.argv[0]) as the only
    # leftover positional argument.
    assert len(args) == 1, args
    extract(options.resources,
            options.target,
            options.yui_compressor,
            ignore_stamps=options.ignore_stamps,
            **kw)
def _never_has_stamp(dist, path):
    """Stamp predicate used when stamps are ignored: always report no stamp."""
    return False
def extract(resources, target, yui_compressor=True, ignore_stamps=False,
            cssutils_resolve_imports=False,
            cssutils_minify=False,
            **kw):
    """Export the resources.

    Walks each ``package:path`` resource spec, optionally runs the files
    through a processing pipeline (cssutils and/or the YUI compressor) and
    hands the results to a putter chosen from the *target* URL scheme.
    Extra keyword arguments are forwarded to the putter.
    """
    putter = _get_putter(target, **kw)
    try:
        stamps = mkdtemp()  # scratch directory for generated stamp files
        try:
            has_stamp = _never_has_stamp
            if not ignore_stamps:
                has_stamp = putter.has_stamp
            r_files = _walk_resources(resources, has_stamp, stamps)
            pipeline = []
            try:
                # construct pipeline from config
                # in the right order
                if cssutils_resolve_imports or cssutils_minify:
                    pipeline.append(_CSSUtils(
                        resolve_imports=cssutils_resolve_imports,
                        minify=cssutils_minify))
                if yui_compressor:
                    pipeline.append(_YUICompressor())
                # build iterator out of pipelines
                for p in pipeline:
                    r_files = p.process(r_files)
                # execute pipeline (everything above is lazy generators;
                # the work happens as the putter consumes them)
                putter.put(r_files)
            finally:
                # dispose all bits of the pipeline to clean temporary files
                for p in pipeline:
                    p.dispose()
        finally:
            shutil.rmtree(stamps)
    finally:
        putter.close()
def _get_putter(target, **kw):
    """Pick a putter implementation from the URL scheme of *target*."""
    scheme = target.split(':')[0]
    factories = {
        'file': _PutLocal,
        's3': _PutS3,
    }
    return factories[scheme](target, **kw)
def config_static(config, static_resources, static_cdn=None):
    """Configure a Pyramid application with a list of static resources.

    .. warning::

        This method is deprecated, please use the add_cdn_view directive
        instead. At some future point ``config_static`` will be removed.

    If static_cdn is None, the resource will be configured to use the local
    server. Ideal for development.

    If static_cdn is a URL, resources will be loaded from there under this
    schema:

        http://cdn.example.com/path/${package_name}/${package_version}/path

    Note that `path` is the path to the resource within the package.
    """
    if static_cdn is None:
        # Local development: serve each resource under its own name.
        for name, path in static_resources:
            assert ':' in path, 'Is not relative to a package: %r' % path
            add_cdn_view(config, name=name, path=path)
    else:
        # CDN deployment: every resource is rooted at the CDN URL; the
        # per-resource local name is intentionally unused here.
        for name, path in static_resources:
            add_cdn_view(config, name=static_cdn, path=path)
def _walk_resource_directory(pname, resource_directory):
    """Walk a resource directory and yield all files.

    Yields ``(resource_path, type)`` tuples where *type* is ``'dir'`` or
    ``'file'``. Each directory is yielded before its contents; entries
    starting with a dot are skipped.
    """
    yield resource_directory, 'dir'
    for member in resource_listdir(pname, resource_directory):
        if member.startswith('.'):
            continue
        r_path = '/'.join([resource_directory, member])
        if resource_isdir(pname, r_path):
            logging.info("_walk_resource_directory: Recursing into directory "
                         "%s:%s", pname, r_path)
            for r in _walk_resource_directory(pname, r_path):
                yield r
        else:
            logging.info("_walk_resource_directory: Found resource "
                         "%s:%s", pname, r_path)
            yield r_path, 'file'
def _walk_resources(resources, has_stamp, tmpdir):
    """Yield file dictionaries for every file/dir in the given resource specs.

    *resources* is an iterable of ``package:path`` specs. Specs for which
    ``has_stamp`` returns True are skipped entirely. After each spec's files,
    a ``'stamp'`` entry is yielded whose filesystem file is created in
    *tmpdir*; the putter uses it to mark the resource as uploaded.
    """
    for res in resources:
        pname, r_path = res.split(':', 1)
        dist = get_distribution(pname)
        if has_stamp(dist, r_path):
            logging.info("Stamp found, skipping %s:%s", pname, r_path)
            continue
        logging.info("Walking %s:%s", pname, r_path)
        resources = _walk_resource_directory(pname, r_path)
        for r, type in resources:
            fs_r = resource_filename(pname, r)
            yield _to_dict(r, fs_r, pname, dist, type)
        # Write the stamp marker file last, so it is only uploaded after
        # every file of this resource has been processed.
        handle, fs_r = mkstemp(dir=tmpdir)
        f = os.fdopen(handle, 'w')
        try:
            f.write('Stamping %s' % res)
        finally:
            f.close()
        yield _to_dict(r_path, fs_r, pname, dist, 'stamp')
def _stamp_resource(dist, resource_path, encodings=None):
    """Return ``(stamp_distribution, stamp_filename)`` for a resource.

    The filename encodes the project name, version, optionally the sorted
    encodings, and a base32 form of the resource path; stamps always belong
    to the ``van.static`` distribution.
    """
    stamp_dist = get_distribution('van.static')
    if _PY3:
        encoded_path = base64.b32encode(resource_path.encode('utf-8')).decode('ascii')
    else:
        encoded_path = base64.b32encode(resource_path)
    pieces = [dist.project_name, dist.version]
    if encodings:
        pieces.append('-'.join(sorted(encodings)))
    pieces.append(encoded_path)
    return stamp_dist, '%s.stamp' % '-'.join(pieces)
class _PutLocal:
    """Copy extracted resources onto the local filesystem.

    The target is a ``file:///absolute/path`` URL.  Files are hard
    linked when possible (fast) with a fallback to a normal copy;
    directories and stamp markers are created as needed.
    """

    # Start optimistic: try hard links until one fails for a reason
    # other than "file exists", then fall back to copying for good.
    _hard_link = True

    def __init__(self, target):
        assert target.startswith('file:///')
        # Strip the 'file://' prefix, keeping the path's leading slash.
        self._target_dir = target = target[7:]
        logging.info("Putting resources in %s", self._target_dir)

    def close(self):
        """Nothing to clean up for local targets."""
        pass

    def _if_not_exist(self, func, *args, **kw):
        # call for file operations that may fail with
        # OSError: [Errno 17] File exists
        try:
            func(*args, **kw)
        except OSError:
            e = sys.exc_info()[1]
            if e.errno != 17:
                raise

    def has_stamp(self, dist, resource_path):
        """Return True if ``resource_path`` of ``dist`` was already put."""
        stamp_dist, stamp_path = _stamp_resource(dist, resource_path)
        return self.exists(stamp_dist, stamp_path)

    def exists(self, dist, path):
        """Return True if ``path`` exists under ``dist``'s target dir."""
        target = os.path.join(self._target_dir, dist.project_name,
                              dist.version, path)
        return os.path.exists(target)

    def put(self, files):
        """Copy an iterable of resource dicts (see ``_to_dict``) locally."""
        proj_dirs = set([])
        for f in files:
            rpath = f['resource_path']
            fs_rpath = f['filesystem_path']
            pname = f['distribution_name']
            dist = f['distribution']
            type = f['type']
            if type == 'stamp':
                # Stamps become regular files under the stamp distribution.
                dist, rpath = _stamp_resource(dist, rpath)
                type = 'file'
            fs_path = rpath.replace('/', os.sep)  # enough for windows?
            target = os.path.join(self._target_dir, dist.project_name,
                                  dist.version, fs_path)
            if pname not in proj_dirs:
                # Create <target>/<project>/<version> once per distribution.
                self._if_not_exist(os.makedirs, os.path.join(self._target_dir,
                                                             dist.project_name,
                                                             dist.version))
                proj_dirs.add(pname)
            if type == 'file':
                self._copy(fs_rpath, target)
            else:
                self._if_not_exist(os.makedirs, target)

    def _copy(self, source, target):
        if self._hard_link:
            try:
                logging.debug("Hard linking %s to %s", source, target)
                os.link(source, target)  # hard links are fast!
            # BUGFIX: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt.
            except Exception:
                logging.debug("Hard linking failed, falling back to normal copy")
                e = sys.exc_info()[1]
                if isinstance(e, OSError) and e.errno == 17:
                    # file exists, let's try removing it
                    os.remove(target)
                else:
                    # another error, don't try hard linking after first failure
                    # this may be because the files are on different devices or windows
                    self._hard_link = False
                self._copy(source, target)
        else:
            logging.debug("Copying %s to %s", source, target)
            shutil.copy(source, target)
# Mimetypes worth gzipping: text-based formats that actually shrink.
# Binary formats (images other than SVG, archives, ...) are already
# compressed and are skipped by _PutS3._should_gzip.
_GZ_MIMETYPES = frozenset([
    'text/plain',
    'text/html',
    'text/css',
    'application/javascript',
    'application/x-javascript',
    'text/xml',
    'application/json',
    'application/xml',
    'image/svg+xml'])
class _PutS3:
    """Upload extracted resources to an S3 bucket via boto.

    Optionally uploads alternate, content-encoded (gzipped) copies of
    compressible resources under an encoding-prefixed key.
    """
    # Lazily-connected boto bucket; see the _bucket property below.
    _cached_bucket = None
    def __init__(self, target, aws_access_key=None, aws_secret_key=None, encodings=()):
        # parse URL by hand as urlparse in python2.5 doesn't
        assert target.startswith('s3://')
        target = target[5:]
        bucket, path = target.split('/', 1)
        self._encodings = encodings
        self._bucket_name = bucket
        self._path = '/%s' % path
        self._aws_access_key = aws_access_key
        self._aws_secret_key = aws_secret_key
        # scratch space for temporary gzipped copies; removed in close()
        self._tmpdir = mkdtemp()
    def _get_temp_file(self):
        # Return (open binary file object, path) inside the scratch dir.
        handle, filename = mkstemp(dir=self._tmpdir)
        return os.fdopen(handle, 'wb'), filename
    @property
    def _bucket(self):
        # Connect on first use; validate=False skips a round trip to S3.
        if self._cached_bucket is None:
            S3Connection = self._get_conn_class()
            conn = S3Connection(self._aws_access_key, self._aws_secret_key)
            self._cached_bucket = conn.get_bucket(self._bucket_name, validate=False)
        return self._cached_bucket
    def has_stamp(self, dist, resource_path):
        # A stamp key in the bucket marks an already-uploaded resource.
        stamp_dist, stamp_path = _stamp_resource(dist, resource_path, encodings=self._encodings)
        return self.exists(stamp_dist, stamp_path)
    def exists(self, dist, path):
        # Key layout: <base path>/<project>/<version>/<resource path>.
        target = '/'.join([self._path, dist.project_name, dist.version,
                           path])
        return self._bucket.get_key(target) is not None
    def _get_conn_class(self):
        # lazy import to not have a hard dependency on boto
        # Also so we can mock them in tests
        from boto.s3.connection import S3Connection
        return S3Connection
    def _get_key_class(self):
        # Lazy import for the same reasons as _get_conn_class.
        from boto.s3.key import Key
        return Key
    def _should_gzip(self, mimetype):
        # Only text-like types benefit from gzip; see _GZ_MIMETYPES.
        return mimetype in _GZ_MIMETYPES
    def close(self):
        # Remove the temporary workspace; safe to call more than once.
        if self._tmpdir is not None:
            logging.debug("_S3Putter: removing temp workspace: %s",
                          self._tmpdir)
            shutil.rmtree(self._tmpdir)
            self._tmpdir = None
    def put(self, files):
        """Upload resource dicts (see _to_dict) to the bucket.

        Directories are skipped (S3 has no directories), stamps are
        uploaded as marker keys, and each file is uploaded once per
        configured encoding plus the identity (un-encoded) variant.
        """
        logging.info("S3: putting resources to bucket %s with encodings: %s", self._bucket_name, self._encodings)
        Key = self._get_key_class()
        bucket = self._bucket
        # None stands for the identity (un-encoded) variant.
        encodings = [None] + list(self._encodings)
        for f in files:
            if f['type'] == 'dir':
                continue
            elif f['type'] == 'stamp':
                dist, rpath = _stamp_resource(f['distribution'], f['resource_path'], encodings=self._encodings)
                target = '/'.join([self._path, dist.project_name, dist.version, rpath])
                logging.info("Stamping resource %s:%s in S3: %s", f['distribution_name'], f['resource_path'], target)
                key = Key(bucket)
                key.key = target
                key.set_contents_from_filename(
                    f['filesystem_path'],
                    reduced_redundancy=True,
                    policy='public-read')
                continue
            dist = f['distribution']
            prefix = '/'.join([self._path, dist.project_name, dist.version])
            filename = f['resource_path'].split('/')[-1]
            mimetype = mimetypes.guess_type(filename)[0]
            for enc in encodings:
                # Long client-side caching is safe: keys are version-qualified.
                headers = {'Cache-Control': 'max-age=32140800'}
                if mimetype:
                    headers['Content-Type'] = mimetype
                if enc is None:
                    target = '/'.join([prefix, f['resource_path']])
                    fs_path = f['filesystem_path']
                elif enc == 'gzip':
                    target = '/'.join([prefix, enc, f['resource_path']])
                    if self._should_gzip(mimetype):
                        headers['Content-Encoding'] = 'gzip'
                        source = f['filesystem_path']
                        c_file, fs_path = self._get_temp_file()
                        try:
                            # NOTE: 'file' shadows the builtin of the same name.
                            file = gzip.GzipFile(filename, 'wb', 9, c_file)
                            try:
                                source = open(source, 'rb')
                                try:
                                    file.write(source.read())
                                finally:
                                    source.close()
                            finally:
                                file.close()
                        finally:
                            c_file.close()
                    else:
                        # Non-compressible: upload the original bytes under
                        # the gzip-prefixed key, without Content-Encoding.
                        fs_path = f['filesystem_path']
                else:
                    raise NotImplementedError()
                logging.info("putting to S3: %s with headers: %s", target, headers)
                key = Key(bucket)
                key.key = target
                key.set_contents_from_filename(
                    fs_path,
                    reduced_redundancy=True,
                    headers=headers,
                    policy='public-read')
def _to_dict(resource_path, filesystem_path, distribution_name, distribution, type):
    """Convert a tuple of values to a more plugin friendly dictionary.

    - `resource_path` is the path to file within resource (distribution)
    - `filesystem_path` is path to file on local filesystem
    - `distribution` is the pkg_resources distribution object
    - `distribution_name` is the pkg_resources distribution name
    - `type` is a string indicating the resource type, `file` for a
      filesystem file and `dir` for a directory
    """
    # Spell the mapping out explicitly rather than relying on locals().
    return {
        'resource_path': resource_path,
        'filesystem_path': filesystem_path,
        'distribution_name': distribution_name,
        'distribution': distribution,
        'type': type,
    }
class _YUICompressor:
    """Filter that minifies .js/.css files via the yui-compressor CLI."""
    def __init__(self):
        self._tmpdir = mkdtemp()
        # gives each compressed file a unique name inside the workspace
        self._counter = 0
    def dispose(self):
        # Remove the temporary workspace; safe to call more than once.
        if self._tmpdir is not None:
            logging.debug("_YUICompressior: removing temp workspace: %s",
                          self._tmpdir)
            shutil.rmtree(self._tmpdir)
            self._tmpdir = None
    def __del__(self):
        # Raising in __del__ only prints a warning at collection time, but
        # it makes missed dispose() calls visible during development.
        if self._tmpdir is not None:
            raise Exception('%s was not disposed before garbage collection' % self)
        # NOTE(review): the call below is unreachable when the exception
        # above fires, and a no-op otherwise (_tmpdir is None here) --
        # looks like dead code; confirm intent before removing.
        self.dispose()
    def process(self, files):
        # Yield file dicts, replacing js/css entries with minified copies.
        for f in files:
            rpath = f['resource_path']
            f_type = f['type']
            if f_type == 'file' and rpath.endswith('.js'):
                type = 'js'
            elif f_type == 'file' and rpath.endswith('.css'):
                type = 'css'
            else:
                # not a compressible resource: pass through untouched
                yield f
                continue
            self._counter += 1
            fs_rpath = f['filesystem_path']
            target = os.path.join(self._tmpdir, str(self._counter) + '-' +
                                  os.path.basename(fs_rpath))
            args = ['yui-compressor', '--type', type, '-o', target, fs_rpath]
            logging.debug('Compressing with YUI Compressor %s file, '
                          'from %s to %s', type, fs_rpath, target)
            subprocess.check_call(args)
            # point downstream filters/putters at the minified copy
            f['filesystem_path'] = target
            yield f
class _CSSUtils:
    """Filter to inline CSS @import statements"""
    def __init__(self, resolve_imports=False, minify=False):
        # Re-importing under another name raises an ImportError naming
        # the missing module if cssutils is not installed.
        if cssutils is None:
            import cssutils as err # cssutils needs to be installed
        self._tmpdir = mkdtemp()
        # gives each rewritten file a unique name inside the workspace
        self._counter = 0
        self.serializer = cssutils.CSSSerializer()
        self.resolve_imports = resolve_imports
        if minify:
            self.serializer.prefs.useMinified()
    def dispose(self):
        # Remove the temporary workspace; safe to call more than once.
        if self._tmpdir is not None:
            logging.debug("_CSSImportInliner: removing temp workspace: %s",
                          self._tmpdir)
            shutil.rmtree(self._tmpdir)
            self._tmpdir = None
    def process(self, files):
        # Yield file dicts, rewriting .css entries through cssutils.
        # Non-relative URLs are only warned about, never rewritten.
        for f in files:
            if not f['resource_path'].endswith('.css') or f['type'] != 'file':
                yield f
                continue
            self._counter += 1
            fs_rpath = f['filesystem_path']
            sheet = cssutils.parseFile(fs_rpath)
            sheet.setSerializer(self.serializer)
            for url in cssutils.getUrls(sheet):
                u = urlparse(url)
                if u.scheme or u.netloc or not u.path.startswith('./'):
                    logging.warning('non-relative URL used in CSS: %s' % url)
            if self.resolve_imports:
                sheet = cssutils.resolveImports(sheet)
            target = os.path.join(
                self._tmpdir,
                str(self._counter) + '-' + os.path.basename(fs_rpath))
            out_f = open(target, 'wb')
            try:
                out_f.write(sheet.cssText)
            finally:
                out_f.close()
            # point downstream filters/putters at the rewritten copy
            f['filesystem_path'] = target
            yield f
# Allow running the extraction command directly as a script;
# extract_cmd is defined earlier in this module (outside this view).
if __name__ == "__main__":
    extract_cmd()
| |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from oslo_config import cfg
import testtools
from webob import exc
import webtest
from neutron.api import extensions
from neutron.api.v2 import router
from neutron.common import config
from neutron.common import constants
from neutron.common import exceptions
from neutron import context
from neutron.db.quota import driver
from neutron import quota
from neutron.quota import resource_registry
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit import testlib_api
# Action name of the "default quotas" sub-resource endpoint.
DEFAULT_QUOTAS_ACTION = 'default'
# Core plugin that is mocked out for these tests.
TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
# Shorthand for building API request paths.
_get_path = test_base._get_path
class QuotaExtensionTestCase(testlib_api.WebTestCase):
    """Base fixture: a quota-extension API app with a mocked core plugin."""
    def setUp(self):
        super(QuotaExtensionTestCase, self).setUp()
        # Ensure existing ExtensionManager is not used
        extensions.PluginAwareExtensionManager._instance = None
        self.useFixture(tools.AttributeMapMemento())
        # Create the default configurations
        self.config_parse()
        # Update the plugin and extensions path
        self.setup_coreplugin('ml2')
        # Fresh QuotaEngine so per-test driver overrides take effect.
        quota.QUOTAS = quota.QuotaEngine()
        self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
        self.plugin = self._plugin_patcher.start()
        self.plugin.return_value.supported_extension_aliases = ['quotas']
        # QUOTAS will register the items in conf when starting
        # extra1 here is added later, so have to do it manually
        resource_registry.register_resource_by_name('extra1')
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        app = config.load_paste_app('extensions_test_app')
        ext_middleware = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
        self.api = webtest.TestApp(ext_middleware)
        # Initialize the router for the core API in order to ensure core quota
        # resources are registered
        router.APIRouter()
    def tearDown(self):
        # Drop references so the webtest app and mock plugin can be GC'd.
        self.api = None
        self.plugin = None
        super(QuotaExtensionTestCase, self).tearDown()
    def _test_quota_default_values(self, expected_values):
        """Fetch a tenant's quotas and compare against expected_values."""
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env)
        quota = self.deserialize(res)
        for resource, expected_value in expected_values.items():
            self.assertEqual(expected_value,
                             quota['quota'][resource])
class QuotaExtensionDbTestCase(QuotaExtensionTestCase):
    """Quota extension tests run against the DB-backed quota driver."""
    fmt = 'json'
    def setUp(self):
        # Select the DB driver before the base class builds the app.
        cfg.CONF.set_override(
            'quota_driver',
            'neutron.db.quota.driver.DbQuotaDriver',
            group='QUOTAS')
        super(QuotaExtensionDbTestCase, self).setUp()
    def test_quotas_loaded_right(self):
        res = self.api.get(_get_path('quotas', fmt=self.fmt))
        quota = self.deserialize(res)
        self.assertEqual([], quota['quotas'])
        self.assertEqual(200, res.status_int)
    def test_quotas_default_values(self):
        self._test_quota_default_values(
            {'network': 10,
             'subnet': 10,
             'port': 50,
             'extra1': -1})
    def test_quotas_negative_default_value(self):
        # Any negative configured default is reported as unlimited (-1).
        cfg.CONF.set_override(
            'quota_port', -666, group='QUOTAS')
        cfg.CONF.set_override(
            'quota_network', -10, group='QUOTAS')
        cfg.CONF.set_override(
            'quota_subnet', -50, group='QUOTAS')
        self._test_quota_default_values(
            {'network': -1,
             'subnet': -1,
             'port': -1,
             'extra1': -1})
    def test_show_default_quotas_with_admin(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=True)}
        res = self.api.get(_get_path('quotas', id=tenant_id,
                                     action=DEFAULT_QUOTAS_ACTION,
                                     fmt=self.fmt),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
        quota = self.deserialize(res)
        self.assertEqual(10, quota['quota']['network'])
        self.assertEqual(10, quota['quota']['subnet'])
        self.assertEqual(50, quota['quota']['port'])
    def test_show_default_quotas_with_owner_tenant(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=False)}
        res = self.api.get(_get_path('quotas', id=tenant_id,
                                     action=DEFAULT_QUOTAS_ACTION,
                                     fmt=self.fmt),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
        quota = self.deserialize(res)
        self.assertEqual(10, quota['quota']['network'])
        self.assertEqual(10, quota['quota']['subnet'])
        self.assertEqual(50, quota['quota']['port'])
    def test_show_default_quotas_without_admin_forbidden_returns_403(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=False)}
        res = self.api.get(_get_path('quotas', id=tenant_id,
                                     action=DEFAULT_QUOTAS_ACTION,
                                     fmt=self.fmt),
                           extra_environ=env, expect_errors=True)
        self.assertEqual(403, res.status_int)
    def test_show_quotas_with_admin(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=True)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
        quota = self.deserialize(res)
        self.assertEqual(10, quota['quota']['network'])
        self.assertEqual(10, quota['quota']['subnet'])
        self.assertEqual(50, quota['quota']['port'])
    def test_show_quotas_without_admin_forbidden_returns_403(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=False)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env, expect_errors=True)
        self.assertEqual(403, res.status_int)
    def test_show_quotas_with_owner_tenant(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=False)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
        quota = self.deserialize(res)
        self.assertEqual(10, quota['quota']['network'])
        self.assertEqual(10, quota['quota']['subnet'])
        self.assertEqual(50, quota['quota']['port'])
    def test_list_quotas_with_admin(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        res = self.api.get(_get_path('quotas', fmt=self.fmt),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
        quota = self.deserialize(res)
        self.assertEqual([], quota['quotas'])
    def test_list_quotas_without_admin_forbidden_returns_403(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=False)}
        res = self.api.get(_get_path('quotas', fmt=self.fmt),
                           extra_environ=env, expect_errors=True)
        self.assertEqual(403, res.status_int)
    def test_update_quotas_without_admin_forbidden_returns_403(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=False)}
        quotas = {'quota': {'network': 100}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=True)
        self.assertEqual(403, res.status_int)
    def test_update_quotas_with_non_integer_returns_400(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': 'abc'}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=True)
        self.assertEqual(400, res.status_int)
    def test_update_quotas_with_negative_integer_returns_400(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': -2}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=True)
        self.assertEqual(400, res.status_int)
    def test_update_quotas_with_out_of_range_integer_returns_400(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': constants.DB_INTEGER_MAX_VALUE + 1}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=True)
        self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
    def test_update_quotas_to_unlimited(self):
        # -1 is the special "unlimited" value and must be accepted.
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': -1}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=False)
        self.assertEqual(200, res.status_int)
    def test_update_quotas_exceeding_current_limit(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': 120}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=False)
        self.assertEqual(200, res.status_int)
    def test_update_quotas_with_non_support_resource_returns_400(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'abc': 100}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env,
                           expect_errors=True)
        self.assertEqual(400, res.status_int)
    def test_update_quotas_with_admin(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=True)}
        quotas = {'quota': {'network': 100}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env)
        self.assertEqual(200, res.status_int)
        env2 = {'neutron.context': context.Context('', tenant_id)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env2)
        quota = self.deserialize(res)
        self.assertEqual(100, quota['quota']['network'])
        self.assertEqual(10, quota['quota']['subnet'])
        self.assertEqual(50, quota['quota']['port'])
    def test_update_attributes(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=True)}
        quotas = {'quota': {'extra1': 100}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env)
        self.assertEqual(200, res.status_int)
        env2 = {'neutron.context': context.Context('', tenant_id)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env2)
        quota = self.deserialize(res)
        self.assertEqual(100, quota['quota']['extra1'])
    def test_delete_quotas_with_admin(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=True)}
        # Create a quota to ensure we have something to delete
        quotas = {'quota': {'network': 100}}
        self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                     self.serialize(quotas), extra_environ=env)
        res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                              extra_environ=env)
        self.assertEqual(204, res.status_int)
    def test_delete_quotas_without_admin_forbidden_returns_403(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=False)}
        res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                              extra_environ=env, expect_errors=True)
        self.assertEqual(403, res.status_int)
    def test_delete_quota_with_unknown_tenant_returns_404(self):
        tenant_id = 'idnotexist'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=True)}
        res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                              extra_environ=env, expect_errors=True)
        self.assertEqual(exc.HTTPNotFound.code, res.status_int)
    def test_quotas_loaded_bad_returns_404(self):
        # BUGFIX: the previous version wrapped these lines in
        # `try/except Exception: pass`, which swallowed every failure and
        # made the test a no-op.  expect_errors=True already keeps webtest
        # from raising on the expected 404 response.
        res = self.api.get(_get_path('quotas'), expect_errors=True)
        self.assertEqual(404, res.status_int)
    def test_quotas_limit_check(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        quotas = {'quota': {'network': 5}}
        res = self.api.put(_get_path('quotas', id=tenant_id,
                                     fmt=self.fmt),
                           self.serialize(quotas), extra_environ=env)
        self.assertEqual(200, res.status_int)
        # A request within the limit must not raise.
        quota.QUOTAS.limit_check(context.Context('', tenant_id),
                                 tenant_id,
                                 network=4)
    def test_quotas_limit_check_with_invalid_quota_value(self):
        tenant_id = 'tenant_id1'
        with testtools.ExpectedException(exceptions.InvalidQuotaValue):
            quota.QUOTAS.limit_check(context.Context('', tenant_id),
                                     tenant_id,
                                     network=-2)
    def test_quotas_limit_check_with_not_registered_resource_fails(self):
        tenant_id = 'tenant_id1'
        self.assertRaises(exceptions.QuotaResourceUnknown,
                          quota.QUOTAS.limit_check,
                          context.get_admin_context(),
                          tenant_id,
                          foobar=1)
    def test_quotas_get_tenant_from_request_context(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=True)}
        res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
        quota = self.deserialize(res)
        self.assertEqual(quota['tenant']['tenant_id'], tenant_id)
    def test_quotas_get_tenant_from_empty_request_context_returns_400(self):
        env = {'neutron.context': context.Context('', '',
                                                  is_admin=True)}
        res = self.api.get(_get_path('quotas/tenant', fmt=self.fmt),
                           extra_environ=env, expect_errors=True)
        self.assertEqual(400, res.status_int)
    def test_make_reservation_resource_unknown_raises(self):
        tenant_id = 'tenant_id1'
        self.assertRaises(exceptions.QuotaResourceUnknown,
                          quota.QUOTAS.make_reservation,
                          context.get_admin_context(),
                          tenant_id,
                          {'foobar': 1},
                          plugin=None)
    def test_make_reservation_negative_delta_raises(self):
        tenant_id = 'tenant_id1'
        self.assertRaises(exceptions.InvalidQuotaValue,
                          quota.QUOTAS.make_reservation,
                          context.get_admin_context(),
                          tenant_id,
                          {'network': -1},
                          plugin=None)
class QuotaExtensionCfgTestCase(QuotaExtensionTestCase):
    """Quota extension tests run against the config-file quota driver."""
    fmt = 'json'
    def setUp(self):
        # Use the read-only ConfDriver instead of the DB-backed driver.
        cfg.CONF.set_override(
            'quota_driver',
            'neutron.quota.ConfDriver',
            group='QUOTAS')
        super(QuotaExtensionCfgTestCase, self).setUp()
    def test_quotas_default_values(self):
        self._test_quota_default_values(
            {'network': 10,
             'subnet': 10,
             'port': 50,
             'extra1': -1})
    def test_quotas_negative_default_value(self):
        # A negative configured default is reported as unlimited (-1).
        cfg.CONF.set_override(
            'quota_port', -666, group='QUOTAS')
        self._test_quota_default_values(
            {'network': 10,
             'subnet': 10,
             'port': -1,
             'extra1': -1})
    def test_show_quotas_with_admin(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=True)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env)
        self.assertEqual(200, res.status_int)
    def test_show_quotas_without_admin_forbidden(self):
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id + '2',
                                                  is_admin=False)}
        res = self.api.get(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           extra_environ=env, expect_errors=True)
        self.assertEqual(403, res.status_int)
    def test_update_quotas_forbidden(self):
        # ConfDriver quotas are read-only, so updates are rejected.
        tenant_id = 'tenant_id1'
        quotas = {'quota': {'network': 100}}
        res = self.api.put(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                           self.serialize(quotas),
                           expect_errors=True)
        self.assertEqual(403, res.status_int)
    def test_delete_quotas_forbidden(self):
        # ConfDriver quotas are read-only, so deletes are rejected too.
        tenant_id = 'tenant_id1'
        env = {'neutron.context': context.Context('', tenant_id,
                                                  is_admin=False)}
        res = self.api.delete(_get_path('quotas', id=tenant_id, fmt=self.fmt),
                              extra_environ=env, expect_errors=True)
        self.assertEqual(403, res.status_int)
class TestDbQuotaDriver(base.BaseTestCase):
    """Tests for neutron.db.quota.driver.DbQuotaDriver."""

    def test_get_tenant_quotas_arg(self):
        """_get_quotas delegates to get_tenant_quotas (note the
        swapped argument order of defaults and tenant)."""
        quota_driver = driver.DbQuotaDriver()
        ctx = context.Context('', 'bar')
        tenant_quotas = {'network': 5}
        defaults = {'network': 10}
        tenant = 'foo'
        patcher = mock.patch.object(driver.DbQuotaDriver,
                                    'get_tenant_quotas',
                                    return_value=tenant_quotas)
        with patcher as mocked_get:
            result = quota_driver._get_quotas(ctx, tenant, defaults)
            self.assertEqual(result, tenant_quotas)
            mocked_get.assert_called_once_with(ctx, defaults, tenant)
class TestQuotaDriverLoad(base.BaseTestCase):
    """Tests that QuotaEngine loads the configured quota driver."""
    def setUp(self):
        super(TestQuotaDriverLoad, self).setUp()
        # Make sure QuotaEngine is reinitialized in each test.
        quota.QUOTAS._driver = None
    def _test_quota_driver(self, cfg_driver, loaded_driver,
                           with_quota_db_module=True):
        # Configure the driver; optionally hide the DB driver module to
        # simulate an environment without quota-DB support.  patch.dict
        # restores sys.modules after the block.
        cfg.CONF.set_override('quota_driver', cfg_driver, group='QUOTAS')
        with mock.patch.dict(sys.modules, {}):
            if (not with_quota_db_module and
                'neutron.db.quota.driver' in sys.modules):
                del sys.modules['neutron.db.quota.driver']
            driver = quota.QUOTAS.get_driver()
            self.assertEqual(loaded_driver, driver.__class__.__name__)
    def test_quota_db_driver_with_quotas_table(self):
        self._test_quota_driver('neutron.db.quota.driver.DbQuotaDriver',
                                'DbQuotaDriver', True)
    def test_quota_db_driver_fallback_conf_driver(self):
        # Without the DB module available, the engine falls back to
        # the config-based driver.
        self._test_quota_driver('neutron.db.quota.driver.DbQuotaDriver',
                                'ConfDriver', False)
    def test_quota_conf_driver(self):
        self._test_quota_driver('neutron.quota.ConfDriver',
                                'ConfDriver', True)
| |
"""
:copyright: (c) 2014 Building Energy Inc
:license: see LICENSE for more details.
"""
import json
from django.core.urlresolvers import reverse_lazy
from django.test import TestCase
from superperms.orgs.models import Organization
from landing.models import SEEDUser as User
from seed.views.main import _get_default_org
from seed.views.accounts import _dict_org, _get_js_role, _get_role_from_js
from superperms.orgs.models import (
ROLE_OWNER,
ROLE_MEMBER,
ROLE_VIEWER,
OrganizationUser,
)
from superperms.orgs.exceptions import InsufficientPermission
from seed.models import BuildingSnapshot, CanonicalBuilding
from public.models import SharedBuildingField
from seed.tests.util import FakeRequest
class AccountsViewTests(TestCase):
"""
Tests of the SEED accounts
"""
    def setUp(self):
        """Create a logged-in user who owns a fresh organization."""
        user_details = {
            'username': 'test_user@demo.com',
            'password': 'test_pass',
            'email': 'test_user@demo.com',
            'first_name': 'Johnny',
            'last_name': 'Energy',
        }
        self.user = User.objects.create_user(**user_details)
        self.org = Organization.objects.create(name='my org')
        self.org.add_member(self.user)
        self.client.login(**user_details)
        # FakeRequest lets tests call view helpers directly as self.user.
        self.fake_request = FakeRequest(user=self.user)
    def test_dict_org(self):
        """_dict_org turns our org structure into a json payload."""
        expected_single_org_payload = {
            'sub_orgs': [],
            'owners': [{
                'first_name': u'Johnny',
                'last_name': u'Energy',
                'email': u'test_user@demo.com',
                'id': self.user.pk
            }],
            'number_of_users': 1,
            'name': 'my org',
            'user_role': 'owner',
            'is_parent': True,
            'org_id': self.org.pk,
            'id': self.org.pk,
            'user_is_owner': True,
            'num_buildings': 0
        }
        org_payload = _dict_org(self.fake_request, [self.org])
        self.assertEqual(len(org_payload), 1)
        self.assertDictEqual(org_payload[0], expected_single_org_payload)
        # Now let's make sure that we pick up related buildings correctly.
        # Only canonical snapshots tied to this org should be counted.
        for x in range(10):
            can = CanonicalBuilding.objects.create()
            snap = BuildingSnapshot.objects.create()
            snap.super_organization = self.org
            snap.save()
            can.canonical_snapshot = snap
            can.save()
        expected_single_org_payload['num_buildings'] = 10
        self.assertDictEqual(
            _dict_org(self.fake_request, [self.org])[0],
            expected_single_org_payload
        )
    # NOTE(review): method name has a typo ("dic" -> "dict"); renaming is
    # safe for test discovery but is left to a separate change.
    def test_dic_org_w_member_in_parent_and_child(self):
        """What happens when a user has a role in parent and child."""
        new_org = Organization.objects.create(name="sub")
        expected_multiple_org_payload = {
            'sub_orgs': [{
                'owners': [{
                    'first_name': u'Johnny',
                    'last_name': u'Energy',
                    'email': u'test_user@demo.com',
                    'id': self.user.pk
                }],
                'number_of_users': 1,
                'name': 'sub',
                'sub_orgs': [],
                'user_role': 'owner',
                'is_parent': False,
                'org_id': new_org.pk,
                'id': new_org.pk,
                'user_is_owner': True,
                'num_buildings': 0,
            }],
            'owners': [{
                'first_name': u'Johnny',
                'last_name': u'Energy',
                'email': u'test_user@demo.com',
                'id': self.user.pk
            }],
            'number_of_users': 1,
            'name': 'my org',
            'user_role': 'owner',
            'is_parent': True,
            'org_id': self.org.pk,
            'id': self.org.pk,
            'user_is_owner': True,
            'num_buildings': 0
        }
        # Wire "sub" up as a child of self.org, with the same member.
        new_org.parent_org = self.org
        new_org.save()
        new_org.add_member(self.user)
        org_payload = _dict_org(self.fake_request, Organization.objects.all())
        self.assertEqual(len(org_payload), 2)
        self.assertEqual(org_payload[0], expected_multiple_org_payload)
def test_get_organizations(self):
""" tests accounts.get_organizations
"""
resp = self.client.get(
reverse_lazy("accounts:get_organizations"),
content_type='application/json',
)
orgs = json.loads(resp.content)['organizations']
org = orgs[0]
self.assertEquals(org['name'], 'my org')
self.assertEquals(org['number_of_users'], 1)
self.assertDictEqual(
org['owners'][0],
{
'email': u'test_user@demo.com',
'first_name': u'Johnny',
'last_name': u'Energy',
'email': u'test_user@demo.com',
'id': self.user.pk # since this could change
}
)
self.assertTrue(org['user_is_owner'])
    def test_get_organization_no_org(self):
        """test for error when no organization_id sent"""
        # No query parameters at all -> the view reports a missing org.
        resp = self.client.get(
            reverse_lazy("accounts:get_organization"),
            content_type='application/json',
        )
        self.assertEquals(
            json.loads(resp.content),
            {
                'status': 'error',
                'message': 'Organization does not exist'
            })
def test_get_organization_std_case(self):
"""test normal case"""
resp = self.client.get(
reverse_lazy("accounts:get_organization"),
{'organization_id': self.org.id},
content_type='application/json',
)
org = json.loads(resp.content)['organization']
self.assertEquals(org['name'], 'my org')
self.assertEquals(org['number_of_users'], 1)
self.assertDictEqual(
org['owners'][0],
{
'email': u'test_user@demo.com',
'first_name': u'Johnny',
'last_name': u'Energy',
'email': u'test_user@demo.com',
'id': self.user.pk # since this could change
}
)
self.assertTrue(org['user_is_owner'])
    def test_get_organization_user_not_owner(self):
        """test for the case where a user doesn't have access"""
        # Build an org the logged-in user is NOT a member of.
        other_org = Organization.objects.create(name='not my org')
        other_user = User.objects.create(
            username="tester@be.com",
            email="tester@be.com",
        )
        other_org.add_member(other_user)
        resp = self.client.get(
            reverse_lazy("accounts:get_organization"),
            {'organization_id': other_org.id},
            content_type='application/json',
        )
        self.assertEquals(
            json.loads(resp.content),
            {
                'status': 'error',
                'message': 'No relationship to organization'
            })
    def test_get_organization_org_doesnt_exist(self):
        """test for the case where the requested organization does not exist

        (docstring corrected: it was copy-pasted from the access-denied test)
        """
        resp = self.client.get(
            reverse_lazy("accounts:get_organization"),
            {'organization_id': self.org.id + 100},
            content_type='application/json',
        )
        self.assertEquals(
            json.loads(resp.content),
            {
                'status': 'error',
                'message': 'Organization does not exist'
            })
    def test_remove_user_from_org_std(self):
        """test removing a user"""
        # normal case
        u = User.objects.create(username="b@b.com", email="b@be.com")
        self.org.add_member(u)
        resp = self.client.post(
            reverse_lazy("accounts:remove_user_from_org"),
            data=json.dumps({'user_id': u.id, 'organization_id': self.org.id}),
            content_type='application/json',
        )
        self.assertDictEqual(
            json.loads(resp.content),
            {
                'status': 'success',
            })
    def test_remove_user_from_org_missing_org_id(self):
        """Omitting organization_id must produce a missing-org error."""
        u = User.objects.create(username="b@b.com", email="b@be.com")
        self.org.add_member(u)
        resp = self.client.post(
            reverse_lazy("accounts:remove_user_from_org"),
            data=json.dumps({'user_id': u.id}),
            content_type='application/json',
        )
        self.assertDictEqual(
            json.loads(resp.content),
            {
                'status': 'error',
                'message': 'Organization does not exist'
            })
    def test_remove_user_from_org_missing_user_id(self):
        """Omitting user_id must produce a missing-user error."""
        u = User.objects.create(username="b@b.com", email="b@be.com")
        self.org.add_member(u)
        resp = self.client.post(
            reverse_lazy("accounts:remove_user_from_org"),
            data=json.dumps({'organization_id': self.org.id}),
            content_type='application/json',
        )
        self.assertDictEqual(
            json.loads(resp.content),
            {
                'status': 'error',
                'message': 'missing the user_id'
            })
def test_remove_user_from_org_user_DNE(self):
"""DNE = does not exist"""
u = User.objects.create(username="b@b.com", email="b@be.com")
self.org.add_member(u)
resp = self.client.post(
reverse_lazy("accounts:remove_user_from_org"),
data=json.dumps({'organization_id': self.org.id, 'user_id': 9999}),
content_type='application/json',
)
self.assertDictEqual(
json.loads(resp.content),
{
'status': 'error',
'message': 'user does not exist'
})
def test_remove_user_from_org_org_DNE(self):
"""DNE = does not exist"""
u = User.objects.create(username="b@b.com", email="b@be.com")
self.org.add_member(u)
resp = self.client.post(
reverse_lazy("accounts:remove_user_from_org"),
data=json.dumps({'organization_id': 9999, 'user_id': u.id}),
content_type='application/json',
)
self.assertDictEqual(
json.loads(resp.content),
{
'status': 'error',
'message': 'Organization does not exist'
})
def test__get_js_role(self):
self.assertEquals(_get_js_role(ROLE_OWNER), 'owner')
self.assertEquals(_get_js_role(ROLE_MEMBER), 'member')
self.assertEquals(_get_js_role(ROLE_VIEWER), 'viewer')
def test__get_role_from_js(self):
self.assertEquals(_get_role_from_js('owner'), ROLE_OWNER)
self.assertEquals(_get_role_from_js('member'), ROLE_MEMBER)
self.assertEquals(_get_role_from_js('viewer'), ROLE_VIEWER)
def test_update_role(self):
u = User.objects.create(username="b@b.com", email="b@be.com")
self.org.add_member(u, role=ROLE_VIEWER)
ou = OrganizationUser.objects.get(
user_id=u.id, organization_id=self.org.id)
self.assertEquals(ou.role_level, ROLE_VIEWER)
resp = self.client.put(
reverse_lazy("accounts:update_role"),
data=json.dumps(
{
'organization_id': self.org.id,
'user_id': u.id,
'role': 'member'
}
),
content_type='application/json',
)
ou = OrganizationUser.objects.get(
user_id=u.id, organization_id=self.org.id)
self.assertDictEqual(
json.loads(resp.content),
{
'status': 'success'
})
self.assertEquals(ou.role_level, ROLE_MEMBER)
def test_update_role_no_perms(self):
"""
Test trying to change your own role when you aren't an owner.
"""
ou = OrganizationUser.objects.get(user=self.user,
organization=self.org)
ou.role_level = ROLE_MEMBER
ou.save()
url = reverse_lazy('accounts:update_role')
post_data = {'organization_id': self.org.id,
'user_id': self.user.id,
'role': 'owner'}
try:
self.client.put(
url,
data=json.dumps(post_data),
content_type='application/json'
)
except InsufficientPermission:
#Todo: currently superperms just raises an exception, rather
#than returning an HttpResponse. Update this when that changes.
pass
#ensure we didn't just become owner
self.assertFalse(self.org.is_owner(self.user))
def test_bad_save_request(self):
"""
A malformed request should return error-containing json.
"""
url = reverse_lazy('accounts:save_org_settings')
#lacks 'organization' key
post_data = {'organization_id': self.org.id}
res = self.client.put(
url,
data=json.dumps(post_data),
content_type='application/json'
)
response = json.loads(res.content)
#don't really care what the message is
self.assertEqual(response['status'], 'error')
def test_query_threshold(self):
url = reverse_lazy('accounts:save_org_settings')
post_data = {
'organization_id': self.org.id,
'organization': {
'query_threshold': 27,
'name': self.org.name
}
}
self.client.put(
url,
data=json.dumps(post_data),
content_type='application/json'
)
#reload org
org = Organization.objects.get(pk=self.org.pk)
self.assertEqual(org.query_threshold, 27)
def test_get_shared_fields_none(self):
url = reverse_lazy('accounts:get_shared_fields')
res = self.client.get(url, data={'organization_id': self.org.pk})
response = json.loads(res.content)
self.assertEqual(response,
{"status": "success",
"shared_fields": [],
"public_fields": []})
def test_get_shared_fields(self):
field1 = self.org.exportable_fields.create(
name='property_name', field_model='BuildingSnapshot'
)
field2 = self.org.exportable_fields.create(
name='building_count', field_model='BuildingSnapshot'
)
SharedBuildingField.objects.create(
org=self.org, field=field1
)
SharedBuildingField.objects.create(
org=self.org, field=field2
)
url = reverse_lazy('accounts:get_shared_fields')
res = self.client.get(url, data={'organization_id': self.org.pk})
response = json.loads(res.content)
self.assertEqual(response['status'], 'success')
shared_fields = response['shared_fields']
self.assertEqual(len(shared_fields), 2)
self.assertEqual(shared_fields[0]['title'],
'Building Count')
self.assertEqual(shared_fields[0]['sort_column'],
'building_count')
self.assertEqual(shared_fields[1]['title'],
'Property Name')
self.assertEqual(shared_fields[1]['sort_column'],
'property_name')
def test_add_shared_fields(self):
url = reverse_lazy('accounts:save_org_settings')
payload = {
u'organization_id': self.org.pk,
u'organization': {
u'owners': self.user.pk,
u'query_threshold': 2,
u'name': self.org.name,
u'fields': [
{
u'field_type': u'building_information',
u'sortable': True,
u'title': u'PM Property ID',
u'sort_column': u'pm_property_id',
u'class': u'is_aligned_right',
u'link': True,
u'checked': True,
u'static': False,
u'type': u'link',
u'title_class': u''
},
{
u'field_type': u'building_information',
u'sortable': True,
u'title': u'Tax Lot ID',
u'sort_column': u'tax_lot_id',
u'class': u'is_aligned_right',
u'link': True,
u'checked': True,
u'static': False,
u'type': u'link',
u'title_class': u''
}
],
}
}
self.client.put(
url,
json.dumps(payload),
content_type='application/json'
)
fields = self.org.exportable_fields.values_list('name', flat=True)
self.assertTrue('tax_lot_id' in fields)
self.assertTrue('pm_property_id' in fields)
self.assertEqual(len(fields), 2)
def test_update_user(self):
"""test for update_user"""
user_data = {
'user': {
'first_name': 'bob',
'last_name': 'd',
'email': 'some@hgg.com'
}
}
resp = self.client.put(
reverse_lazy("accounts:update_user"),
json.dumps(user_data),
content_type='application/json',
)
self.assertEquals(
json.loads(resp.content),
{
'status': 'success',
'user': {
u'api_key': u'',
u'email': u'some@hgg.com',
u'first_name': u'bob',
u'last_name': u'd'
}
})
def test_get_user_profile(self):
"""test for get_user_profile"""
resp = self.client.put(
reverse_lazy("accounts:get_user_profile"),
content_type='application/json',
)
self.assertEquals(
json.loads(resp.content),
{
'status': 'success',
'user': {
u'api_key': u'',
u'email': u'test_user@demo.com',
u'first_name': u'Johnny',
u'last_name': u'Energy'
}
})
resp = self.client.post(
reverse_lazy("accounts:generate_api_key"),
content_type='application/json',
)
resp = self.client.put(
reverse_lazy("accounts:get_user_profile"),
content_type='application/json',
)
self.assertEquals(
json.loads(resp.content),
{
'status': 'success',
'user': {
u'api_key': User.objects.get(pk=self.user.pk).api_key,
u'email': u'test_user@demo.com',
u'first_name': u'Johnny',
u'last_name': u'Energy'
}
})
def test_generate_api_key(self):
"""test for generate_api_key
will pick up user.api_key when it's ready
"""
resp = self.client.post(
reverse_lazy("accounts:generate_api_key"),
content_type='application/json',
)
user = User.objects.get(pk=self.user.pk)
api_key = user.api_key
self.assertEquals(
json.loads(resp.content),
{
'status': 'success',
'api_key': api_key,
})
def test_set_password(self):
"""test for set_password
"""
password_payload = {
'current_password': 'test_pass',
'password_1': 'new passwordD3',
'password_2': 'new passwordD3'
}
resp = self.client.put(
reverse_lazy("accounts:set_password"),
json.dumps(password_payload),
content_type='application/json',
)
user = User.objects.get(pk=self.user.pk)
self.assertTrue(user.check_password('new passwordD3'))
self.assertEquals(
json.loads(resp.content),
{
'status': 'success',
})
def test_set_password_only_put(self):
"""test for set_password only allowing put"""
password_payload = {
'current_password': 'test_pass',
'password_1': 'new password',
'password_2': 'new password'
}
resp = self.client.post(
reverse_lazy("accounts:set_password"),
json.dumps(password_payload),
content_type='application/json',
)
user = User.objects.get(pk=self.user.pk)
self.assertFalse(user.check_password('new password'))
self.assertEquals(
json.loads(resp.content),
{
'status': 'error', 'message': 'only HTTP PUT allowed',
})
resp = self.client.get(
reverse_lazy("accounts:set_password"),
password_payload,
content_type='application/json',
)
user = User.objects.get(pk=self.user.pk)
self.assertFalse(user.check_password('new password'))
self.assertEquals(
json.loads(resp.content),
{
'status': 'error', 'message': 'only HTTP PUT allowed',
})
def test_set_password_error_messages(self):
"""test for set_password produces proper messages"""
# check current password is invalid
password_payload = {
'current_password': 'test_pass INVALID',
'password_1': 'new password',
'password_2': 'new password'
}
resp = self.client.put(
reverse_lazy("accounts:set_password"),
json.dumps(password_payload),
content_type='application/json',
)
user = User.objects.get(pk=self.user.pk)
self.assertFalse(user.check_password('new password'))
self.assertEquals(
json.loads(resp.content),
{
'status': 'error', 'message': 'current password is not valid',
})
# check passwords don't match
password_payload = {
'current_password': 'test_pass',
'password_1': 'new password',
'password_2': 'non matching password'
}
resp = self.client.put(
reverse_lazy("accounts:set_password"),
json.dumps(password_payload),
content_type='application/json',
)
user = User.objects.get(pk=self.user.pk)
self.assertFalse(user.check_password('new password'))
self.assertEquals(
json.loads(resp.content),
{
'status': 'error', 'message': 'entered password do not match',
})
def test_set_password_meets_password_reqs(self):
"""test for set_password meets password reqs"""
# check new password is less than 8 chars
password_payload = {
'current_password': 'test_pass',
'password_1': 'new1234',
'password_2': 'new1234'
}
resp = self.client.put(
reverse_lazy("accounts:set_password"),
json.dumps(password_payload),
content_type='application/json',
)
user = User.objects.get(pk=self.user.pk)
self.assertFalse(user.check_password('new password'))
self.assertEquals(
json.loads(resp.content),
{
'status': 'error',
'message': 'Invalid Length (Must be 8 characters or more)',
})
# check new password is has uppercase letters
password_payload = {
'current_password': 'test_pass',
'password_1': 'newnewnew',
'password_2': 'newnewnew'
}
resp = self.client.put(
reverse_lazy("accounts:set_password"),
json.dumps(password_payload),
content_type='application/json',
)
user = User.objects.get(pk=self.user.pk)
self.assertFalse(user.check_password('new password'))
self.assertEquals(
json.loads(resp.content),
{
'status': 'error',
'message': (
'Must be more complex (Must contain 1 or more uppercase '
'characters)'
),
})
# check new password is has lowercase letters
password_payload = {
'current_password': 'test_pass',
'password_1': 'NEWNEWNEW',
'password_2': 'NEWNEWNEW'
}
resp = self.client.put(
reverse_lazy("accounts:set_password"),
json.dumps(password_payload),
content_type='application/json',
)
user = User.objects.get(pk=self.user.pk)
self.assertFalse(user.check_password('new password'))
self.assertEquals(
json.loads(resp.content),
{
'status': 'error',
'message': (
'Must be more complex (Must contain 1 or more lowercase '
'characters)'
),
})
# check new password is has alphanumeric letters
password_payload = {
'current_password': 'test_pass',
'password_1': 'nNEWNEWNEW',
'password_2': 'nNEWNEWNEW'
}
resp = self.client.put(
reverse_lazy("accounts:set_password"),
json.dumps(password_payload),
content_type='application/json',
)
user = User.objects.get(pk=self.user.pk)
self.assertFalse(user.check_password('new password'))
self.assertEquals(
json.loads(resp.content),
{
'status': 'error',
'message': (
'Must be more complex (Must contain 1 or more digits)'
),
})
password_payload = {
'current_password': 'test_pass',
'password_1': '12345678',
'password_2': '12345678'
}
resp = self.client.put(
reverse_lazy("accounts:set_password"),
json.dumps(password_payload),
content_type='application/json',
)
user = User.objects.get(pk=self.user.pk)
self.assertFalse(user.check_password('new password'))
self.assertEquals(
json.loads(resp.content),
{
'status': 'error',
'message': 'Based on a common sequence of characters',
})
class AuthViewTests(TestCase):
    """Tests for the is_authorized / default-organization account views."""
    def setUp(self):
        # One logged-in user who is a member of a single organization.
        # test__get_default_org below observes role "owner", so add_member's
        # default role is owner here.
        user_details = {
            'username': 'test_user@demo.com',
            'password': 'test_pass',
            'email': 'test_user@demo.com',
            'first_name': 'Johnny',
            'last_name': 'Energy',
        }
        self.user = User.objects.create_user(**user_details)
        self.org = Organization.objects.create(name='my org')
        self.org.add_member(self.user)
        self.client.login(**user_details)
    def test_is_authorized_base(self):
        """A member of the org gets True for both queried actions."""
        resp = self.client.post(
            reverse_lazy("accounts:is_authorized"),
            data=json.dumps({
                'organization_id': self.org.id,
                'actions': ['requires_owner', 'can_invite_member']
            }),
            content_type='application/json',
        )
        self.assertDictEqual(
            json.loads(resp.content),
            {
                'status': 'success',
                'auth': {
                    'requires_owner': True,
                    'can_invite_member': True,
                }
            })
    def test_is_authorized_parent_org_owner(self):
        """The owner of a parent org is authorized in the child org."""
        other_org = Organization.objects.create(name='not my org')
        other_user = User.objects.create(
            username="tester@be.com",
            email="tester@be.com",
        )
        other_org.add_member(other_user)
        # Make self.org the parent of other_org; self.user never joins
        # other_org directly.
        other_org.parent_org = self.org
        other_org.save()
        resp = self.client.post(
            reverse_lazy("accounts:is_authorized"),
            data=json.dumps({
                'organization_id': other_org.id,
                'actions': ['requires_owner', 'can_invite_member']
            }),
            content_type='application/json',
        )
        self.assertDictEqual(
            json.loads(resp.content),
            {
                'status': 'success',
                'auth': {
                    'requires_owner': True,
                    'can_invite_member': True,
                }
            })
    def test_is_authorized_not_in_org(self):
        """Querying an unrelated org reports the missing org-user link."""
        other_org = Organization.objects.create(name='not my org')
        other_user = User.objects.create(
            username="tester@be.com",
            email="tester@be.com",
        )
        other_org.add_member(other_user)
        resp = self.client.post(
            reverse_lazy("accounts:is_authorized"),
            data=json.dumps({
                'organization_id': other_org.id,
                'actions': ['requires_owner', 'can_invite_member']
            }),
            content_type='application/json',
        )
        # NOTE(review): the endpoint reports the absent OrganizationUser
        # relationship as 'user does not exist'.
        self.assertDictEqual(
            json.loads(resp.content),
            {
                'status': 'error',
                'message': 'user does not exist'
            })
    def test_is_authorized_org_DNE(self):
        """DNE == does not exist"""
        resp = self.client.post(
            reverse_lazy("accounts:is_authorized"),
            data=json.dumps({
                'organization_id': 99999999,
                'actions': ['requires_owner', 'can_invite_member']
            }),
            content_type='application/json',
        )
        self.assertDictEqual(
            json.loads(resp.content),
            {
                'status': 'error',
                'message': 'organization does not exist'
            })
    def test_is_authorized_actions_DNE(self):
        """DNE == does not exist"""
        # Omitting 'actions' entirely should be rejected.
        resp = self.client.post(
            reverse_lazy("accounts:is_authorized"),
            data=json.dumps({
                'organization_id': self.org.id,
            }),
            content_type='application/json',
        )
        self.assertDictEqual(
            json.loads(resp.content),
            {
                'status': 'error',
                'message': 'no actions to check'
            })
    def test_set_default_organization(self):
        """test seed.views.accounts.set_default_organization"""
        resp = self.client.post(
            reverse_lazy("accounts:set_default_organization"),
            data=json.dumps({
                'organization': {
                    'id': self.org.id,
                }
            }),
            content_type='application/json',
        )
        self.assertDictEqual(
            json.loads(resp.content),
            {
                'status': 'success',
            })
        # refresh the user
        u = User.objects.get(pk=self.user.pk)
        self.assertEqual(u.default_organization, self.org)
    def test__get_default_org(self):
        """test seed.views.main._get_default_org"""
        org_id, org_name, org_role = _get_default_org(self.user)
        # check standard case
        self.assertEqual(org_id, self.org.id)
        self.assertEqual(org_name, self.org.name)
        self.assertEqual(org_role, "owner")
        # check that the default org was set
        u = User.objects.get(pk=self.user.pk)
        self.assertEqual(u.default_organization, self.org)
        # check that "" is returned for a user without an org
        other_user = User.objects.create(
            username="tester@be.com",
            email="tester@be.com",
        )
        org_id, org_name, org_role = _get_default_org(other_user)
        self.assertEqual(org_id, "")
        self.assertEqual(org_name, "")
        self.assertEqual(org_role, "")
        # check that the user is still in the default org, or update
        other_user.default_organization = self.org
        other_user.save()
        other_user = User.objects.get(pk=other_user.pk)
        self.assertEqual(other_user.default_organization, self.org)
        # _get_default_org should remove the user from the org and set the
        # next available org as default or set to ""
        org_id, org_name, org_role = _get_default_org(other_user)
        self.assertEqual(org_id, "")
        self.assertEqual(org_name, "")
        self.assertEqual(org_role, "")
# ---------------------------------------------------------------------------
# Authors: Paulius Sarka <paulius.sarka@gmail.com>
#
# Based on sklearn/tree/tree.py (BSD 3 clause)
#
# Licence: BSD 3 clause
from abc import ABC
from abc import abstractmethod
from math import ceil
import numbers
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from uplift.validation.class_weight import compute_sample_weight
from uplift.validation.check import check_array
from uplift.validation.check import check_random_state
from uplift.validation.multiclass import check_classification_targets
from uplift.exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier"]
# =============================================================================
# Types and constants
# =============================================================================
# DTYPE: dtype X is converted to in fit()/predict (via check_array);
# DOUBLE: dtype used for y and sample_weight before handing to the builder.
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
# Criterion name -> criterion class, for classification trees.
CRITERIA_CLF = {"gini": _criterion.Gini,
                "entropy": _criterion.Entropy,
                "uplift_gini": _criterion.UpliftGini,
                "uplift_entropy": _criterion.UpliftEntropy}
# Criterion name -> criterion class, for regression trees.
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}
# Splitter name -> splitter class; fit() picks the table by input sparsity.
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
                   "random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
                    "random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(ABC, BaseEstimator):
    """Base class for decision trees.
    Warning: This class should not be used directly.
    Use derived classes instead.
    """
    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_weight_fraction_leaf,
                 max_features,
                 max_leaf_nodes,
                 random_state,
                 class_weight=None,
                 presort=False):
        # Hyper-parameters are stored verbatim; all validation and
        # resolution (e.g. "auto" max_features) happens in fit().
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes
        self.class_weight = class_weight
        self.presort = presort
        # Fitted attributes, populated by fit().
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        self.tree_ = None
        self.max_features_ = None
    def fit(self, X, y, group, sample_weight=None, check_input=True, X_idx_sorted=None):
        """Build a decision tree from the training set (X, y, group).
        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.
        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression). In the regression case, use ``dtype=np.float64`` and
            ``order='C'`` for maximum efficiency.
        group : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The group values, 0 for control, 1 for target.
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many tree
            are grown on the same dataset, this allows the ordering to be
            cached between trees. If None, the data will be sorted here.
            Don't use this parameter unless you know what to do.
        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)
        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csc")
            y = check_array(y, ensure_2d=False, dtype=None)
            group = check_array(group, ensure_2d=False, dtype=None)
            if issparse(X):
                X.sort_indices()
                if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
                    raise ValueError("No support for np.int64 index based "
                                     "sparse matrices")
        # Determine output settings
        n_samples, self.n_features_ = X.shape
        is_classification = isinstance(self, ClassifierMixin)
        y = np.atleast_1d(y)
        group = np.atleast_1d(group)
        expanded_class_weight = None
        if y.ndim == 1:
            y = np.reshape(y, (-1, 1))
        if group.ndim == 1:
            group = np.reshape(group, (-1, 1))
        self.n_outputs_ = y.shape[1]
        if is_classification:
            check_classification_targets(y)
            # Encode y & group together before passing to the builder.
            # NOTE(review): 2*group + y assumes both y and group are binary
            # (labels end up in {0, 1, 2, 3}) -- see the "check if binary"
            # TODOs below; neither is verified here.
            y = np.copy(2*group + y)
            self.classes_ = []
            self.n_classes_ = []
            if self.class_weight is not None:
                y_original = np.copy(y)
            # np.intp replaces the deprecated alias np.int, which was
            # removed in NumPy 1.24.
            y_encoded = np.zeros(y.shape, dtype=np.intp)
            for k in range(self.n_outputs_):
                classes_k, y_encoded[:, k] = np.unique(y[:, k], return_inverse=True)
                self.classes_.append(classes_k)
                self.n_classes_.append(classes_k.shape[0])
            y = y_encoded
            # TODO check if binary
            if self.class_weight is not None:
                expanded_class_weight = compute_sample_weight(
                    self.class_weight, y_original)
        else:
            self.classes_ = [None] * self.n_outputs_
            self.n_classes_ = [1] * self.n_outputs_
            # TODO encode group and check if binary
        self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        # Check parameters
        max_depth = ((2 ** 31) - 1 if self.max_depth is None
                     else self.max_depth)
        max_leaf_nodes = (-1 if self.max_leaf_nodes is None
                          else self.max_leaf_nodes)
        # Float values for min_samples_leaf/split are fractions of n_samples.
        if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
            min_samples_leaf = self.min_samples_leaf
        else:  # float
            min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
        if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
            min_samples_split = self.min_samples_split
        else:  # float
            min_samples_split = int(ceil(self.min_samples_split * n_samples))
            min_samples_split = max(2, min_samples_split)
        # A split must leave room for two leaves of min_samples_leaf each.
        min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
        if isinstance(self.max_features, str):
            if self.max_features == "auto":
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_)))
                else:
                    max_features = self.n_features_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_)))
            else:
                raise ValueError(
                    'Invalid value for max_features. Allowed string '
                    'values are "auto", "sqrt" or "log2".')
        elif self.max_features is None:
            max_features = self.n_features_
        elif isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            if self.max_features > 0.0:
                max_features = max(1,
                                   int(self.max_features * self.n_features_))
            else:
                max_features = 0
        self.max_features_ = max_features
        if len(y) != n_samples:
            raise ValueError("Number of labels=%d does not match "
                             "number of samples=%d" % (len(y), n_samples))
        if not (0. < self.min_samples_split <= 1. or
                2 <= self.min_samples_split):
            # message wording fixed ("must be in at least 2" -> "must be at
            # least 2")
            raise ValueError("min_samples_split must be at least 2 "
                             "or in (0, 1], got %s" % min_samples_split)
        if not (0. < self.min_samples_leaf <= 0.5 or
                1 <= self.min_samples_leaf):
            # message wording fixed ("at least than 1" -> "at least 1")
            raise ValueError("min_samples_leaf must be at least 1 "
                             "or in (0, 0.5], got %s" % min_samples_leaf)
        if not 0 <= self.min_weight_fraction_leaf <= 0.5:
            raise ValueError("min_weight_fraction_leaf must be in [0, 0.5]")
        if max_depth <= 0:
            raise ValueError("max_depth must be greater than zero. ")
        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")
        if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
            raise ValueError("max_leaf_nodes must be integral number but was "
                             "%r" % max_leaf_nodes)
        if -1 < max_leaf_nodes < 2:
            raise ValueError(("max_leaf_nodes {0} must be either smaller than "
                              "0 or larger than 1").format(max_leaf_nodes))
        if sample_weight is not None:
            if (getattr(sample_weight, "dtype", None) != DOUBLE or
                    not sample_weight.flags.contiguous):
                sample_weight = np.ascontiguousarray(
                    sample_weight, dtype=DOUBLE)
            if len(sample_weight.shape) > 1:
                raise ValueError("Sample weights array has more "
                                 "than one dimension: %d" %
                                 len(sample_weight.shape))
            if len(sample_weight) != n_samples:
                raise ValueError("Number of weights=%d does not match "
                                 "number of samples=%d" %
                                 (len(sample_weight), n_samples))
        if expanded_class_weight is not None:
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        # Set min_weight_leaf from min_weight_fraction_leaf
        if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
            min_weight_leaf = (self.min_weight_fraction_leaf *
                               np.sum(sample_weight))
        else:
            min_weight_leaf = 0.
        presort = self.presort
        # Allow presort to be 'auto', which means True if the dataset is dense,
        # otherwise it will be False.
        if self.presort == 'auto' and issparse(X):
            presort = False
        elif self.presort == 'auto':
            presort = True
        if presort is True and issparse(X):
            raise ValueError("Presorting is not supported for sparse "
                             "matrices.")
        # If multiple trees are built on the same dataset, we only want to
        # presort once. Splitters now can accept presorted indices if desired,
        # but do not handle any presorting themselves. Ensemble algorithms
        # which desire presorting must do presorting themselves and pass that
        # matrix into each tree.
        if X_idx_sorted is None and presort:
            X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
                                             dtype=np.int32)
        if presort and X_idx_sorted.shape != X.shape:
            raise ValueError("The shape of X (X.shape = {}) doesn't match "
                             "the shape of X_idx_sorted (X_idx_sorted"
                             ".shape = {})".format(X.shape,
                                                   X_idx_sorted.shape))
        # Build tree
        criterion = self.criterion
        if not isinstance(criterion, Criterion):
            if is_classification:
                criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
                                                         self.n_classes_)
            else:
                criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
        SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
        splitter = self.splitter
        if not isinstance(self.splitter, Splitter):
            splitter = SPLITTERS[self.splitter](criterion,
                                                self.max_features_,
                                                min_samples_leaf,
                                                min_weight_leaf,
                                                random_state,
                                                self.presort)
        self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
        # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
        if max_leaf_nodes < 0:
            builder = DepthFirstTreeBuilder(splitter, min_samples_split,
                                            min_samples_leaf,
                                            min_weight_leaf,
                                            max_depth)
        else:
            builder = BestFirstTreeBuilder(splitter, min_samples_split,
                                           min_samples_leaf,
                                           min_weight_leaf,
                                           max_depth,
                                           max_leaf_nodes)
        builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
        # For single-output problems, unwrap the per-output lists so
        # classes_ / n_classes_ are scalars/arrays, matching sklearn.
        if self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        return self
    def _validate_X_predict(self, X, check_input):
        """Validate X whenever one tries to predict, apply, predict_proba"""
        if self.tree_ is None:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before exploiting the model.")
        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csr")
            if issparse(X) and (X.indices.dtype != np.intc or
                                X.indptr.dtype != np.intc):
                raise ValueError("No support for np.int64 index based "
                                 "sparse matrices")
        n_features = X.shape[1]
        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is %s and "
                             "input n_features is %s "
                             % (self.n_features_, n_features))
        return X
    def apply(self, X, check_input=True):
        """
        Returns the index of the leaf that each sample is predicted as.
        .. versionadded:: 0.17
        Parameters
        ----------
        X : array_like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Returns
        -------
        X_leaves : array_like, shape = [n_samples,]
            For each datapoint x in X, return the index of the leaf x
            ends up in. Leaves are numbered within
            ``[0; self.tree_.node_count)``, possibly with gaps in the
            numbering.
        """
        X = self._validate_X_predict(X, check_input)
        return self.tree_.apply(X)
    def decision_path(self, X, check_input=True):
        """Return the decision path in the tree
        Parameters
        ----------
        X : array_like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Returns
        -------
        indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non zero elements
            indicates that the samples goes through the nodes.
        """
        X = self._validate_X_predict(X, check_input)
        return self.tree_.decision_path(X)
    @property
    def feature_importances_(self):
        """Return the feature importances.
        The importance of a feature is computed as the (normalized) total
        reduction of the criterion brought by that feature.
        It is also known as the Gini importance.
        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.tree_ is None:
            raise NotFittedError("Estimator not fitted, call `fit` before"
                                 " `feature_importances_`.")
        return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:
        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.
    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:
        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        For multi-output, the weights of each column of y will be multiplied.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    presort : bool, optional (default=False)
        Whether to presort the data to speed up the finding of best splits in
        fitting. For the default settings of a decision tree on large
        datasets, setting this to true may slow down the training process.
        When using either a smaller dataset or a restricted depth, this may
        speed up the training.

    Attributes
    ----------
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem),
        or a list of arrays of class labels (multi-output problem).
    feature_importances_ : array of shape = [n_features]
        The feature importances. The higher, the more important the
        feature. The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance [4]_.
    max_features_ : int,
        The inferred value of max_features.
    n_classes_ : int or list
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    """

    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None,
                 presort=False):
        super(DecisionTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state,
            presort=presort)

    @staticmethod
    def _uplift_from_proba(proba):
        """Compute the uplift for one output from a 2D leaf-value slice.

        Columns are ordered as [control/negative, control/positive,
        target/negative, target/positive] (this matches the indexing the
        original single- and multi-output code paths used). A group with
        zero samples yields a probability of 0 instead of a 0/0
        warning/NaN, via masked division.
        """
        control = proba[:, 0] + proba[:, 1]
        target = proba[:, 2] + proba[:, 3]
        # Masked divisions: write 0 where the denominator is 0 and never
        # evaluate 0/0, so no RuntimeWarning is emitted.
        p_control = np.divide(proba[:, 1], control,
                              out=np.zeros_like(control), where=control != 0)
        p_target = np.divide(proba[:, 3], target,
                             out=np.zeros_like(target), where=target != 0)
        return p_target - p_control

    def predict_uplift(self, X, check_input=True):
        """Predict uplift for X.

        Predict the difference in probabilities of positive response between
        target and control groups.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        uplift : array of shape = [n_samples], or a list of n_outputs such
            arrays if n_outputs > 1.
            For each sample, the predicted positive-response probability of
            the target group minus that of the control group.
        """
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)
        if self.n_outputs_ == 1:
            return self._uplift_from_proba(proba)
        # Multi-output: compute the uplift independently for each output.
        # This now uses the same masked division as the single-output path;
        # the previous eager `control_1 / control` raised RuntimeWarnings
        # (0/0 -> NaN) on empty groups before np.where discarded the NaNs.
        return [
            self._uplift_from_proba(proba[:, k, :self.n_classes_[k]])
            for k in range(self.n_outputs_)
        ]
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : string, optional (default="mse")
        The function measuring split quality. Only "mse" (mean squared
        error) is supported; it is equivalent to variance reduction as a
        feature-selection criterion.
    splitter : string, optional (default="best")
        The strategy used to pick the split at each node: "best" chooses
        the best split, "random" the best random split.
    max_features : int, float, string or None, optional (default=None)
        The number of features considered when looking for the best split:
        - If int, consider `max_features` features at each split.
        - If float, `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at
          each split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if that
        requires inspecting more than ``max_features`` features.
    max_depth : int or None, optional (default=None)
        The maximum tree depth. If None, nodes are expanded until all
        leaves are pure or contain fewer than min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:
        - If int, `min_samples_split` is the minimum number itself.
        - If float, it is a percentage and
          `ceil(min_samples_split * n_samples)` is the minimum number of
          samples for each split.
    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required at a leaf node:
        - If int, `min_samples_leaf` is the minimum number itself.
        - If float, it is a percentage and
          `ceil(min_samples_leaf * n_samples)` is the minimum number of
          samples for each node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be
        at a leaf node.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion, best
        nodes being defined by relative impurity reduction. If None, the
        number of leaf nodes is unlimited; otherwise ``max_depth`` will
        be ignored.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    presort : bool, optional (default=False)
        Whether to presort the data to speed up finding the best splits
        during fitting. With default settings on large datasets this may
        slow training down; on smaller datasets or with restricted depth
        it may speed training up.

    Attributes
    ----------
    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the
        feature); computed as the (normalized) total reduction of the
        criterion brought by that feature, also known as the Gini
        importance [4]_.
    max_features_ : int,
        The inferred value of max_features.
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    tree_ : Tree object
        The underlying Tree object.

    See also
    --------
    DecisionTreeClassifier

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    """

    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 presort=False):
        # The regressor adds no state of its own; every setting is simply
        # forwarded to the shared BaseDecisionTree constructor.
        super(DecisionTreeRegressor, self).__init__(
            criterion=criterion, splitter=splitter, max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features, max_leaf_nodes=max_leaf_nodes,
            random_state=random_state, presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    Extra-trees are built differently from classic decision trees: when
    looking for the best split to separate the samples of a node into two
    groups, one random split is drawn for each of the `max_features`
    randomly selected features and the best of those is kept. With
    `max_features` set to 1 this amounts to building a totally random
    decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """

    def __init__(self,
                 criterion="gini",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        # Identical to DecisionTreeClassifier apart from the defaults
        # (random splitter, max_features="auto").
        super(ExtraTreeClassifier, self).__init__(
            criterion=criterion, splitter=splitter, max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features, max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight, random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees are built differently from classic decision trees: when
    looking for the best split to separate the samples of a node into two
    groups, one random split is drawn for each of the `max_features`
    randomly selected features and the best of those is kept. With
    `max_features` set to 1 this amounts to building a totally random
    decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """

    def __init__(self,
                 criterion="mse",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None):
        # Identical to DecisionTreeRegressor apart from the defaults
        # (random splitter, max_features="auto").
        super(ExtraTreeRegressor, self).__init__(
            criterion=criterion, splitter=splitter, max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features, max_leaf_nodes=max_leaf_nodes,
            random_state=random_state)
| |
# Copyright (c) 2014 Tycho Andersen
# Copyright (c) 2014 dequis
# Copyright (c) 2014-2015 Joseph Razik
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2015 reus
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module define a widget that displays icons to launch softwares or commands
when clicked -- a launchbar.
Only png icon files are displayed, not xpm because cairo doesn't support
loading of xpm file.
The order of displaying (from left to right) is in the order of the list.
If no icon was found for the name provided and if default_icon is set to None
then the name is printed instead. If default_icon is defined then this icon is
displayed instead.
To execute a software:
- ('thunderbird', 'thunderbird -safe-mode', 'launch thunderbird in safe mode')
To execute a python command in qtile, begin with by 'qshell:'
- ('logout', 'qshell:self.qtile.cmd_shutdown()', 'logout from qtile')
"""
import os.path
import cairocffi
from xdg.IconTheme import getIconPath
from libqtile import bar
from libqtile.log_utils import logger
from libqtile.widget import base
class LaunchBar(base._Widget):
    """
    A widget that displays icons to launch the associated command.
    Text will be displayed when no icon is found.
    Widget requirements: pyxdg_.
    .. _pyxdg: https://freedesktop.org/wiki/Software/pyxdg/
    Parameters
    ==========
    progs :
        a list of tuples ``(software_name, command_to_execute, comment)``, for
        example::
            ('thunderbird', 'thunderbird -safe-mode', 'launch thunderbird in safe mode')
            ('logout', 'qshell:self.qtile.cmd_shutdown()', 'logout from qtile')
    """
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ("padding", 2, "Padding between icons"),
        (
            "default_icon",
            "/usr/share/icons/oxygen/256x256/mimetypes/application-x-executable.png",
            "Default icon not found",
        ),
        ("font", "sans", "Text font"),
        ("fontsize", None, "Font pixel size. Calculated if None."),
        ("fontshadow", None, "Font shadow color, default is None (no shadow)"),
        ("foreground", "#ffffff", "Text colour."),
    ]
    def __init__(self, progs=None, width=bar.CALCULATED, **config):
        """Build the widget state from the user's ``progs`` list.

        Each entry becomes a dict {"name", "cmd", "comment"} keyed by its
        position in the original list.
        """
        base._Widget.__init__(self, width, **config)
        if progs is None:
            progs = []
        self.add_defaults(LaunchBar.defaults)
        # Per-icon caches, all keyed by the program/icon name.
        self.surfaces = {}       # cairo pattern or _TextBox per name
        self.icons_files = {}    # resolved icon file path (or None)
        self.icons_widths = {}   # pixel width of each rendered icon/text
        self.icons_offsets = {}  # x offset of each icon, filled in draw()
        # For now, ignore the comments but may be one day it will be useful
        self.progs = dict(
            enumerate(
                [
                    {
                        "name": prog[0],
                        "cmd": prog[1],
                        "comment": prog[2] if len(prog) > 2 else None,
                    }
                    for prog in progs
                ]
            )
        )
        # De-duplicated set of names used for icon lookup.
        self.progs_name = set([prog["name"] for prog in self.progs.values()])
        self.length_type = bar.STATIC
        self.length = 0
    def _configure(self, qtile, pbar):
        # Standard qtile configure hook: resolve icon paths, build the
        # cairo surfaces, then compute the widget's static width.
        base._Widget._configure(self, qtile, pbar)
        self.lookup_icons()
        self.setup_images()
        self.length = self.calculate_length()
    def setup_images(self):
        """Create an image structure for each icon file."""
        for img_name, iconfile in self.icons_files.items():
            if iconfile is None:
                logger.warning(
                    'No icon found for application "%s" (%s) switch to text mode',
                    img_name,
                    iconfile,
                )
                # if no icon is found and no default icon was set, we just
                # print the name, based on a textbox.
                textbox = base._TextBox()
                textbox._configure(self.qtile, self.bar)
                textbox.layout = self.drawer.textlayout(
                    textbox.text,
                    self.foreground,
                    self.font,
                    self.fontsize,
                    self.fontshadow,
                    markup=textbox.markup,
                )
                # the name will be displayed
                textbox.text = img_name
                textbox.calculate_length()
                self.icons_widths[img_name] = textbox.width
                self.surfaces[img_name] = textbox
                continue
            else:
                try:
                    img = cairocffi.ImageSurface.create_from_png(iconfile)
                except cairocffi.Error:
                    logger.exception(
                        'Error loading icon for application "%s" (%s)', img_name, iconfile
                    )
                    # NOTE(review): this `return` aborts image setup for ALL
                    # remaining icons after one bad PNG, leaving their
                    # surfaces/widths unset — presumably draw() would then
                    # KeyError; confirm whether `continue` was intended.
                    return
            # Scale the icon to fit the bar height (minus a 4px margin),
            # preserving aspect ratio.
            input_width = img.get_width()
            input_height = img.get_height()
            sp = input_height / (self.bar.height - 4)
            width = int(input_width / sp)
            imgpat = cairocffi.SurfacePattern(img)
            scaler = cairocffi.Matrix()
            scaler.scale(sp, sp)
            # Shift by the padding and the 2px vertical margin.
            scaler.translate(self.padding * -1, -2)
            imgpat.set_matrix(scaler)
            imgpat.set_filter(cairocffi.FILTER_BEST)
            self.surfaces[img_name] = imgpat
            self.icons_widths[img_name] = width
    def _lookup_icon(self, name):
        """Search for the icon corresponding to one command."""
        self.icons_files[name] = None
        # if the software_name is directly an absolute path icon file
        if os.path.isabs(name):
            # name start with '/' thus it's an absolute path
            root, ext = os.path.splitext(name)
            if ext == ".png":
                self.icons_files[name] = name if os.path.isfile(name) else None
            else:
                # try to add the extension
                self.icons_files[name] = name + ".png" if os.path.isfile(name + ".png") else None
        else:
            # Delegate to the XDG icon-theme lookup (pyxdg).
            self.icons_files[name] = getIconPath(name)
        # no search method found an icon, so default icon
        if self.icons_files[name] is None:
            self.icons_files[name] = self.default_icon
    def lookup_icons(self):
        """Search for the icons corresponding to the commands to execute."""
        if self.default_icon is not None:
            if not os.path.isfile(self.default_icon):
                # if the default icon provided is not found, switch to
                # text mode
                self.default_icon = None
        for name in self.progs_name:
            self._lookup_icon(name)
    def get_icon_in_position(self, x, y):
        """Determine which icon is clicked according to its position.

        Returns the prog index whose right edge (plus half the padding)
        is the first one past the click's x coordinate, or None if the
        click falls past the last icon.
        """
        for i in self.progs:
            if x < (
                self.icons_offsets[i]
                + self.icons_widths[self.progs[i]["name"]]
                + self.padding / 2
            ):
                return i
    def button_press(self, x, y, button):
        """Launch the associated command to the clicked icon."""
        base._Widget.button_press(self, x, y, button)
        if button == 1:
            icon = self.get_icon_in_position(x, y)
            if icon is not None:
                cmd = self.progs[icon]["cmd"]
                if cmd.startswith("qshell:"):
                    # NOTE(review): exec() of the user-configured command
                    # string. Config files are trusted input in qtile, but
                    # never route untrusted text into a qshell: command.
                    exec(cmd[7:].lstrip())
                else:
                    self.qtile.cmd_spawn(cmd)
                self.draw()
    def draw(self):
        """Draw the icons in the widget."""
        self.drawer.clear(self.background or self.bar.background)
        xoffset = 0
        for i in sorted(self.progs.keys()):
            # Remember where this icon starts so button_press can map a
            # click x coordinate back to a prog index.
            self.icons_offsets[i] = xoffset + self.padding
            name = self.progs[i]["name"]
            icon_width = self.icons_widths[name]
            self.drawer.ctx.move_to(self.offset + xoffset, icon_width)
            # NOTE(review): clear() is called again for every icon — looks
            # redundant with the clear above; confirm it is needed to reset
            # the drawer between per-icon draw() calls.
            self.drawer.clear(self.background or self.bar.background)
            if isinstance(self.surfaces[name], base._TextBox):
                # display the name if no icon was found and no default icon
                textbox = self.surfaces[name]
                textbox.layout.draw(
                    self.padding + textbox.actual_padding,
                    int((self.bar.height - textbox.layout.height) / 2.0) + 1,
                )
            else:
                # display an icon
                self.drawer.ctx.set_source(self.surfaces[name])
                self.drawer.ctx.paint()
            self.drawer.draw(
                offsetx=self.offset + xoffset,
                offsety=self.offsety,
                width=icon_width + self.padding,
            )
            xoffset += icon_width + self.padding
        if self.padding:
            # Trailing padding after the last icon.
            self.drawer.draw(
                offsetx=self.offset + xoffset, offsety=self.offsety, width=self.padding
            )
    def calculate_length(self):
        """Compute the width of the widget according to each icon width."""
        return sum(
            self.icons_widths[prg["name"]] for prg in self.progs.values()
        ) + self.padding * (len(self.progs) + 1)
| |
"""
Custom manager for Objects.
"""
import re
from itertools import chain
from django.db.models import Q
from django.conf import settings
from django.db.models.fields import exceptions
from evennia.typeclasses.managers import TypedObjectManager, TypeclassManager
from evennia.utils.utils import is_iter, make_iter, string_partial_matching
__all__ = ("ObjectManager",)
_GA = object.__getattribute__
# delayed import
_ATTR = None
_MULTIMATCH_REGEX = re.compile(settings.SEARCH_MULTIMATCH_REGEX, re.I + re.U)
# Try to use a custom way to parse id-tagged multimatches.
class ObjectDBManager(TypedObjectManager):
"""
This ObjectManager implements methods for searching
and manipulating Objects directly from the database.
Evennia-specific search methods (will return Typeclasses or
lists of Typeclasses, whereas Django-general methods will return
Querysets or database objects).
dbref (converter)
get_id (alias: dbref_search)
get_dbref_range
object_totals
typeclass_search
get_object_with_account
get_objs_with_key_and_typeclass
get_objs_with_attr
get_objs_with_attr_match
get_objs_with_db_property
get_objs_with_db_property_match
get_objs_with_key_or_alias
get_contents
object_search (interface to many of the above methods,
equivalent to evennia.search_object)
copy_object
"""
#
# ObjectManager Get methods
#
# account related
    def get_object_with_account(self, ostring, exact=True, candidates=None):
        """
        Search for an object based on its account's name or dbref.

        Args:
            ostring (str or int): Search criterion or dbref. Searching
                for an account is sometimes initiated by appending an `*` to
                the beginning of the search criterion (e.g. in
                local_and_global_search). This is stripped here.
            exact (bool, optional): Require an exact account match.
            candidates (list, optional): Only search among this list of possible
                object candidates.

        Return:
            match (query): Matching query.

        Notes:
            NOTE(review): in the fuzzy branch, when no candidate objects
            match the prefix, this method falls through and implicitly
            returns None rather than an empty queryset — confirm callers
            handle a None return.
        """
        ostring = str(ostring).lstrip("*")
        # simplest case - search by dbref
        dbref = self.dbref(ostring)
        if dbref:
            try:
                return self.get(db_account__id=dbref)
            except self.model.DoesNotExist:
                pass
        # not a dbref. Search by name.
        # Restrict to the given candidates (skipping falsy entries), or
        # match everything when no candidates are given.
        cand_restriction = (
            candidates is not None
            and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj])
            or Q()
        )
        if exact:
            return self.filter(cand_restriction & Q(db_account__username__iexact=ostring)).order_by(
                "id"
            )
        else:  # fuzzy matching
            # First narrow with a cheap istartswith on the database, then
            # refine with the in-memory partial-matching algorithm.
            obj_cands = self.select_related().filter(
                cand_restriction & Q(db_account__username__istartswith=ostring)
            )
            acct_cands = [obj.account for obj in obj_cands]
            if obj_cands:
                index_matches = string_partial_matching(
                    [acct.key for acct in acct_cands], ostring, ret_index=True
                )
                acct_cands = [acct_cands[i].id for i in index_matches]
                return obj_cands.filter(db_account__id__in=acct_cands).order_by("id")
def get_objs_with_key_and_typeclass(self, oname, otypeclass_path, candidates=None):
"""
Returns objects based on simultaneous key and typeclass match.
Args:
oname (str): Object key to search for
otypeclass_path (str): Full Python path to tyepclass to search for
candidates (list, optional): Only match among the given list of candidates.
Returns:
matches (query): The matching objects.
"""
cand_restriction = (
candidates is not None
and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj])
or Q()
)
return self.filter(
cand_restriction & Q(db_key__iexact=oname, db_typeclass_path__exact=otypeclass_path)
).order_by("id")
# attr/property related
def get_objs_with_attr(self, attribute_name, candidates=None):
"""
Get objects based on having a certain Attribute defined.
Args:
attribute_name (str): Attribute name to search for.
candidates (list, optional): Only match among the given list of object
candidates.
Returns:
matches (query): All objects having the given attribute_name defined at all.
"""
cand_restriction = (
candidates is not None and Q(id__in=[obj.id for obj in candidates]) or Q()
)
return self.filter(cand_restriction & Q(db_attributes__db_key=attribute_name)).order_by(
"id"
)
def get_objs_with_attr_value(
self, attribute_name, attribute_value, candidates=None, typeclasses=None
):
"""
Get all objects having the given attrname set to the given value.
Args:
attribute_name (str): Attribute key to search for.
attribute_value (any): Attribute value to search for. This can also be database objects.
candidates (list, optional): Candidate objects to limit search to.
typeclasses (list, optional): Python pats to restrict matches with.
Returns:
matches (query): Objects fullfilling both the `attribute_name` and
`attribute_value` criterions.
Notes:
This uses the Attribute's PickledField to transparently search the database by matching
the internal representation. This is reasonably effective but since Attribute values
cannot be indexed, searching by Attribute key is to be preferred whenever possible.
"""
cand_restriction = (
candidates is not None
and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj])
or Q()
)
type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()
results = self.filter(
cand_restriction
& type_restriction
& Q(db_attributes__db_key=attribute_name)
& Q(db_attributes__db_value=attribute_value)
).order_by("id")
return results
def get_objs_with_db_property(self, property_name, candidates=None):
"""
Get all objects having a given db field property.
Args:
property_name (str): The name of the field to match for.
candidates (list, optional): Only search among th egiven candidates.
Returns:
matches (list): The found matches.
"""
property_name = "db_%s" % property_name.lstrip("db_")
cand_restriction = (
candidates is not None
and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj])
or Q()
)
querykwargs = {property_name: None}
try:
return list(self.filter(cand_restriction).exclude(Q(**querykwargs)).order_by("id"))
except exceptions.FieldError:
return []
    def get_objs_with_db_property_value(
        self, property_name, property_value, candidates=None, typeclasses=None
    ):
        """
        Get objects with a specific field name and value.

        Args:
            property_name (str): Field name to search for.
            property_value (any): Value required for field with `property_name` to have.
            candidates (list, optional): List of objects to limit search to.
            typeclasses (list, optional): List of typeclass-path strings to restrict matches with

        Returns:
            matches (list): The matching objects, or an empty list if the
                field name is invalid or the value cannot be used as a
                search criterion for that field.

        """
        # Normalize to the db_-prefixed field name used by the model.
        if isinstance(property_name, str):
            if not property_name.startswith("db_"):
                property_name = "db_%s" % property_name
        querykwargs = {property_name: property_value}
        # Optional restrictions; an empty Q() matches everything.
        cand_restriction = (
            candidates is not None
            and Q(pk__in=[_GA(obj, "id") for obj in make_iter(candidates) if obj])
            or Q()
        )
        type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()
        try:
            return list(
                self.filter(cand_restriction & type_restriction & Q(**querykwargs)).order_by("id")
            )
        except exceptions.FieldError:
            # property_name is not a valid field on this model.
            return []
        except ValueError:
            # The field rejected the given value type; log and treat as
            # "no matches". Import is deferred to avoid a circular import.
            from evennia.utils import logger
            logger.log_err(
                "The property '%s' does not support search criteria of the type %s."
                % (property_name, type(property_value))
            )
            return []
def get_contents(self, location, excludeobj=None):
"""
Get all objects that has a location set to this one.
Args:
location (Object): Where to get contents from.
excludeobj (Object or list, optional): One or more objects
to exclude from the match.
Returns:
contents (query): Matching contents, without excludeobj, if given.
"""
exclude_restriction = (
Q(pk__in=[_GA(obj, "id") for obj in make_iter(excludeobj)]) if excludeobj else Q()
)
return self.filter(db_location=location).exclude(exclude_restriction).order_by("id")
    def get_objs_with_key_or_alias(self, ostring, exact=True, candidates=None, typeclasses=None):
        """
        Search objects by key or alias.

        Args:
            ostring (str): A search criterion.
            exact (bool, optional): Require exact match of ostring
                (still case-insensitive). If `False`, will do fuzzy matching
                using `evennia.utils.utils.string_partial_matching` algorithm.
            candidates (list): Only match among these candidates.
            typeclasses (list): Only match objects with typeclasses having these path strings.

        Returns:
            matches (query): A list of matches of length 0, 1 or more.

        """
        if not isinstance(ostring, str):
            # Allow passing an object with a .key instead of a string.
            if hasattr(ostring, "key"):
                ostring = ostring.key
            else:
                return []
        if is_iter(candidates) and not len(candidates):
            # if candidates is an empty iterable there can be no matches
            # Exit early.
            return []
        # build query objects
        candidates_id = [_GA(obj, "id") for obj in make_iter(candidates) if obj]
        cand_restriction = candidates is not None and Q(pk__in=candidates_id) or Q()
        type_restriction = typeclasses and Q(db_typeclass_path__in=make_iter(typeclasses)) or Q()
        if exact:
            # exact match - do direct search
            # Matches either the key, or an alias-tag with the same name.
            return (
                (
                    self.filter(
                        cand_restriction
                        & type_restriction
                        & (
                            Q(db_key__iexact=ostring)
                            | Q(db_tags__db_key__iexact=ostring)
                            & Q(db_tags__db_tagtype__iexact="alias")
                        )
                    )
                )
                .distinct()
                .order_by("id")
            )
        elif candidates:
            # fuzzy with candidates
            search_candidates = (
                self.filter(cand_restriction & type_restriction).distinct().order_by("id")
            )
        else:
            # fuzzy without supplied candidates - we select our own candidates
            # Narrow cheaply in the database with istartswith before the
            # in-memory partial matching below.
            search_candidates = (
                self.filter(
                    type_restriction
                    & (Q(db_key__istartswith=ostring) | Q(db_tags__db_key__istartswith=ostring))
                )
                .distinct()
                .order_by("id")
            )
        # fuzzy matching
        key_strings = search_candidates.values_list("db_key", flat=True).order_by("id")
        index_matches = string_partial_matching(key_strings, ostring, ret_index=True)
        if index_matches:
            # a match by key
            return [obj for ind, obj in enumerate(search_candidates) if ind in index_matches]
        else:
            # match by alias rather than by key
            search_candidates = search_candidates.filter(
                db_tags__db_tagtype__iexact="alias", db_tags__db_key__icontains=ostring
            ).distinct()
            alias_strings = []
            alias_candidates = []
            # TODO create the alias_strings and alias_candidates lists more efficiently?
            # Parallel lists: alias_strings[i] belongs to alias_candidates[i].
            for candidate in search_candidates:
                for alias in candidate.aliases.all():
                    alias_strings.append(alias)
                    alias_candidates.append(candidate)
            index_matches = string_partial_matching(alias_strings, ostring, ret_index=True)
            if index_matches:
                # it's possible to have multiple matches to the same Object, we must weed those out
                return list({alias_candidates[ind] for ind in index_matches})
            return []
# main search methods and helper functions
    def search_object(
        self,
        searchdata,
        attribute_name=None,
        typeclass=None,
        candidates=None,
        exact=True,
        use_dbref=True,
    ):
        """
        Search as an object globally or in a list of candidates and
        return results. The result is always an Object. Always returns
        a list.

        Args:
            searchdata (str or Object): The entity to match for. This is
                usually a key string but may also be an object itself.
                By default (if no `attribute_name` is set), this will
                search `object.key` and `object.aliases` in order.
                Can also be on the form #dbref, which will (if
                `exact=True`) be matched against primary key.
            attribute_name (str): Use this named Attribute to
                match searchdata against, instead of the defaults. If
                this is the name of a database field (with or without
                the `db_` prefix), that will be matched too.
            typeclass (str or TypeClass): restrict matches to objects
                having this typeclass. This will help speed up global
                searches.
            candidates (list): If supplied, search will
                only be performed among the candidates in this list. A
                common list of candidates is the contents of the
                current location searched.
            exact (bool): Match names/aliases exactly or partially.
                Partial matching matches the beginning of words in the
                names/aliases, using a matching routine to separate
                multiple matches in names with multiple components (so
                "bi sw" will match "Big sword"). Since this is more
                expensive than exact matching, it is recommended to be
                used together with the `candidates` keyword to limit the
                number of possibilities. This value has no meaning if
                searching for attributes/properties.
            use_dbref (bool): If False, bypass direct lookup of a string
                on the form #dbref and treat it like any string.

        Returns:
            matches (list): Matching objects
        """

        def _searcher(searchdata, candidates, typeclass, exact=False):
            """
            Helper method for searching objects. `typeclass` is only used
            for global searching (no candidates)
            """
            if attribute_name:
                # attribute/property search (always exact).
                matches = self.get_objs_with_db_property_value(
                    attribute_name, searchdata, candidates=candidates, typeclasses=typeclass
                )
                if matches:
                    return matches
                return self.get_objs_with_attr_value(
                    attribute_name, searchdata, candidates=candidates, typeclasses=typeclass
                )
            else:
                # normal key/alias search
                return self.get_objs_with_key_or_alias(
                    searchdata, exact=exact, candidates=candidates, typeclasses=typeclass
                )

        # empty string/None never matches; the integer 0 is a valid search term
        if not searchdata and searchdata != 0:
            return []
        if typeclass:
            # typeclass may also be a list; normalize every entry into its
            # "module.Class" dotted-path string form
            typeclasses = make_iter(typeclass)
            for i, typeclass in enumerate(make_iter(typeclasses)):
                if callable(typeclass):
                    typeclasses[i] = "%s.%s" % (typeclass.__module__, typeclass.__name__)
                else:
                    typeclasses[i] = "%s" % typeclass
            typeclass = typeclasses
        if candidates is not None:
            if not candidates:
                # candidates is the empty list. This should mean no matches can ever be acquired.
                return []
            # Convenience check to make sure candidates are really dbobjs
            candidates = [cand for cand in make_iter(candidates) if cand]
            if typeclass:
                # pre-filter candidates by typeclass path
                candidates = [
                    cand for cand in candidates if _GA(cand, "db_typeclass_path") in typeclass
                ]
        # only attempt #dbref resolution for plain, exact, non-attribute searches
        dbref = not attribute_name and exact and use_dbref and self.dbref(searchdata)
        if dbref:
            # Easiest case - dbref matching (always exact)
            dbref_match = self.dbref_search(dbref)
            if dbref_match:
                if not candidates or dbref_match in candidates:
                    return [dbref_match]
                else:
                    return []
        # Search through all possibilities.
        match_number = None
        # always run first check exact - we don't want partial matches
        # if on the form of 1-keyword etc.
        matches = _searcher(searchdata, candidates, typeclass, exact=True)
        if not matches:
            # no matches found - check if we are dealing with N-keyword
            # query - if so, strip it (e.g. "2-box" -> number 2, name "box").
            match = _MULTIMATCH_REGEX.match(str(searchdata))
            match_number = None
            if match:
                # strips the number; store it 0-indexed
                match_number, searchdata = match.group("number"), match.group("name")
                match_number = int(match_number) - 1
            if match_number is not None or not exact:
                # run search again, with the exactness set by call
                matches = _searcher(searchdata, candidates, typeclass, exact=exact)
        # deal with result
        if len(matches) == 1 and match_number is not None and match_number != 0:
            # this indicates trying to get a single match with a match-number
            # targeting some higher-number match (like 2-box when there is only
            # one box in the room). This leads to a no-match.
            matches = []
        elif len(matches) > 1 and match_number is not None:
            # multiple matches, but a number was given to separate them
            if 0 <= match_number < len(matches):
                # limit to one match
                matches = [matches[match_number]]
            else:
                # a number was given outside of range. This means a no-match.
                matches = []
        # return a list (possibly empty)
        return matches
# alias for backwards compatibility
object_search = search_object
search = search_object
#
# ObjectManager Copy method
def copy_object(
self,
original_object,
new_key=None,
new_location=None,
new_home=None,
new_permissions=None,
new_locks=None,
new_aliases=None,
new_destination=None,
):
"""
Create and return a new object as a copy of the original object. All
will be identical to the original except for the arguments given
specifically to this method. Object contents will not be copied.
Args:
original_object (Object): The object to make a copy from.
new_key (str, optional): Name of the copy, if different
from the original.
new_location (Object, optional): Alternate location.
new_home (Object, optional): Change the home location
new_aliases (list, optional): Give alternate object
aliases as a list of strings.
new_destination (Object, optional): Used only by exits.
Returns:
copy (Object or None): The copy of `original_object`,
optionally modified as per the ingoing keyword
arguments. `None` if an error was encountered.
"""
# get all the object's stats
typeclass_path = original_object.typeclass_path
if not new_key:
new_key = original_object.key
if not new_location:
new_location = original_object.location
if not new_home:
new_home = original_object.home
if not new_aliases:
new_aliases = original_object.aliases.all()
if not new_locks:
new_locks = original_object.db_lock_storage
if not new_permissions:
new_permissions = original_object.permissions.all()
if not new_destination:
new_destination = original_object.destination
# create new object
from evennia.utils import create
from evennia.scripts.models import ScriptDB
new_object = create.create_object(
typeclass_path,
key=new_key,
location=new_location,
home=new_home,
permissions=new_permissions,
locks=new_locks,
aliases=new_aliases,
destination=new_destination,
)
if not new_object:
return None
# copy over all attributes from old to new.
attrs = (
(a.key, a.value, a.category, a.lock_storage) for a in original_object.attributes.all()
)
new_object.attributes.batch_add(*attrs)
# copy over all cmdsets, if any
for icmdset, cmdset in enumerate(original_object.cmdset.all()):
if icmdset == 0:
new_object.cmdset.add_default(cmdset)
else:
new_object.cmdset.add(cmdset)
# copy over all scripts, if any
for script in original_object.scripts.all():
ScriptDB.objects.copy_script(script, new_obj=new_object)
# copy over all tags, if any
tags = (
(t.db_key, t.db_category, t.db_data) for t in original_object.tags.all(return_objs=True)
)
new_object.tags.batch_add(*tags)
return new_object
    def clear_all_sessids(self):
        """
        Clear the db_sessid field of all objects having also the
        db_account field set.
        """
        # NOTE(review): the query below only restricts on db_sessid; despite
        # the docstring it does not check db_account - confirm which is intended.
        self.filter(db_sessid__isnull=False).update(db_sessid=None)
class ObjectManager(ObjectDBManager, TypeclassManager):
    """Manager combining ObjectDBManager's search/copy methods with
    TypeclassManager behavior; adds nothing of its own."""

    pass
| |
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import os
import random
import re
import shutil
import libvirt
import neat.common as common
import neat.locals.collector as collector
import neat.db_utils as db_utils
import logging
logging.disable(logging.CRITICAL)
class Collector(TestCase):
    """Unit tests for the local data collector (neat.locals.collector).

    Test inputs are generated by pyqcy's `@qc` decorator and external
    calls (libvirt, the database, config reading) are stubbed out with
    mocktest's MockTransaction. Written for Python 2 (uses `xrange`,
    `long` and list-returning `dict.keys()`).
    """

    @qc(10)
    def start(
        iterations=int_(min=0, max=10),
        time_interval=int_(min=0)
    ):
        # start() must read/validate config, set up logging and hand off
        # to common.start with the configured collection interval
        with MockTransaction:
            state = {'property': 'value'}
            config = {
                'log_directory': 'dir',
                'log_level': 2,
                'local_data_directory': 'data_dir',
                'data_collector_interval': str(time_interval)}
            paths = [collector.DEFAILT_CONFIG_PATH, collector.CONFIG_PATH]
            fields = collector.REQUIRED_FIELDS
            expect(collector).read_and_validate_config(paths, fields). \
                and_return(config).once()
            expect(common).init_logging('dir', 'data-collector.log', 2).once()
            expect(common).start(collector.init_state,
                                 collector.execute,
                                 config,
                                 time_interval).and_return(state).once()
            assert collector.start() == state

    def test_init_state(self):
        with MockTransaction:
            vir_connection = mock('virConnect')
            expect(libvirt).openReadOnly(None). \
                and_return(vir_connection).once()
            physical_cpus = 13
            expect(common).physical_cpu_count(vir_connection). \
                and_return(physical_cpus).once()
            config = {'sql_connection': 'db',
                      'host_cpu_overload_threshold': '0.95',
                      'host_cpu_usable_by_vms': '0.75',
                      'data_collector_data_length': '5'}
            hostname = 'host1'
            mhz = 13540
            ram = 8192
            expect(vir_connection).getHostname().and_return(hostname).once()
            expect(collector).get_host_characteristics(vir_connection). \
                and_return((mhz, ram)).once()
            db = mock('db')
            expect(collector).init_db('db').and_return(db).once()
            expect(db).update_host(hostname,
                                   int(mhz * 0.75),
                                   physical_cpus,
                                   ram).once()
            state = collector.init_state(config)
            assert state['previous_time'] == 0
            assert isinstance(state['previous_cpu_time'], dict)
            assert state['previous_host_cpu_time_total'] == 0.
            assert state['previous_host_cpu_time_busy'] == 0.
            assert state['previous_overload'] == -1
            assert state['vir_connection'] == vir_connection
            assert state['hostname'] == hostname
            # 0.95 (overload threshold) * 0.75 (usable fraction) == 0.7125
            self.assertAlmostEqual(state['host_cpu_overload_threshold'],
                                   0.7125, 3)
            assert state['physical_cpus'] == physical_cpus
            assert state['physical_cpu_mhz'] == mhz
            assert state['physical_core_mhz'] == mhz / physical_cpus
            assert state['db'] == db

    @qc(1)
    def get_previous_vms():
        # reads the fixture directory shipped under tests/resources/vms
        local_data_directory = os.path.join(
            os.path.dirname(__file__), '..', 'resources', 'vms')
        previous_vms = collector.get_previous_vms(local_data_directory)
        assert 'ec452be0-e5d0-11e1-aff1-0800200c9a66' in previous_vms
        assert 'e615c450-e5d0-11e1-aff1-0800200c9a66' in previous_vms
        assert 'f3e142d0-e5d0-11e1-aff1-0800200c9a66' in previous_vms

    @qc
    def get_current_vms(
        ids=dict_(
            keys=int_(min=0, max=1000),
            values=str_(of='abc123-', min_length=36, max_length=36),
            min_length=0, max_length=10
        )
    ):
        with MockTransaction:
            def init_vm(id):
                # each fake domain reports a UUID and a state pair
                vm = mock('vm')
                expect(vm).UUIDString().and_return(ids[id]).once()
                expect(vm).state(0).and_return([id * 13, id]).once()
                return vm
            connection = libvirt.virConnect()
            expect(connection).listDomainsID().and_return(ids.keys()).once()
            if ids:
                expect(connection).lookupByID(any_int) \
                    .and_call(lambda id: init_vm(id))
            expected = dict((v, k * 13) for k, v in ids.items())
            assert collector.get_current_vms(connection) == expected

    @qc
    def get_added_vms(
        x=list_(
            of=str_(of='abc123-', min_length=36, max_length=36),
            min_length=0, max_length=5
        ),
        y=list_(
            of=str_(of='abc123-', min_length=36, max_length=36),
            min_length=0, max_length=5
        )
    ):
        # drop one old VM at random and append the new ones; only the new
        # ones should be reported as added
        previous_vms = list(x)
        if x:
            x.pop(random.randrange(len(x)))
        x.extend(y)
        assert set(collector.get_added_vms(previous_vms, x)) == set(y)

    @qc
    def get_removed_vms(
        x=list_(
            of=str_(of='abc123-', min_length=36, max_length=36),
            min_length=0, max_length=5
        ),
        y=list_(
            of=str_(of='abc123-', min_length=36, max_length=36),
            min_length=0, max_length=5
        )
    ):
        # remove a random subset of the old VMs; exactly that subset must
        # be reported as removed
        prev_vms = list(x)
        removed = []
        if x:
            to_remove = random.randrange(len(x))
            for _ in xrange(to_remove):
                removed.append(x.pop(random.randrange(len(x))))
        x.extend(y)
        assert set(collector.get_removed_vms(prev_vms, x)) == set(removed)

    @qc
    def substract_lists(
        x=list_(of=int_(min=0, max=20), max_length=10),
        y=list_(of=int_(min=0, max=20), max_length=10)
    ):
        assert set(collector.substract_lists(x, y)) == \
            set([item for item in x if item not in y])

    @qc(1)
    def cleanup_local_vm_data():
        # populate a temporary dir with three VM files, then verify that
        # cleanup removes exactly the named files
        local_data_directory = os.path.join(
            os.path.dirname(__file__), '..', 'resources', 'vms')
        local_data_directory_tmp = os.path.join(
            local_data_directory, 'tmp')
        shutil.rmtree(local_data_directory_tmp, True)
        os.mkdir(local_data_directory_tmp)
        vm1 = 'ec452be0-e5d0-11e1-aff1-0800200c9a66'
        vm2 = 'e615c450-e5d0-11e1-aff1-0800200c9a66'
        vm3 = 'f3e142d0-e5d0-11e1-aff1-0800200c9a66'
        shutil.copy(os.path.join(local_data_directory, vm1),
                    local_data_directory_tmp)
        shutil.copy(os.path.join(local_data_directory, vm2),
                    local_data_directory_tmp)
        shutil.copy(os.path.join(local_data_directory, vm3),
                    local_data_directory_tmp)
        assert len(os.listdir(local_data_directory_tmp)) == 3
        collector.cleanup_local_vm_data(local_data_directory_tmp,
                                        [vm1, vm2, vm3])
        assert len(os.listdir(local_data_directory_tmp)) == 0
        os.rmdir(local_data_directory_tmp)

    @qc(1)
    def cleanup_all_local_data():
        # builds tmp/{vms,host} layout, then checks that cleanup empties
        # the vms dir and removes the host file
        local_data_directory = os.path.join(
            os.path.dirname(__file__), '..', 'resources', 'vms')
        local_data_directory_tmp = os.path.join(
            local_data_directory, 'tmp')
        local_data_directory_tmp_vms = os.path.join(
            local_data_directory_tmp, 'vms')
        local_data_directory_tmp_host = os.path.join(
            local_data_directory_tmp, 'host')
        shutil.rmtree(local_data_directory_tmp, True)
        os.mkdir(local_data_directory_tmp)
        os.mkdir(local_data_directory_tmp_vms)
        vm1 = 'ec452be0-e5d0-11e1-aff1-0800200c9a66'
        vm2 = 'e615c450-e5d0-11e1-aff1-0800200c9a66'
        vm3 = 'f3e142d0-e5d0-11e1-aff1-0800200c9a66'
        shutil.copy(os.path.join(local_data_directory, vm1),
                    local_data_directory_tmp_vms)
        shutil.copy(os.path.join(local_data_directory, vm2),
                    local_data_directory_tmp_vms)
        shutil.copy(os.path.join(local_data_directory, vm3),
                    local_data_directory_tmp_vms)
        shutil.copyfile(os.path.join(local_data_directory, vm1),
                        local_data_directory_tmp_host)
        assert len(os.listdir(local_data_directory_tmp)) == 2
        assert len(os.listdir(local_data_directory_tmp_vms)) == 3
        collector.cleanup_all_local_data(local_data_directory_tmp)
        assert len(os.listdir(local_data_directory_tmp)) == 1
        assert len(os.listdir(local_data_directory_tmp_vms)) == 0
        shutil.rmtree(local_data_directory_tmp, True)

    @qc
    def fetch_remote_data(
        x=dict_(
            keys=str_(of='abc123-', min_length=36, max_length=36),
            values=list_(of=int_(min=0, max=3000),
                         min_length=0, max_length=10),
            min_length=0, max_length=3
        ),
        data_length=int_(min=1, max=10)
    ):
        # seed an in-memory DB and verify the last data_length samples of
        # each VM are fetched back
        db = db_utils.init_db('sqlite:///:memory:')
        if x:
            for uuid, data in x.items():
                result = db.vms.insert().execute(uuid=uuid)
                vm_id = result.inserted_primary_key[0]
                for mhz in data:
                    db.vm_resource_usage.insert().execute(
                        vm_id=vm_id,
                        cpu_mhz=mhz)
                x[uuid] = data[-data_length:]
        assert collector.fetch_remote_data(db, data_length, x.keys()) == x

    @qc
    def write_vm_data_locally(
        x=dict_(
            keys=str_(of='abc123-', min_length=36, max_length=36),
            values=list_(of=int_(min=0, max=3000),
                         min_length=0, max_length=10),
            min_length=0, max_length=3
        ),
        data_length=int_(min=0, max=10)
    ):
        path = os.path.join(os.path.dirname(__file__),
                            '..', 'resources', 'vms', 'tmp')
        shutil.rmtree(path, True)
        os.mkdir(path)
        collector.write_vm_data_locally(path, x, data_length)
        files = os.listdir(path)
        result = {}
        for uuid in x.keys():
            file = os.path.join(path, uuid)
            with open(file, 'r') as f:
                result[uuid] = [int(a)
                                for a in f.read().strip().splitlines()]
        shutil.rmtree(path)
        # each VM gets one file holding its last data_length samples
        assert set(files) == set(x.keys())
        for uuid, values in x.items():
            if data_length > 0:
                assert result[uuid] == values[-data_length:]
            else:
                assert result[uuid] == []

    @qc
    def append_vm_data_locally(
        x=dict_(
            keys=str_(of='abc123-', min_length=36, max_length=36),
            values=tuple_(list_(of=int_(min=0, max=3000),
                                min_length=0, max_length=10),
                          int_(min=0, max=3000)),
            min_length=0, max_length=3
        ),
        data_length=int_(min=0, max=10)
    ):
        path = os.path.join(os.path.dirname(__file__),
                            '..', 'resources', 'vms', 'tmp')
        shutil.rmtree(path, True)
        os.mkdir(path)
        original_data = {}
        to_append = {}
        after_appending = {}
        # data[0] is the pre-existing history, data[1] the value to append;
        # the file must keep only the trailing data_length entries
        for uuid, data in x.items():
            original_data[uuid] = data[0]
            to_append[uuid] = data[1]
            if data_length > 0:
                after_appending[uuid] = list(data[0])
                after_appending[uuid].append(data[1])
                after_appending[uuid] = after_appending[uuid][-data_length:]
            else:
                after_appending[uuid] = []
        collector.write_vm_data_locally(path, original_data, data_length)
        collector.append_vm_data_locally(path, to_append, data_length)
        files = os.listdir(path)
        result = {}
        for uuid in x.keys():
            file = os.path.join(path, uuid)
            with open(file, 'r') as f:
                result[uuid] = [int(a)
                                for a in f.read().strip().splitlines()]
        shutil.rmtree(path)
        assert set(files) == set(x.keys())
        for uuid in x.keys():
            assert result[uuid] == after_appending[uuid]

    @qc(10)
    def append_vm_data_remotely(
        vms=dict_(
            keys=str_(of='abc123-', min_length=36, max_length=36),
            values=tuple_(int_(min=1, max=3000),
                          list_(of=int_(min=1, max=3000),
                                min_length=0, max_length=10)),
            min_length=0, max_length=5
        )
    ):
        db = db_utils.init_db('sqlite:///:memory:')
        initial_data = []
        data_to_submit = {}
        final_data = {}
        for uuid, data in vms.items():
            vm_id = db.select_vm_id(uuid)
            data_to_submit[uuid] = data[0]
            final_data[uuid] = list(data[1])
            final_data[uuid].append(data[0])
            for cpu_mhz in data[1]:
                initial_data.append({'vm_id': vm_id,
                                     'cpu_mhz': cpu_mhz})
        if initial_data:
            db.vm_resource_usage.insert().execute(initial_data)
        collector.append_vm_data_remotely(db, data_to_submit)
        for uuid, data in final_data.items():
            assert db.select_cpu_mhz_for_vm(uuid, 11) == data

    @qc
    def append_host_data_locally(
        data=list_(of=int_(min=0, max=3000),
                   min_length=0, max_length=10),
        x=int_(min=0, max=3000),
        data_length=int_(min=0, max=10)
    ):
        path = os.path.join(os.path.dirname(__file__),
                            '..', 'resources', 'host')
        with open(path, 'w') as f:
            f.write('\n'.join([str(x)
                               for x in data]) + '\n')
        collector.append_host_data_locally(path, x, data_length)
        if data_length > 0:
            data.append(x)
            expected = data[-data_length:]
        else:
            expected = []
        with open(path, 'r') as f:
            actual = [int(x)
                      for x in f.read().strip().splitlines()]
        os.remove(path)
        assert actual == expected

    @qc(10)
    def append_host_data_remotely(
        hostname=str_(of='abc123', min_length=5, max_length=10),
        cpu_mhz=int_(min=0, max=3000)
    ):
        db = db_utils.init_db('sqlite:///:memory:')
        db.update_host(hostname, 1, 1, 1)
        collector.append_host_data_remotely(db, hostname, cpu_mhz)
        assert db.select_cpu_mhz_for_host(hostname, 1) == [cpu_mhz]

    @qc
    def get_cpu_mhz(
        cpus=int_(min=1, max=8),
        current_time=float_(min=100, max=1000),
        time_period=float_(min=1, max=100),
        vm_data=dict_(
            keys=str_(of='abc123-', min_length=36, max_length=36),
            values=two(of=int_(min=1, max=100)),
            min_length=0, max_length=10
        ),
        added_vms=dict_(
            keys=str_(of='abc123-', min_length=36, max_length=36),
            values=tuple_(int_(min=1, max=100),
                          list_(of=int_(min=1, max=3000),
                                min_length=0, max_length=10)),
            min_length=0, max_length=5
        )
    ):
        with MockTransaction:
            def mock_get_cpu_time(vir_connection, uuid):
                # existing VMs report previous + delta; added VMs report
                # their initial CPU time
                if uuid in original_vm_data:
                    return original_vm_data[uuid][0] + \
                        original_vm_data[uuid][1]
                else:
                    return added_vms[uuid][0]
            original_vm_data = dict(vm_data)
            previous_time = current_time - time_period
            connection = libvirt.virConnect()
            when(collector).get_cpu_time(connection, any_string). \
                then_call(mock_get_cpu_time)
            previous_cpu_time = {}
            cpu_mhz = {}
            for uuid, data in vm_data.items():
                previous_cpu_time[uuid] = data[0]
            # randomly drop some VMs to simulate removals mid-collection
            if vm_data:
                to_remove = random.randrange(len(vm_data))
                for _ in xrange(to_remove):
                    tmp = random.choice(vm_data.keys())
                    del vm_data[tmp]
            vms = vm_data.keys()
            current_cpu_time = {}
            for uuid in vms:
                current_cpu_time[uuid] = vm_data[uuid][0] + vm_data[uuid][1]
                cpu_mhz[uuid] = collector.calculate_cpu_mhz(
                    cpus, previous_time, current_time,
                    vm_data[uuid][0], vm_data[uuid][0] + vm_data[uuid][1])
            added_vm_data = {}
            if added_vms:
                for uuid, data in added_vms.items():
                    current_cpu_time[uuid] = data[0]
                    added_vm_data[uuid] = data[1]
                    if data[1]:
                        cpu_mhz[uuid] = data[1][-1]
                vms.extend(added_vms.keys())
            result = collector.get_cpu_mhz(
                connection, cpus, previous_cpu_time,
                previous_time, current_time, vms,
                {}, added_vm_data)
            assert result[0] == current_cpu_time
            assert result[1] == cpu_mhz

    @qc(10)
    def get_cpu_time(
        uuid=str_(of='abc123-', min_length=36, max_length=36),
        x=int_(min=0)
    ):
        with MockTransaction:
            connection = libvirt.virConnect()
            domain = mock('domain')
            expect(connection).lookupByUUIDString(uuid). \
                and_return(domain).once()
            expect(domain).getCPUStats(True, 0). \
                and_return([{'cpu_time': x}]).once()
            assert collector.get_cpu_time(connection, uuid) == x

    @qc
    def calculate_cpu_mhz(
        current_time=float_(min=100, max=1000),
        time_period=float_(min=1, max=100),
        current_cpu_time=int_(min=100),
        cpu_time=int_(min=0, max=100),
        mhz=int_(min=1, max=3000)
    ):
        # CPU time is in nanoseconds, hence the 1e9 divisor
        previous_time = current_time - time_period
        previous_cpu_time = current_cpu_time - cpu_time
        assert collector. \
            calculate_cpu_mhz(mhz, previous_time, current_time,
                              previous_cpu_time, current_cpu_time) == \
            int((mhz * cpu_time / (time_period * 1000000000)))

    @qc
    def get_host_cpu_mhz(
        cpu_mhz=int_(min=1, max=1000),
        prev_total=float_(min=100, max=1000),
        prev_busy=float_(min=1, max=100),
        diff_total=float_(min=100, max=1000),
        diff_busy=float_(min=1, max=100)
    ):
        with MockTransaction:
            total = prev_total + diff_total
            busy = prev_busy + diff_busy
            expect(collector).get_host_cpu_time(). \
                and_return((total, busy)).once()
            assert collector.get_host_cpu_mhz(cpu_mhz, prev_total, prev_busy) == \
                (total,
                 busy,
                 int(cpu_mhz * diff_busy / diff_total))

    @qc(1)
    def get_host_cpu_mhz_exception():
        # a negative busy delta with a positive total delta is impossible
        # and must raise ValueError
        cpu_mhz = 1
        total = 1.
        prev_total = 0.
        busy = 1.
        prev_busy = 2.
        with MockTransaction:
            expect(collector).get_host_cpu_time(). \
                and_return((total, busy)).once()
            try:
                collector.get_host_cpu_mhz(cpu_mhz, prev_total, prev_busy)
                assert False
            except ValueError:
                assert True

    @qc(10)
    def get_host_cpu_time(
        x=list_(of=int_(min=1, max=1000), min_length=7, max_length=7)
    ):
        with MockTransaction:
            context = mock('context')
            f = mock('file')
            expect(context).__enter__().and_return(f).once()
            when(context).__exit__.and_return(True)
            expect(collector).open('/proc/stat', 'r').and_return(context).once()
            # fake /proc/stat cpu line: label, 7 counters, trailing fields
            expect(f).readline().and_return(
                '1 ' + ' '.join([str(v) for v in x]) + ' 2 3').once()
            assert collector.get_host_cpu_time() == (float(sum(x)),
                                                     float(sum(x[0:3])))

    @qc(10)
    def get_host_characteristics(
        ram=int_(min=1, max=4000),
        cores=int_(min=1, max=8),
        mhz=int_(min=1, max=3000)
    ):
        with MockTransaction:
            connection = libvirt.virConnect()
            expect(connection).getInfo().and_return(
                ['x86_64', ram, cores, mhz, 1, 1, 4, 2]).once()
            assert collector.get_host_characteristics(connection) == \
                (cores * mhz, ram)

    @qc(10)
    def get_host_characteristics_long(
        ram=int_(min=1, max=4000),
        cores=int_(min=1, max=8),
        mhz=int_(min=1, max=3000)
    ):
        # same as above but with RAM reported as a Python 2 `long`
        with MockTransaction:
            connection = libvirt.virConnect()
            expect(connection).getInfo().and_return(
                ['x86_64', long(ram), cores, mhz, 1, 1, 4, 2]).once()
            assert collector.get_host_characteristics(connection) == \
                (cores * mhz, long(ram))

    @qc(1)
    def log_host_overload():
        # overload state transitions should be recorded only when the
        # state actually changes (previous_overload of -1 means unknown)
        db = db_utils.init_db('sqlite:///:memory:')
        with MockTransaction:
            expect(db).insert_host_overload('host', 1).once()
            assert collector.log_host_overload(db, 0.9, 'host', -1, 3000, 2800)
        with MockTransaction:
            expect(db).insert_host_overload('host', 0).once()
            assert not collector.log_host_overload(db, 0.9, 'host', -1, 3000, 2600)
        with MockTransaction:
            expect(db).insert_host_overload('host', 1).once()
            assert collector.log_host_overload(db, 0.9, 'host', 0, 3000, 2800)
        with MockTransaction:
            expect(db).insert_host_overload('host', 0).once()
            assert not collector.log_host_overload(db, 0.9, 'host', 1, 3000, 2600)
        with MockTransaction:
            expect(db).insert_host_overload.never()
            assert collector.log_host_overload(db, 0.9, 'host', 1, 3000, 2800)
        with MockTransaction:
            expect(db).insert_host_overload.never()
            assert not collector.log_host_overload(db, 0.9, 'host', 0, 3000, 2600)
def deque_maxlen(coll):
    """Return the `maxlen` of a bounded deque by parsing its repr().

    Strips everything up to the last '=' in the repr (e.g.
    "deque([1, 2], maxlen=3)") and then the trailing ')', leaving the
    integer bound.

    Note: `collections.deque` has exposed a `.maxlen` attribute since
    Python 2.7/3.1; the repr-parsing is kept only to preserve the
    original call contract (including raising ValueError for an
    unbounded deque, whose repr has no "maxlen=" part).

    Args:
        coll: A deque created with a `maxlen` bound.

    Returns:
        int: The deque's maxlen.

    Raises:
        ValueError: If the repr contains no parsable "maxlen=<int>".
    """
    # raw strings fix the invalid "\)" escape (a DeprecationWarning on
    # Python 3); repr() replaces the non-idiomatic coll.__repr__() call
    return int(re.sub(r"\)$", "", re.sub(r".*=", "", repr(coll))))
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class for generating captions from an image-to-text model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import heapq
import math
import numpy as np
class Caption(object):
  """A complete or partial caption produced during beam search."""

  def __init__(self, sentence, state, logprob, score, metadata=None):
    """Creates a Caption.

    Args:
      sentence: List of word ids making up the caption so far.
      state: Model state after emitting the previous word.
      logprob: Log-probability of the caption.
      score: Ranking score of the caption.
      metadata: Optional per-word metadata; if not None, a list of strings
        with the same length as `sentence`.
    """
    self.sentence = sentence
    self.state = state
    self.logprob = logprob
    self.score = score
    self.metadata = metadata

  def __cmp__(self, other):
    """Three-way comparison by score (used by Python 2 only)."""
    assert isinstance(other, Caption)
    if self.score < other.score:
      return -1
    if self.score == other.score:
      return 0
    return 1

  # Python 3 ignores __cmp__ and uses the rich comparisons below.
  def __lt__(self, other):
    assert isinstance(other, Caption)
    return self.score < other.score

  def __eq__(self, other):
    assert isinstance(other, Caption)
    return self.score == other.score
class TopN(object):
  """Keeps the n largest elements of an incrementally provided set."""

  def __init__(self, n):
    self._n = n
    self._data = []  # min-heap holding at most n elements

  def size(self):
    assert self._data is not None
    return len(self._data)

  def push(self, x):
    """Inserts x, evicting the current smallest element when at capacity."""
    assert self._data is not None
    insert = (heapq.heappush if len(self._data) < self._n
              else heapq.heappushpop)
    insert(self._data, x)

  def extract(self, sort=False):
    """Removes and returns all elements; destructive.

    After extract() the only valid call is reset().

    Args:
      sort: If True, return the elements in descending sorted order.

    Returns:
      A list of the (at most n) largest elements pushed so far.
    """
    assert self._data is not None
    extracted, self._data = self._data, None
    if sort:
      extracted.sort(reverse=True)
    return extracted

  def reset(self):
    """Makes the container usable again after an extract()."""
    self._data = []
class CaptionGenerator(object):
  """Class to generate captions from an image-to-text model."""

  def __init__(self,
               model,
               vocab,
               beam_size=3,
               max_caption_length=20,
               length_normalization_factor=0.0):
    """Initializes the generator.

    Args:
      model: Object encapsulating a trained image-to-text model. Must have
        methods feed_image() and inference_step(). For example, an instance of
        InferenceWrapperBase.
      vocab: A Vocabulary object.
      beam_size: Beam size to use when generating captions.
      max_caption_length: The maximum caption length before stopping the search.
      length_normalization_factor: If != 0, a number x such that captions are
        scored by logprob/length^x, rather than logprob. This changes the
        relative scores of captions depending on their lengths. For example, if
        x > 0 then longer captions will be favored.
    """
    self.vocab = vocab
    self.model = model
    self.beam_size = beam_size
    self.max_caption_length = max_caption_length
    self.length_normalization_factor = length_normalization_factor

  def beam_search(self, sess, encoded_image):
    """Runs beam search caption generation on a single image.

    Args:
      sess: TensorFlow Session object.
      encoded_image: An encoded image string.

    Returns:
      A list of Caption sorted by descending score.
    """
    # Feed in the image to get the initial state.
    initial_state = self.model.feed_image(sess, encoded_image)
    initial_beam = Caption(
        sentence=[self.vocab.start_id],
        state=initial_state[0],
        logprob=0.0,
        score=0.0,
        metadata=[""])
    partial_captions = TopN(self.beam_size)
    partial_captions.push(initial_beam)
    complete_captions = TopN(self.beam_size)

    # Run beam search.
    for _ in range(self.max_caption_length - 1):
      partial_captions_list = partial_captions.extract()
      partial_captions.reset()
      input_feed = np.array([c.sentence[-1] for c in partial_captions_list])
      state_feed = np.array([c.state for c in partial_captions_list])

      softmax, new_states, metadata = self.model.inference_step(sess,
                                                                input_feed,
                                                                state_feed)

      for i, partial_caption in enumerate(partial_captions_list):
        word_probabilities = softmax[i]
        state = new_states[i]
        # For this partial caption, get the beam_size most probable next words.
        # np.argsort sorts ascending, so the *last* self.beam_size indexes are
        # the most likely words; [::-1] then orders them highest-first.
        # BUG FIX: the slice was [:-self.beam_size], which selected every word
        # EXCEPT the top beam_size, contradicting the intent described above.
        most_likely_words = np.argsort(word_probabilities)[-self.beam_size:][::-1]
        for w in most_likely_words:
          p = word_probabilities[w]
          if p < 1e-12:
            continue  # Avoid log(0).
          sentence = partial_caption.sentence + [w]
          logprob = partial_caption.logprob + math.log(p)
          score = logprob
          if metadata:
            metadata_list = partial_caption.metadata + [metadata[i]]
          else:
            metadata_list = None
          if w == self.vocab.end_id:
            # Reached the end token: optionally length-normalize the score
            # and move the caption to the completed set.
            if self.length_normalization_factor > 0:
              score /= len(sentence)**self.length_normalization_factor
            beam = Caption(sentence, state, logprob, score, metadata_list)
            complete_captions.push(beam)
          else:
            beam = Caption(sentence, state, logprob, score, metadata_list)
            partial_captions.push(beam)
      if partial_captions.size() == 0:
        # We have run out of partial candidates; happens when beam_size = 1.
        break

    # If we have no complete captions then fall back to the partial captions.
    # But never output a mixture of complete and partial captions because a
    # partial caption could have a higher score than all the complete captions.
    if not complete_captions.size():
      complete_captions = partial_captions
    return complete_captions.extract(sort=True)
| |
import json
import time
import sys
import boto3
import urllib
import os.path
import botocore.exceptions
from .resources import Resource
from .exceptions import MissingDependencyError, OfflineContextError
import redleader.util as util
class Cluster(object):
    def __init__(self,
                 cluster_class,
                 context,
                 pretty_names=True,
                 auto_add_resources=True
                 ):
        """
        Args:
            cluster_class (str): Name prefix for this cluster; used as the
                CloudFormation stack name and to namespace resource
                identifiers (see _mod_identifier).
            context: Context object that supplies AWS clients via
                get_client().
            pretty_names (bool): Stored flag - presumably controls readable
                resource naming; not used in the code visible here. TODO
                confirm.
            auto_add_resources (bool): When True, validate() automatically
                adds missing dependencies instead of raising.
        """
        self._cluster_class = cluster_class
        self._context = context
        self._resources = {}  # maps resource id -> resource object
        self._pretty_names = pretty_names
        self._auto_add_resources = auto_add_resources
    def add_resource(self, resource):
        """Register `resource` with this cluster, keyed by its id.

        Sub-resources are registered first (recursively) so that they are
        present before their parent.
        """
        for sub_resource in resource.generate_sub_resources():
            self.add_resource(sub_resource)
        self._resources[resource.get_id()] = resource
def validate(self):
for resource_id in list(self._resources.keys()):
resource = self._resources[resource_id]
for dependency in resource.get_dependencies():
x = dependency.get_id()
if x not in self._resources:
print("Dependency %s missing from cluster." % resource.get_id())
if(self._auto_add_resources):
print("\tAutomatically adding resource to cluster.")
self.add_resource(dependency)
else:
print(x)
print(self._resources.keys())
raise MissingDependencyError(
source_resource= resource.get_id(),
missing_resource= dependency.get_id()
)
def _mod_identifier(self, ident):
"""
Add the cluster class onto the cloud formation identifier
"""
return self._cluster_class + ident
    def _cluster_mod_identifiers(self, tmpl, replaceMap=None):
        """
        Modify all cloud formation identifiers so that they're unique to this
        cluster class.

        Recursively walks `tmpl`: strings get every known resource id
        replaced with its cluster-prefixed form; dicts and lists are
        rewritten in place; any other type is returned unchanged. On the
        outermost call `replaceMap` is built once from the registered
        resources and then threaded through the recursion.
        """
        if replaceMap is None:
            # build the id -> prefixed-id map once, at the top of the recursion
            replaceMap = {}
            for k in self._resources:
                replaceMap[k] = self._mod_identifier(k)
        if isinstance(tmpl, str):
            tmpl = util.multireplace(tmpl, replaceMap)
        elif isinstance(tmpl, dict):
            for k in tmpl:
                tmpl[k] = self._cluster_mod_identifiers(tmpl[k], replaceMap)
        elif isinstance(tmpl, list):
            for idx in range(len(tmpl)):
                tmpl[idx] = self._cluster_mod_identifiers(tmpl[idx], replaceMap)
        return tmpl
def cloud_formation_template(self):
self.validate()
Resource.reset_multiplicity()
templates = {}
for resource_id in self._resources:
resource = self._resources[resource_id]
template = resource.cloud_formation_template()
if template is not None:
templates[self._mod_identifier(resource_id)] = \
self._cluster_mod_identifiers(template)
with open("/tmp/redleader_cloudformation.json", 'w') as f:
f.write(json.dumps(templates, indent=4, sort_keys=True))
return {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": templates
}
def estimate_template_cost(self):
template = self.cloud_formation_template()
client = self._context.get_client('cloudformation')
return client.estimate_template_cost(TemplateBody=json.dumps(template))['Url']
def deploy(self):
client = self._context.get_client('cloudformation')
return client.create_stack(
StackName=self._cluster_class,
TemplateBody=json.dumps(self.cloud_formation_template()),
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']
)
def blocking_deploy(self, verbose=False):
self.deploy()
if verbose:
print("Cluster creation in progress")
i = 0
while self.deployment_status() == "CREATE_IN_PROGRESS":
i += 1
if verbose:
util.print_progress(i)
time.sleep(5)
if verbose:
print("Cluster successfully created")
return self.deployment_status()
def update(self):
client = self._context.get_client('cloudformation')
return client.update_stack(
StackName=self._cluster_class,
TemplateBody=json.dumps(self.cloud_formation_template()),
Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']
)
def blocking_update(self, verbose=False):
self.update()
if verbose:
print("Cluster update in progress")
i = 0
while self.deployment_status() == "UPDATE_IN_PROGRESS":
i += 1
if verbose:
util.print_progress(i)
time.sleep(5)
if verbose:
print("Cluster update finished with status %s" % self.deployment_status())
return self.deployment_status()
def describe_stack(self):
client = self._context.get_client('cloudformation')
return client.describe_stacks(
StackName=self._cluster_class
)
def describe_resources(self):
client = self._context.get_client('cloudformation')
return client.describe_stack_resources(
StackName=self._cluster_class
)
def describe_resource(self, resource):
client = self._context.get_client('cloudformation')
return client.describe_stack_resource(
StackName=self._cluster_class,
LogicalResourceId=self._mod_identifier(resource.get_id())
)
def deployment_status(self):
response = self.describe_stack()
# Possible statuses:
# 'StackStatus': 'CREATE_IN_PROGRESS'|'CREATE_FAILED'|'CREATE_COMPLETE'|'ROLLBACK_IN_PROGRESS'|'ROLLBACK_FAILED'|'ROLLBACK_COMPLETE'|'DELETE_IN_PROGRESS'|'DELETE_FAILED'|'DELETE_COMPLETE'|'UPDATE_IN_PROGRESS'|'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS'|'UPDATE_COMPLETE'|'UPDATE_ROLLBACK_IN_PROGRESS'|'UPDATE_ROLLBACK_FAILED'|'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS'|'UPDATE_ROLLBACK_COMPLETE'|'REVIEW_IN_PROGRESS',
return response['Stacks'][0]['StackStatus']
def delete(self):
client = self._context.get_client('cloudformation')
return client.delete_stack(
StackName=self._cluster_class
)
def blocking_delete(self, verbose=False):
self.delete()
try:
i = 0
if verbose:
print("Cluster deletion in progress")
while self.deployment_status() == "DELETE_IN_PROGRESS":
i += 1
if verbose:
util.print_progress(i)
time.sleep(5)
if verbose:
print("Cluster successfully deleted")
return self.deployment_status()
except botocore.exceptions.ClientError:
if verbose:
print("Stack fully deleted, could not obtain deployment status")
return None
def cluster_exists(self):
"""
Find resources for this cluster that have already deployed
"""
try:
status = self.deployment_status()
return True
except Exception as e:
print("Cluster may not exist. Encountered error %s" % e)
return False
def cloud_formation_deploy(self):
"""
TODO
"""
raise NotImplementedError
class Context(object):
    """Base context shared by online and offline RedLeader contexts.

    Provides a lazily-downloaded English word list used for generating
    human readable ("pretty") resource names.
    """
    def __init__(self, **kwargs):
        # Word list cache; populated on first get_dict() call.
        self._dict = None
    def get_dict(self):
        """ Returns an english dictionary. Useful for pretty hashing"""
        if self._dict is not None:
            return self._dict
        # Use the platform temp directory rather than a hard-coded /tmp so
        # this also works on Windows.
        import tempfile
        dict_path = os.path.join(tempfile.gettempdir(), "redleader_dict.txt")
        if os.path.isfile(dict_path):
            with open(dict_path, 'r') as f:
                self._dict = f.read().split("\n")
        else:
            print("Downloading fresh redleader dictionary")
            url = "https://svnweb.freebsd.org/csrg/share/dict/words?view=co&content-type=text%2Fplain&revision=61569"
            # `hasattr(urllib, "request")` is unreliable on Python 3: a bare
            # `import urllib` does not import the `urllib.request` submodule,
            # so the attribute may be absent even though it exists. Import it
            # explicitly and fall back to the Python 2 API.
            try:
                from urllib.request import urlopen as openfun  # Python 3.x
            except ImportError:
                openfun = urllib.urlopen  # Python 2.x
            response = openfun(url)
            dict_text = response.read().decode('utf-8')
            # Cache the word list on disk for subsequent runs.
            with open(dict_path, 'w') as f:
                f.write(dict_text)
            self._dict = dict_text.split("\n")
        return self._dict
class OfflineContext(Context):
    """A context for working with clusters without AWS connectivity.

    Session and client operations are unavailable and raise
    ``OfflineContextError``; identity and region queries return fixed
    placeholder values.
    """
    def __init__(self, **kwargs):
        super().__init__()
    def pretty_names(self):
        # Pretty names rely on the downloaded word list; disabled offline.
        return False
    def get_session(self):
        raise OfflineContextError(action="get_session")
    def get_client(self, service):
        raise OfflineContextError(action="get_client")
    def get_account_id(self):
        # Placeholder account id used when no real AWS identity exists.
        return "offline_context_account_id"
    def get_region(self):
        return "us-west-1"
class AWSContext(Context):
    """
    AWS Context for RedLeader, managing AWS sessions and clients.
    """
    def __init__(self,
                 aws_profile=None,
                 aws_access_key_id=None,
                 aws_secret_access_key=None,
                 aws_region="us-west-1",
                 pretty_names=True
    ):
        """
        :param aws_profile: Name of a local AWS credentials profile.
        :param aws_access_key_id: Explicit access key (currently unused by
            session creation; kept for interface compatibility).
        :param aws_secret_access_key: Explicit secret key (currently unused).
        :param aws_region: Region for the boto3 session.
        :param pretty_names: Whether resources should use human readable names.
        """
        super(AWSContext, self).__init__()
        self._aws_profile = aws_profile
        self._aws_access_key_id = aws_access_key_id
        self._aws_secret_access_key = aws_secret_access_key
        self._aws_region = aws_region
        self._pretty_names = pretty_names
        # Cache of boto3 clients for the default region, keyed by service name.
        self._clients = {}
        self._account_id = None
        try:
            print("Creating Redleader AWS Session with profile %s" % self._aws_profile)
            self._session = boto3.Session(profile_name=self._aws_profile,
                                          region_name=self._aws_region)
        except botocore.exceptions.NoCredentialsError:
            # Fall back to the default credential chain (env vars, IAM role).
            self._session = boto3.Session(region_name=self._aws_region)
    def get_session(self):
        """Return the underlying boto3 session."""
        return self._session
    def get_region(self):
        """Return the configured AWS region."""
        return self._aws_region
    def get_account_id(self):
        """Return (and cache) the AWS account id for the current credentials."""
        if self._account_id is None:
            self._account_id = self.get_client('sts').get_caller_identity().get('Account')
        return self._account_id
    def pretty_names(self):
        return self._pretty_names
    def get_client(self, client_type, region_name=None):
        """Return a boto3 client for ``client_type``.

        Clients for the session's default region are cached and reused.
        A fresh client is always created for explicit ``region_name``
        requests, which are not cached.
        """
        # BUG FIX: an unconditional `return` previously made the cache below
        # unreachable, so every call created a brand new client.
        if region_name is not None:
            # Always return a new client for custom region requests.
            # TODO: Cache clients by region
            return self._session.client(client_type, region_name=region_name)
        if client_type not in self._clients:
            self._clients[client_type] = self._session.client(client_type)
        return self._clients[client_type]
| |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import array
import ctypes
import datetime
import pickle
import sys
import tempfile
import unittest
from pyflink.pyflink_gateway_server import on_windows
from pyflink.serializers import BatchedSerializer, PickleSerializer
from pyflink.java_gateway import get_gateway
from pyflink.table.types import (_infer_schema_from_data, _infer_type,
_array_signed_int_typecode_ctype_mappings,
_array_unsigned_int_typecode_ctype_mappings,
_array_type_mappings, _merge_type,
_create_type_verifier, UserDefinedType, DataTypes, Row, RowField,
RowType, ArrayType, BigIntType, VarCharType, MapType, DataType,
_to_java_type, _from_java_type, ZonedTimestampType,
LocalZonedTimestampType)
from pyflink.testing.test_case_utils import PyFlinkTestCase
class ExamplePointUDT(UserDefinedType):
    """
    User-defined type (UDT) for ExamplePoint.
    """
    @classmethod
    def sql_type(cls):
        # A point is stored as an array of two non-nullable doubles.
        return DataTypes.ARRAY(DataTypes.DOUBLE(False))
    @classmethod
    def module(cls):
        return 'pyflink.table.tests.test_types'
    @classmethod
    def java_udt(cls):
        return 'org.apache.flink.table.types.python.ExamplePointUserDefinedType'
    def serialize(self, obj):
        # Flatten the point into its [x, y] coordinate pair.
        return [obj.x, obj.y]
    def deserialize(self, datum):
        # Rebuild the point from the stored [x, y] pair.
        x, y = datum[0], datum[1]
        return ExamplePoint(x, y)
class ExamplePoint:
    """
    An example class to demonstrate UDT in Java, and Python.
    """
    # Associates this class with its user-defined type.
    __UDT__ = ExamplePointUDT()
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __repr__(self):
        return "ExamplePoint(%s,%s)" % (self.x, self.y)
    def __str__(self):
        return "(%s,%s)" % (self.x, self.y)
    def __eq__(self, other):
        # Equal only to instances of the same class with matching coordinates.
        if not isinstance(other, self.__class__):
            return False
        return other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
    """
    User-defined type (UDT) for ExamplePoint.
    """
    @classmethod
    def sql_type(cls):
        # Same storage layout as ExamplePointUDT: two non-nullable doubles.
        return DataTypes.ARRAY(DataTypes.DOUBLE(False))
    @classmethod
    def module(cls):
        # No dedicated module: this UDT exists only on the Python side.
        return '__main__'
    def serialize(self, obj):
        return [obj.x, obj.y]
    def deserialize(self, datum):
        first, second = datum[0], datum[1]
        return PythonOnlyPoint(first, second)
class PythonOnlyPoint(ExamplePoint):
    """
    An example class to demonstrate UDT in only Python
    """
    # Overrides the parent's UDT with the Python-only variant (no Java UDT).
    __UDT__ = PythonOnlyUDT()
class UTCOffsetTimezone(datetime.tzinfo):
    """
    Specifies timezone in UTC offset
    """
    def __init__(self, offset=0):
        # Fixed whole-hour offset from UTC; reported by both utcoffset()
        # and dst() below.
        self.OFFSET = datetime.timedelta(hours=offset)
    def dst(self, dt):
        return self.OFFSET
    def utcoffset(self, dt):
        return self.OFFSET
class TypesTests(PyFlinkTestCase):
    """Tests for Python-to-Flink type inference (_infer_type,
    _infer_schema_from_data), type merging (_merge_type), value
    verification (_create_type_verifier) and DataTypes constructors."""
    def test_infer_schema(self):
        """Infer a schema from a single row covering every supported Python value kind."""
        from decimal import Decimal
        class A(object):
            def __init__(self):
                self.a = 1
        from collections import namedtuple
        Point = namedtuple('Point', 'x y')
        # One value per supported input kind; `expected` below lists the
        # repr of the inferred data type for each, in the same order.
        data = [
            True,
            1,
            "a",
            u"a",
            datetime.date(1970, 1, 1),
            datetime.time(0, 0, 0),
            datetime.datetime(1970, 1, 1, 0, 0),
            1.0,
            array.array("d", [1]),
            [1],
            (1,),
            Point(1.0, 5.0),
            {"a": 1},
            bytearray(1),
            Decimal(1),
            Row(a=1),
            Row("a")(1),
            A(),
        ]
        expected = [
            'BooleanType(true)',
            'BigIntType(true)',
            'VarCharType(2147483647, true)',
            'VarCharType(2147483647, true)',
            'DateType(true)',
            'TimeType(0, true)',
            'LocalZonedTimestampType(6, true)',
            'DoubleType(true)',
            "ArrayType(DoubleType(false), true)",
            "ArrayType(BigIntType(true), true)",
            'RowType(RowField(_1, BigIntType(true), ...))',
            'RowType(RowField(x, DoubleType(true), ...),RowField(y, DoubleType(true), ...))',
            'MapType(VarCharType(2147483647, false), BigIntType(true), true)',
            'VarBinaryType(2147483647, true)',
            'DecimalType(38, 18, true)',
            'RowType(RowField(a, BigIntType(true), ...))',
            'RowType(RowField(a, BigIntType(true), ...))',
            'RowType(RowField(a, BigIntType(true), ...))',
        ]
        schema = _infer_schema_from_data([data])
        self.assertEqual(expected, [repr(f.data_type) for f in schema.fields])
    def test_infer_schema_nulltype(self):
        """Empty/None values in the first row are refined using later rows."""
        elements = [Row(c1=[], c2={}, c3=None),
                    Row(c1=[Row(a=1, b='s')], c2={"key": Row(c=1.0, d="2")}, c3="")]
        schema = _infer_schema_from_data(elements)
        self.assertTrue(isinstance(schema, RowType))
        self.assertEqual(3, len(schema.fields))
        # first column is array
        self.assertTrue(isinstance(schema.fields[0].data_type, ArrayType))
        # element type of first column is struct
        self.assertTrue(isinstance(schema.fields[0].data_type.element_type, RowType))
        self.assertTrue(isinstance(schema.fields[0].data_type.element_type.fields[0].data_type,
                                   BigIntType))
        self.assertTrue(isinstance(schema.fields[0].data_type.element_type.fields[1].data_type,
                                   VarCharType))
        # second column is map
        self.assertTrue(isinstance(schema.fields[1].data_type, MapType))
        self.assertTrue(isinstance(schema.fields[1].data_type.key_type, VarCharType))
        self.assertTrue(isinstance(schema.fields[1].data_type.value_type, RowType))
        # third column is varchar
        self.assertTrue(isinstance(schema.fields[2].data_type, VarCharType))
    def test_infer_schema_not_enough_names(self):
        """Fields without a supplied name get auto-generated names (_2, ...)."""
        schema = _infer_schema_from_data([["a", "b"]], ["col1"])
        # NOTE(review): assertTrue here receives the expected list as its
        # `msg` argument, so it only checks that schema.names is truthy;
        # likely assertEqual was intended — confirm before changing.
        self.assertTrue(schema.names, ['col1', '_2'])
    def test_infer_schema_fails(self):
        """Conflicting types across rows for the same column raise TypeError."""
        with self.assertRaises(TypeError):
            _infer_schema_from_data([[1, 1], ["x", 1]], names=["a", "b"])
    def test_infer_nested_schema(self):
        """Schema inference recurses into nested arrays and maps inside Rows."""
        NestedRow = Row("f1", "f2")
        data1 = [NestedRow([1, 2], {"row1": 1.0}), NestedRow([2, 3], {"row2": 2.0})]
        schema1 = _infer_schema_from_data(data1)
        expected1 = [
            'ArrayType(BigIntType(true), true)',
            'MapType(VarCharType(2147483647, false), DoubleType(true), true)'
        ]
        self.assertEqual(expected1, [repr(f.data_type) for f in schema1.fields])
        data2 = [NestedRow([[1, 2], [2, 3]], [1, 2]), NestedRow([[2, 3], [3, 4]], [2, 3])]
        schema2 = _infer_schema_from_data(data2)
        expected2 = [
            'ArrayType(ArrayType(BigIntType(true), true), true)',
            'ArrayType(BigIntType(true), true)'
        ]
        self.assertEqual(expected2, [repr(f.data_type) for f in schema2.fields])
    def test_convert_row_to_dict(self):
        """Row.as_dict() exposes nested Rows inside lists and maps."""
        row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
        self.assertEqual(1, row.as_dict()['l'][0].a)
        self.assertEqual(1.0, row.as_dict()['d']['key'].c)
    def test_udt(self):
        """Inference and verification recognize both Java-backed and Python-only UDTs."""
        p = ExamplePoint(1.0, 2.0)
        self.assertEqual(_infer_type(p), ExamplePointUDT())
        _create_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
        self.assertRaises(ValueError, lambda: _create_type_verifier(ExamplePointUDT())([1.0, 2.0]))
        p = PythonOnlyPoint(1.0, 2.0)
        self.assertEqual(_infer_type(p), PythonOnlyUDT())
        _create_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
        self.assertRaises(ValueError, lambda: _create_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
    def test_nested_udt_in_df(self):
        """UDTs are inferred correctly when nested inside arrays and maps."""
        expected_schema = DataTypes.ROW() \
            .add("_1", DataTypes.BIGINT()).add("_2", DataTypes.ARRAY(PythonOnlyUDT()))
        data = (1, [PythonOnlyPoint(float(1), float(2))])
        self.assertEqual(expected_schema, _infer_type(data))
        expected_schema = DataTypes.ROW().add("_1", DataTypes.BIGINT()).add(
            "_2", DataTypes.MAP(DataTypes.BIGINT(False), PythonOnlyUDT()))
        p = (1, {1: PythonOnlyPoint(1, float(2))})
        self.assertEqual(expected_schema, _infer_type(p))
    def test_struct_type(self):
        """ROW construction via add() and field lists is equivalent; indexing and errors work."""
        row1 = DataTypes.ROW().add("f1", DataTypes.STRING(nullable=True)) \
            .add("f2", DataTypes.STRING(nullable=True))
        row2 = DataTypes.ROW([DataTypes.FIELD("f1", DataTypes.STRING(nullable=True)),
                              DataTypes.FIELD("f2", DataTypes.STRING(nullable=True), None)])
        self.assertEqual(row1.field_names(), row2.names)
        self.assertEqual(row1, row2)
        row1 = DataTypes.ROW().add("f1", DataTypes.STRING(nullable=True)) \
            .add("f2", DataTypes.STRING(nullable=True))
        row2 = DataTypes.ROW([DataTypes.FIELD("f1", DataTypes.STRING(nullable=True))])
        self.assertNotEqual(row1.field_names(), row2.names)
        self.assertNotEqual(row1, row2)
        row1 = (DataTypes.ROW().add(DataTypes.FIELD("f1", DataTypes.STRING(nullable=True)))
                .add("f2", DataTypes.STRING(nullable=True)))
        row2 = DataTypes.ROW([DataTypes.FIELD("f1", DataTypes.STRING(nullable=True)),
                              DataTypes.FIELD("f2", DataTypes.STRING(nullable=True))])
        self.assertEqual(row1.field_names(), row2.names)
        self.assertEqual(row1, row2)
        row1 = (DataTypes.ROW().add(DataTypes.FIELD("f1", DataTypes.STRING(nullable=True)))
                .add("f2", DataTypes.STRING(nullable=True)))
        row2 = DataTypes.ROW([DataTypes.FIELD("f1", DataTypes.STRING(nullable=True))])
        self.assertNotEqual(row1.field_names(), row2.names)
        self.assertNotEqual(row1, row2)
        # Catch exception raised during improper construction
        self.assertRaises(ValueError, lambda: DataTypes.ROW().add("name"))
        row1 = DataTypes.ROW().add("f1", DataTypes.STRING(nullable=True)) \
            .add("f2", DataTypes.STRING(nullable=True))
        for field in row1:
            self.assertIsInstance(field, RowField)
        row1 = DataTypes.ROW().add("f1", DataTypes.STRING(nullable=True)) \
            .add("f2", DataTypes.STRING(nullable=True))
        self.assertEqual(len(row1), 2)
        row1 = DataTypes.ROW().add("f1", DataTypes.STRING(nullable=True)) \
            .add("f2", DataTypes.STRING(nullable=True))
        self.assertIs(row1["f1"], row1.fields[0])
        self.assertIs(row1[0], row1.fields[0])
        self.assertEqual(row1[0:1], DataTypes.ROW(row1.fields[0:1]))
        self.assertRaises(KeyError, lambda: row1["f9"])
        self.assertRaises(IndexError, lambda: row1[9])
        self.assertRaises(TypeError, lambda: row1[9.9])
    def test_infer_bigint_type(self):
        """All Python ints infer to BIGINT regardless of magnitude."""
        longrow = [Row(f1='a', f2=100000000000000)]
        schema = _infer_schema_from_data(longrow)
        self.assertEqual(DataTypes.BIGINT(), schema.fields[1].data_type)
        self.assertEqual(DataTypes.BIGINT(), _infer_type(1))
        self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 10))
        self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 20))
        self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 31 - 1))
        self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 31))
        self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 61))
        self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 71))
    def test_merge_type(self):
        """_merge_type unifies identical/nullable types and raises TypeError on conflicts,
        recursing into ARRAY, MAP and ROW element types."""
        self.assertEqual(_merge_type(DataTypes.BIGINT(), DataTypes.NULL()), DataTypes.BIGINT())
        self.assertEqual(_merge_type(DataTypes.NULL(), DataTypes.BIGINT()), DataTypes.BIGINT())
        self.assertEqual(_merge_type(DataTypes.BIGINT(), DataTypes.BIGINT()), DataTypes.BIGINT())
        self.assertEqual(_merge_type(
            DataTypes.ARRAY(DataTypes.BIGINT()),
            DataTypes.ARRAY(DataTypes.BIGINT())
        ), DataTypes.ARRAY(DataTypes.BIGINT()))
        with self.assertRaises(TypeError):
            _merge_type(DataTypes.ARRAY(DataTypes.BIGINT()), DataTypes.ARRAY(DataTypes.DOUBLE()))
        self.assertEqual(_merge_type(
            DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT()),
            DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())
        ), DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT()))
        with self.assertRaises(TypeError):
            _merge_type(
                DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT()),
                DataTypes.MAP(DataTypes.DOUBLE(), DataTypes.BIGINT()))
        with self.assertRaises(TypeError):
            _merge_type(
                DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT()),
                DataTypes.MAP(DataTypes.STRING(), DataTypes.DOUBLE()))
        self.assertEqual(_merge_type(
            DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.BIGINT()),
                           DataTypes.FIELD('f2', DataTypes.STRING())]),
            DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.BIGINT()),
                           DataTypes.FIELD('f2', DataTypes.STRING())])
        ), DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.BIGINT()),
                          DataTypes.FIELD('f2', DataTypes.STRING())]))
        with self.assertRaises(TypeError):
            _merge_type(
                DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.BIGINT()),
                               DataTypes.FIELD('f2', DataTypes.STRING())]),
                DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.DOUBLE()),
                               DataTypes.FIELD('f2', DataTypes.STRING())]))
        self.assertEqual(_merge_type(
            DataTypes.ROW([DataTypes.FIELD(
                'f1', DataTypes.ROW([DataTypes.FIELD('f2', DataTypes.BIGINT())]))]),
            DataTypes.ROW([DataTypes.FIELD(
                'f1', DataTypes.ROW([DataTypes.FIELD('f2', DataTypes.BIGINT())]))])
        ), DataTypes.ROW([DataTypes.FIELD(
            'f1', DataTypes.ROW([DataTypes.FIELD('f2', DataTypes.BIGINT())]))]))
        with self.assertRaises(TypeError):
            _merge_type(
                DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ROW(
                    [DataTypes.FIELD('f2', DataTypes.BIGINT())]))]),
                DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ROW(
                    [DataTypes.FIELD('f2', DataTypes.STRING())]))]))
        self.assertEqual(_merge_type(
            DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(DataTypes.BIGINT())),
                           DataTypes.FIELD('f2', DataTypes.STRING())]),
            DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(DataTypes.BIGINT())),
                           DataTypes.FIELD('f2', DataTypes.STRING())])
        ), DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(DataTypes.BIGINT())),
                          DataTypes.FIELD('f2', DataTypes.STRING())]))
        with self.assertRaises(TypeError):
            _merge_type(
                DataTypes.ROW([
                    DataTypes.FIELD('f1', DataTypes.ARRAY(DataTypes.BIGINT())),
                    DataTypes.FIELD('f2', DataTypes.STRING())]),
                DataTypes.ROW([
                    DataTypes.FIELD('f1', DataTypes.ARRAY(DataTypes.DOUBLE())),
                    DataTypes.FIELD('f2', DataTypes.STRING())]))
        self.assertEqual(_merge_type(
            DataTypes.ROW([
                DataTypes.FIELD('f1', DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())),
                DataTypes.FIELD('f2', DataTypes.STRING())]),
            DataTypes.ROW([
                DataTypes.FIELD('f1', DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())),
                DataTypes.FIELD('f2', DataTypes.STRING())])
        ), DataTypes.ROW([
            DataTypes.FIELD('f1', DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())),
            DataTypes.FIELD('f2', DataTypes.STRING())]))
        with self.assertRaises(TypeError):
            _merge_type(
                DataTypes.ROW([
                    DataTypes.FIELD('f1', DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())),
                    DataTypes.FIELD('f2', DataTypes.STRING())]),
                DataTypes.ROW([
                    DataTypes.FIELD('f1', DataTypes.MAP(DataTypes.STRING(), DataTypes.DOUBLE())),
                    DataTypes.FIELD('f2', DataTypes.STRING())]))
        self.assertEqual(_merge_type(
            DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(
                DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())))]),
            DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(
                DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())))])
        ), DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(
            DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())))]))
        with self.assertRaises(TypeError):
            _merge_type(
                DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(
                    DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())))]),
                DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(
                    DataTypes.MAP(DataTypes.DOUBLE(), DataTypes.BIGINT())))])
            )
    def test_array_types(self):
        """Python array typecodes map onto SQL types large enough to hold them."""
        # This test need to make sure that the Scala type selected is at least
        # as large as the python's types. This is necessary because python's
        # array types depend on C implementation on the machine. Therefore there
        # is no machine independent correspondence between python's array types
        # and Scala types.
        # See: https://docs.python.org/2/library/array.html
        def assert_collect_success(typecode, value, element_type):
            self.assertEqual(element_type,
                             str(_infer_type(array.array(typecode, [value])).element_type))
        # supported string types
        #
        # String types in python's array are "u" for Py_UNICODE and "c" for char.
        # "u" will be removed in python 4, and "c" is not supported in python 3.
        supported_string_types = []
        if sys.version_info[0] < 4:
            supported_string_types += ['u']
            # test unicode
            assert_collect_success('u', u'a', 'CHAR')
        # supported float and double
        #
        # Test max, min, and precision for float and double, assuming IEEE 754
        # floating-point format.
        supported_fractional_types = ['f', 'd']
        assert_collect_success('f', ctypes.c_float(1e+38).value, 'FLOAT')
        assert_collect_success('f', ctypes.c_float(1e-38).value, 'FLOAT')
        assert_collect_success('f', ctypes.c_float(1.123456).value, 'FLOAT')
        assert_collect_success('d', sys.float_info.max, 'DOUBLE')
        assert_collect_success('d', sys.float_info.min, 'DOUBLE')
        assert_collect_success('d', sys.float_info.epsilon, 'DOUBLE')
        def get_int_data_type(size):
            if size <= 8:
                return "TINYINT"
            if size <= 16:
                return "SMALLINT"
            if size <= 32:
                return "INT"
            if size <= 64:
                return "BIGINT"
        # supported signed int types
        #
        # The size of C types changes with implementation, we need to make sure
        # that there is no overflow error on the platform running this test.
        supported_signed_int_types = list(
            set(_array_signed_int_typecode_ctype_mappings.keys()).intersection(
                set(_array_type_mappings.keys())))
        for t in supported_signed_int_types:
            ctype = _array_signed_int_typecode_ctype_mappings[t]
            max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
            assert_collect_success(t, max_val - 1, get_int_data_type(ctypes.sizeof(ctype) * 8))
            assert_collect_success(t, -max_val, get_int_data_type(ctypes.sizeof(ctype) * 8))
        # supported unsigned int types
        #
        # JVM does not have unsigned types. We need to be very careful to make
        # sure that there is no overflow error.
        supported_unsigned_int_types = list(
            set(_array_unsigned_int_typecode_ctype_mappings.keys()).intersection(
                set(_array_type_mappings.keys())))
        for t in supported_unsigned_int_types:
            ctype = _array_unsigned_int_typecode_ctype_mappings[t]
            max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
            assert_collect_success(t, max_val, get_int_data_type(ctypes.sizeof(ctype) * 8 + 1))
        # all supported types
        #
        # Make sure the types tested above:
        # 1. are all supported types
        # 2. cover all supported types
        supported_types = (supported_string_types +
                           supported_fractional_types +
                           supported_signed_int_types +
                           supported_unsigned_int_types)
        self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))
        # all unsupported types
        #
        # Keys in _array_type_mappings is a complete list of all supported types,
        # and types not in _array_type_mappings are considered unsupported.
        all_types = set(array.typecodes)
        unsupported_types = all_types - set(supported_types)
        # test unsupported types
        for t in unsupported_types:
            with self.assertRaises(TypeError):
                _infer_schema_from_data([Row(myarray=array.array(t))])
    def test_data_type_eq(self):
        """Data types compare equal across a pickle round trip."""
        lt = DataTypes.BIGINT()
        lt2 = pickle.loads(pickle.dumps(DataTypes.BIGINT()))
        self.assertEqual(lt, lt2)
    def test_decimal_type(self):
        """DECIMAL types with different scales are distinct objects and unequal."""
        t1 = DataTypes.DECIMAL(10, 0)
        t2 = DataTypes.DECIMAL(10, 2)
        self.assertTrue(t2 is not t1)
        self.assertNotEqual(t1, t2)
    def test_datetype_equal_zero(self):
        """SQL date 0 corresponds to the epoch date 1970-01-01."""
        dt = DataTypes.DATE()
        self.assertEqual(dt.from_sql_type(0), datetime.date(1970, 1, 1))
    @unittest.skipIf(on_windows(), "Windows x64 system only support the datetime not larger "
                                   "than time.ctime(32536799999), so this test can't run "
                                   "under Windows platform")
    def test_timestamp_microsecond(self):
        """TIMESTAMP conversion preserves microsecond precision."""
        tst = DataTypes.TIMESTAMP()
        self.assertEqual(tst.to_sql_type(datetime.datetime.max) % 1000000, 999999)
    @unittest.skipIf(on_windows(), "Windows x64 system only support the datetime not larger "
                                   "than time.ctime(32536799999), so this test can't run "
                                   "under Windows platform")
    def test_local_zoned_timestamp_type(self):
        """TIMESTAMP_WITH_LOCAL_TIME_ZONE conversion respects the session timezone."""
        lztst = DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE()
        last_abbreviation = DataTypes.TIMESTAMP_LTZ()
        # TIMESTAMP_LTZ() is an abbreviation for the same type.
        self.assertEqual(lztst, last_abbreviation)
        ts = datetime.datetime(1970, 1, 1, 0, 0, 0, 0000)
        self.assertEqual(0, lztst.to_sql_type(ts))
        import pytz
        # suppose the timezone of the data is +9:00
        timezone = pytz.timezone("Asia/Tokyo")
        orig_epoch = LocalZonedTimestampType.EPOCH_ORDINAL
        try:
            # suppose the local timezone is +8:00
            LocalZonedTimestampType.EPOCH_ORDINAL = 28800000000
            ts_tokyo = timezone.localize(ts)
            self.assertEqual(-3600000000, lztst.to_sql_type(ts_tokyo))
        finally:
            # Restore the class-level constant so other tests are unaffected.
            LocalZonedTimestampType.EPOCH_ORDINAL = orig_epoch
        if sys.version_info >= (3, 6):
            ts2 = lztst.from_sql_type(0)
            self.assertEqual(ts.astimezone(), ts2.astimezone())
    def test_zoned_timestamp_type(self):
        """TIMESTAMP WITH TIME ZONE round-trips as (epoch, utc-offset-seconds)."""
        ztst = ZonedTimestampType()
        ts = datetime.datetime(1970, 1, 1, 0, 0, 0, 0000, tzinfo=UTCOffsetTimezone(1))
        self.assertEqual((0, 3600), ztst.to_sql_type(ts))
        ts2 = ztst.from_sql_type((0, 3600))
        self.assertEqual(ts, ts2)
    def test_day_time_inteval_type(self):
        """INTERVAL DAY TO SECOND round-trips timedeltas as microseconds."""
        # NOTE(review): "inteval" in the method name is a typo for "interval";
        # left unchanged here since this edit only adds documentation.
        ymt = DataTypes.INTERVAL(DataTypes.DAY(), DataTypes.SECOND())
        td = datetime.timedelta(days=1, seconds=10)
        self.assertEqual(86410000000, ymt.to_sql_type(td))
        td2 = ymt.from_sql_type(86410000000)
        self.assertEqual(td, td2)
    def test_empty_row(self):
        """A Row constructed with no fields has length zero."""
        row = Row()
        self.assertEqual(len(row), 0)
    def test_invalid_create_row(self):
        """Instantiating a Row template with too many values raises ValueError."""
        row_class = Row("c1", "c2")
        self.assertRaises(ValueError, lambda: row_class(1, 2, 3))
    def test_nullable(self):
        """nullable() returns a nullable copy of a non-nullable type."""
        t = DataType(nullable=False)
        self.assertEqual(t._nullable, False)
        t_nullable = t.nullable()
        self.assertEqual(t_nullable._nullable, True)
    def test_not_null(self):
        """not_null() returns a non-nullable copy of a nullable type."""
        t = DataType(nullable=True)
        self.assertEqual(t._nullable, True)
        t_notnull = t.not_null()
        self.assertEqual(t_notnull._nullable, False)
class DataTypeVerificationTests(PyFlinkTestCase):
    """Tests for _create_type_verifier: acceptance of valid values,
    rejection (TypeError/ValueError) of invalid ones, and nullability."""
    def test_verify_type_exception_msg(self):
        """Verifier errors are raised for null non-nullable values and bad nested fields."""
        self.assertRaises(
            ValueError,
            lambda: _create_type_verifier(
                DataTypes.STRING(nullable=False), name="test_name")(None))
        schema = DataTypes.ROW(
            [DataTypes.FIELD('a', DataTypes.ROW([DataTypes.FIELD('b', DataTypes.INT())]))])
        self.assertRaises(
            TypeError,
            lambda: _create_type_verifier(schema)([["data"]]))
    def test_verify_type_ok_nullable(self):
        """None passes verification for any nullable type."""
        obj = None
        types = [DataTypes.INT(), DataTypes.FLOAT(), DataTypes.STRING(), DataTypes.ROW([])]
        for data_type in types:
            try:
                _create_type_verifier(data_type)(obj)
            except (TypeError, ValueError):
                self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
    def test_verify_type_not_nullable(self):
        """Table-driven check of accepted and rejected (value, type) pairs."""
        import array
        import datetime
        import decimal
        schema = DataTypes.ROW([
            DataTypes.FIELD('s', DataTypes.STRING(nullable=False)),
            DataTypes.FIELD('i', DataTypes.INT(True))])
        class MyObj:
            # Generic attribute bag used to exercise object-based Row verification.
            def __init__(self, **kwargs):
                for k, v in kwargs.items():
                    setattr(self, k, v)
        # obj, data_type
        success_spec = [
            # String
            ("", DataTypes.STRING()),
            (u"", DataTypes.STRING()),
            # UDT
            (ExamplePoint(1.0, 2.0), ExamplePointUDT()),
            # Boolean
            (True, DataTypes.BOOLEAN()),
            # TinyInt
            (-(2 ** 7), DataTypes.TINYINT()),
            (2 ** 7 - 1, DataTypes.TINYINT()),
            # SmallInt
            (-(2 ** 15), DataTypes.SMALLINT()),
            (2 ** 15 - 1, DataTypes.SMALLINT()),
            # Int
            (-(2 ** 31), DataTypes.INT()),
            (2 ** 31 - 1, DataTypes.INT()),
            # BigInt
            (2 ** 64, DataTypes.BIGINT()),
            # Float & Double
            (1.0, DataTypes.FLOAT()),
            (1.0, DataTypes.DOUBLE()),
            # Decimal
            (decimal.Decimal("1.0"), DataTypes.DECIMAL(10, 0)),
            # Binary
            (bytearray([1]), DataTypes.BINARY(1)),
            # Date/Time/Timestamp
            (datetime.date(2000, 1, 2), DataTypes.DATE()),
            (datetime.datetime(2000, 1, 2, 3, 4), DataTypes.DATE()),
            (datetime.time(1, 1, 2), DataTypes.TIME()),
            (datetime.datetime(2000, 1, 2, 3, 4), DataTypes.TIMESTAMP()),
            # Array
            ([], DataTypes.ARRAY(DataTypes.INT())),
            (["1", None], DataTypes.ARRAY(DataTypes.STRING(nullable=True))),
            ([1, 2], DataTypes.ARRAY(DataTypes.INT())),
            ((1, 2), DataTypes.ARRAY(DataTypes.INT())),
            (array.array('h', [1, 2]), DataTypes.ARRAY(DataTypes.INT())),
            # Map
            ({}, DataTypes.MAP(DataTypes.STRING(), DataTypes.INT())),
            ({"a": 1}, DataTypes.MAP(DataTypes.STRING(), DataTypes.INT())),
            ({"a": None}, DataTypes.MAP(DataTypes.STRING(nullable=False), DataTypes.INT(True))),
            # Struct
            ({"s": "a", "i": 1}, schema),
            ({"s": "a", "i": None}, schema),
            ({"s": "a"}, schema),
            ({"s": "a", "f": 1.0}, schema),
            (Row(s="a", i=1), schema),
            (Row(s="a", i=None), schema),
            (Row(s="a", i=1, f=1.0), schema),
            (["a", 1], schema),
            (["a", None], schema),
            (("a", 1), schema),
            (MyObj(s="a", i=1), schema),
            (MyObj(s="a", i=None), schema),
            (MyObj(s="a"), schema),
        ]
        # obj, data_type, exception class
        failure_spec = [
            # Char/VarChar (match anything but None)
            (None, DataTypes.VARCHAR(1), ValueError),
            (None, DataTypes.CHAR(1), ValueError),
            # VarChar (length exceeds maximum length)
            ("abc", DataTypes.VARCHAR(1), ValueError),
            # Char (length exceeds length)
            ("abc", DataTypes.CHAR(1), ValueError),
            # UDT
            (ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
            # Boolean
            (1, DataTypes.BOOLEAN(), TypeError),
            ("True", DataTypes.BOOLEAN(), TypeError),
            ([1], DataTypes.BOOLEAN(), TypeError),
            # TinyInt
            (-(2 ** 7) - 1, DataTypes.TINYINT(), ValueError),
            (2 ** 7, DataTypes.TINYINT(), ValueError),
            ("1", DataTypes.TINYINT(), TypeError),
            (1.0, DataTypes.TINYINT(), TypeError),
            # SmallInt
            (-(2 ** 15) - 1, DataTypes.SMALLINT(), ValueError),
            (2 ** 15, DataTypes.SMALLINT(), ValueError),
            # Int
            (-(2 ** 31) - 1, DataTypes.INT(), ValueError),
            (2 ** 31, DataTypes.INT(), ValueError),
            # Float & Double
            (1, DataTypes.FLOAT(), TypeError),
            (1, DataTypes.DOUBLE(), TypeError),
            # Decimal
            (1.0, DataTypes.DECIMAL(10, 0), TypeError),
            (1, DataTypes.DECIMAL(10, 0), TypeError),
            ("1.0", DataTypes.DECIMAL(10, 0), TypeError),
            # Binary
            (1, DataTypes.BINARY(1), TypeError),
            # VarBinary (length exceeds maximum length)
            (bytearray([1, 2]), DataTypes.VARBINARY(1), ValueError),
            # Char (length exceeds length)
            (bytearray([1, 2]), DataTypes.BINARY(1), ValueError),
            # Date/Time/Timestamp
            ("2000-01-02", DataTypes.DATE(), TypeError),
            ("10:01:02", DataTypes.TIME(), TypeError),
            (946811040, DataTypes.TIMESTAMP(), TypeError),
            # Array
            (["1", None], DataTypes.ARRAY(DataTypes.VARCHAR(1, nullable=False)), ValueError),
            ([1, "2"], DataTypes.ARRAY(DataTypes.INT()), TypeError),
            # Map
            ({"a": 1}, DataTypes.MAP(DataTypes.INT(), DataTypes.INT()), TypeError),
            ({"a": "1"}, DataTypes.MAP(DataTypes.VARCHAR(1), DataTypes.INT()), TypeError),
            ({"a": None}, DataTypes.MAP(DataTypes.VARCHAR(1), DataTypes.INT(False)), ValueError),
            # Struct
            ({"s": "a", "i": "1"}, schema, TypeError),
            (Row(s="a"), schema, ValueError),  # Row can't have missing field
            (Row(s="a", i="1"), schema, TypeError),
            (["a"], schema, ValueError),
            (["a", "1"], schema, TypeError),
            (MyObj(s="a", i="1"), schema, TypeError),
            (MyObj(s=None, i="1"), schema, ValueError),
        ]
        # Check success cases
        for obj, data_type in success_spec:
            try:
                _create_type_verifier(data_type.not_null())(obj)
            except (TypeError, ValueError):
                self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
        # Check failure cases
        for obj, data_type, exp in failure_spec:
            msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
            with self.assertRaises(exp, msg=msg):
                _create_type_verifier(data_type.not_null())(obj)
class DataTypeConvertTests(PyFlinkTestCase):
    """Round-trip and one-way tests for Python <-> Java data type conversion."""

    def _assert_round_trip(self, python_types):
        """Convert the given Python types to Java and back, expecting the
        originals to be reproduced exactly."""
        java_types = [_to_java_type(t) for t in python_types]
        round_tripped = [_from_java_type(t) for t in java_types]
        self.assertEqual(python_types, round_tripped)

    def test_basic_type(self):
        self._assert_round_trip([DataTypes.STRING(),
                                 DataTypes.BOOLEAN(),
                                 DataTypes.BYTES(),
                                 DataTypes.TINYINT(),
                                 DataTypes.SMALLINT(),
                                 DataTypes.INT(),
                                 DataTypes.BIGINT(),
                                 DataTypes.FLOAT(),
                                 DataTypes.DOUBLE(),
                                 DataTypes.DATE(),
                                 DataTypes.TIME(),
                                 DataTypes.TIMESTAMP(3)])

    def test_atomic_type_with_data_type_with_parameters(self):
        gateway = get_gateway()
        JDataTypes = gateway.jvm.DataTypes
        # Parameterized NOT NULL Java types must map onto non-nullable
        # Python counterparts with the same parameters.
        not_null_java_types = [JDataTypes.TIME(3).notNull(),
                               JDataTypes.TIMESTAMP(3).notNull(),
                               JDataTypes.VARBINARY(100).notNull(),
                               JDataTypes.BINARY(2).notNull(),
                               JDataTypes.VARCHAR(30).notNull(),
                               JDataTypes.CHAR(50).notNull(),
                               JDataTypes.DECIMAL(20, 10).notNull()]
        self.assertEqual([_from_java_type(t) for t in not_null_java_types],
                         [DataTypes.TIME(3, False),
                          DataTypes.TIMESTAMP(3).not_null(),
                          DataTypes.VARBINARY(100, False),
                          DataTypes.BINARY(2, False),
                          DataTypes.VARCHAR(30, False),
                          DataTypes.CHAR(50, False),
                          DataTypes.DECIMAL(20, 10, False)])
        # Legacy type tests
        Types = gateway.jvm.org.apache.flink.table.api.Types
        BlinkBigDecimalTypeInfo = \
            gateway.jvm.org.apache.flink.table.runtime.typeutils.BigDecimalTypeInfo
        legacy_java_types = [Types.STRING(),
                             Types.DECIMAL(),
                             BlinkBigDecimalTypeInfo(12, 5)]
        self.assertEqual([_from_java_type(t) for t in legacy_java_types],
                         [DataTypes.VARCHAR(2147483647),
                          DataTypes.DECIMAL(38, 18),
                          DataTypes.DECIMAL(12, 5)])

    def test_array_type(self):
        # nullable/not_null flag will be lost during the conversion.
        self._assert_round_trip([
            DataTypes.ARRAY(DataTypes.BIGINT()),
            DataTypes.ARRAY(DataTypes.BIGINT()),
            DataTypes.ARRAY(DataTypes.STRING()),
            DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.BIGINT())),
            DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.STRING()))])

    def test_multiset_type(self):
        self._assert_round_trip([
            DataTypes.MULTISET(DataTypes.BIGINT()),
            DataTypes.MULTISET(DataTypes.STRING()),
            DataTypes.MULTISET(DataTypes.MULTISET(DataTypes.BIGINT())),
            DataTypes.MULTISET(DataTypes.MULTISET(DataTypes.STRING()))])

    def test_map_type(self):
        self._assert_round_trip([
            DataTypes.MAP(DataTypes.BIGINT(), DataTypes.BIGINT()),
            DataTypes.MAP(DataTypes.STRING(), DataTypes.STRING()),
            DataTypes.MAP(DataTypes.STRING(),
                          DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())),
            DataTypes.MAP(DataTypes.STRING(),
                          DataTypes.MAP(DataTypes.STRING(), DataTypes.STRING()))])

    def test_row_type(self):
        self._assert_round_trip(
            [DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
                            DataTypes.FIELD("b",
                                            DataTypes.ROW(
                                                [DataTypes.FIELD("c",
                                                                 DataTypes.STRING())]))])])

    def test_list_view_type(self):
        self._assert_round_trip([DataTypes.LIST_VIEW(DataTypes.BIGINT()),
                                 DataTypes.LIST_VIEW(DataTypes.STRING())])
class DataSerializerTests(PyFlinkTestCase):
    """Checks that data serialized from Python can be read on the Java side."""

    def _roundtrip_through_java(self, serializer, data, batched):
        """Serialize `data` to a temp file with `serializer`, then read it
        back through the Java bridge and return the rows as tuples."""
        dump_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
        try:
            serializer.serialize(data, dump_file)
        finally:
            dump_file.close()
        gateway = get_gateway()
        raw = gateway.jvm.PythonBridgeUtils.readPythonObjects(
            dump_file.name, batched)
        return [tuple(int_pair) for int_pair in list(raw)]

    def test_java_pickle_deserializer(self):
        data = [(1, 2), (3, 4), (5, 6), (7, 8)]
        result = self._roundtrip_through_java(PickleSerializer(), data, False)
        self.assertEqual(result, [(1, 2), (3, 4), (5, 6), (7, 8)])

    def test_java_batch_deserializer(self):
        data = [(1, 2), (3, 4), (5, 6), (7, 8)]
        result = self._roundtrip_through_java(
            BatchedSerializer(PickleSerializer(), 2), data, True)
        self.assertEqual(result, [(1, 2), (3, 4), (5, 6), (7, 8)])
if __name__ == "__main__":
    # Emit XML test reports when xmlrunner is available (e.g. on CI);
    # otherwise fall back to the default text runner.
    try:
        import xmlrunner
    except ImportError:
        testRunner = None
    else:
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
    unittest.main(testRunner=testRunner, verbosity=2)
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import glob
import heapq
import logging
import os
import os.path
import random
import re
import shutil
import subprocess as subprocess
import sys
import tempfile
import time
from catapult_base import cloud_storage
from telemetry.internal.util import binary_manager
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.internal.backends import browser_backend
from telemetry.internal.backends.chrome import chrome_browser_backend
from telemetry.internal.util import path
def ParseCrashpadDateTime(date_time_str):
  """Parse a crashpad 'YYYY-MM-DD HH:MM:SS [zone]' string into a datetime.

  Python strptime does not support time zone parsing, so when a third
  whitespace-separated token (the zone) is present it is discarded before
  parsing.
  """
  tokens = date_time_str.split()
  normalized = ' '.join(tokens[:2]) if len(tokens) >= 3 else date_time_str
  return datetime.datetime.strptime(normalized, '%Y-%m-%d %H:%M:%S')
class DesktopBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
  """The backend for controlling a locally-executed browser instance, on Linux,
  Mac or Windows.
  """
  def __init__(self, desktop_platform_backend, browser_options, executable,
               flash_path, is_content_shell, browser_directory,
               output_profile_path, extensions_to_load):
    super(DesktopBrowserBackend, self).__init__(
        desktop_platform_backend,
        supports_tab_control=not is_content_shell,
        supports_extensions=not is_content_shell,
        browser_options=browser_options,
        output_profile_path=output_profile_path,
        extensions_to_load=extensions_to_load)
    # Initialize fields so that an explosion during init doesn't break in Close.
    self._proc = None
    self._tmp_profile_dir = None
    self._tmp_output_file = None
    self._executable = executable
    if not self._executable:
      raise Exception('Cannot create browser, no executable found!')
    assert not flash_path or os.path.exists(flash_path)
    self._flash_path = flash_path
    self._is_content_shell = is_content_shell
    if extensions_to_load and is_content_shell:
      raise browser_backend.ExtensionsNotSupportedException(
          'Content shell does not support extensions.')
    self._browser_directory = browser_directory
    self._port = None
    self._tmp_minidump_dir = tempfile.mkdtemp()
    self._crash_service = None
    if self.browser_options.enable_logging:
      self._log_file_path = os.path.join(tempfile.mkdtemp(), 'chrome.log')
    else:
      self._log_file_path = None
    self._SetupProfile()

  @property
  def log_file_path(self):
    return self._log_file_path

  @property
  def supports_uploading_logs(self):
    return (self.browser_options.logs_cloud_bucket and
            self.browser_options.logs_cloud_remote_path and
            os.path.isfile(self.log_file_path))

  def _SetupProfile(self):
    """Create (or copy) the profile directory the browser will be run with."""
    if not self.browser_options.dont_override_profile:
      if self._output_profile_path:
        self._tmp_profile_dir = self._output_profile_path
      else:
        self._tmp_profile_dir = tempfile.mkdtemp()
      profile_dir = self.browser_options.profile_dir
      if profile_dir:
        assert self._tmp_profile_dir != profile_dir
        if self._is_content_shell:
          logging.critical('Profiles cannot be used with content shell')
          sys.exit(1)
        logging.info("Using profile directory:'%s'." % profile_dir)
        shutil.rmtree(self._tmp_profile_dir)
        shutil.copytree(profile_dir, self._tmp_profile_dir)
    if self.browser_options.use_devtools_active_port:
      # No matter whether we're using an existing profile directory or
      # creating a new one, always delete the well-known file containing
      # the active DevTools port number.
      port_file = self._GetDevToolsActivePortPath()
      if os.path.isfile(port_file):
        try:
          os.remove(port_file)
        except Exception as e:
          logging.critical('Unable to remove DevToolsActivePort file: %s' % e)
          sys.exit(1)

  def _GetDevToolsActivePortPath(self):
    """Path of the well-known file Chrome writes its DevTools port into."""
    return os.path.join(self.profile_directory, 'DevToolsActivePort')

  def _GetCrashServicePipeName(self):
    # Ensure a unique pipe name by using the name of the temp dir.
    pipe = r'\\.\pipe\%s_service' % os.path.basename(self._tmp_minidump_dir)
    return pipe

  def _StartCrashService(self):
    """Launch crash_service.exe (Windows only).

    Returns the subprocess.Popen handle, or None when not on Windows or the
    binary cannot be located.
    """
    os_name = self.browser.platform.GetOSName()
    if os_name != 'win':
      return None
    arch_name = self.browser.platform.GetArchName()
    command = binary_manager.FetchPath('crash_service', arch_name, os_name)
    if not command:
      logging.warning('crash_service.exe not found for %s %s',
                      arch_name, os_name)
      return None
    if not os.path.exists(command):
      # Distinguish this failure mode from FetchPath returning nothing.
      logging.warning('crash_service.exe missing at %s for %s %s',
                      command, arch_name, os_name)
      return None
    try:
      crash_service = subprocess.Popen([
          command,
          '--no-window',
          '--dumps-dir=%s' % self._tmp_minidump_dir,
          '--pipe-name=%s' % self._GetCrashServicePipeName()])
    except Exception:
      # Bug fixes: the logged flags now match the ones actually passed above
      # (--dumps-dir/--pipe-name), and both format arguments are handed to
      # logging lazily (the old code applied % to only one of them, which
      # raised TypeError while logging).
      logging.error(
          'Failed to run %s --no-window --dumps-dir=%s --pipe-name=%s',
          command, self._tmp_minidump_dir, self._GetCrashServicePipeName())
      logging.error('Running on platform: %s and arch: %s.',
                    os_name, arch_name)
      raise
    return crash_service

  def _GetCdbPath(self):
    """Locate cdb.exe among the known Windows debugger install locations."""
    possible_paths = (
        'Debugging Tools For Windows',
        'Debugging Tools For Windows (x86)',
        'Debugging Tools For Windows (x64)',
        os.path.join('Windows Kits', '8.0', 'Debuggers', 'x86'),
        os.path.join('Windows Kits', '8.0', 'Debuggers', 'x64'),
        os.path.join('win_toolchain', 'vs2013_files', 'win8sdk', 'Debuggers',
                     'x86'),
        os.path.join('win_toolchain', 'vs2013_files', 'win8sdk', 'Debuggers',
                     'x64'),
    )
    for possible_path in possible_paths:
      app_path = os.path.join(possible_path, 'cdb.exe')
      app_path = path.FindInstalledWindowsApplication(app_path)
      if app_path:
        return app_path
    return None

  def HasBrowserFinishedLaunching(self):
    # In addition to the functional check performed by the base class, quickly
    # check if the browser process is still alive.
    if not self.IsBrowserRunning():
      raise exceptions.ProcessGoneException(
          "Return code: %d" % self._proc.returncode)
    if self.browser_options.use_devtools_active_port:
      # The Telemetry user selected the new code path to start DevTools on
      # an ephemeral port. Wait for the well-known file containing the port
      # number to exist.
      port_file = self._GetDevToolsActivePortPath()
      if not os.path.isfile(port_file):
        # File isn't ready yet. Return false. Will retry.
        return False
      # Attempt to avoid reading the file until it's populated.
      got_port = False
      try:
        if os.stat(port_file).st_size > 0:
          with open(port_file) as f:
            port_string = f.read()
            self._port = int(port_string)
            logging.info('Discovered ephemeral port %s' % self._port)
            got_port = True
      except Exception:
        # Both stat and open can throw exceptions.
        pass
      if not got_port:
        # File isn't ready yet. Return false. Will retry.
        return False
    return super(DesktopBrowserBackend, self).HasBrowserFinishedLaunching()

  def GetBrowserStartupArgs(self):
    args = super(DesktopBrowserBackend, self).GetBrowserStartupArgs()
    if self.browser_options.use_devtools_active_port:
      self._port = 0
    else:
      self._port = util.GetUnreservedAvailableLocalPort()
    logging.info('Requested remote debugging port: %d' % self._port)
    args.append('--remote-debugging-port=%i' % self._port)
    args.append('--enable-crash-reporter-for-testing')
    if not self._is_content_shell:
      args.append('--window-size=1280,1024')
      if self._flash_path:
        args.append('--ppapi-flash-path=%s' % self._flash_path)
      if not self.browser_options.dont_override_profile:
        args.append('--user-data-dir=%s' % self._tmp_profile_dir)
    return args

  def Start(self):
    """Launch the browser process and wait for it to come up."""
    assert not self._proc, 'Must call Close() before Start()'
    args = [self._executable]
    args.extend(self.GetBrowserStartupArgs())
    if self.browser_options.startup_url:
      args.append(self.browser_options.startup_url)
    env = os.environ.copy()
    env['CHROME_HEADLESS'] = '1'  # Don't upload minidumps.
    env['BREAKPAD_DUMP_LOCATION'] = self._tmp_minidump_dir
    env['CHROME_BREAKPAD_PIPE_NAME'] = self._GetCrashServicePipeName()
    if self.browser_options.enable_logging:
      sys.stderr.write(
          'Chrome log file will be saved in %s\n' % self.log_file_path)
      env['CHROME_LOG_FILE'] = self.log_file_path
    self._crash_service = self._StartCrashService()
    logging.info('Starting Chrome %s', args)
    if not self.browser_options.show_stdout:
      self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
      self._proc = subprocess.Popen(
          args, stdout=self._tmp_output_file, stderr=subprocess.STDOUT, env=env)
    else:
      self._proc = subprocess.Popen(args, env=env)
    try:
      self._WaitForBrowserToComeUp()
      # browser is foregrounded by default on Windows and Linux, but not Mac.
      if self.browser.platform.GetOSName() == 'mac':
        subprocess.Popen([
            'osascript', '-e', ('tell application "%s" to activate' %
                                self._executable)])
      self._InitDevtoolsClientBackend()
      if self._supports_extensions:
        self._WaitForExtensionsToLoad()
    except:
      # Bare except is deliberate: clean up the half-started browser on ANY
      # failure (including KeyboardInterrupt/SystemExit), then re-raise.
      self.Close()
      raise

  @property
  def pid(self):
    if self._proc:
      return self._proc.pid
    return None

  @property
  def browser_directory(self):
    return self._browser_directory

  @property
  def profile_directory(self):
    return self._tmp_profile_dir

  def IsBrowserRunning(self):
    # poll() is None exactly while the child process is still alive.
    # (Fixed PEP 8 violation: compare to None with `is`, not `==`.)
    return self._proc is not None and self._proc.poll() is None

  def GetStandardOutput(self):
    if not self._tmp_output_file:
      if self.browser_options.show_stdout:
        # This can happen in the case that loading the Chrome binary fails.
        # We write to stderr directly rather than using logging here,
        # because that makes a recursive call to this function.
        sys.stderr.write("Can't get standard output with --show-stdout\n")
      return ''
    self._tmp_output_file.flush()
    try:
      with open(self._tmp_output_file.name) as f:
        return f.read()
    except IOError:
      return ''

  def _RecordCrashpadReport(self, report_dict, reports_list):
    """Append (creation time, path) parsed from report_dict to reports_list.

    Malformed reports are logged and skipped.
    """
    try:
      report_time = ParseCrashpadDateTime(report_dict['Creation time'])
      report_path = report_dict['Path'].strip()
      reports_list.append((report_time, report_path))
    except (ValueError, KeyError) as e:
      logging.warning('Crashpad report expected valid keys'
                      ' "Path" and "Creation time": %s', e)

  def _GetMostRecentCrashpadMinidump(self):
    """Return the path of the newest crashpad report, or None if none exist."""
    os_name = self.browser.platform.GetOSName()
    arch_name = self.browser.platform.GetArchName()
    crashpad_database_util = binary_manager.FetchPath(
        'crashpad_database_util', arch_name, os_name)
    if not crashpad_database_util:
      return None
    report_output = subprocess.check_output([
        crashpad_database_util, '--database=' + self._tmp_minidump_dir,
        '--show-pending-reports', '--show-completed-reports',
        '--show-all-report-info'])
    last_indentation = -1
    reports_list = []
    report_dict = {}
    for report_line in report_output.splitlines():
      # Report values are grouped together by the same indentation level.
      current_indentation = 0
      for report_char in report_line:
        if not report_char.isspace():
          break
        current_indentation += 1
      # Decrease in indentation level indicates a new report is being printed.
      if current_indentation >= last_indentation:
        report_key, report_value = report_line.split(':', 1)
        if report_value:
          report_dict[report_key.strip()] = report_value.strip()
      elif report_dict:
        self._RecordCrashpadReport(report_dict, reports_list)
        report_dict = {}
      last_indentation = current_indentation
    # Include the last report.
    if report_dict:
      self._RecordCrashpadReport(report_dict, reports_list)
    if reports_list:
      _, most_recent_report_path = max(reports_list)
      return most_recent_report_path
    return None

  def _GetMostRecentMinidump(self):
    """Return the newest minidump path (crashpad or breakpad), or None."""
    # Crashpad dump layout will be the standard eventually, check it first.
    most_recent_dump = self._GetMostRecentCrashpadMinidump()
    # Typical breakpad format is simply dump files in a folder.
    if not most_recent_dump:
      dumps = glob.glob(os.path.join(self._tmp_minidump_dir, '*.dmp'))
      if dumps:
        most_recent_dump = heapq.nlargest(1, dumps, os.path.getmtime)[0]
    # As a sanity check, make sure the crash dump is recent.
    if (most_recent_dump and
        os.path.getmtime(most_recent_dump) < (time.time() - (5 * 60))):
      logging.warning('Crash dump is older than 5 minutes. May not be correct.')
    return most_recent_dump

  def _IsExecutableStripped(self):
    """Heuristically decide whether the browser binary has no symbols (Mac)."""
    if self.browser.platform.GetOSName() == 'mac':
      symbols = subprocess.check_output(['/usr/bin/nm', self._executable])
      num_symbols = len(symbols.splitlines())
      # We assume that if there are more than 10 symbols the executable is not
      # stripped.
      return num_symbols < 10
    else:
      return False

  def _GetStackFromMinidump(self, minidump):
    """Return a symbolized stack trace extracted from minidump, or None."""
    os_name = self.browser.platform.GetOSName()
    if os_name == 'win':
      cdb = self._GetCdbPath()
      if not cdb:
        logging.warning('cdb.exe not found.')
        return None
      output = subprocess.check_output([cdb, '-y', self._browser_directory,
                                        '-c', '.ecxr;k30;q', '-z', minidump])
      # cdb output can start the stack with "ChildEBP", "Child-SP", and
      # possibly other things we haven't seen yet. If we can't find the start
      # of the stack, include output from the beginning.
      stack_start = 0
      stack_start_match = re.search("^Child(?:EBP|-SP)", output, re.MULTILINE)
      if stack_start_match:
        stack_start = stack_start_match.start()
      stack_end = output.find('quit:')
      return output[stack_start:stack_end]
    arch_name = self.browser.platform.GetArchName()
    stackwalk = binary_manager.FetchPath(
        'minidump_stackwalk', arch_name, os_name)
    if not stackwalk:
      logging.warning('minidump_stackwalk binary not found.')
      return None
    # Strip any data preceding the minidump magic so stackwalk can parse it.
    with open(minidump, 'rb') as infile:
      minidump += '.stripped'
      with open(minidump, 'wb') as outfile:
        outfile.write(''.join(infile.read().partition('MDMP')[1:]))
    symbols_path = os.path.join(self._tmp_minidump_dir, 'symbols')
    symbols = glob.glob(os.path.join(self._browser_directory, '*.breakpad*'))
    if symbols:
      for symbol in sorted(symbols, key=os.path.getmtime, reverse=True):
        if not os.path.isfile(symbol):
          continue
        with open(symbol, 'r') as f:
          fields = f.readline().split()
          if not fields:
            continue
          sha = fields[3]
          binary = ' '.join(fields[4:])
        symbol_path = os.path.join(symbols_path, binary, sha)
        if os.path.exists(symbol_path):
          continue
        os.makedirs(symbol_path)
        shutil.copyfile(symbol, os.path.join(symbol_path, binary + '.sym'))
    else:
      # On some platforms generating the symbol table can be very time
      # consuming, skip it if there's nothing to dump.
      if self._IsExecutableStripped():
        logging.info('%s appears to be stripped, skipping symbol dump.' % (
            self._executable))
        return None
      logging.info('Dumping breakpad symbols.')
      generate_breakpad_symbols_path = os.path.join(
          util.GetChromiumSrcDir(), "components", "crash",
          "tools", "generate_breakpad_symbols.py")
      cmd = [
          sys.executable,
          generate_breakpad_symbols_path,
          '--binary=%s' % self._executable,
          '--symbols-dir=%s' % symbols_path,
          '--build-dir=%s' % self._browser_directory,
          ]
      try:
        subprocess.check_output(cmd, stderr=open(os.devnull, 'w'))
      except subprocess.CalledProcessError:
        logging.warning('Failed to execute "%s"' % ' '.join(cmd))
        return None
    return subprocess.check_output([stackwalk, minidump, symbols_path],
                                   stderr=open(os.devnull, 'w'))

  def _UploadMinidumpToCloudStorage(self, minidump_path):
    """ Upload minidump_path to cloud storage and return the cloud storage url.
    """
    remote_path = ('minidump-%s-%i.dmp' %
                   (datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'),
                    random.randint(0, 1000000)))
    try:
      return cloud_storage.Insert(cloud_storage.TELEMETRY_OUTPUT, remote_path,
                                  minidump_path)
    except cloud_storage.CloudStorageError as err:
      logging.error('Cloud storage error while trying to upload dump: %s' %
                    repr(err))
      return '<Missing link>'

  def GetStackTrace(self):
    """Return a symbolized crash stack, or a fallback message with stdout."""
    most_recent_dump = self._GetMostRecentMinidump()
    if not most_recent_dump:
      return 'No crash dump found. Returning browser stdout:\n' + (
          self.GetStandardOutput())
    logging.info('minidump found: %s' % most_recent_dump)
    stack = self._GetStackFromMinidump(most_recent_dump)
    if not stack:
      cloud_storage_link = self._UploadMinidumpToCloudStorage(most_recent_dump)
      return ('Failed to symbolize minidump. Raw stack is uploaded to cloud '
              'storage: %s. Returning browser stdout:\n%s' % (
                  cloud_storage_link, self.GetStandardOutput()))
    return stack

  def __del__(self):
    self.Close()

  def _TryCooperativeShutdown(self):
    if self.browser.platform.IsCooperativeShutdownSupported():
      # Ideally there would be a portable, cooperative shutdown
      # mechanism for the browser. This seems difficult to do
      # correctly for all embedders of the content API. The only known
      # problem with unclean shutdown of the browser process is on
      # Windows, where suspended child processes frequently leak. For
      # now, just solve this particular problem. See Issue 424024.
      if self.browser.platform.CooperativelyShutdown(self._proc, "chrome"):
        try:
          util.WaitFor(lambda: not self.IsBrowserRunning(), timeout=5)
          logging.info('Successfully shut down browser cooperatively')
        except exceptions.TimeoutException as e:
          logging.warning('Failed to cooperatively shutdown. ' +
                          'Proceeding to terminate: ' + str(e))

  def Close(self):
    """Shut the browser down, stop helpers, and clean up temp state."""
    super(DesktopBrowserBackend, self).Close()
    if self.IsBrowserRunning():
      self._TryCooperativeShutdown()
    # Shutdown politely if the profile may be used again.
    if self._output_profile_path and self.IsBrowserRunning():
      self._proc.terminate()
      try:
        util.WaitFor(lambda: not self.IsBrowserRunning(), timeout=5)
        self._proc = None
      except exceptions.TimeoutException:
        logging.warning('Failed to gracefully shutdown. Proceeding to kill.')
    # Shutdown aggressively if the above failed or if the profile is temporary.
    if self.IsBrowserRunning():
      self._proc.kill()
    self._proc = None
    if self._crash_service:
      self._crash_service.kill()
      self._crash_service = None
    if self._output_profile_path:
      # If we need the output then double check that it exists.
      if not (self._tmp_profile_dir and os.path.exists(self._tmp_profile_dir)):
        raise Exception("No profile directory generated by Chrome: '%s'." %
                        self._tmp_profile_dir)
    else:
      # If we don't need the profile after the run then cleanup.
      if self._tmp_profile_dir and os.path.exists(self._tmp_profile_dir):
        shutil.rmtree(self._tmp_profile_dir, ignore_errors=True)
        self._tmp_profile_dir = None
    if self._tmp_output_file:
      self._tmp_output_file.close()
      self._tmp_output_file = None
| |
import os
import fnmatch
import shutil
class PUFileDirector:
    """
    Builds file paths used to create/read/write data files pertaining to
    camera data, sensor data, experiment analysis, etc.

    Constructor:
        PUFileDirector(base_file_directory)

    Identifier setters (call before requesting a directory):
        SetPatientId(patientId) - Sets the patient
        SetSystemType(systemType) - Sets the system type ('prevention', 'assessment')
        SetWoundId(woundId) - Sets the wound
        SetAssessmentId(assessmentId) - Sets the assessment id
        SetExperimentId(experimentId) - Sets the experiment id
        SetSessionId(sessionId) - Sets the prevention session id

    Directory getters (raise PUFileDirectorException when a required
    identifier has not been set, or when the system type does not match):
        GetDepthFileDirectory() - Gets the file directory for depth files
        GetRgbFileDirectory() - Gets the file directory for rgb files
        GetThermalFileDirectory() - Gets the file directory for thermal files
        GetMultiSpectralFileDirectory() - Gets the file directory for multi-spectral files
        GetBiochemicalFileDirectory() - Gets the file directory for biochemical files

    Directory cleaners:
        CleanDepthFileDirectory() - Cleans the file directory for depth files
        CleanRgbFileDirectory() - Cleans the file directory for rgb files
        CleanThermalFileDirectory() - Cleans the file directory for thermal files
        CleanMultiSpectralFileDirectory() - Cleans the file directory for multi-spectral files
        CleanBiochemicalFileDirectory() - Cleans the file directory for biochemical files
    """
    preventionName = "prevention"
    assessmentName = "assessment"
    depthName = "depth"
    rgbName = "rgb"
    thermalName = "thermal"
    # NOTE(review): the two attribute names below are misspelled but kept
    # unchanged for backward compatibility with any external references.
    mutlispectralName = "multispectral"
    biochemicalName = "biochemical"
    expermimentName = "experiments"

    # Per-camera-direction sub-directories removed by the Clean* methods.
    _directionNames = ('north', 'south', 'east', 'west', 'center')

    def __init__(self, base_file_directory):
        """
        Initializes the class with the base file directory
        """
        self.base_file_directory = base_file_directory

    def SetPatientId(self, patientId):
        """
        Sets the patient
        """
        self.patientId = patientId

    def SetSystemType(self, systemType):
        """
        Sets the system type ('prevention', 'assessment')
        """
        self.systemType = systemType

    def SetWoundId(self, woundId):
        """
        Sets the wound
        """
        self.woundId = woundId

    def SetAssessmentId(self, assessmentId):
        """
        Sets the assessment id
        """
        self.assessmentId = assessmentId

    def SetExperimentId(self, experimentId):
        """
        Sets the experiment id
        """
        self.experimentId = experimentId

    def SetSessionId(self, sessionId):
        """
        Sets the prevention session id
        """
        self.sessionId = sessionId

    def _Require(self, attr, label):
        """
        Raises PUFileDirectorException('<label> not set') if the given
        identifier attribute has not been set on this instance.
        """
        if not hasattr(self, attr):
            raise PUFileDirectorException(label + " not set")

    def _AssessmentDirectory(self, leafName):
        """
        Validates that patient/system/wound/assessment identifiers are set,
        that the system type is not 'prevention', and returns
        <base>/<patient>/assessment/<wound>/<assessment>/<leafName>.
        """
        self._Require('patientId', 'Patient Id')
        self._Require('systemType', 'System Type')
        if self.systemType == "prevention":
            raise PUFileDirectorException("System Type not assessment")
        self._Require('woundId', 'Wound Id')
        self._Require('assessmentId', 'Assessment Id')
        return os.path.join(self.base_file_directory, str(self.patientId),
                            PUFileDirector.assessmentName, str(self.woundId),
                            str(self.assessmentId), leafName)

    def GetDepthFileDirectory(self):
        """
        Gets the file directory for depth files
        """
        self._Require('patientId', 'Patient Id')
        self._Require('systemType', 'System Type')
        if self.systemType == "prevention":
            self._Require('sessionId', 'Session Id')
            return os.path.join(self.base_file_directory, str(self.patientId),
                                PUFileDirector.preventionName,
                                str(self.sessionId), PUFileDirector.depthName)
        return self._AssessmentDirectory(PUFileDirector.depthName)

    def GetRgbFileDirectory(self):
        """Gets the file directory for rgb files
        """
        return self._AssessmentDirectory(PUFileDirector.rgbName)

    def GetThermalFileDirectory(self):
        """
        Gets the file directory for thermal files
        """
        return self._AssessmentDirectory(PUFileDirector.thermalName)

    def GetMultiSpectralFileDirectory(self):
        """
        Gets the file directory for multi-spectral files
        """
        return self._AssessmentDirectory(PUFileDirector.mutlispectralName)

    def GetBiochemicalFileDirectory(self):
        """
        Gets the file directory for biochemical files
        """
        # Removed stray debug output ('joining path') that was left here.
        return self._AssessmentDirectory(PUFileDirector.biochemicalName)

    def _CleanDirectionSubdirs(self, directory):
        """
        Deletes the per-direction capture sub-directories (north/south/east/
        west/center) found directly under the given directory.
        """
        if directory != "":
            for entry in os.listdir(directory):
                if any(fnmatch.fnmatch(entry, name)
                       for name in PUFileDirector._directionNames):
                    entryPath = os.path.join(directory, entry)
                    # Single-argument print() works under Python 2 and 3.
                    print("Deleting directory: " + entryPath)
                    shutil.rmtree(entryPath)

    def CleanRgbFileDirectory(self):
        """Cleans the file directory for rgb files
        """
        self._CleanDirectionSubdirs(self.GetRgbFileDirectory())

    def CleanDepthFileDirectory(self):
        """Cleans the file directory for depth files
        """
        self._CleanDirectionSubdirs(self.GetDepthFileDirectory())

    def CleanThermalFileDirectory(self):
        """Cleans the file directory for thermal files
        """
        self._CleanDirectionSubdirs(self.GetThermalFileDirectory())

    def CleanMultiSpectralFileDirectory(self):
        """Cleans the file directory for Multi-Spectral files
        """
        self._CleanDirectionSubdirs(self.GetMultiSpectralFileDirectory())

    def CleanBiochemicalFileDirectory(self):
        """Cleans the file directory for Biochemical files (*.dat)
        """
        d = self.GetBiochemicalFileDirectory()
        if d != "":
            for f in os.listdir(d):
                if fnmatch.fnmatch(f, '*.dat'):
                    fpath = os.path.join(d, f)
                    print("Deleting file: " + fpath)
                    os.remove(fpath)
class PUFileDirectorException(Exception):
    """Raised by PUFileDirector when a required identifier is missing or the
    system type does not permit the requested directory."""

    def __init__(self, value):
        # Keep the original message object available to callers via .value.
        self.value = value

    def __str__(self):
        return '%r' % (self.value,)
| |
"""DXR's concept of version control systems
The main entry points are `tree_to_repos`, which produces a mapping of roots
to VCS objects for each version control root discovered under the provided
tree, and `path_to_vcs`, which returns a VCS object for the version control
system that tracks the given path. Currently supported VCSs are Mercurial,
Git, and Perforce.
Currently supported upstream views:
- Git (GitHub)
- Mercurial (hgweb)
TODO:
- Add gitweb support for git.
- Add cvs, svn, bzr support.
- Produce in-DXR blame information using VCSs.
- Check if the mercurial paths are specific to Mozilla's customization or not.
"""
import marshal
import os
from os.path import relpath, join, split
from pkg_resources import resource_filename
import subprocess
import urlparse
from warnings import warn
import hglib
from ordereddict import OrderedDict
from dxr.utils import without_ending
class Vcs(object):
    """Abstract representation of a version-control system.

    In general, all path arguments to query methods should be normalized to
    be relative to the root directory of the VCS.

    NOTE(review): the unimplemented query methods return the NotImplemented
    sentinel rather than raising NotImplementedError; callers apparently
    check the return value, so subclasses must override them.
    """
    def __init__(self, root):
        self.root = root

    def get_root_dir(self):
        """Return the directory at the root of this VCS checkout."""
        return self.root

    def get_vcs_name(self):
        """Return a recognizable name for the VCS (the subclass name)."""
        return type(self).__name__

    @classmethod
    def invoke_vcs(cls, args, cwd, **kwargs):
        """Run the VCS command-line tool with ``args`` from ``cwd`` and
        return its output; extra kwargs go to the Popen constructor."""
        command_line = [cls.command] + args
        return subprocess.check_output(command_line, cwd=cwd, **kwargs)

    def is_tracked(self, path):
        """Does the repository track this file?"""
        return NotImplemented

    def generate_log(self, path):
        """Construct URL to upstream view of log of file at path."""
        return NotImplemented

    def generate_diff(self, path):
        """Construct URL to upstream view of diff of file at path."""
        return NotImplemented

    def generate_blame(self, path):
        """Construct URL to upstream view of blame on file at path."""
        return NotImplemented

    def generate_raw(self, path):
        """Construct URL to upstream view to raw file at path."""
        return NotImplemented

    @classmethod
    def get_contents(cls, path, revision, stderr=None):
        """Return contents of the file at the given absolute ``path`` as of
        ``revision``."""
        return NotImplemented

    def display_rev(self, path):
        """Return a human-readable revision identifier for the repository."""
        return NotImplemented
class Mercurial(Vcs):
    """Mercurial repository, viewed upstream through hgweb."""
    command = 'hg'
    def __init__(self, root):
        super(Mercurial, self).__init__(root)
        # Bundled hg extension that reports, for each tracked file, the
        # last revision in which it changed (used for diff links).
        hgext = resource_filename('dxr', 'hgext/previous_revisions.py')
        with hglib.open(root,
                        configs=['extensions.previous_revisions=%s' % hgext]) as client:
            tip = client.tip()
            self.revision = tip.node
            self.previous_revisions = self.find_previous_revisions(client)
        self.upstream = self._construct_upstream_url()
    def _construct_upstream_url(self):
        # 'hg paths default' yields the pull URL; rewrite ssh:// to http://,
        # drop credentials, and strip params/query/fragment for web viewing.
        upstream = urlparse.urlparse(self.invoke_vcs(['paths', 'default'], self.root).strip())
        recomb = list(upstream)
        if upstream.scheme == 'ssh':
            recomb[0] = 'http'
        recomb[1] = upstream.hostname # Eliminate any username stuff
        # check if port is defined and add that to the url
        if upstream.port:
            recomb[1] += ":{}".format(upstream.port)
        recomb[2] = '/' + recomb[2].lstrip('/') # strip all leading '/', add one back
        if not upstream.path.endswith('/'):
            recomb[2] += '/' # Make sure we have a '/' on the end
        recomb[3] = recomb[4] = recomb[5] = '' # Just those three
        return urlparse.urlunparse(recomb)
    def find_previous_revisions(self, client):
        """Find the last revision in which each file changed, for diff links.
        Return a mapping {path: last commit nodes in which file at path changed}
        """
        last_change = {}
        # Output format of the bundled extension: "<node>:<path>" per line.
        for line in client.rawcommand(['previous-revisions']).splitlines():
            node, path = line.split(':', 1)
            last_change[path] = node
        return last_change
    @classmethod
    def claim_vcs_source(cls, path, dirs, tree):
        """Claim ``path`` as a Mercurial checkout if it contains '.hg'.

        Removing '.hg' from ``dirs`` stops os.walk from descending into it.
        """
        if '.hg' in dirs:
            dirs.remove('.hg')
            return cls(path)
        return None
    def display_rev(self, path):
        # Short (12-hex-digit) form of the tip changeset id.
        return self.revision[:12]
    def is_tracked(self, path):
        # A file is tracked iff the extension reported a last-change node.
        return path in self.previous_revisions
    def generate_raw(self, path):
        return self.upstream + 'raw-file/' + self.revision + '/' + path
    def generate_diff(self, path):
        # We generate link to diff with the last revision in which the file changed.
        return self.upstream + 'diff/' + self.previous_revisions[path] + '/' + path
    def generate_blame(self, path):
        return self.upstream + 'annotate/' + self.revision + '/' + path
    def generate_log(self, path):
        return self.upstream + 'filelog/' + self.revision + '/' + path
    @classmethod
    def get_contents(cls, path, revision, stderr=None):
        # Run 'hg cat' from the file's own directory so only the basename
        # needs to be resolved.
        head, tail = split(path)
        return cls.invoke_vcs(['cat', '-r', revision, tail], head, stderr=stderr)
class Git(Vcs):
    """Git repository, with GitHub as the only supported upstream view."""
    command = 'git'

    def __init__(self, root):
        super(Git, self).__init__(root)
        self.tracked_files = set(line for line in
                                 self.invoke_vcs(['ls-files'], self.root).splitlines())
        self.revision = self.invoke_vcs(['rev-parse', 'HEAD'], self.root).strip()
        self.upstream = self._construct_upstream_url()

    def _construct_upstream_url(self):
        """Derive an https://github.com/... URL from the 'origin' remote.

        Returns None (after warning) when no supported remote is found.
        """
        source_urls = self.invoke_vcs(['remote', '-v'], self.root).split('\n')
        for src_url in source_urls:
            # 'git remote -v' output ends with a trailing blank line; the
            # old 3-way unpack raised ValueError on it, so skip anything
            # that does not have at least "name url" fields.
            fields = src_url.split()
            if len(fields) < 2:
                continue
            name, repo = fields[0], fields[1]
            # TODO: Why do we assume origin is upstream?
            if name == 'origin':
                if repo.startswith("git@github.com:"):
                    return "https://github.com/" + repo[len("git@github.com:"):]
                elif repo.startswith(("git://github.com/", "https://github.com/")):
                    repo = without_ending('.git', repo)
                    if repo.startswith("git:"):
                        repo = "https" + repo[len("git"):]
                    return repo
                # Typo fixed in the user-facing message ("naviagtion").
                warn("Your git remote is not supported yet. Please use a "
                     "GitHub remote if you would like version control "
                     "navigation links to show.")
                break

    @classmethod
    def claim_vcs_source(cls, path, dirs, tree):
        """Claim ``path`` as a git checkout if it contains a '.git' folder.

        Removing '.git' from ``dirs`` stops os.walk from descending into it.
        """
        if '.git' in dirs:
            dirs.remove('.git')
            return cls(path)
        return None

    def display_rev(self, path):
        # Short (10-hex-digit) form of the HEAD commit.
        return self.revision[:10]

    def is_tracked(self, path):
        return path in self.tracked_files

    def generate_raw(self, path):
        return self.upstream + "/raw/" + self.revision + "/" + path

    def generate_diff(self, path):
        # I really want to make this anchor on the file in question, but github
        # doesn't seem to do that nicely
        return self.upstream + "/commit/" + self.revision

    def generate_blame(self, path):
        return self.upstream + "/blame/" + self.revision + "/" + path

    def generate_log(self, path):
        return self.upstream + "/commits/" + self.revision + "/" + path

    @classmethod
    def get_contents(cls, path, revision, stderr=None):
        # 'git show rev:./file' resolves the path relative to the cwd (head).
        head, tail = split(path)
        return cls.invoke_vcs(['show', revision + ':./' + tail], head, stderr=stderr)
class Perforce(Vcs):
    """Perforce workspace, viewed upstream through a p4web server."""
    command = 'p4'

    def __init__(self, root, upstream):
        super(Perforce, self).__init__(root)
        have = self._p4run(['have'])
        # Map workspace-relative path -> 'p4 have' record for that file.
        self.have = dict((x['path'][len(root) + 1:], x) for x in have)
        self.upstream = upstream

    @classmethod
    def claim_vcs_source(cls, path, dirs, tree):
        """Claim ``path`` if a P4CONFIG file marks it as a p4 workspace."""
        if 'P4CONFIG' not in os.environ:
            return None
        if os.path.exists(os.path.join(path, os.environ['P4CONFIG'])):
            return cls(path, tree.p4web_url)
        return None

    def _p4run(self, args):
        """Run p4 in marshaled-output (-G) mode; return the decoded records."""
        ret = []
        # Bug fix: the original did `env = os.environ`, which mutated the
        # real process environment (PWD leaked into every later subprocess).
        # Work on a copy instead.
        env = os.environ.copy()
        env["PWD"] = self.root
        proc = subprocess.Popen(['p4', '-G'] + args,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                cwd=self.root,
                                env=env)
        # p4 -G streams a sequence of marshaled dicts; read until EOF.
        while True:
            try:
                x = marshal.load(proc.stdout)
            except EOFError:
                break
            ret.append(x)
        return ret

    def is_tracked(self, path):
        return path in self.have

    def generate_raw(self, path):
        info = self.have[path]
        return self.upstream + info['depotFile'] + '?ac=98&rev1=' + info['haveRev']

    def generate_diff(self, path):
        # Diff the synced revision against its immediate predecessor.
        info = self.have[path]
        haveRev = info['haveRev']
        prevRev = str(int(haveRev) - 1)
        return (self.upstream + info['depotFile'] + '?ac=19&rev1=' + prevRev +
                '&rev2=' + haveRev)

    def generate_blame(self, path):
        info = self.have[path]
        return self.upstream + info['depotFile'] + '?ac=193'

    def generate_log(self, path):
        info = self.have[path]
        return self.upstream + info['depotFile'] + '?ac=22#' + info['haveRev']

    def display_rev(self, path):
        info = self.have[path]
        return '#' + info['haveRev']
# All VCS backends, in the order they get a chance to claim a directory.
every_vcs = [Mercurial, Git, Perforce]
def tree_to_repos(tree):
    """Given a TreeConfig, return a mapping {root: Vcs object} where root is
    a directory under tree.source_folder. Traversal of the returned mapping
    follows the order of deepest directory first.

    (The original docstring repeated the "where root is a directory under
    tree.source_folder" clause twice; fixed.)

    :arg tree: TreeConfig object representing a source code tree
    """
    sources = {}
    # Find all of the VCSs in the source directory:
    # We may see multiple VCS if we use git submodules, for example.
    for cwd, dirs, files in os.walk(tree.source_folder):
        for vcs in every_vcs:
            attempt = vcs.claim_vcs_source(cwd, dirs, tree)
            if attempt is not None:
                sources[attempt.root] = attempt
    # It's possible that the root of the tree is not a VCS by itself, so walk
    # up the hierarchy until we find a parent folder that is a VCS. If we
    # can't find any, then no VCSs exist for the top level of this repository.
    directory = tree.source_folder
    while directory != '/' and directory not in sources:
        directory = os.path.dirname(directory)
        for vcs in every_vcs:
            attempt = vcs.claim_vcs_source(directory, os.listdir(directory), tree)
            if attempt is not None:
                sources[directory] = attempt
    # We want to make sure that we look up source repositories by deepest
    # directory first.
    lookup_order = sorted(sources.keys(), key=len, reverse=True)
    ordered_sources = OrderedDict()
    for key in lookup_order:
        ordered_sources[key] = sources[key]
    return ordered_sources
def file_contents_at_rev(abspath, revision):
    """Attempt to return the contents of a file at a specific revision.

    Tries Mercurial first, then Git; returns None when neither VCS can
    produce the file.
    """
    with open(os.devnull, 'w') as devnull:
        for vcs_class in (Mercurial, Git):
            try:
                return vcs_class.get_contents(abspath, revision,
                                              stderr=devnull)
            except subprocess.CalledProcessError:
                continue
    return None
class VcsCache(object):
    """This class offers a way to obtain Vcs objects for any file within a
    given tree."""
    def __init__(self, tree):
        """Construct a VcsCache for the given tree.
        :arg tree: TreeConfig object representing a source code tree
        """
        self.tree = tree
        # {root directory: Vcs}, deepest root first (see tree_to_repos).
        self.repos = tree_to_repos(tree)
        # Memoized path -> Vcs lookups; paths with no claiming VCS are not
        # cached and get re-checked on every call.
        self._path_cache = {}
    def vcs_for_path(self, path):
        """Given a tree and a path in the tree, find a source repository we
        know about that claims to track that file.
        Returns None when no known repository tracks the path.
        :arg string path: a path to a file (not a folder)
        """
        if path in self._path_cache:
            return self._path_cache[path]
        abs_path = join(self.tree.source_folder, path)
        for directory, vcs in self.repos.iteritems():
            # This seems to be the easiest way to find "is abs_path in the subtree
            # rooted at directory?"
            if relpath(abs_path, directory).startswith('..'):
                continue
            if vcs.is_tracked(relpath(abs_path, vcs.get_root_dir())):
                self._path_cache[path] = vcs
                break
        return self._path_cache.get(path)
| |
# Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.common import utils as utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.db2 import system
from trove.guestagent.datastore import service
from trove.guestagent.db import models
CONF = cfg.CONF  # Global oslo.config configuration object.
LOG = logging.getLogger(__name__)  # Module-level logger.
class DB2App(object):
    """
    Handles installation and configuration of DB2
    on a Trove instance.
    """
    def __init__(self, status, state_change_wait_time=None):
        LOG.debug("Initialize DB2App.")
        # How long to wait for the reported service status to change
        # before a start/stop attempt is declared failed.
        self.state_change_wait_time = (
            state_change_wait_time if state_change_wait_time else
            CONF.state_change_wait_time
        )
        LOG.debug("state_change_wait_time = %s." % self.state_change_wait_time)
        # DB2AppStatus instance used to poll the real service state.
        self.status = status
    def update_hostname(self):
        """
        When DB2 server is installed, it uses the hostname of the
        instance were the image was built. This needs to be updated
        to reflect the guest instance.
        """
        LOG.debug("Update the hostname of the DB2 instance.")
        try:
            run_command(system.UPDATE_HOSTNAME,
                        superuser='root')
        except exception.ProcessExecutionError:
            raise RuntimeError(_("Command to update the hostname failed."))
    def change_ownership(self, mount_point):
        """
        When DB2 server instance is installed, it does not have the
        DB2 local database directory created (/home/db2inst1/db2inst1).
        This gets created when we mount the cinder volume. So we need
        to change ownership of this directory to the DB2 instance user
        - db2inst1.
        """
        LOG.debug("Changing ownership of the DB2 data directory.")
        try:
            operating_system.chown(mount_point,
                                   system.DB2_INSTANCE_OWNER,
                                   system.DB2_INSTANCE_OWNER,
                                   recursive=False, as_root=True)
        except exception.ProcessExecutionError:
            raise RuntimeError(_(
                "Command to change ownership of  DB2 data directory failed."))
    def _enable_db_on_boot(self):
        # Register DB2 to start automatically with the instance.
        LOG.debug("Enable DB on boot.")
        try:
            run_command(system.ENABLE_AUTOSTART)
        except exception.ProcessExecutionError:
            raise RuntimeError(_(
                "Command to enable DB2 server on boot failed."))
    def _disable_db_on_boot(self):
        # Unregister DB2 from instance-boot autostart.
        LOG.debug("Disable DB2 on boot.")
        try:
            run_command(system.DISABLE_AUTOSTART)
        except exception.ProcessExecutionError:
            raise RuntimeError(_(
                "Command to disable DB2 server on boot failed."))
    def start_db_with_conf_changes(self, config_contents):
        '''
        Will not be implementing configuration change API for DB2 in
        the Kilo release. Currently all that this method does is to start
        the DB2 server without any configuration changes. Looks like
        this needs to be implemented to enable volume resize on the guest
        agent side.
        '''
        LOG.info(_("Starting DB2 with configuration changes."))
        self.start_db(True)
    def start_db(self, update_db=False):
        LOG.debug("Start the DB2 server instance.")
        self._enable_db_on_boot()
        try:
            run_command(system.START_DB2)
        except exception.ProcessExecutionError:
            # Failure of the start command itself is ignored here; the
            # status poll below is what decides success or failure.
            pass
        if not self.status.wait_for_real_status_to_change_to(
                rd_instance.ServiceStatuses.RUNNING,
                self.state_change_wait_time, update_db):
            LOG.error(_("Start of DB2 server instance failed."))
            self.status.end_restart()
            raise RuntimeError(_("Could not start DB2."))
    def stop_db(self, update_db=False, do_not_start_on_reboot=False):
        LOG.debug("Stop the DB2 server instance.")
        if do_not_start_on_reboot:
            self._disable_db_on_boot()
        try:
            run_command(system.STOP_DB2)
        except exception.ProcessExecutionError:
            # As with start_db: the status poll below is authoritative.
            pass
        if not (self.status.wait_for_real_status_to_change_to(
                rd_instance.ServiceStatuses.SHUTDOWN,
                self.state_change_wait_time, update_db)):
            LOG.error(_("Could not stop DB2."))
            self.status.end_restart()
            raise RuntimeError(_("Could not stop DB2."))
    def restart(self):
        LOG.debug("Restarting DB2 server instance.")
        try:
            self.status.begin_restart()
            self.stop_db()
            self.start_db()
        finally:
            # Always clear the restart flag, even if stop/start raised.
            self.status.end_restart()
class DB2AppStatus(service.BaseDbStatus):
    """Status reporter for the DB2 guest agent."""

    def _get_actual_db_status(self):
        """Probe the DB2 process and map the result onto a ServiceStatus."""
        LOG.debug("Getting the status of the DB2 server instance.")
        try:
            out, err = utils.execute_with_timeout(
                system.DB2_STATUS, shell=True)
        except exception.ProcessExecutionError:
            LOG.exception(_("Error getting the DB2 server status."))
            return rd_instance.ServiceStatuses.CRASHED
        # The status command reports the count of stopped partitions;
        # a "0" in the output therefore means the server is down.
        if "0" in out:
            return rd_instance.ServiceStatuses.SHUTDOWN
        return rd_instance.ServiceStatuses.RUNNING
def run_command(command, superuser=system.DB2_INSTANCE_OWNER,
                timeout=system.TIMEOUT):
    """Run *command* through a login shell as *superuser*, with a timeout."""
    argv = ("sudo", "su", "-", superuser, "-c", command)
    return utils.execute_with_timeout(*argv, timeout=timeout)
class DB2Admin(object):
"""
Handles administrative tasks on the DB2 instance.
"""
def create_database(self, databases):
"""Create the given database(s)."""
dbName = None
db_create_failed = []
LOG.debug("Creating DB2 databases.")
for item in databases:
mydb = models.ValidatedMySQLDatabase()
mydb.deserialize(item)
dbName = mydb.name
LOG.debug("Creating DB2 database: %s." % dbName)
try:
run_command(system.CREATE_DB_COMMAND % {'dbname': dbName})
except exception.ProcessExecutionError:
LOG.exception(_(
"There was an error creating database: %s.") % dbName)
db_create_failed.append(dbName)
pass
if len(db_create_failed) > 0:
LOG.exception(_("Creating the following databases failed: %s.") %
db_create_failed)
def delete_database(self, database):
"""Delete the specified database."""
dbName = None
try:
mydb = models.ValidatedMySQLDatabase()
mydb.deserialize(database)
dbName = mydb.name
LOG.debug("Deleting DB2 database: %s." % dbName)
run_command(system.DELETE_DB_COMMAND % {'dbname': dbName})
except exception.ProcessExecutionError:
LOG.exception(_(
"There was an error while deleting database:%s.") % dbName)
raise exception.GuestError(_("Unable to delete database: %s.") %
dbName)
    def list_databases(self, limit=None, marker=None, include_marker=False):
        """Return (serialized databases, next_marker) for this instance.

        Pagination: entries up to and including *marker* are skipped, then
        at most *limit* entries are returned; next_marker is the name of
        the last database returned, or None when the listing is exhausted.
        NOTE(review): include_marker is accepted but never used here.
        """
        LOG.debug("Listing all the DB2 databases.")
        databases = []
        next_marker = None
        try:
            out, err = run_command(system.LIST_DB_COMMAND)
            dblist = out.split()
            result = iter(dblist)
            count = 0
            # Advance the iterator past the marker, if one was supplied.
            if marker is not None:
                try:
                    item = result.next()
                    while item != marker:
                        item = result.next()
                    if item == marker:
                        marker = None
                except StopIteration:
                    pass
            try:
                item = result.next()
                while item:
                    count = count + 1
                    if (limit and count <= limit) or limit is None:
                        db2_db = models.MySQLDatabase()
                        db2_db.name = item
                        LOG.debug("database = %s ." % item)
                        # DB2 has no per-database charset/collation here.
                        db2_db.character_set = None
                        db2_db.collate = None
                        next_marker = db2_db.name
                        databases.append(db2_db.serialize())
                        item = result.next()
                    else:
                        # Limit exceeded: stop and report no next page.
                        next_marker = None
                        break
            except StopIteration:
                next_marker = None
            LOG.debug("databases = %s." % str(databases))
        except exception.ProcessExecutionError as pe:
            # Best effort: an error yields an empty listing, not a raise.
            LOG.exception(_("An error occurred listing databases: %s.") %
                          pe.message)
            pass
        return databases, next_marker
def create_user(self, users):
LOG.debug("Creating user(s) for accessing DB2 database(s).")
try:
for item in users:
user = models.MySQLUser()
user.deserialize(item)
try:
LOG.debug("Creating OS user: %s." % user.name)
utils.execute_with_timeout(
system.CREATE_USER_COMMAND % {
'login': user.name, 'login': user.name,
'passwd': user.password}, shell=True)
except exception.ProcessExecutionError as pe:
LOG.exception(_("Error creating user: %s.") % user.name)
continue
for database in user.databases:
mydb = models.ValidatedMySQLDatabase()
mydb.deserialize(database)
try:
LOG.debug("Granting user: %s access to database: %s."
% (user.name, mydb.name))
run_command(system.GRANT_USER_ACCESS % {
'dbname': mydb.name, 'login': user.name})
except exception.ProcessExecutionError as pe:
LOG.debug(
"Error granting user: %s access to database: %s."
% (user.name, mydb.name))
LOG.debug(pe)
pass
except exception.ProcessExecutionError as pe:
LOG.exception(_("An error occurred creating users: %s.") %
pe.message)
pass
    def delete_user(self, user):
        """Revoke the user's database access, then remove the OS login.

        :param user: serialized user payload.
        :raises exception.GuestError: when the OS user cannot be removed.
        """
        LOG.debug("Delete a given user.")
        db2_user = models.MySQLUser()
        db2_user.deserialize(user)
        userName = db2_user.name
        user_dbs = db2_user.databases
        LOG.debug("For user %s, databases to be deleted = %r." % (
            userName, user_dbs))
        # If the payload named no databases, revoke everything the user
        # currently has access to.
        if len(user_dbs) == 0:
            databases = self.list_access(db2_user.name, None)
        else:
            databases = user_dbs
        LOG.debug("databases for user = %r." % databases)
        for database in databases:
            mydb = models.ValidatedMySQLDatabase()
            mydb.deserialize(database)
            try:
                run_command(system.REVOKE_USER_ACCESS % {
                    'dbname': mydb.name,
                    'login': userName})
                LOG.debug("Revoked access for user:%s on database:%s." % (
                    userName, mydb.name))
            except exception.ProcessExecutionError as pe:
                # Best effort: a failed revoke must not block deleting the
                # OS account below.
                LOG.debug("Error occurred while revoking access to %s." %
                          mydb.name)
                pass
        try:
            utils.execute_with_timeout(system.DELETE_USER_COMMAND % {
                'login': db2_user.name.lower()}, shell=True)
        except exception.ProcessExecutionError as pe:
            LOG.exception(_(
                "There was an error while deleting user: %s.") % pe)
            raise exception.GuestError(_("Unable to delete user: %s.") %
                                       userName)
    def list_users(self, limit=None, marker=None, include_marker=False):
        """Return (serialized users, next_marker) across all databases.

        Walks every database, collecting the OS logins that have connect
        authority ('Y'), de-duplicating users that appear in several
        databases. Pagination works like list_databases: skip up to
        *marker*, then take at most *limit* distinct users in total.
        NOTE(review): include_marker is accepted but never used here.
        """
        LOG.debug(
            "List all users for all the databases in a DB2 server instance.")
        users = []
        user_map = {}
        next_marker = None
        count = 0
        databases, marker = self.list_databases()
        for database in databases:
            db2_db = models.MySQLDatabase()
            db2_db.deserialize(database)
            out = None
            try:
                out, err = run_command(
                    system.LIST_DB_USERS % {'dbname': db2_db.name})
            except exception.ProcessExecutionError:
                LOG.debug(
                    "There was an error while listing users for database: %s."
                    % db2_db.name)
                continue
            userlist = []
            for item in out.split('\n'):
                LOG.debug("item = %r" % item)
                user = item.split() if item != "" else None
                LOG.debug("user = %r" % (user))
                # Keep only logins with connect authority ('Y') that are
                # not in the configured ignore list.
                if (user is not None
                        and (user[0] not in cfg.get_ignored_users(manager='db2')
                             and user[1] == 'Y')):
                    userlist.append(user[0])
            result = iter(userlist)
            # Advance past the marker, if one was supplied.
            if marker is not None:
                try:
                    item = result.next()
                    while item != marker:
                        item = result.next()
                    if item == marker:
                        marker = None
                except StopIteration:
                    pass
            try:
                item = result.next()
                db2db = models.MySQLDatabase()
                db2db.name = db2_db.name
                while item:
                    '''
                    Check if the user has already been discovered. If so,
                    add this database to the database list for this user.
                    '''
                    if item in user_map:
                        db2user = user_map.get(item)
                        db2user.databases.append(db2db.serialize())
                        item = result.next()
                        continue
                    '''
                    If this user was not previously discovered, then add
                    this to the user's list.
                    '''
                    count = count + 1
                    if (limit and count <= limit) or limit is None:
                        db2_user = models.MySQLUser()
                        db2_user.name = item
                        db2_user.databases.append(db2db.serialize())
                        users.append(db2_user.serialize())
                        user_map.update({item: db2_user})
                        item = result.next()
                    else:
                        next_marker = None
                        break
            except StopIteration:
                next_marker = None
            # Stop scanning further databases once the limit is reached.
            if count == limit:
                break
        return users, next_marker
def get_user(self, username, hostname):
LOG.debug("Get details of a given database user.")
user = self._get_user(username, hostname)
if not user:
return None
return user.serialize()
    def _get_user(self, username, hostname):
        """Build a MySQLUser model for *username* by scanning every database.

        NOTE(review): ``user.databases = db2_db.name`` assigns a bare name
        string instead of appending a serialized database dict, and a later
        match overwrites an earlier one. Callers that iterate
        ``user.databases`` (delete_user via list_access) would then iterate
        the characters of a string — this looks like a bug; confirm against
        the models API before relying on it.
        """
        LOG.debug("Get details of a given database user %s." % username)
        user = models.MySQLUser()
        user.name = username
        databases, marker = self.list_databases()
        out = None
        for database in databases:
            db2_db = models.MySQLDatabase()
            db2_db.deserialize(database)
            try:
                out, err = run_command(
                    system.LIST_DB_USERS % {'dbname': db2_db.name})
            except exception.ProcessExecutionError:
                # Skip databases whose user listing fails.
                LOG.debug(
                    "Error while trying to get the users for database: %s." %
                    db2_db.name)
                continue
            for item in out.split('\n'):
                user_access = item.split() if item != "" else None
                # Match on login name (case-insensitive) with connect
                # authority ('Y').
                if (user_access is not None and
                        user_access[0].lower() == username.lower() and
                        user_access[1] == 'Y'):
                    user.databases = db2_db.name
                    break
        return user
def list_access(self, username, hostname):
"""
Show all the databases to which the user has more than
USAGE granted.
"""
LOG.debug("Listing databases that user: %s has access to." % username)
user = self._get_user(username, hostname)
return user.databases
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2016, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Detect the current architecture and operating system.
Some functions here are really from kernel32.dll, others from version.dll.
"""
from defines import *
#==============================================================================
# This is used later on to calculate the list of exported symbols.
# Assigning None first puts the name '_all' itself into vars(), so it is
# excluded from the final export list as well.
_all = None
_all = set(vars().keys())
#==============================================================================
#--- NTDDI version ------------------------------------------------------------
# NTDDI_* values pack an OS version into one integer: high word = OS
# version, next byte = service pack, low byte = sub-version. Use the
# *_MASK constants below to extract each component.
NTDDI_WIN8 = 0x06020000
NTDDI_WIN7SP1 = 0x06010100
NTDDI_WIN7 = 0x06010000
NTDDI_WS08 = 0x06000100
NTDDI_VISTASP1 = 0x06000100
NTDDI_VISTA = 0x06000000
NTDDI_LONGHORN = NTDDI_VISTA
NTDDI_WS03SP2 = 0x05020200
NTDDI_WS03SP1 = 0x05020100
NTDDI_WS03 = 0x05020000
NTDDI_WINXPSP3 = 0x05010300
NTDDI_WINXPSP2 = 0x05010200
NTDDI_WINXPSP1 = 0x05010100
NTDDI_WINXP = 0x05010000
NTDDI_WIN2KSP4 = 0x05000400
NTDDI_WIN2KSP3 = 0x05000300
NTDDI_WIN2KSP2 = 0x05000200
NTDDI_WIN2KSP1 = 0x05000100
NTDDI_WIN2K = 0x05000000
NTDDI_WINNT4 = 0x04000000
OSVERSION_MASK = 0xFFFF0000
SPVERSION_MASK = 0x0000FF00
SUBVERSION_MASK = 0x000000FF
#--- OSVERSIONINFO and OSVERSIONINFOEX structures and constants ---------------
# Platform ids (OSVERSIONINFO.dwPlatformId).
VER_PLATFORM_WIN32s = 0
VER_PLATFORM_WIN32_WINDOWS = 1
VER_PLATFORM_WIN32_NT = 2
# Suite masks (OSVERSIONINFOEX.wSuiteMask).
VER_SUITE_BACKOFFICE = 0x00000004
VER_SUITE_BLADE = 0x00000400
VER_SUITE_COMPUTE_SERVER = 0x00004000
VER_SUITE_DATACENTER = 0x00000080
VER_SUITE_ENTERPRISE = 0x00000002
VER_SUITE_EMBEDDEDNT = 0x00000040
VER_SUITE_PERSONAL = 0x00000200
VER_SUITE_SINGLEUSERTS = 0x00000100
VER_SUITE_SMALLBUSINESS = 0x00000001
VER_SUITE_SMALLBUSINESS_RESTRICTED = 0x00000020
VER_SUITE_STORAGE_SERVER = 0x00002000
VER_SUITE_TERMINAL = 0x00000010
VER_SUITE_WH_SERVER = 0x00008000
# Product types (OSVERSIONINFOEX.wProductType).
VER_NT_DOMAIN_CONTROLLER = 0x0000002
VER_NT_SERVER = 0x0000003
VER_NT_WORKSTATION = 0x0000001
# Type masks and condition operators for VerifyVersionInfo().
VER_BUILDNUMBER = 0x0000004
VER_MAJORVERSION = 0x0000002
VER_MINORVERSION = 0x0000001
VER_PLATFORMID = 0x0000008
VER_PRODUCT_TYPE = 0x0000080
VER_SERVICEPACKMAJOR = 0x0000020
VER_SERVICEPACKMINOR = 0x0000010
VER_SUITENAME = 0x0000040
VER_EQUAL = 1
VER_GREATER = 2
VER_GREATER_EQUAL = 3
VER_LESS = 4
VER_LESS_EQUAL = 5
VER_AND = 6
VER_OR = 7
# typedef struct _OSVERSIONINFO {
# DWORD dwOSVersionInfoSize;
# DWORD dwMajorVersion;
# DWORD dwMinorVersion;
# DWORD dwBuildNumber;
# DWORD dwPlatformId;
# TCHAR szCSDVersion[128];
# }OSVERSIONINFO;
class OSVERSIONINFOA(Structure):
    """ANSI OSVERSIONINFO structure (see GetVersionExA)."""
    _fields_ = [
        ("dwOSVersionInfoSize",     DWORD),
        ("dwMajorVersion",          DWORD),
        ("dwMinorVersion",          DWORD),
        ("dwBuildNumber",           DWORD),
        ("dwPlatformId",            DWORD),
        ("szCSDVersion",            CHAR * 128),
    ]
class OSVERSIONINFOW(Structure):
    """Unicode OSVERSIONINFO structure (see GetVersionExW)."""
    _fields_ = [
        ("dwOSVersionInfoSize",     DWORD),
        ("dwMajorVersion",          DWORD),
        ("dwMinorVersion",          DWORD),
        ("dwBuildNumber",           DWORD),
        ("dwPlatformId",            DWORD),
        ("szCSDVersion",            WCHAR * 128),
    ]
# typedef struct _OSVERSIONINFOEX {
# DWORD dwOSVersionInfoSize;
# DWORD dwMajorVersion;
# DWORD dwMinorVersion;
# DWORD dwBuildNumber;
# DWORD dwPlatformId;
# TCHAR szCSDVersion[128];
# WORD wServicePackMajor;
# WORD wServicePackMinor;
# WORD wSuiteMask;
# BYTE wProductType;
# BYTE wReserved;
# }OSVERSIONINFOEX, *POSVERSIONINFOEX, *LPOSVERSIONINFOEX;
class OSVERSIONINFOEXA(Structure):
    """ANSI OSVERSIONINFOEX structure (extended version info)."""
    _fields_ = [
        ("dwOSVersionInfoSize",     DWORD),
        ("dwMajorVersion",          DWORD),
        ("dwMinorVersion",          DWORD),
        ("dwBuildNumber",           DWORD),
        ("dwPlatformId",            DWORD),
        ("szCSDVersion",            CHAR * 128),
        ("wServicePackMajor",       WORD),
        ("wServicePackMinor",       WORD),
        ("wSuiteMask",              WORD),
        ("wProductType",            BYTE),
        ("wReserved",               BYTE),
    ]
class OSVERSIONINFOEXW(Structure):
    """Unicode OSVERSIONINFOEX structure (extended version info)."""
    _fields_ = [
        ("dwOSVersionInfoSize",     DWORD),
        ("dwMajorVersion",          DWORD),
        ("dwMinorVersion",          DWORD),
        ("dwBuildNumber",           DWORD),
        ("dwPlatformId",            DWORD),
        ("szCSDVersion",            WCHAR * 128),
        ("wServicePackMajor",       WORD),
        ("wServicePackMinor",       WORD),
        ("wSuiteMask",              WORD),
        ("wProductType",            BYTE),
        ("wReserved",               BYTE),
    ]
# Pointer typedefs for the version-info structures above.
LPOSVERSIONINFOA = POINTER(OSVERSIONINFOA)
LPOSVERSIONINFOW = POINTER(OSVERSIONINFOW)
LPOSVERSIONINFOEXA = POINTER(OSVERSIONINFOEXA)
LPOSVERSIONINFOEXW = POINTER(OSVERSIONINFOEXW)
POSVERSIONINFOA = LPOSVERSIONINFOA
POSVERSIONINFOW = LPOSVERSIONINFOW
POSVERSIONINFOEXA = LPOSVERSIONINFOEXA
# Bug fix: this previously aliased LPOSVERSIONINFOA (the ANSI, non-EX
# struct pointer), so POSVERSIONINFOEXW referred to the wrong type.
POSVERSIONINFOEXW = LPOSVERSIONINFOEXW
#--- GetSystemMetrics constants -----------------------------------------------
# SM_* values are the nIndex argument of GetSystemMetrics() below.
SM_CXSCREEN             = 0
SM_CYSCREEN             = 1
SM_CXVSCROLL            = 2
SM_CYHSCROLL            = 3
SM_CYCAPTION            = 4
SM_CXBORDER             = 5
SM_CYBORDER             = 6
SM_CXDLGFRAME           = 7
SM_CYDLGFRAME           = 8
SM_CYVTHUMB             = 9
SM_CXHTHUMB             = 10
SM_CXICON               = 11
SM_CYICON               = 12
SM_CXCURSOR             = 13
SM_CYCURSOR             = 14
SM_CYMENU               = 15
SM_CXFULLSCREEN         = 16
SM_CYFULLSCREEN         = 17
SM_CYKANJIWINDOW        = 18
SM_MOUSEPRESENT         = 19
SM_CYVSCROLL            = 20
SM_CXHSCROLL            = 21
SM_DEBUG                = 22
SM_SWAPBUTTON           = 23
SM_RESERVED1            = 24
SM_RESERVED2            = 25
SM_RESERVED3            = 26
SM_RESERVED4            = 27
SM_CXMIN                = 28
SM_CYMIN                = 29
SM_CXSIZE               = 30
SM_CYSIZE               = 31
SM_CXFRAME              = 32
SM_CYFRAME              = 33
SM_CXMINTRACK           = 34
SM_CYMINTRACK           = 35
SM_CXDOUBLECLK          = 36
SM_CYDOUBLECLK          = 37
SM_CXICONSPACING        = 38
SM_CYICONSPACING        = 39
SM_MENUDROPALIGNMENT    = 40
SM_PENWINDOWS           = 41
SM_DBCSENABLED          = 42
SM_CMOUSEBUTTONS        = 43
SM_CXFIXEDFRAME         = SM_CXDLGFRAME     # ;win40 name change
SM_CYFIXEDFRAME         = SM_CYDLGFRAME     # ;win40 name change
SM_CXSIZEFRAME          = SM_CXFRAME        # ;win40 name change
SM_CYSIZEFRAME          = SM_CYFRAME        # ;win40 name change
SM_SECURE               = 44
SM_CXEDGE               = 45
SM_CYEDGE               = 46
SM_CXMINSPACING         = 47
SM_CYMINSPACING         = 48
SM_CXSMICON             = 49
SM_CYSMICON             = 50
SM_CYSMCAPTION          = 51
SM_CXSMSIZE             = 52
SM_CYSMSIZE             = 53
SM_CXMENUSIZE           = 54
SM_CYMENUSIZE           = 55
SM_ARRANGE              = 56
SM_CXMINIMIZED          = 57
SM_CYMINIMIZED          = 58
SM_CXMAXTRACK           = 59
SM_CYMAXTRACK           = 60
SM_CXMAXIMIZED          = 61
SM_CYMAXIMIZED          = 62
SM_NETWORK              = 63
SM_CLEANBOOT            = 67
SM_CXDRAG               = 68
SM_CYDRAG               = 69
SM_SHOWSOUNDS           = 70
SM_CXMENUCHECK          = 71  # Use instead of GetMenuCheckMarkDimensions()!
SM_CYMENUCHECK          = 72
SM_SLOWMACHINE          = 73
SM_MIDEASTENABLED       = 74
SM_MOUSEWHEELPRESENT    = 75
SM_XVIRTUALSCREEN       = 76
SM_YVIRTUALSCREEN       = 77
SM_CXVIRTUALSCREEN      = 78
SM_CYVIRTUALSCREEN      = 79
SM_CMONITORS            = 80
SM_SAMEDISPLAYFORMAT    = 81
SM_IMMENABLED           = 82
SM_CXFOCUSBORDER        = 83
SM_CYFOCUSBORDER        = 84
SM_TABLETPC             = 86
SM_MEDIACENTER          = 87
SM_STARTER              = 88
SM_SERVERR2             = 89
SM_MOUSEHORIZONTALWHEELPRESENT = 91
SM_CXPADDEDBORDER       = 92
SM_CMETRICS             = 93
SM_REMOTESESSION        = 0x1000
SM_SHUTTINGDOWN         = 0x2000
SM_REMOTECONTROL        = 0x2001
SM_CARETBLINKINGENABLED = 0x2002
#--- SYSTEM_INFO structure, GetSystemInfo() and GetNativeSystemInfo() ---------
# Values used by Wine
# Documented values at MSDN are marked with an asterisk
# (SYSTEM_INFO.wProcessorArchitecture values.)
PROCESSOR_ARCHITECTURE_UNKNOWN          = 0xFFFF;   # Unknown architecture.
PROCESSOR_ARCHITECTURE_INTEL            = 0         # x86 (AMD or Intel) *
PROCESSOR_ARCHITECTURE_MIPS             = 1         # MIPS
PROCESSOR_ARCHITECTURE_ALPHA            = 2         # Alpha
PROCESSOR_ARCHITECTURE_PPC              = 3         # Power PC
PROCESSOR_ARCHITECTURE_SHX              = 4         # SHX
PROCESSOR_ARCHITECTURE_ARM              = 5         # ARM
PROCESSOR_ARCHITECTURE_IA64             = 6         # Intel Itanium *
PROCESSOR_ARCHITECTURE_ALPHA64          = 7         # Alpha64
PROCESSOR_ARCHITECTURE_MSIL             = 8         # MSIL
PROCESSOR_ARCHITECTURE_AMD64            = 9         # x64 (AMD or Intel) *
PROCESSOR_ARCHITECTURE_IA32_ON_WIN64    = 10        # IA32 on Win64
PROCESSOR_ARCHITECTURE_SPARC            = 20        # Sparc (Wine)
# Values used by Wine
# PROCESSOR_OPTIL value found at http://code.google.com/p/ddab-lib/
# Documented values at MSDN are marked with an asterisk
# (SYSTEM_INFO.dwProcessorType values.)
PROCESSOR_INTEL_386     = 386       # Intel i386 *
PROCESSOR_INTEL_486     = 486       # Intel i486 *
PROCESSOR_INTEL_PENTIUM = 586       # Intel Pentium *
PROCESSOR_INTEL_IA64    = 2200      # Intel IA64 (Itanium) *
PROCESSOR_AMD_X8664     = 8664      # AMD X86 64 *
PROCESSOR_MIPS_R4000    = 4000      # MIPS R4000, R4101, R3910
PROCESSOR_ALPHA_21064   = 21064     # Alpha 210 64
PROCESSOR_PPC_601       = 601       # PPC 601
PROCESSOR_PPC_603       = 603       # PPC 603
PROCESSOR_PPC_604       = 604       # PPC 604
PROCESSOR_PPC_620       = 620       # PPC 620
PROCESSOR_HITACHI_SH3   = 10003     # Hitachi SH3 (Windows CE)
PROCESSOR_HITACHI_SH3E  = 10004     # Hitachi SH3E (Windows CE)
PROCESSOR_HITACHI_SH4   = 10005     # Hitachi SH4 (Windows CE)
PROCESSOR_MOTOROLA_821  = 821       # Motorola 821 (Windows CE)
PROCESSOR_SHx_SH3       = 103       # SHx SH3 (Windows CE)
PROCESSOR_SHx_SH4       = 104       # SHx SH4 (Windows CE)
PROCESSOR_STRONGARM     = 2577      # StrongARM (Windows CE)
PROCESSOR_ARM720        = 1824      # ARM 720 (Windows CE)
PROCESSOR_ARM820        = 2080      # ARM 820 (Windows CE)
PROCESSOR_ARM920        = 2336      # ARM 920 (Windows CE)
PROCESSOR_ARM_7TDMI     = 70001     # ARM 7TDMI (Windows CE)
PROCESSOR_OPTIL         = 0x494F    # MSIL
# typedef struct _SYSTEM_INFO {
# union {
# DWORD dwOemId;
# struct {
# WORD wProcessorArchitecture;
# WORD wReserved;
# } ;
# } ;
# DWORD dwPageSize;
# LPVOID lpMinimumApplicationAddress;
# LPVOID lpMaximumApplicationAddress;
# DWORD_PTR dwActiveProcessorMask;
# DWORD dwNumberOfProcessors;
# DWORD dwProcessorType;
# DWORD dwAllocationGranularity;
# WORD wProcessorLevel;
# WORD wProcessorRevision;
# } SYSTEM_INFO;
class _SYSTEM_INFO_OEM_ID_STRUCT(Structure):
    """Inner anonymous struct of the SYSTEM_INFO OEM id union."""
    _fields_ = [
        ("wProcessorArchitecture", WORD),
        ("wReserved", WORD),
    ]
class _SYSTEM_INFO_OEM_ID(Union):
    """Union over the obsolete C{dwOemId} DWORD and the
    C{wProcessorArchitecture}/C{wReserved} word pair (same storage)."""
    _fields_ = [
        ("dwOemId", DWORD),
        ("w", _SYSTEM_INFO_OEM_ID_STRUCT),
    ]
class SYSTEM_INFO(Structure):
    """
    ctypes mirror of the Win32 C{SYSTEM_INFO} structure
    (as filled in by L{GetSystemInfo} / L{GetNativeSystemInfo}).
    """
    _fields_ = [
        ("id", _SYSTEM_INFO_OEM_ID),
        ("dwPageSize", DWORD),
        ("lpMinimumApplicationAddress", LPVOID),
        ("lpMaximumApplicationAddress", LPVOID),
        ("dwActiveProcessorMask", DWORD_PTR),
        ("dwNumberOfProcessors", DWORD),
        ("dwProcessorType", DWORD),
        ("dwAllocationGranularity", DWORD),
        ("wProcessorLevel", WORD),
        ("wProcessorRevision", WORD),
    ]

    # Convenience accessors that reach through the anonymous union in "id".
    @property
    def dwOemId(self):
        return self.id.dwOemId
    @dwOemId.setter
    def dwOemId(self, value):
        self.id.dwOemId = value

    @property
    def wProcessorArchitecture(self):
        return self.id.w.wProcessorArchitecture
    @wProcessorArchitecture.setter
    def wProcessorArchitecture(self, value):
        self.id.w.wProcessorArchitecture = value
LPSYSTEM_INFO = ctypes.POINTER(SYSTEM_INFO)
# void WINAPI GetSystemInfo(
# __out LPSYSTEM_INFO lpSystemInfo
# );
def GetSystemInfo():
    """Call kernel32!GetSystemInfo and return the filled L{SYSTEM_INFO}."""
    fn = windll.kernel32.GetSystemInfo
    fn.argtypes = [LPSYSTEM_INFO]
    fn.restype = None
    info = SYSTEM_INFO()
    fn(byref(info))
    return info
# void WINAPI GetNativeSystemInfo(
# __out LPSYSTEM_INFO lpSystemInfo
# );
def GetNativeSystemInfo():
    """Call kernel32!GetNativeSystemInfo and return the filled L{SYSTEM_INFO}."""
    fn = windll.kernel32.GetNativeSystemInfo
    fn.argtypes = [LPSYSTEM_INFO]
    fn.restype = None
    info = SYSTEM_INFO()
    fn(byref(info))
    return info
# int WINAPI GetSystemMetrics(
# __in int nIndex
# );
def GetSystemMetrics(nIndex):
    """Thin wrapper over user32!GetSystemMetrics; returns the metric as int."""
    fn = windll.user32.GetSystemMetrics
    fn.argtypes = [ctypes.c_int]
    fn.restype = ctypes.c_int
    return fn(nIndex)
# SIZE_T WINAPI GetLargePageMinimum(void);
def GetLargePageMinimum():
    """Return the minimum size of a large page, in bytes.

    @rtype:  int
    @return: Minimum large-page size as reported by the system.

    Bug fix: C{GetLargePageMinimum} is exported by I{kernel32.dll}, not
    I{user32.dll}; resolving it through C{windll.user32} raised
    C{AttributeError} on every call.
    """
    _GetLargePageMinimum = windll.kernel32.GetLargePageMinimum
    _GetLargePageMinimum.argtypes = []
    _GetLargePageMinimum.restype = SIZE_T
    return _GetLargePageMinimum()
# HANDLE WINAPI GetCurrentProcess(void);
def GetCurrentProcess():
    """Return the pseudo-handle for the calling process."""
    fn = windll.kernel32.GetCurrentProcess
    fn.argtypes = []
    fn.restype = HANDLE
    return fn()
# HANDLE WINAPI GetCurrentThread(void);
def GetCurrentThread():
    """Return the pseudo-handle for the calling thread."""
    fn = windll.kernel32.GetCurrentThread
    fn.argtypes = []
    fn.restype = HANDLE
    return fn()
# BOOL WINAPI IsWow64Process(
# __in HANDLE hProcess,
# __out PBOOL Wow64Process
# );
def IsWow64Process(hProcess):
    """Return C{True} if C{hProcess} is a 32 bit process running on a
    64 bit version of Windows (WOW64)."""
    _IsWow64Process = windll.kernel32.IsWow64Process
    _IsWow64Process.argtypes = [HANDLE, PBOOL]
    _IsWow64Process.restype = bool
    _IsWow64Process.errcheck = RaiseIfZero
    flag = BOOL(FALSE)
    _IsWow64Process(hProcess, byref(flag))
    return bool(flag)
# DWORD WINAPI GetVersion(void);
def GetVersion():
    """Call kernel32!GetVersion and decode the packed version DWORD.

    @rtype:  tuple( int, int, int or None )
    @return: C{(major, minor, build)} version numbers. The build number is
        C{None} when the high bit of the packed DWORD is set, in which case
        the API does not encode a build number.

    Bug fix: the original code returned C{int(dwBuild)} unconditionally,
    which raised C{TypeError} whenever C{dwBuild} was C{None}.
    """
    _GetVersion = windll.kernel32.GetVersion
    _GetVersion.argtypes = []
    _GetVersion.restype = DWORD
    _GetVersion.errcheck = RaiseIfZero
    # See the example code here:
    # http://msdn.microsoft.com/en-us/library/ms724439(VS.85).aspx
    dwVersion = _GetVersion()
    dwMajorVersion = dwVersion & 0x000000FF
    dwMinorVersion = (dwVersion & 0x0000FF00) >> 8
    if (dwVersion & 0x80000000) == 0:
        dwBuild = (dwVersion & 0x7FFF0000) >> 16
    else:
        dwBuild = None  # no build number in the packed DWORD
    return int(dwMajorVersion), int(dwMinorVersion), dwBuild
# BOOL WINAPI GetVersionEx(
# __inout LPOSVERSIONINFO lpVersionInfo
# );
def GetVersionExA():
    """Call kernel32!GetVersionExA and return the filled version structure.

    Tries the extended OSVERSIONINFOEXA first; if that call fails (presumably
    on systems that reject the larger structure — TODO confirm), falls back
    to the plain OSVERSIONINFOA, retargeting the prototype before retrying.
    Note the errcheck (RaiseIfZero, defined elsewhere in this module) is what
    turns a FALSE return into the exception caught below.
    """
    _GetVersionExA = windll.kernel32.GetVersionExA
    _GetVersionExA.argtypes = [POINTER(OSVERSIONINFOEXA)]
    _GetVersionExA.restype = bool
    _GetVersionExA.errcheck = RaiseIfZero
    osi = OSVERSIONINFOEXA()
    osi.dwOSVersionInfoSize = sizeof(osi)
    try:
        _GetVersionExA(byref(osi))
    except WindowsError:
        # Fallback: smaller structure, and argtypes must be swapped first
        # so ctypes marshals the new pointer type correctly.
        osi = OSVERSIONINFOA()
        osi.dwOSVersionInfoSize = sizeof(osi)
        _GetVersionExA.argtypes = [POINTER(OSVERSIONINFOA)]
        _GetVersionExA(byref(osi))
    return osi
def GetVersionExW():
    """Unicode variant of L{GetVersionExA}: try OSVERSIONINFOEXW first,
    fall back to OSVERSIONINFOW on failure (same retargeting trick)."""
    _GetVersionExW = windll.kernel32.GetVersionExW
    _GetVersionExW.argtypes = [POINTER(OSVERSIONINFOEXW)]
    _GetVersionExW.restype = bool
    _GetVersionExW.errcheck = RaiseIfZero
    osi = OSVERSIONINFOEXW()
    osi.dwOSVersionInfoSize = sizeof(osi)
    try:
        _GetVersionExW(byref(osi))
    except WindowsError:
        # Fallback: smaller structure, prototype retargeted before the retry.
        osi = OSVERSIONINFOW()
        osi.dwOSVersionInfoSize = sizeof(osi)
        _GetVersionExW.argtypes = [POINTER(OSVERSIONINFOW)]
        _GetVersionExW(byref(osi))
    return osi
# ANSI/Unicode dispatcher, consistent with the other A/W pairs in this module.
GetVersionEx = GuessStringType(GetVersionExA, GetVersionExW)
# BOOL WINAPI GetProductInfo(
# __in DWORD dwOSMajorVersion,
# __in DWORD dwOSMinorVersion,
# __in DWORD dwSpMajorVersion,
# __in DWORD dwSpMinorVersion,
# __out PDWORD pdwReturnedProductType
# );
def GetProductInfo(dwOSMajorVersion, dwOSMinorVersion, dwSpMajorVersion, dwSpMinorVersion):
    """Wrapper for kernel32!GetProductInfo; returns the product type value."""
    fn = windll.kernel32.GetProductInfo
    fn.argtypes = [DWORD, DWORD, DWORD, DWORD, PDWORD]
    fn.restype = BOOL
    fn.errcheck = RaiseIfZero
    product_type = DWORD(0)
    fn(dwOSMajorVersion, dwOSMinorVersion, dwSpMajorVersion, dwSpMinorVersion, byref(product_type))
    return product_type.value
# BOOL WINAPI VerifyVersionInfo(
# __in LPOSVERSIONINFOEX lpVersionInfo,
# __in DWORD dwTypeMask,
# __in DWORDLONG dwlConditionMask
# );
def VerifyVersionInfo(lpVersionInfo, dwTypeMask, dwlConditionMask):
    """Dispatch to the ANSI or Unicode variant based on the structure type."""
    for struct_type, variant in ((OSVERSIONINFOEXA, VerifyVersionInfoA),
                                 (OSVERSIONINFOEXW, VerifyVersionInfoW)):
        if isinstance(lpVersionInfo, struct_type):
            return variant(lpVersionInfo, dwTypeMask, dwlConditionMask)
    raise TypeError("Bad OSVERSIONINFOEX structure")
def VerifyVersionInfoA(lpVersionInfo, dwTypeMask, dwlConditionMask):
    """ANSI variant of VerifyVersionInfo; returns the API's boolean verdict."""
    fn = windll.kernel32.VerifyVersionInfoA
    fn.argtypes = [LPOSVERSIONINFOEXA, DWORD, DWORDLONG]
    fn.restype = bool
    return fn(byref(lpVersionInfo), dwTypeMask, dwlConditionMask)
def VerifyVersionInfoW(lpVersionInfo, dwTypeMask, dwlConditionMask):
    """Unicode variant of VerifyVersionInfo; returns the API's boolean verdict."""
    fn = windll.kernel32.VerifyVersionInfoW
    fn.argtypes = [LPOSVERSIONINFOEXW, DWORD, DWORDLONG]
    fn.restype = bool
    return fn(byref(lpVersionInfo), dwTypeMask, dwlConditionMask)
# ULONGLONG WINAPI VerSetConditionMask(
# __in ULONGLONG dwlConditionMask,
# __in DWORD dwTypeBitMask,
# __in BYTE dwConditionMask
# );
def VerSetConditionMask(dwlConditionMask, dwTypeBitMask, dwConditionMask):
    """Thin wrapper over kernel32!VerSetConditionMask (condition mask builder
    used with L{VerifyVersionInfo})."""
    fn = windll.kernel32.VerSetConditionMask
    fn.argtypes = [ULONGLONG, DWORD, BYTE]
    fn.restype = ULONGLONG
    return fn(dwlConditionMask, dwTypeBitMask, dwConditionMask)
#--- get_bits, get_arch and get_os --------------------------------------------
# Canonical architecture name strings used throughout this module
# (returned by L{_get_arch} via C{_arch_map} below).
ARCH_UNKNOWN = "unknown"
ARCH_I386 = "i386"
ARCH_MIPS = "mips"
ARCH_ALPHA = "alpha"
ARCH_PPC = "ppc"
ARCH_SHX = "shx"
ARCH_ARM = "arm"
ARCH_ARM64 = "arm64"
ARCH_THUMB = "thumb"
ARCH_IA64 = "ia64"
ARCH_ALPHA64 = "alpha64"
ARCH_MSIL = "msil"
ARCH_AMD64 = "amd64"
ARCH_SPARC = "sparc"
# aliases
ARCH_IA32 = ARCH_I386
ARCH_X86 = ARCH_I386
ARCH_X64 = ARCH_AMD64
ARCH_ARM7 = ARCH_ARM
ARCH_ARM8 = ARCH_ARM64
ARCH_T32 = ARCH_THUMB
ARCH_AARCH32 = ARCH_ARM7
ARCH_AARCH64 = ARCH_ARM8
ARCH_POWERPC = ARCH_PPC
ARCH_HITACHI = ARCH_SHX
ARCH_ITANIUM = ARCH_IA64
# win32 constants -> our constants
_arch_map = {
    PROCESSOR_ARCHITECTURE_INTEL : ARCH_I386,
    PROCESSOR_ARCHITECTURE_MIPS : ARCH_MIPS,
    PROCESSOR_ARCHITECTURE_ALPHA : ARCH_ALPHA,
    PROCESSOR_ARCHITECTURE_PPC : ARCH_PPC,
    PROCESSOR_ARCHITECTURE_SHX : ARCH_SHX,
    PROCESSOR_ARCHITECTURE_ARM : ARCH_ARM,
    PROCESSOR_ARCHITECTURE_IA64 : ARCH_IA64,
    PROCESSOR_ARCHITECTURE_ALPHA64 : ARCH_ALPHA64,
    PROCESSOR_ARCHITECTURE_MSIL : ARCH_MSIL,
    PROCESSOR_ARCHITECTURE_AMD64 : ARCH_AMD64,
    PROCESSOR_ARCHITECTURE_SPARC : ARCH_SPARC,
}
# Operating system name strings (returned by L{_get_os}), plus long-form
# aliases. Note the exact literal values matter: _get_os's return value is
# compared against these strings by callers.
OS_UNKNOWN = "Unknown"
OS_NT = "Windows NT"
OS_W2K = "Windows 2000"
OS_XP = "Windows XP"
OS_XP_64 = "Windows XP (64 bits)"
OS_W2K3 = "Windows 2003"
OS_W2K3_64 = "Windows 2003 (64 bits)"
OS_W2K3R2 = "Windows 2003 R2"
OS_W2K3R2_64 = "Windows 2003 R2 (64 bits)"
OS_W2K8 = "Windows 2008"
OS_W2K8_64 = "Windows 2008 (64 bits)"
OS_W2K8R2 = "Windows 2008 R2"
OS_W2K8R2_64 = "Windows 2008 R2 (64 bits)"
OS_VISTA = "Windows Vista"
OS_VISTA_64 = "Windows Vista (64 bits)"
OS_W7 = "Windows 7"
OS_W7_64 = "Windows 7 (64 bits)"
# aliases
OS_SEVEN = OS_W7
OS_SEVEN_64 = OS_W7_64
OS_WINDOWS_NT = OS_NT
OS_WINDOWS_2000 = OS_W2K
OS_WINDOWS_XP = OS_XP
OS_WINDOWS_XP_64 = OS_XP_64
OS_WINDOWS_2003 = OS_W2K3
OS_WINDOWS_2003_64 = OS_W2K3_64
OS_WINDOWS_2003_R2 = OS_W2K3R2
OS_WINDOWS_2003_R2_64 = OS_W2K3R2_64
OS_WINDOWS_2008 = OS_W2K8
OS_WINDOWS_2008_64 = OS_W2K8_64
OS_WINDOWS_2008_R2 = OS_W2K8R2
OS_WINDOWS_2008_R2_64 = OS_W2K8R2_64
OS_WINDOWS_VISTA = OS_VISTA
OS_WINDOWS_VISTA_64 = OS_VISTA_64
OS_WINDOWS_SEVEN = OS_W7
OS_WINDOWS_SEVEN_64 = OS_W7_64
def _get_bits():
    """
    Determines the current integer size in bits.
    This is useful to know if we're running in a 32 bits or a 64 bits machine.
    @rtype:  int
    @return: Returns the size of L{SIZE_T} in bits.
    """
    bits_per_byte = 8
    return sizeof(SIZE_T) * bits_per_byte
def _get_arch():
    """
    Determines the current processor architecture.
    @rtype:  str
    @return: One of the C{ARCH_*} string constants — typically L{ARCH_I386}
        (C{"i386"}) or L{ARCH_AMD64} (C{"amd64"}) — or L{ARCH_UNKNOWN}
        (C{"unknown"}) when the architecture reported by the system is not
        in C{_arch_map}. Other values (MIPS, Alpha, PPC, SHx, ARM, IA64,
        Alpha64, MSIL, Sparc) may show up under Windows CE, Itanium,
        IronPython or Wine.
    """
    # Fall back to GetSystemInfo where GetNativeSystemInfo is unavailable.
    try:
        sysinfo = GetNativeSystemInfo()
    except Exception:
        sysinfo = GetSystemInfo()
    return _arch_map.get(sysinfo.id.w.wProcessorArchitecture, ARCH_UNKNOWN)
def _get_wow64():
    """
    Determines if the current process is running in Windows-On-Windows 64 bits.
    @rtype:  bool
    @return: C{True} if this is a 32 bit process running on a 64 bit version
        of Windows, C{False} otherwise (including when the check itself
        fails, in which case C{False} is assumed).
    """
    # A 64 bit process can never be WOW64; otherwise ask the system.
    if bits == 64:
        return False
    try:
        return IsWow64Process(GetCurrentProcess())
    except Exception:
        return False
def _get_os(osvi = None):
    """
    Determines the current operating system.
    This function allows you to quickly tell apart major OS differences.
    For more detailed information call L{GetVersionEx} instead.
    @note:
        Wine reports itself as Windows XP 32 bits
        (even if the Linux host is 64 bits).
        ReactOS may report itself as Windows 2000 or Windows XP,
        depending on the version of ReactOS.
    @type  osvi: L{OSVERSIONINFOEXA}
    @param osvi: Optional. The return value from L{GetVersionEx}.
    @rtype:  str
    @return: One of the C{OS_*} constants defined above (e.g. L{OS_XP},
        L{OS_W7_64}), or L{OS_UNKNOWN} (C{"Unknown"}) if the version
        could not be recognized.

    Consistency fix: return the C{OS_*} constants instead of repeating the
    string literals — the values are identical, but a constant typo can no
    longer silently diverge from the published C{OS_*} names.
    """
    # rough port of http://msdn.microsoft.com/en-us/library/ms724429%28VS.85%29.aspx
    if not osvi:
        osvi = GetVersionEx()
    if osvi.dwPlatformId == VER_PLATFORM_WIN32_NT and osvi.dwMajorVersion > 4:
        if osvi.dwMajorVersion == 6:
            if osvi.dwMinorVersion == 0:
                if osvi.wProductType == VER_NT_WORKSTATION:
                    if bits == 64 or wow64:
                        return OS_VISTA_64
                    return OS_VISTA
                else:
                    if bits == 64 or wow64:
                        return OS_W2K8_64
                    return OS_W2K8
            if osvi.dwMinorVersion == 1:
                if osvi.wProductType == VER_NT_WORKSTATION:
                    if bits == 64 or wow64:
                        return OS_W7_64
                    return OS_W7
                else:
                    if bits == 64 or wow64:
                        return OS_W2K8R2_64
                    return OS_W2K8R2
        if osvi.dwMajorVersion == 5:
            if osvi.dwMinorVersion == 2:
                if GetSystemMetrics(SM_SERVERR2):
                    if bits == 64 or wow64:
                        return OS_W2K3R2_64
                    return OS_W2K3R2
                if osvi.wSuiteMask in (VER_SUITE_STORAGE_SERVER, VER_SUITE_WH_SERVER):
                    if bits == 64 or wow64:
                        return OS_W2K3_64
                    return OS_W2K3
                if osvi.wProductType == VER_NT_WORKSTATION and arch == ARCH_AMD64:
                    return OS_XP_64
                else:
                    if bits == 64 or wow64:
                        return OS_W2K3_64
                    return OS_W2K3
            if osvi.dwMinorVersion == 1:
                return OS_XP
            if osvi.dwMinorVersion == 0:
                return OS_W2K
        if osvi.dwMajorVersion == 4:
            return OS_NT
    return OS_UNKNOWN
def _get_ntddi(osvi):
"""
Determines the current operating system.
This function allows you to quickly tell apart major OS differences.
For more detailed information call L{kernel32.GetVersionEx} instead.
@note:
Wine reports itself as Windows XP 32 bits
(even if the Linux host is 64 bits).
ReactOS may report itself as Windows 2000 or Windows XP,
depending on the version of ReactOS.
@type osvi: L{OSVERSIONINFOEXA}
@param osvi: Optional. The return value from L{kernel32.GetVersionEx}.
@rtype: int
@return: NTDDI version number.
"""
if not osvi:
osvi = GetVersionEx()
ntddi = 0
ntddi += (osvi.dwMajorVersion & 0xFF) << 24
ntddi += (osvi.dwMinorVersion & 0xFF) << 16
ntddi += (osvi.wServicePackMajor & 0xFF) << 8
ntddi += (osvi.wServicePackMinor & 0xFF)
return ntddi
# The order of the following definitions DOES matter!
# (_get_wow64 reads the global C{bits}; _get_os reads C{bits}, C{wow64}
# and C{arch}; both _get_os and _get_ntddi parse the same cached _osvi.)
# Current integer size in bits. See L{_get_bits} for more details.
bits = _get_bits()
# Current processor architecture. See L{_get_arch} for more details.
arch = _get_arch()
# Set to C{True} if the current process is running in WOW64. See L{_get_wow64} for more details.
wow64 = _get_wow64()
# Query the version information once and reuse it below.
_osvi = GetVersionEx()
# Current operating system. See L{_get_os} for more details.
os = _get_os(_osvi)
# Current operating system as an NTDDI constant. See L{_get_ntddi} for more details.
NTDDI_VERSION = _get_ntddi(_osvi)
# Upper word of L{NTDDI_VERSION}, contains the OS major and minor version number.
WINVER = NTDDI_VERSION >> 16
#--- version.dll --------------------------------------------------------------
# VS_FIXEDFILEINFO.dwFileFlags bit masks.
VS_FF_DEBUG = 0x00000001
VS_FF_PRERELEASE = 0x00000002
VS_FF_PATCHED = 0x00000004
VS_FF_PRIVATEBUILD = 0x00000008
VS_FF_INFOINFERRED = 0x00000010
VS_FF_SPECIALBUILD = 0x00000020
# VS_FIXEDFILEINFO.dwFileOS values.
VOS_UNKNOWN = 0x00000000
VOS__WINDOWS16 = 0x00000001
VOS__PM16 = 0x00000002
VOS__PM32 = 0x00000003
VOS__WINDOWS32 = 0x00000004
VOS_DOS = 0x00010000
VOS_OS216 = 0x00020000
VOS_OS232 = 0x00030000
VOS_NT = 0x00040000
VOS_DOS_WINDOWS16 = 0x00010001
VOS_DOS_WINDOWS32 = 0x00010004
VOS_NT_WINDOWS32 = 0x00040004
VOS_OS216_PM16 = 0x00020002
VOS_OS232_PM32 = 0x00030003
# VS_FIXEDFILEINFO.dwFileType values.
VFT_UNKNOWN = 0x00000000
VFT_APP = 0x00000001
VFT_DLL = 0x00000002
VFT_DRV = 0x00000003
VFT_FONT = 0x00000004
VFT_VXD = 0x00000005
VFT_RESERVED = 0x00000006 # undocumented
VFT_STATIC_LIB = 0x00000007
# VS_FIXEDFILEINFO.dwFileSubtype values (for VFT_DRV and VFT_FONT).
VFT2_UNKNOWN = 0x00000000
VFT2_DRV_PRINTER = 0x00000001
VFT2_DRV_KEYBOARD = 0x00000002
VFT2_DRV_LANGUAGE = 0x00000003
VFT2_DRV_DISPLAY = 0x00000004
VFT2_DRV_MOUSE = 0x00000005
VFT2_DRV_NETWORK = 0x00000006
VFT2_DRV_SYSTEM = 0x00000007
VFT2_DRV_INSTALLABLE = 0x00000008
VFT2_DRV_SOUND = 0x00000009
VFT2_DRV_COMM = 0x0000000A
VFT2_DRV_RESERVED = 0x0000000B # undocumented
VFT2_DRV_VERSIONED_PRINTER = 0x0000000C
VFT2_FONT_RASTER = 0x00000001
VFT2_FONT_VECTOR = 0x00000002
VFT2_FONT_TRUETYPE = 0x00000003
# typedef struct tagVS_FIXEDFILEINFO {
# DWORD dwSignature;
# DWORD dwStrucVersion;
# DWORD dwFileVersionMS;
# DWORD dwFileVersionLS;
# DWORD dwProductVersionMS;
# DWORD dwProductVersionLS;
# DWORD dwFileFlagsMask;
# DWORD dwFileFlags;
# DWORD dwFileOS;
# DWORD dwFileType;
# DWORD dwFileSubtype;
# DWORD dwFileDateMS;
# DWORD dwFileDateLS;
# } VS_FIXEDFILEINFO;
class VS_FIXEDFILEINFO(Structure):
    """ctypes mirror of the VS_FIXEDFILEINFO structure (the fixed header of a
    file's version resource, as queried through L{VerQueryValue})."""
    _fields_ = [
        ("dwSignature", DWORD),
        ("dwStrucVersion", DWORD),
        ("dwFileVersionMS", DWORD),
        ("dwFileVersionLS", DWORD),
        ("dwProductVersionMS", DWORD),
        ("dwProductVersionLS", DWORD),
        ("dwFileFlagsMask", DWORD),
        ("dwFileFlags", DWORD),
        ("dwFileOS", DWORD),
        ("dwFileType", DWORD),
        ("dwFileSubtype", DWORD),
        ("dwFileDateMS", DWORD),
        ("dwFileDateLS", DWORD),
    ]
# Pointer aliases, following the Win32 naming convention.
PVS_FIXEDFILEINFO = POINTER(VS_FIXEDFILEINFO)
LPVS_FIXEDFILEINFO = PVS_FIXEDFILEINFO
# BOOL WINAPI GetFileVersionInfo(
# _In_ LPCTSTR lptstrFilename,
# _Reserved_ DWORD dwHandle,
# _In_ DWORD dwLen,
# _Out_ LPVOID lpData
# );
# DWORD WINAPI GetFileVersionInfoSize(
# _In_ LPCTSTR lptstrFilename,
# _Out_opt_ LPDWORD lpdwHandle
# );
def GetFileVersionInfoA(lptstrFilename):
    """ANSI GetFileVersionInfo: return a raw buffer holding the version
    information resource of the given file (query it with VerQueryValue)."""
    # Size query comes first; the two prototypes are otherwise independent.
    _GetFileVersionInfoSizeA = windll.version.GetFileVersionInfoSizeA
    _GetFileVersionInfoSizeA.argtypes = [LPSTR, LPVOID]
    _GetFileVersionInfoSizeA.restype = DWORD
    _GetFileVersionInfoSizeA.errcheck = RaiseIfZero
    _GetFileVersionInfoA = windll.version.GetFileVersionInfoA
    _GetFileVersionInfoA.argtypes = [LPSTR, DWORD, DWORD, LPVOID]
    _GetFileVersionInfoA.restype = bool
    _GetFileVersionInfoA.errcheck = RaiseIfZero
    dwLen = _GetFileVersionInfoSizeA(lptstrFilename, None)
    lpData = ctypes.create_string_buffer(dwLen)
    _GetFileVersionInfoA(lptstrFilename, 0, dwLen, byref(lpData))
    return lpData
def GetFileVersionInfoW(lptstrFilename):
    """Unicode GetFileVersionInfo: return a raw buffer holding the version
    information resource of the given file (query it with VerQueryValue)."""
    _GetFileVersionInfoSizeW = windll.version.GetFileVersionInfoSizeW
    _GetFileVersionInfoSizeW.argtypes = [LPWSTR, LPVOID]
    _GetFileVersionInfoSizeW.restype = DWORD
    _GetFileVersionInfoSizeW.errcheck = RaiseIfZero
    _GetFileVersionInfoW = windll.version.GetFileVersionInfoW
    _GetFileVersionInfoW.argtypes = [LPWSTR, DWORD, DWORD, LPVOID]
    _GetFileVersionInfoW.restype = bool
    _GetFileVersionInfoW.errcheck = RaiseIfZero
    dwLen = _GetFileVersionInfoSizeW(lptstrFilename, None)
    # Opaque binary buffer, not a text string.
    lpData = ctypes.create_string_buffer(dwLen)
    _GetFileVersionInfoW(lptstrFilename, 0, dwLen, byref(lpData))
    return lpData
GetFileVersionInfo = GuessStringType(GetFileVersionInfoA, GetFileVersionInfoW)
# BOOL WINAPI VerQueryValue(
# _In_ LPCVOID pBlock,
# _In_ LPCTSTR lpSubBlock,
# _Out_ LPVOID *lplpBuffer,
# _Out_ PUINT puLen
# );
def VerQueryValueA(pBlock, lpSubBlock):
    """ANSI VerQueryValue: return a (pointer, length) pair for the requested
    sub-block of a version-information buffer."""
    fn = windll.version.VerQueryValueA
    fn.argtypes = [LPVOID, LPSTR, LPVOID, POINTER(UINT)]
    fn.restype = bool
    fn.errcheck = RaiseIfZero
    buffer_ptr = LPVOID(0)
    buffer_len = UINT(0)
    fn(pBlock, lpSubBlock, byref(buffer_ptr), byref(buffer_len))
    return buffer_ptr, buffer_len.value
def VerQueryValueW(pBlock, lpSubBlock):
    """Unicode VerQueryValue: return a (pointer, length) pair for the
    requested sub-block of a version-information buffer."""
    fn = windll.version.VerQueryValueW
    fn.argtypes = [LPVOID, LPWSTR, LPVOID, POINTER(UINT)]
    fn.restype = bool
    fn.errcheck = RaiseIfZero
    buffer_ptr = LPVOID(0)
    buffer_len = UINT(0)
    fn(pBlock, lpSubBlock, byref(buffer_ptr), byref(buffer_len))
    return buffer_ptr, buffer_len.value
VerQueryValue = GuessStringType(VerQueryValueA, VerQueryValueW)
#==============================================================================
# This calculates the list of exported symbols.
# Export every public name defined since ``_all`` captured the initial
# namespace near the top of the module; underscore-prefixed names stay private.
_all = set(vars().keys()).difference(_all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
#==============================================================================
| |
#! /usr/bin/env python
#
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import yaml
import six
sys.path.append('tools/')
import mass_rename_projects
class TestMassRenameProjects(unittest.TestCase):
# Verify the files we're including in this change process
def test_filesToChange(self):
gotFilenames = mass_rename_projects.filesToChange.keys()
expectedFilenames = [
'gerrit/projects.yaml',
'gerritbot/channels.yaml',
'zuul/layout.yaml'
]
six.assertCountEqual(self, gotFilenames, expectedFilenames, "Check that we're modifying the expected files")
# TODO check projects yaml
def test_projects_yaml(self):
renamelist = [
'glance', # openstack project that doesn't need to be renamed
'fuel', # fake project but this text exists in places in the projects.yaml file
'fuel-tasklib', # stackforge project with groups and other nested attributes
'xstatic-jquery.tablesorter', # stackforge project with acl attribute
'anvil', # stackforge project, minimal attributes
'fake-project', # project name doesn't exist
'anvil-fake' # non-existant project with similar prefix
]
projectYaml = """
- project: stackforge/anvil
description: A set of python scripts and utilities to forge raw OpenStack into a productive tool!
- project: openstack/glance
docimpact-group: openstack-manuals
description: OpenStack Image Management (Glance)
options:
- translate
- project: stackforge/fuel-stats
groups:
- fuel
description: Fuel anonymous statistics collector
docimpact-group: fuel
- project: stackforge/fuel-tasklib
description: Fuel tasks library.
docimpact-group: fuel
groups:
- fuel
- project: stackforge/xstatic-jquery.tablesorter
description: Tablesorter jQuery plugin packaged as XStatic.
acl-config: /home/gerrit2/acls/stackforge/xstatic.config
- project: stackforge/yaql
description: Yet another query language
"""
data = yaml.load(projectYaml)
stacklist = mass_rename_projects.build_list("stackforge", renamelist)
result = mass_rename_projects.build_project_data(stacklist, data)
gotData = result.data
gotGitmoves = result.gitmoves
# check result
expectedData = [
{
'project': 'openstack/anvil',
'description': 'A set of python scripts and utilities to forge raw OpenStack into a productive tool!'
},
{
'project': 'openstack/fuel-tasklib',
'docimpact-group': 'fuel',
'description': 'Fuel tasks library.',
'groups': ['fuel']
},
{
'project': 'openstack/glance',
'docimpact-group': 'openstack-manuals',
'description': 'OpenStack Image Management (Glance)',
'options': ['translate']},
{
'project': 'stackforge/fuel-stats',
'docimpact-group': 'fuel',
'description': 'Fuel anonymous statistics collector',
'groups': ['fuel']
},
{
'project': 'openstack/xstatic-jquery.tablesorter',
'acl-config': '/home/gerrit2/acls/openstack/xstatic.config',
'description': 'Tablesorter jQuery plugin packaged as XStatic.'},
{
'project': 'stackforge/yaql',
'description': 'Yet another query language'
}
]
six.assertCountEqual(self, gotData, expectedData, "Check results of projects.yaml renames")
# check gitmoves, should only be stackforge projects
expectedGitmoves = {
'gerrit/acls/stackforge/anvil.config' : 'gerrit/acls/openstack/anvil.config',
'gerrit/acls/stackforge/xstatic.config' : 'gerrit/acls/openstack/xstatic.config',
'gerrit/acls/stackforge/fuel-tasklib.config': 'gerrit/acls/openstack/fuel-tasklib.config'
}
six.assertCountEqual(self, gotGitmoves, expectedGitmoves, "Check git command output for projects.yaml renames")
def test_channels_yaml(self):
channelsYaml = """
fuel-tracker:
events:
- patchset-created
- change-merged
- x-vrif-minus-2
projects:
- openstack/fuel-plugin-bigswitch
- openstack/fuel-plugin-block-device
- openstack/fuel-plugin-openbook
- openstack/fuel-plugin-purestorage-cinder
- openstack/fuel-plugin-scaleio
- openstack/fuel-plugin-wstunnel
- openstack/fuel-plugin-xenserver
- openstack/fuel-plugin-zabbix-agents
- stackforge/fuel-agent
- stackforge/fuel-astute
- stackforge/fuel-dev-tools
- stackforge/fuel-devops
- stackforge/fuel-docs
- stackforge/fuel-library
- stackforge/fuel-main
- stackforge/fuel-mirror
- stackforge/fuel-nailgun-agent
- stackforge/fuel-octane
- stackforge/fuel-ostf
- stackforge/fuel-plugin-availability-zones
- stackforge/fuel-plugin-calamari
- stackforge/fuel-plugin-calico
- stackforge/fuel-plugin-ceilometer-redis
- stackforge/fuel-plugin-cinder-netapp
- stackforge/fuel-plugin-cisco-aci
- stackforge/fuel-plugin-contrail
- stackforge/fuel-plugin-dbaas-trove
- stackforge/fuel-plugin-detach-database
- stackforge/fuel-plugin-detach-keystone
- stackforge/fuel-plugin-detach-rabbitmq
- stackforge/fuel-plugin-elasticsearch-kibana
- stackforge/fuel-plugin-external-emc
- stackforge/fuel-plugin-external-glusterfs
- stackforge/fuel-plugin-external-zabbix
- stackforge/fuel-plugin-glance-nfs
- stackforge/fuel-plugin-ha-fencing
- stackforge/fuel-plugin-influxdb-grafana
- stackforge/fuel-plugin-ironic
- stackforge/fuel-plugin-ldap
- stackforge/fuel-plugin-lma-collector
- stackforge/fuel-plugin-lma-infrastructure-alerting
- stackforge/fuel-plugin-mellanox
- stackforge/fuel-plugin-midonet
- stackforge/fuel-plugin-neutron-fwaas
- stackforge/fuel-plugin-neutron-lbaas
- stackforge/fuel-plugin-neutron-vpnaas
- stackforge/fuel-plugin-nova-nfs
- stackforge/fuel-plugin-nsxv
- stackforge/fuel-plugin-opendaylight
- stackforge/fuel-plugin-saltstack
- stackforge/fuel-plugin-solidfire-cinder
- stackforge/fuel-plugin-swiftstack
- stackforge/fuel-plugin-tintri-cinder
- stackforge/fuel-plugin-tls
- stackforge/fuel-plugin-vmware-dvs
- stackforge/fuel-plugin-vxlan
- stackforge/fuel-plugin-zabbix-monitoring-emc
- stackforge/fuel-plugin-zabbix-monitoring-extreme-networks
- stackforge/fuel-plugin-zabbix-snmptrapd
- stackforge/fuel-plugins
- stackforge/fuel-provision
- stackforge/fuel-qa
- stackforge/fuel-specs
- stackforge/fuel-stats
- stackforge/fuel-tasklib
- stackforge/fuel-upgrade
- stackforge/fuel-web
- stackforge/python-fuelclient
branches:
- master
openstack-anvil:
events:
- patchset-created
- change-merged
- x-vrif-minus-2
projects:
- stackforge/anvil
branches:
- master
openstack-glance:
events:
- patchset-created
- change-merged
- x-vrif-minus-2
projects:
- openstack/glance
- openstack/glance-specs
- openstack/glance_store
- openstack/python-glanceclient
branches:
- master
openstack-horizon:
events:
- patchset-created
- change-merged
- x-vrif-minus-2
projects:
- openstack/django-openstack-auth-kerberos
- openstack/django_openstack_auth
- openstack/horizon
- openstack/manila-ui
- openstack/tuskar-ui
- stackforge/xstatic-angular
- stackforge/xstatic-angular-animate
- stackforge/xstatic-angular-bootstrap
- stackforge/xstatic-angular-cookies
- stackforge/xstatic-angular-fileupload
- stackforge/xstatic-angular-lrdragndrop
- stackforge/xstatic-angular-mock
- stackforge/xstatic-angular-sanitize
- stackforge/xstatic-angular-smart-table
- stackforge/xstatic-bootstrap-datepicker
- stackforge/xstatic-bootstrap-scss
- stackforge/xstatic-d3
- stackforge/xstatic-font-awesome
- stackforge/xstatic-hogan
- stackforge/xstatic-jasmine
- stackforge/xstatic-jquery-migrate
- stackforge/xstatic-jquery.bootstrap.wizard
- stackforge/xstatic-jquery.quicksearch
- stackforge/xstatic-jquery.tablesorter
- stackforge/xstatic-jsencrypt
- stackforge/xstatic-magic-search
- stackforge/xstatic-qunit
- stackforge/xstatic-rickshaw
- stackforge/xstatic-spin
branches:
- master
"""
renamelist = [
'glance', # openstack project that doesn't need to be renamed
'fuel', # fake project but this text exists in places in the projects.yaml file
'fuel-tasklib', # stackforge project with groups and other nested attributes
'xstatic-jquery.tablesorter', # stackforge project with acl attribute
'anvil', # stackforge project, minimal attributes
'fake-project', # project name doesn't exist
'anvil-fake' # non-existant project with similar prefix
]
data = yaml.load(channelsYaml)
stacklist = mass_rename_projects.build_list("stackforge", renamelist)
gotData = mass_rename_projects.build_channel_data(stacklist, data)
# check result
expectedData = {
'fuel-tracker': {
'branches': [
'master'
],
'events': [
'patchset-created',
'change-merged',
'x-vrif-minus-2'
],
'projects': [
'openstack/fuel-plugin-bigswitch',
'openstack/fuel-plugin-block-device',
'openstack/fuel-plugin-openbook',
'openstack/fuel-plugin-purestorage-cinder',
'openstack/fuel-plugin-scaleio',
'openstack/fuel-plugin-wstunnel',
'openstack/fuel-plugin-xenserver',
'openstack/fuel-plugin-zabbix-agents',
'openstack/fuel-tasklib',
'stackforge/fuel-agent',
'stackforge/fuel-astute',
'stackforge/fuel-dev-tools',
'stackforge/fuel-devops',
'stackforge/fuel-docs',
'stackforge/fuel-library',
'stackforge/fuel-main',
'stackforge/fuel-mirror',
'stackforge/fuel-nailgun-agent',
'stackforge/fuel-octane',
'stackforge/fuel-ostf',
'stackforge/fuel-plugin-availability-zones',
'stackforge/fuel-plugin-calamari',
'stackforge/fuel-plugin-calico',
'stackforge/fuel-plugin-ceilometer-redis',
'stackforge/fuel-plugin-cinder-netapp',
'stackforge/fuel-plugin-cisco-aci',
'stackforge/fuel-plugin-contrail',
'stackforge/fuel-plugin-dbaas-trove',
'stackforge/fuel-plugin-detach-database',
'stackforge/fuel-plugin-detach-keystone',
'stackforge/fuel-plugin-detach-rabbitmq',
'stackforge/fuel-plugin-elasticsearch-kibana',
'stackforge/fuel-plugin-external-emc',
'stackforge/fuel-plugin-external-glusterfs',
'stackforge/fuel-plugin-external-zabbix',
'stackforge/fuel-plugin-glance-nfs',
'stackforge/fuel-plugin-ha-fencing',
'stackforge/fuel-plugin-influxdb-grafana',
'stackforge/fuel-plugin-ironic',
'stackforge/fuel-plugin-ldap',
'stackforge/fuel-plugin-lma-collector',
'stackforge/fuel-plugin-lma-infrastructure-alerting',
'stackforge/fuel-plugin-mellanox',
'stackforge/fuel-plugin-midonet',
'stackforge/fuel-plugin-neutron-fwaas',
'stackforge/fuel-plugin-neutron-lbaas',
'stackforge/fuel-plugin-neutron-vpnaas',
'stackforge/fuel-plugin-nova-nfs',
'stackforge/fuel-plugin-nsxv',
'stackforge/fuel-plugin-opendaylight',
'stackforge/fuel-plugin-saltstack',
'stackforge/fuel-plugin-solidfire-cinder',
'stackforge/fuel-plugin-swiftstack',
'stackforge/fuel-plugin-tintri-cinder',
'stackforge/fuel-plugin-tls',
'stackforge/fuel-plugin-vmware-dvs',
'stackforge/fuel-plugin-vxlan',
'stackforge/fuel-plugin-zabbix-monitoring-emc',
'stackforge/fuel-plugin-zabbix-monitoring-extreme-networks',
'stackforge/fuel-plugin-zabbix-snmptrapd',
'stackforge/fuel-plugins',
'stackforge/fuel-provision',
'stackforge/fuel-qa',
'stackforge/fuel-specs',
'stackforge/fuel-stats',
'stackforge/fuel-upgrade',
'stackforge/fuel-web',
'stackforge/python-fuelclient'
]
},
'openstack-glance': {
'branches': [
'master'
],
'events': [
'patchset-created',
'change-merged',
'x-vrif-minus-2'
],
'projects': [
'openstack/glance',
'openstack/glance-specs',
'openstack/glance_store',
'openstack/python-glanceclient'
]
},
'openstack-anvil': {
'branches': [
'master'
],
'events': [
'patchset-created',
'change-merged',
'x-vrif-minus-2'
],
'projects': [
'openstack/anvil'
]
},
'openstack-horizon': {
'branches': [
'master'
],
'events': [
'patchset-created',
'change-merged',
'x-vrif-minus-2'
],
'projects': [
'openstack/django-openstack-auth-kerberos',
'openstack/django_openstack_auth',
'openstack/horizon',
'openstack/manila-ui',
'openstack/tuskar-ui',
'openstack/xstatic-jquery.tablesorter',
'stackforge/xstatic-angular',
'stackforge/xstatic-angular-animate',
'stackforge/xstatic-angular-bootstrap',
'stackforge/xstatic-angular-cookies',
'stackforge/xstatic-angular-fileupload',
'stackforge/xstatic-angular-lrdragndrop',
'stackforge/xstatic-angular-mock',
'stackforge/xstatic-angular-sanitize',
'stackforge/xstatic-angular-smart-table',
'stackforge/xstatic-bootstrap-datepicker',
'stackforge/xstatic-bootstrap-scss',
'stackforge/xstatic-d3',
'stackforge/xstatic-font-awesome',
'stackforge/xstatic-hogan',
'stackforge/xstatic-jasmine',
'stackforge/xstatic-jquery-migrate',
'stackforge/xstatic-jquery.bootstrap.wizard',
'stackforge/xstatic-jquery.quicksearch',
'stackforge/xstatic-jsencrypt',
'stackforge/xstatic-magic-search',
'stackforge/xstatic-qunit',
'stackforge/xstatic-rickshaw',
'stackforge/xstatic-spin'
]
}
}
six.assertCountEqual(self, gotData, expectedData, "Check result for channels.yaml renames")
# TODO check zuul layout
    def test_zuul_layout(self):
        """Check that build_list maps every rename candidate to 'openstack/<name>'.

        The zuul layout rename only needs the list of new openstack/ project
        names; order is not significant (assertCountEqual).
        """
        renamelist = [
            'glance', # openstack project that doesn't need to be renamed
            'fuel', # fake project but this text exists in places in the projects.yaml file
            'fuel-tasklib', # stackforge project with groups and other nested attributes
            'xstatic-jquery.tablesorter', # stackforge project with acl attribute
            'anvil', # stackforge project, minimal attributes
            'fake-project', # project name doesn't exist
            'anvil-fake' # non-existant project with similar prefix
        ]
        # not currently needed because the actual script shells out to sed
        layoutYaml = """
"""
        openlist = mass_rename_projects.build_list('openstack', renamelist) # zuul layout just uses the openlist as its data
        expectedOpenlist = [
            'openstack/glance',
            'openstack/fuel',
            'openstack/fuel-tasklib',
            'openstack/xstatic-jquery.tablesorter',
            'openstack/anvil',
            'openstack/fake-project',
            'openstack/anvil-fake'
        ]
        six.assertCountEqual(self, openlist, expectedOpenlist, "Check zuul layout data")
if __name__ == '__main__':
    # Run this test module directly with verbose per-test output.
    unittest.main(verbosity=2)
| |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
# Optional XML support: rcc dependency scanning (rcc.scan below) needs a SAX
# parser; when the xml package is unavailable, fall back to a plain object so
# the XMLHandler class definition still works and has_xml gates the feature.
try:
	from xml.sax import make_parser
	from xml.sax.handler import ContentHandler
except ImportError:
	has_xml=False
	ContentHandler=object
else:
	has_xml=True
import os,sys
from waflib.Tools import cxx
from waflib import Task,Utils,Options,Errors,Context
from waflib.TaskGen import feature,after_method,extension
from waflib.Configure import conf
from waflib import Logs
# Header extensions searched when resolving the header behind a foo.moc include.
MOC_H=['.h','.hpp','.hxx','.hh']
# File extensions dispatched to the rcc / uic / moc-aware C++ task generators.
EXT_RCC=['.qrc']
EXT_UI=['.ui']
EXT_QT5=['.cpp','.cc','.cxx','.C']
# Qt5 libraries probed at configure time; each becomes a uselib variable
# (see set_qt5_libs_to_check / find_qt5_libraries).
QT5_LIBS='''
qtmain
Qt5Bluetooth
Qt5CLucene
Qt5Concurrent
Qt5Core
Qt5DBus
Qt5Declarative
Qt5DesignerComponents
Qt5Designer
Qt5Gui
Qt5Help
Qt5MultimediaQuick_p
Qt5Multimedia
Qt5MultimediaWidgets
Qt5Network
Qt5Nfc
Qt5OpenGL
Qt5Positioning
Qt5PrintSupport
Qt5Qml
Qt5QuickParticles
Qt5Quick
Qt5QuickTest
Qt5Script
Qt5ScriptTools
Qt5Sensors
Qt5SerialPort
Qt5Sql
Qt5Svg
Qt5Test
Qt5WebKit
Qt5WebKitWidgets
Qt5Widgets
Qt5WinExtras
Qt5X11Extras
Qt5XmlPatterns
Qt5Xml'''
class qxx(Task.classes['cxx']):
	"""C++ compilation task that creates the moc tasks its source depends on.

	The moc tasks can only be discovered after a first scan of the source
	(the ``#include "foo.moc"`` lines), so they are added lazily the first
	time runnable_status is evaluated.
	"""
	def __init__(self,*k,**kw):
		Task.Task.__init__(self,*k,**kw)
		# set once add_moc_tasks has run, so it runs at most once
		self.moc_done=0
	def runnable_status(self):
		"""Defer scheduling until predecessors ran, then inject the moc tasks."""
		if self.moc_done:
			return Task.Task.runnable_status(self)
		else:
			for t in self.run_after:
				if not t.hasrun:
					return Task.ASK_LATER
			self.add_moc_tasks()
			return Task.Task.runnable_status(self)
	def create_moc_task(self,h_node,m_node):
		"""Return the (possibly cached) moc task turning h_node into m_node.

		A build-wide cache keeps one moc task per header so several source
		files including the same .moc share a single task.
		"""
		try:
			moc_cache=self.generator.bld.moc_cache
		except AttributeError:
			moc_cache=self.generator.bld.moc_cache={}
		try:
			return moc_cache[h_node]
		except KeyError:
			tsk=moc_cache[h_node]=Task.classes['moc'](env=self.env,generator=self.generator)
			tsk.set_inputs(h_node)
			tsk.set_outputs(m_node)
			if self.generator:
				self.generator.tasks.append(tsk)
			# feed the new task straight to the build's task producer
			gen=self.generator.bld.producer
			gen.outstanding.insert(0,tsk)
			gen.total+=1
			return tsk
		else:
			# NOTE(review): unreachable — the try block always returns, so this
			# else clause never runs; looks like a stray copy of the cache_sig
			# cleanup in add_moc_tasks. Verify against upstream waf.
			delattr(self,'cache_sig')
	def moc_h_ext(self):
		"""Header extensions to try when resolving moc includes (option override or MOC_H)."""
		ext=[]
		try:
			ext=Options.options.qt_header_ext.split()
		except AttributeError:
			pass
		if not ext:
			ext=MOC_H
		return ext
	def add_moc_tasks(self):
		"""Scan the raw dependencies for ``*.moc`` includes and create moc tasks."""
		node=self.inputs[0]
		bld=self.generator.bld
		# computing the signature may have cached it prematurely; discard it
		try:
			self.signature()
		except KeyError:
			pass
		else:
			delattr(self,'cache_sig')
		include_nodes=[node.parent]+self.generator.includes_nodes
		moctasks=[]
		mocfiles=set([])
		for d in bld.raw_deps.get(self.uid(),[]):
			if not d.endswith('.moc'):
				continue
			# process each distinct .moc file once
			if d in mocfiles:
				continue
			mocfiles.add(d)
			h_node=None
			base2=d[:-4]
			# first, look for a header with one of the known extensions
			for x in include_nodes:
				for e in self.moc_h_ext():
					h_node=x.find_node(base2+e)
					if h_node:
						break
				if h_node:
					m_node=h_node.change_ext('.moc')
					break
			else:
				# no header found: the moc include may name a source file directly
				for k in EXT_QT5:
					if base2.endswith(k):
						for x in include_nodes:
							h_node=x.find_node(base2)
							if h_node:
								break
						if h_node:
							m_node=h_node.change_ext(k+'.moc')
						break
			if not h_node:
				raise Errors.WafError('No source found for %r which is a moc file'%d)
			task=self.create_moc_task(h_node,m_node)
			moctasks.append(task)
		# the compilation must wait for all generated moc outputs
		self.run_after.update(set(moctasks))
		self.moc_done=1
class trans_update(Task.Task):
	"""Update a .ts translation file from the sources with lupdate."""
	run_str='${QT_LUPDATE} ${SRC} -ts ${TGT}'
	color='BLUE'
# lupdate rewrites its .ts target in place; mark the outputs as updated
Task.update_outputs(trans_update)
class XMLHandler(ContentHandler):
	"""Collect the <file> entries found while SAX-parsing a Qt .qrc document."""
	def __init__(self):
		# character accumulator for the current element, and results list
		self.buf=[]
		self.files=[]
	def startElement(self,name,attrs):
		if name!='file':
			return
		# opening <file>: start a fresh character accumulator
		self.buf=[]
	def endElement(self,name):
		if name!='file':
			return
		# closing </file>: the buffered text is one resource file name
		self.files.append(str(''.join(self.buf)))
	def characters(self,cars):
		self.buf.append(cars)
@extension(*EXT_RCC)
def create_rcc_task(self,node):
	"""For a .qrc node, create an rcc task and compile its generated C++.

	Returns the cxx compile task, which is also recorded in compiled_tasks
	so the link step picks up the object file.
	"""
	rcnode=node.change_ext('_rc.cpp')
	self.create_task('rcc',node,rcnode)
	cpptask=self.create_task('cxx',rcnode,rcnode.change_ext('.o'))
	# compiled_tasks may not exist yet on this task generator
	try:
		self.compiled_tasks.append(cpptask)
	except AttributeError:
		self.compiled_tasks=[cpptask]
	return cpptask
@extension(*EXT_UI)
def create_uic_task(self,node):
	"""For a .ui node, create a uic task producing the ui_*.h header."""
	uictask=self.create_task('ui5',node)
	# output name follows the configured pattern, e.g. foo.ui -> ui_foo.h
	uictask.outputs=[self.path.find_or_declare(self.env['ui_PATTERN']%node.name[:-3])]
@extension('.ts')
def add_lang(self,node):
	"""Queue a .ts translation source; processed later by apply_qt5."""
	self.lang=self.to_list(getattr(self,'lang',[]))+[node]
@feature('qt5')
@after_method('apply_link')
def apply_qt5(self):
	"""Process the qt5 feature: translations (.ts -> .qm -> .qrc) and MOC_FLAGS.

	Runs after apply_link so the link task exists when the embedded
	translations resource object must be appended to it.
	"""
	if getattr(self,'lang',None):
		# compile each translation source to a .qm file
		qmtasks=[]
		for x in self.to_list(self.lang):
			if isinstance(x,str):
				x=self.path.find_resource(x+'.ts')
			qmtasks.append(self.create_task('ts2qm',x,x.change_ext('.qm')))
		if getattr(self,'update',None)and Options.options.trans_qt5:
			# --translate: refresh the .ts files from the C++ and .ui sources
			cxxnodes=[a.inputs[0]for a in self.compiled_tasks]+[a.inputs[0]for a in self.tasks if getattr(a,'inputs',None)and a.inputs[0].name.endswith('.ui')]
			for x in qmtasks:
				self.create_task('trans_update',cxxnodes,x.inputs)
		if getattr(self,'langname',None):
			# bundle the .qm files into a .qrc, compile it and link the result in
			qmnodes=[x.outputs[0]for x in qmtasks]
			rcnode=self.langname
			if isinstance(rcnode,str):
				rcnode=self.path.find_or_declare(rcnode+'.qrc')
			t=self.create_task('qm2rcc',qmnodes,rcnode)
			k=create_rcc_task(self,t.outputs[0])
			self.link_task.inputs.append(k.outputs[0])
	# forward the -D/-I (or /D /I, normalized to '-') flags to moc
	lst=[]
	for flag in self.to_list(self.env['CXXFLAGS']):
		if len(flag)<2:continue
		f=flag[0:2]
		if f in('-D','-I','/D','/I'):
			if(f[0]=='/'):
				lst.append('-'+flag[1:])
			else:
				lst.append(flag)
	self.env.append_value('MOC_FLAGS',lst)
@extension(*EXT_QT5)
def cxx_hook(self,node):
	"""Compile C++ sources with the moc-aware qxx task instead of plain cxx."""
	return self.create_compiled_task('qxx',node)
class rcc(Task.Task):
	"""Compile a Qt .qrc resource file into C++ with QT_RCC."""
	color='BLUE'
	run_str='${QT_RCC} -name ${tsk.rcname()} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}'
	ext_out=['.h']
	def rcname(self):
		# resource name passed to "rcc -name": input file name without extension
		return os.path.splitext(self.inputs[0].name)[0]
	def scan(self):
		"""Parse the .qrc with SAX; return its <file> entries as (nodes, names)."""
		if not has_xml:
			Logs.error('no xml support was found, the rcc dependencies will be incomplete!')
			return([],[])
		parser=make_parser()
		curHandler=XMLHandler()
		parser.setContentHandler(curHandler)
		# context manager replaces the manual try/finally close
		with open(self.inputs[0].abspath(),'r') as fi:
			parser.parse(fi)
		nodes=[]
		names=[]
		root=self.inputs[0].parent
		for x in curHandler.files:
			# entries resolvable in the source tree become dependency nodes,
			# the rest are kept as raw names
			nd=root.find_resource(x)
			if nd:nodes.append(nd)
			else:names.append(x)
		return(nodes,names)
class moc(Task.Task):
	"""Run Qt's meta-object compiler on a header/source to produce a .moc file."""
	color='BLUE'
	run_str='${QT_MOC} ${MOC_FLAGS} ${MOCCPPPATH_ST:INCPATHS} ${MOCDEFINES_ST:DEFINES} ${SRC} ${MOC_ST} ${TGT}'
class ui5(Task.Task):
	"""Run uic on a .ui designer file to generate the ui_*.h header."""
	color='BLUE'
	run_str='${QT_UIC} ${SRC} -o ${TGT}'
	ext_out=['.h']
class ts2qm(Task.Task):
	"""Compile a .ts translation source into a binary .qm file with lrelease."""
	color='BLUE'
	run_str='${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}'
class qm2rcc(Task.Task):
	"""Generate a .qrc resource file listing the compiled .qm translations."""
	color='BLUE'
	# the .qm inputs must exist before the resource list can be written
	after='ts2qm'
	def run(self):
		# one <file> entry per .qm, with paths relative to the output directory
		txt='\n'.join(['<file>%s</file>'%k.path_from(self.outputs[0].parent)for k in self.inputs])
		code='<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>'%txt
		self.outputs[0].write(code)
def configure(self):
	"""Detect Qt5: locate the tools, then the libraries, and derive flags."""
	self.find_qt5_binaries()
	self.set_qt5_libs_to_check()
	self.set_qt5_defines()
	self.find_qt5_libraries()
	self.add_qt5_rpath()
	self.simplify_qt5_libs()
@conf
def find_qt5_binaries(self):
	"""Locate qmake and the Qt5 tools (uic, moc, rcc, lrelease, lupdate).

	Search order: --qtdir/--qtbin options, then QT5_ROOT/QT5_BIN environment
	variables, then PATH plus a few conventional install locations. Fails the
	configuration when no suitable qmake or uic can be found.

	Fix: the uic version error message claimed "qt3 or qt5" although the check
	detects a Qt3/Qt4 uic; it now reads "qt3 or qt4" (as in upstream waf).
	"""
	env=self.env
	opt=Options.options
	qtdir=getattr(opt,'qtdir','')
	qtbin=getattr(opt,'qtbin','')
	paths=[]
	if qtdir:
		qtbin=os.path.join(qtdir,'bin')
	if not qtdir:
		# fall back to the environment when no option was given
		qtdir=os.environ.get('QT5_ROOT','')
		qtbin=os.environ.get('QT5_BIN',None)or os.path.join(qtdir,'bin')
	if qtbin:
		paths=[qtbin]
	if not qtdir:
		# last resort: PATH and well-known install directories
		paths=os.environ.get('PATH','').split(os.pathsep)
		paths.append('/usr/share/qt5/bin/')
		try:
			lst=Utils.listdir('/usr/local/Trolltech/')
		except OSError:
			pass
		else:
			if lst:
				# pick the highest versioned Trolltech install
				lst.sort()
				lst.reverse()
				qtdir='/usr/local/Trolltech/%s/'%lst[0]
				qtbin=os.path.join(qtdir,'bin')
				paths.append(qtbin)
	# probe the qmake candidates and keep the one reporting the newest Qt >= 5
	cand=None
	prev_ver=['5','0','0']
	for qmk in('qmake-qt5','qmake5','qmake'):
		try:
			qmake=self.find_program(qmk,path_list=paths)
		except self.errors.ConfigurationError:
			pass
		else:
			try:
				version=self.cmd_and_log(qmake+['-query','QT_VERSION']).strip()
			except self.errors.WafError:
				pass
			else:
				if version:
					# NOTE: lexicographic comparison of version components
					new_ver=version.split('.')
					if new_ver>prev_ver:
						cand=qmake
						prev_ver=new_ver
	if not cand:
		# qtchooser can forward to the qt5 qmake when none was found directly
		try:
			self.find_program('qtchooser')
		except self.errors.ConfigurationError:
			pass
		else:
			cmd=self.env.QTCHOOSER+['-qt=5','-run-tool=qmake']
			try:
				version=self.cmd_and_log(cmd+['-query','QT_VERSION'])
			except self.errors.WafError:
				pass
			else:
				cand=cmd
	if cand:
		self.env.QMAKE=cand
	else:
		self.fatal('Could not find qmake for qt5')
	# ask qmake where the tools live and search there first
	self.env.QT_INSTALL_BINS=qtbin=self.cmd_and_log(self.env.QMAKE+['-query','QT_INSTALL_BINS']).strip()+os.sep
	paths.insert(0,qtbin)
	def find_bin(lst,var):
		# store the first program of lst found on paths into env[var]
		if var in env:
			return
		for f in lst:
			try:
				ret=self.find_program(f,path_list=paths)
			except self.errors.ConfigurationError:
				pass
			else:
				env[var]=ret
				break
	find_bin(['uic-qt5','uic'],'QT_UIC')
	if not env.QT_UIC:
		self.fatal('cannot find the uic compiler for qt5')
	self.start_msg('Checking for uic version')
	uicver=self.cmd_and_log(env.QT_UIC+['-version'],output=Context.BOTH)
	uicver=''.join(uicver).strip()
	uicver=uicver.replace('Qt User Interface Compiler ','').replace('User Interface Compiler for Qt','')
	self.end_msg(uicver)
	if uicver.find(' 3.')!=-1 or uicver.find(' 4.')!=-1:
		self.fatal('this uic compiler is for qt3 or qt4, add uic for qt5 to your path')
	find_bin(['moc-qt5','moc'],'QT_MOC')
	find_bin(['rcc-qt5','rcc'],'QT_RCC')
	find_bin(['lrelease-qt5','lrelease'],'QT_LRELEASE')
	find_bin(['lupdate-qt5','lupdate'],'QT_LUPDATE')
	env['UIC_ST']='%s -o %s'
	env['MOC_ST']='-o'
	env['ui_PATTERN']='ui_%s.h'
	env['QT_LRELEASE_FLAGS']=['-silent']
	env.MOCCPPPATH_ST='-I%s'
	env.MOCDEFINES_ST='-D%s'
@conf
def find_qt5_libraries(self):
	"""Locate the Qt5 libraries and fill the per-module uselib variables.

	Prefers pkg-config when available (and not cross-compiling); otherwise
	probes the library directory directly, with per-platform handling for
	darwin frameworks, unix shared/static libs and win32 import libraries.
	"""
	qtlibs=getattr(Options.options,'qtlibs',None)or os.environ.get("QT5_LIBDIR",None)
	if not qtlibs:
		# ask qmake; fall back to <prefix>/lib when the query fails
		try:
			qtlibs=self.cmd_and_log(self.env.QMAKE+['-query','QT_INSTALL_LIBS']).strip()
		except Errors.WafError:
			qtdir=self.cmd_and_log(self.env.QMAKE+['-query','QT_INSTALL_PREFIX']).strip()+os.sep
			qtlibs=os.path.join(qtdir,'lib')
	self.msg('Found the Qt5 libraries in',qtlibs)
	qtincludes=os.environ.get("QT5_INCLUDES",None)or self.cmd_and_log(self.env.QMAKE+['-query','QT_INSTALL_HEADERS']).strip()
	env=self.env
	if not'PKG_CONFIG_PATH'in os.environ:
		os.environ['PKG_CONFIG_PATH']='%s:%s/pkgconfig:/usr/lib/qt5/lib/pkgconfig:/opt/qt5/lib/pkgconfig:/usr/lib/qt5/lib:/opt/qt5/lib'%(qtlibs,qtlibs)
	try:
		# force the manual fallback when cross-compiling
		if os.environ.get("QT5_XCOMPILE",None):
			raise self.errors.ConfigurationError()
		self.check_cfg(atleast_pkgconfig_version='0.1')
	except self.errors.ConfigurationError:
		# no usable pkg-config: probe the filesystem per platform
		for i in self.qt5_vars:
			uselib=i.upper()
			if Utils.unversioned_sys_platform()=="darwin":
				# frameworks on macOS
				frameworkName=i+".framework"
				qtDynamicLib=os.path.join(qtlibs,frameworkName,i)
				if os.path.exists(qtDynamicLib):
					env.append_unique('FRAMEWORK_'+uselib,i)
					self.msg('Checking for %s'%i,qtDynamicLib,'GREEN')
				else:
					self.msg('Checking for %s'%i,False,'YELLOW')
				env.append_unique('INCLUDES_'+uselib,os.path.join(qtlibs,frameworkName,'Headers'))
			elif env.DEST_OS!="win32":
				# unix: prefer the shared object, fall back to the static archive
				qtDynamicLib=os.path.join(qtlibs,"lib"+i+".so")
				qtStaticLib=os.path.join(qtlibs,"lib"+i+".a")
				if os.path.exists(qtDynamicLib):
					env.append_unique('LIB_'+uselib,i)
					self.msg('Checking for %s'%i,qtDynamicLib,'GREEN')
				elif os.path.exists(qtStaticLib):
					env.append_unique('LIB_'+uselib,i)
					self.msg('Checking for %s'%i,qtStaticLib,'GREEN')
				else:
					self.msg('Checking for %s'%i,False,'YELLOW')
				env.append_unique('LIBPATH_'+uselib,qtlibs)
				env.append_unique('INCLUDES_'+uselib,qtincludes)
				env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i))
			else:
				# win32: try the possible import-library name patterns
				for k in("lib%s.a","lib%s5.a","%s.lib","%s5.lib"):
					lib=os.path.join(qtlibs,k%i)
					if os.path.exists(lib):
						env.append_unique('LIB_'+uselib,i+k[k.find("%s")+2:k.find('.')])
						self.msg('Checking for %s'%i,lib,'GREEN')
						break
				else:
					self.msg('Checking for %s'%i,False,'YELLOW')
				env.append_unique('LIBPATH_'+uselib,qtlibs)
				env.append_unique('INCLUDES_'+uselib,qtincludes)
				env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i.replace('Qt5','Qt')))
				# repeat for the debug variants (lib names with a 'd' suffix)
				uselib=i.upper()+"_debug"
				for k in("lib%sd.a","lib%sd5.a","%sd.lib","%sd5.lib"):
					lib=os.path.join(qtlibs,k%i)
					if os.path.exists(lib):
						env.append_unique('LIB_'+uselib,i+k[k.find("%s")+2:k.find('.')])
						self.msg('Checking for %s'%i,lib,'GREEN')
						break
				else:
					self.msg('Checking for %s'%i,False,'YELLOW')
				env.append_unique('LIBPATH_'+uselib,qtlibs)
				env.append_unique('INCLUDES_'+uselib,qtincludes)
				env.append_unique('INCLUDES_'+uselib,os.path.join(qtincludes,i.replace('Qt5','Qt')))
	else:
		# pkg-config is available: let it fill the uselib variables
		for i in self.qt5_vars_debug+self.qt5_vars:
			self.check_cfg(package=i,args='--cflags --libs',mandatory=False)
@conf
def simplify_qt5_libs(self):
	"""Strip library paths already provided by QtCore from the other Qt uselibs."""
	env=self.env
	def _filter(names,corekey):
		# drop every LIBPATH entry that QtCore already contributes
		for name in names:
			key=name.upper()
			if key=='QTCORE':
				continue
			paths=env['LIBPATH_'+key]
			if paths:
				core=env[corekey]
				env['LIBPATH_'+key]=[p for p in paths if p not in core]
	_filter(self.qt5_vars,'LIBPATH_QTCORE')
	_filter(self.qt5_vars_debug,'LIBPATH_QTCORE_DEBUG')
@conf
def add_qt5_rpath(self):
	"""Set RPATH_* flags for the Qt libraries when --want-rpath was given."""
	env=self.env
	if not getattr(Options.options,'want_rpath',False):
		return
	def _rpath(names,corekey):
		for name in names:
			key=name.upper()
			paths=env['LIBPATH_'+key]
			if not paths:
				continue
			core=env[corekey]
			flags=[]
			for p in paths:
				# QtCore keeps all of its paths; the others skip what it covers
				if key!='QTCORE' and p in core:
					continue
				flags.append('-Wl,--rpath='+p)
			env['RPATH_'+key]=flags
	_rpath(self.qt5_vars,'LIBPATH_QTCORE')
	_rpath(self.qt5_vars_debug,'LIBPATH_QTCORE_DEBUG')
@conf
def set_qt5_libs_to_check(self):
	"""Initialize qt5_vars / qt5_vars_debug (default: QT5_LIBS) as plain lists."""
	if not hasattr(self,'qt5_vars'):
		self.qt5_vars=QT5_LIBS
	self.qt5_vars=Utils.to_list(self.qt5_vars)
	if not hasattr(self,'qt5_vars_debug'):
		# derive the debug uselib names from the release ones
		self.qt5_vars_debug=[name+'_debug' for name in self.qt5_vars]
	self.qt5_vars_debug=Utils.to_list(self.qt5_vars_debug)
@conf
def set_qt5_defines(self):
	"""On win32, add the QT_*_LIB defines for each Qt module (release and debug)."""
	if sys.platform!='win32':
		return
	for mod in self.qt5_vars:
		# e.g. Qt5Widgets -> WIDGETS, producing the define QT_WIDGETS_LIB
		stem=mod.replace('Qt5','Qt')[2:].upper()
		define='QT_%s_LIB'%stem
		self.env.append_unique('DEFINES_%s'%mod.upper(),define)
		self.env.append_unique('DEFINES_%s_DEBUG'%mod.upper(),define)
def options(opt):
	"""Register the qt5 command-line options (rpath, moc header ext, qt paths, translations)."""
	opt.add_option('--want-rpath',action='store_true',default=False,dest='want_rpath',help='enable the rpath for qt libraries')
	opt.add_option('--header-ext',type='string',default='',help='header extension for moc files',dest='qt_header_ext')
	for i in'qtdir qtbin qtlibs'.split():
		opt.add_option('--'+i,type='string',default='',dest=i)
	opt.add_option('--translate',action="store_true",help="collect translation strings",dest="trans_qt5",default=False)
| |
"""Computational algebraic field theory. """
from __future__ import print_function, division
from sympy import (
S, Rational, AlgebraicNumber,
Add, Mul, sympify, Dummy, expand_mul, I, pi
)
from sympy.core.compatibility import reduce, range
from sympy.core.exprtools import Factors
from sympy.core.function import _mexpand
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.ntheory import sieve
from sympy.ntheory.factor_ import divisors
from sympy.polys.domains import ZZ, QQ
from sympy.polys.orthopolys import dup_chebyshevt
from sympy.polys.polyerrors import (
IsomorphismFailed,
CoercionFailed,
NotAlgebraic,
GeneratorsError,
)
from sympy.polys.polytools import (
Poly, PurePoly, invert, factor_list, groebner, resultant,
degree, poly_from_expr, parallel_poly_from_expr, lcm
)
from sympy.polys.polyutils import dict_from_expr, expr_from_dict
from sympy.polys.ring_series import rs_compose_add
from sympy.polys.rings import ring
from sympy.polys.rootoftools import CRootOf
from sympy.polys.specialpolys import cyclotomic_poly
from sympy.printing.lambdarepr import LambdaPrinter
from sympy.printing.pycode import PythonCodePrinter, MpmathPrinter
from sympy.simplify.radsimp import _split_gcd
from sympy.simplify.simplify import _is_sum_surds
from sympy.utilities import (
numbered_symbols, variations, lambdify, public, sift
)
from mpmath import pslq, mp
def _choose_factor(factors, x, v, dom=QQ, prec=200, bound=5):
    """
    Return a factor having root ``v``
    It is assumed that one of the factors has root ``v``.
    """
    # accept the output of factor_list: a list of (factor, multiplicity) pairs
    if isinstance(factors[0], tuple):
        factors = [f[0] for f in factors]
    if len(factors) == 1:
        return factors[0]
    points = {x:v}
    symbols = dom.symbols if hasattr(dom, 'symbols') else []
    t = QQ(1, 10)
    # try every grid point in [0, bound)^len(symbols) for the extra symbols
    for n in range(bound**len(symbols)):
        prec1 = 10
        n_temp = n
        for s in symbols:
            # decode n in base `bound` into one coordinate per symbol
            points[s] = n_temp % bound
            n_temp = n_temp // bound
        while True:
            # keep the factors that are numerically ~0 at the candidate point,
            # doubling the precision until a single factor survives
            candidates = []
            eps = t**(prec1 // 2)
            for f in factors:
                if abs(f.as_expr().evalf(prec1, points)) < eps:
                    candidates.append(f)
            if candidates:
                factors = candidates
            if len(factors) == 1:
                return factors[0]
            if prec1 > prec:
                break
            prec1 *= 2
    raise NotImplementedError("multiple candidates for the minimal polynomial of %s" % v)
def _separate_sq(p):
    """
    helper function for ``_minimal_polynomial_sq``
    It selects a rational ``g`` such that the polynomial ``p``
    consists of a sum of terms whose surds squared have gcd equal to ``g``
    and a sum of terms with surds squared prime with ``g``;
    then it takes the field norm to eliminate ``sqrt(g)``
    See simplify.simplify.split_surds and polytools.sqf_norm.
    Examples
    ========
    >>> from sympy import sqrt
    >>> from sympy.abc import x
    >>> from sympy.polys.numberfields import _separate_sq
    >>> p= -x + sqrt(2) + sqrt(3) + sqrt(7)
    >>> p = _separate_sq(p); p
    -x**2 + 2*sqrt(3)*x + 2*sqrt(7)*x - 2*sqrt(21) - 8
    >>> p = _separate_sq(p); p
    -x**4 + 4*sqrt(7)*x**3 - 32*x**2 + 8*sqrt(7)*x + 20
    >>> p = _separate_sq(p); p
    -x**8 + 48*x**6 - 536*x**4 + 1728*x**2 - 400
    """
    from sympy.utilities.iterables import sift
    def is_sqrt(expr):
        # a bare square root: Pow with exponent exactly 1/2
        return expr.is_Pow and expr.exp is S.Half
    # p = c1*sqrt(q1) + ... + cn*sqrt(qn) -> a = [(c1, q1), .., (cn, qn)]
    a = []
    for y in p.args:
        if not y.is_Mul:
            if is_sqrt(y):
                a.append((S.One, y**2))
            elif y.is_Atom:
                a.append((y, S.One))
            elif y.is_Pow and y.exp.is_integer:
                a.append((y, S.One))
            else:
                raise NotImplementedError
            continue
        # split a product into its sqrt factors (T) and the rest (F)
        T, F = sift(y.args, is_sqrt, binary=True)
        a.append((Mul(*F), Mul(*T)**2))
    a.sort(key=lambda z: z[1])
    if a[-1][1] is S.One:
        # there are no surds
        return p
    surds = [z for y, z in a]
    # skip the purely rational terms (surd part 1) before taking the gcd
    for i in range(len(surds)):
        if surds[i] != 1:
            break
    g, b1, b2 = _split_gcd(*surds[i:])
    # partition the terms: surds divisible by g (a1) versus the rest (a2)
    a1 = []
    a2 = []
    for y, z in a:
        if z in b1:
            a1.append(y*z**S.Half)
        else:
            a2.append(y*z**S.Half)
    p1 = Add(*a1)
    p2 = Add(*a2)
    # field norm: (p1 + p2)(p1 - p2) = p1**2 - p2**2 removes sqrt(g)
    p = _mexpand(p1**2) - _mexpand(p2**2)
    return p
def _minimal_polynomial_sq(p, n, x):
    """
    Returns the minimal polynomial for the ``nth-root`` of a sum of surds
    or ``None`` if it fails.
    Parameters
    ==========
    p : sum of surds
    n : positive integer
    x : variable of the returned polynomial
    Examples
    ========
    >>> from sympy.polys.numberfields import _minimal_polynomial_sq
    >>> from sympy import sqrt
    >>> from sympy.abc import x
    >>> q = 1 + sqrt(2) + sqrt(3)
    >>> _minimal_polynomial_sq(q, 3, x)
    x**12 - 4*x**9 - 4*x**6 + 16*x**3 - 8
    """
    from sympy.simplify.simplify import _is_sum_surds
    p = sympify(p)
    n = sympify(n)
    # only handles positive integer roots of sums of surds
    if not n.is_Integer or not n > 0 or not _is_sum_surds(p):
        return None
    pn = p**Rational(1, n)
    # eliminate the square roots
    p -= x
    while 1:
        p1 = _separate_sq(p)
        if p1 is p:
            # no surds left; substitute x -> x**n to account for the nth root
            p = p1.subs({x:x**n})
            break
        else:
            p = p1
    # _separate_sq eliminates field extensions in a minimal way, so that
    # if n = 1 then `p = constant*(minimal_polynomial(p))`
    # if n > 1 it contains the minimal polynomial as a factor.
    if n == 1:
        p1 = Poly(p)
        # normalize the sign of the leading coefficient and make it primitive
        if p.coeff(x**p1.degree(x)) < 0:
            p = -p
        p = p.primitive()[1]
        return p
    # by construction `p` has root `pn`
    # the minimal polynomial is the factor vanishing in x = pn
    factors = factor_list(p)[1]
    result = _choose_factor(factors, x, pn)
    return result
def _minpoly_op_algebraic_element(op, ex1, ex2, x, dom, mp1=None, mp2=None):
    """
    return the minimal polynomial for ``op(ex1, ex2)``
    Parameters
    ==========
    op : operation ``Add`` or ``Mul``
    ex1, ex2 : expressions for the algebraic elements
    x : indeterminate of the polynomials
    dom: ground domain
    mp1, mp2 : minimal polynomials for ``ex1`` and ``ex2`` or None
    Examples
    ========
    >>> from sympy import sqrt, Add, Mul, QQ
    >>> from sympy.polys.numberfields import _minpoly_op_algebraic_element
    >>> from sympy.abc import x, y
    >>> p1 = sqrt(sqrt(2) + 1)
    >>> p2 = sqrt(sqrt(2) - 1)
    >>> _minpoly_op_algebraic_element(Mul, p1, p2, x, QQ)
    x - 1
    >>> q1 = sqrt(y)
    >>> q2 = 1 / y
    >>> _minpoly_op_algebraic_element(Add, q1, q2, x, QQ.frac_field(y))
    x**2*y**2 - 2*x*y - y**3 + 1
    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Resultant
    .. [2] I.M. Isaacs, Proc. Amer. Math. Soc. 25 (1970), 638
    "Degrees of sums in a separable field extension".
    """
    y = Dummy(str(x))
    if mp1 is None:
        mp1 = _minpoly_compose(ex1, x, dom)
    if mp2 is None:
        mp2 = _minpoly_compose(ex2, y, dom)
    else:
        # express mp2 in the auxiliary variable y for the resultant
        mp2 = mp2.subs({x: y})
    if op is Add:
        # mp1a = mp1.subs({x: x - y})
        if dom == QQ:
            # fast path over QQ: use the sparse ring representation
            R, X = ring('X', QQ)
            p1 = R(dict_from_expr(mp1)[0])
            p2 = R(dict_from_expr(mp2)[0])
        else:
            (p1, p2), _ = parallel_poly_from_expr((mp1, x - y), x, y)
            r = p1.compose(p2)
            mp1a = r.as_expr()
    elif op is Mul:
        mp1a = _muly(mp1, x, y)
    else:
        raise NotImplementedError('option not available')
    if op is Mul or dom != QQ:
        # eliminate y via the resultant
        r = resultant(mp1a, mp2, gens=[y, x])
    else:
        # over QQ for Add use the dedicated series composition
        r = rs_compose_add(p1, p2)
        r = expr_from_dict(r.as_expr_dict(), x)
    deg1 = degree(mp1, x)
    deg2 = degree(mp2, y)
    # NOTE(review): parses as (op is Mul and deg1 == 1) or deg2 == 1 —
    # presumably intended, matching upstream; verify before changing.
    if op is Mul and deg1 == 1 or deg2 == 1:
        # if deg1 = 1, then mp1 = x - a; mp1a = x - y - a;
        # r = mp2(x - a), so that `r` is irreducible
        return r
    r = Poly(r, x, domain=dom)
    _, factors = r.factor_list()
    # the resultant may be reducible; pick the factor vanishing at op(ex1, ex2)
    res = _choose_factor(factors, x, op(ex1, ex2), dom)
    return res.as_expr()
def _invertx(p, x):
    """
    Returns ``expand_mul(x**degree(p, x)*p.subs(x, 1/x))``
    """
    poly = poly_from_expr(p, x)[0]
    deg = degree(poly)
    # reversing the exponents reverses the coefficient list of p
    terms = []
    for (exp,), coeff in poly.terms():
        terms.append(coeff * x**(deg - exp))
    return Add(*terms)
def _muly(p, x, y):
    """
    Returns ``_mexpand(y**deg*p.subs({x:x / y}))``
    """
    poly = poly_from_expr(p, x)[0]
    deg = degree(poly)
    # homogenize: each monomial c*x**i becomes c*x**i*y**(deg - i)
    terms = []
    for (exp,), coeff in poly.terms():
        terms.append(coeff * x**exp * y**(deg - exp))
    return Add(*terms)
def _minpoly_pow(ex, pw, x, dom, mp=None):
    """
    Returns ``minpoly(ex**pw, x)``
    Parameters
    ==========
    ex : algebraic element
    pw : rational number
    x : indeterminate of the polynomial
    dom: ground domain
    mp : minimal polynomial of ``p``
    Examples
    ========
    >>> from sympy import sqrt, QQ, Rational
    >>> from sympy.polys.numberfields import _minpoly_pow, minpoly
    >>> from sympy.abc import x, y
    >>> p = sqrt(1 + sqrt(2))
    >>> _minpoly_pow(p, 2, x, QQ)
    x**2 - 2*x - 1
    >>> minpoly(p**2, x)
    x**2 - 2*x - 1
    >>> _minpoly_pow(y, Rational(1, 3), x, QQ.frac_field(y))
    x**3 - y
    >>> minpoly(y**Rational(1, 3), x)
    x**3 - y
    """
    pw = sympify(pw)
    if not mp:
        mp = _minpoly_compose(ex, x, dom)
    if not pw.is_rational:
        raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex)
    if pw < 0:
        # negative power: work with 1/ex, whose minpoly is mp reversed
        if mp == x:
            raise ZeroDivisionError('%s is zero' % ex)
        mp = _invertx(mp, x)
        if pw == -1:
            return mp
        pw = -pw
        ex = 1/ex
    # eliminate the auxiliary y from mp(y) = 0 and x = y**(n/d)
    y = Dummy(str(x))
    mp = mp.subs({x: y})
    n, d = pw.as_numer_denom()
    res = Poly(resultant(mp, x**d - y**n, gens=[y]), x, domain=dom)
    _, factors = res.factor_list()
    # the resultant may be reducible; pick the factor vanishing at ex**pw
    res = _choose_factor(factors, x, ex**pw, dom)
    return res.as_expr()
def _minpoly_add(x, dom, *a):
    """
    returns ``minpoly(Add(*a), dom, x)``
    """
    # fold the summands pairwise, reusing the minimal polynomial
    # of the partial sum accumulated so far
    partial = a[0] + a[1]
    mp_sofar = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
    for term in a[2:]:
        mp_sofar = _minpoly_op_algebraic_element(Add, partial, term, x, dom, mp1=mp_sofar)
        partial = partial + term
    return mp_sofar
def _minpoly_mul(x, dom, *a):
    """
    returns ``minpoly(Mul(*a), dom, x)``
    """
    # fold the factors pairwise, reusing the minimal polynomial
    # of the partial product accumulated so far
    partial = a[0] * a[1]
    mp_sofar = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
    for factor in a[2:]:
        mp_sofar = _minpoly_op_algebraic_element(Mul, partial, factor, x, dom, mp1=mp_sofar)
        partial = partial * factor
    return mp_sofar
def _minpoly_sin(ex, x):
    """
    Returns the minimal polynomial of ``sin(ex)``
    see http://mathworld.wolfram.com/TrigonometryAngles.html
    """
    c, a = ex.args[0].as_coeff_Mul()
    if a is pi:
        # only arguments of the form (rational)*pi are algebraic here
        if c.is_rational:
            n = c.q
            q = sympify(n)
            if q.is_prime:
                # for a = pi*p/q with q odd prime, using chebyshevt
                # write sin(q*a) = mp(sin(a))*sin(a);
                # the roots of mp(x) are sin(pi*p/q) for p = 1,..., q - 1
                a = dup_chebyshevt(n, ZZ)
                return Add(*[x**(n - i - 1)*a[i] for i in range(n)])
            if c.p == 1:
                if q == 9:
                    # hard-coded known minimal polynomial of sin(pi/9)
                    return 64*x**6 - 96*x**4 + 36*x**2 - 3
            if n % 2 == 1:
                # for a = pi*p/q with q odd, use
                # sin(q*a) = 0 to see that the minimal polynomial must be
                # a factor of dup_chebyshevt(n, ZZ)
                a = dup_chebyshevt(n, ZZ)
                a = [x**(n - i)*a[i] for i in range(n + 1)]
                r = Add(*a)
                _, factors = factor_list(r)
                res = _choose_factor(factors, x, ex)
                return res
            # even denominator: reduce via sin(t)**2 = (1 - cos(2t))/2
            expr = ((1 - cos(2*c*pi))/2)**S.Half
            res = _minpoly_compose(expr, x, QQ)
            return res
    raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex)
def _minpoly_cos(ex, x):
    """
    Returns the minimal polynomial of ``cos(ex)``
    see http://mathworld.wolfram.com/TrigonometryAngles.html
    """
    from sympy import sqrt
    c, a = ex.args[0].as_coeff_Mul()
    if a is pi:
        # only arguments of the form (rational)*pi are algebraic here
        if c.is_rational:
            if c.p == 1:
                # hard-coded known minimal polynomials
                if c.q == 7:
                    return 8*x**3 - 4*x**2 - 4*x + 1
                if c.q == 9:
                    return 8*x**3 - 6*x + 1
            elif c.p == 2:
                q = sympify(c.q)
                if q.is_prime:
                    # reduce to the sine case via cos(t) = 1 - 2*sin(t/2)**2
                    s = _minpoly_sin(ex, x)
                    return _mexpand(s.subs({x:sqrt((1 - x)/2)}))
            # for a = pi*p/q, cos(q*a) =T_q(cos(a)) = (-1)**p
            n = int(c.q)
            a = dup_chebyshevt(n, ZZ)
            a = [x**(n - i)*a[i] for i in range(n + 1)]
            r = Add(*a) - (-1)**c.p
            _, factors = factor_list(r)
            # pick the irreducible factor vanishing at cos(ex)
            res = _choose_factor(factors, x, ex)
            return res
    raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex)
def _minpoly_exp(ex, x):
    """
    Returns the minimal polynomial of ``exp(ex)``
    """
    c, a = ex.args[0].as_coeff_Mul()
    q = sympify(c.q)
    # exp(I*pi*p/q) is a root of unity; only this form is algebraic here
    if a == I*pi:
        if c.is_rational:
            if c.p == 1 or c.p == -1:
                # primitive 2q-th roots of unity: known cyclotomic-derived
                # minimal polynomials for small q
                if q == 3:
                    return x**2 - x + 1
                if q == 4:
                    return x**4 + 1
                if q == 6:
                    return x**4 - x**2 + 1
                if q == 8:
                    return x**8 + 1
                if q == 9:
                    return x**6 - x**3 + 1
                if q == 10:
                    return x**8 - x**6 + x**4 - x**2 + 1
                if q.is_prime:
                    # q prime: minimal polynomial is 1 - x + x**2 - ... +- x**(q-1)
                    s = 0
                    for i in range(q):
                        s += (-x)**i
                    return s
            # x**(2*q) = product(factors)
            factors = [cyclotomic_poly(i, x) for i in divisors(2*q)]
            # pick the cyclotomic factor vanishing at exp(ex)
            mp = _choose_factor(factors, x, ex)
            return mp
        else:
            raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex)
    raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex)
def _minpoly_rootof(ex, x):
    """
    Returns the minimal polynomial of a ``CRootOf`` object.
    """
    # rewrite the defining polynomial in terms of x, then keep the
    # irreducible factor that actually vanishes at the root
    expr = ex.expr.subs({ex.poly.gens[0]: x})
    factors = factor_list(expr, x)[1]
    return _choose_factor(factors, x, ex)
def _minpoly_compose(ex, x, dom):
    """
    Computes the minimal polynomial of an algebraic element
    using operations on minimal polynomials
    Examples
    ========
    >>> from sympy import minimal_polynomial, sqrt, Rational
    >>> from sympy.abc import x, y
    >>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), x, compose=True)
    x**2 - 2*x - 1
    >>> minimal_polynomial(sqrt(y) + 1/y, x, compose=True)
    x**2*y**2 - 2*x*y - y**3 + 1
    """
    # base cases: rationals, I, and generators of the ground domain
    if ex.is_Rational:
        return ex.q*x - ex.p
    if ex is I:
        _, factors = factor_list(x**2 + 1, x, domain=dom)
        # over a domain containing I, x**2 + 1 splits and x - I is minimal
        return x**2 + 1 if len(factors) == 1 else x - I
    if hasattr(dom, 'symbols') and ex in dom.symbols:
        return x - ex
    if dom.is_QQ and _is_sum_surds(ex):
        # eliminate the square roots
        ex -= x
        while 1:
            ex1 = _separate_sq(ex)
            if ex1 is ex:
                return ex
            else:
                ex = ex1
    # recurse on the expression structure
    if ex.is_Add:
        res = _minpoly_add(x, dom, *ex.args)
    elif ex.is_Mul:
        f = Factors(ex).factors
        # split rational-base/rational-exponent factors from the rest
        r = sift(f.items(), lambda itx: itx[0].is_Rational and itx[1].is_Rational)
        if r[True] and dom == QQ:
            ex1 = Mul(*[bx**ex for bx, ex in r[False] + r[None]])
            r1 = dict(r[True])
            dens = [y.q for y in r1.values()]
            lcmdens = reduce(lcm, dens, 1)
            neg1 = S.NegativeOne
            expn1 = r1.pop(neg1, S.Zero)
            nums = [base**(y.p*lcmdens // y.q) for base, y in r1.items()]
            ex2 = Mul(*nums)
            mp1 = minimal_polynomial(ex1, x)
            # use the fact that in SymPy canonicalization products of integers
            # raised to rational powers are organized in relatively prime
            # bases, and that in ``base**(n/d)`` a perfect power is
            # simplified with the root
            # Powers of -1 have to be treated separately to preserve sign.
            mp2 = ex2.q*x**lcmdens - ex2.p*neg1**(expn1*lcmdens)
            ex2 = neg1**expn1 * ex2**Rational(1, lcmdens)
            res = _minpoly_op_algebraic_element(Mul, ex1, ex2, x, dom, mp1=mp1, mp2=mp2)
        else:
            res = _minpoly_mul(x, dom, *ex.args)
    elif ex.is_Pow:
        res = _minpoly_pow(ex.base, ex.exp, x, dom)
    elif ex.__class__ is sin:
        res = _minpoly_sin(ex, x)
    elif ex.__class__ is cos:
        res = _minpoly_cos(ex, x)
    elif ex.__class__ is exp:
        res = _minpoly_exp(ex, x)
    elif ex.__class__ is CRootOf:
        res = _minpoly_rootof(ex, x)
    else:
        raise NotAlgebraic("%s doesn't seem to be an algebraic element" % ex)
    return res
@public
def minimal_polynomial(ex, x=None, compose=True, polys=False, domain=None):
    """
    Computes the minimal polynomial of an algebraic element.
    Parameters
    ==========
    ex : Expr
        Element or expression whose minimal polynomial is to be calculated.
    x : Symbol, optional
        Independent variable of the minimal polynomial
    compose : boolean, optional (default=True)
        Method to use for computing minimal polynomial. If ``compose=True``
        (default) then ``_minpoly_compose`` is used, if ``compose=False`` then
        groebner bases are used.
    polys : boolean, optional (default=False)
        If ``True`` returns a ``Poly`` object else an ``Expr`` object.
    domain : Domain, optional
        Ground domain
    Notes
    =====
    By default ``compose=True``, the minimal polynomial of the subexpressions of ``ex``
    are computed, then the arithmetic operations on them are performed using the resultant
    and factorization.
    If ``compose=False``, a bottom-up algorithm is used with ``groebner``.
    The default algorithm stalls less frequently.
    If no ground domain is given, it will be generated automatically from the expression.
    Examples
    ========
    >>> from sympy import minimal_polynomial, sqrt, solve, QQ
    >>> from sympy.abc import x, y
    >>> minimal_polynomial(sqrt(2), x)
    x**2 - 2
    >>> minimal_polynomial(sqrt(2), x, domain=QQ.algebraic_field(sqrt(2)))
    x - sqrt(2)
    >>> minimal_polynomial(sqrt(2) + sqrt(3), x)
    x**4 - 10*x**2 + 1
    >>> minimal_polynomial(solve(x**3 + x + 3)[0], x)
    x**3 + x + 3
    >>> minimal_polynomial(sqrt(y), x)
    x**2 - y
    """
    from sympy.polys.polytools import degree
    from sympy.polys.domains import FractionField
    from sympy.core.basic import preorder_traversal
    ex = sympify(ex)
    if ex.is_number:
        # not sure if it's always needed but try it for numbers (issue 8354)
        ex = _mexpand(ex, recursive=True)
    # AlgebraicNumber subexpressions are only handled by the groebner path
    for expr in preorder_traversal(ex):
        if expr.is_AlgebraicNumber:
            compose = False
            break
    if x is not None:
        x, cls = sympify(x), Poly
    else:
        x, cls = Dummy('x'), PurePoly
    if not domain:
        # derive the ground domain from the free symbols of ex
        if ex.free_symbols:
            domain = FractionField(QQ, list(ex.free_symbols))
        else:
            domain = QQ
    if hasattr(domain, 'symbols') and x in domain.symbols:
        raise GeneratorsError("the variable %s is an element of the ground "
                              "domain %s" % (x, domain))
    if compose:
        result = _minpoly_compose(ex, x, domain)
        # normalize: primitive content and a non-negative leading coefficient
        result = result.primitive()[1]
        c = result.coeff(x**degree(result, x))
        if c.is_negative:
            result = expand_mul(-result)
        return cls(result, x, field=True) if polys else result.collect(x)
    if not domain.is_QQ:
        raise NotImplementedError("groebner method only works for QQ")
    result = _minpoly_groebner(ex, x, cls)
    return cls(result, x, field=True) if polys else result.collect(x)
def _minpoly_groebner(ex, x, cls):
    """
    Computes the minimal polynomial of an algebraic number
    using Groebner bases

    The expression is scanned bottom-up, introducing one auxiliary symbol
    per algebraic sub-expression together with a defining polynomial; the
    last element of the lexicographic Groebner basis of that system then
    has ``ex`` as a root, and the right irreducible factor is selected.

    Examples
    ========
    >>> from sympy import minimal_polynomial, sqrt, Rational
    >>> from sympy.abc import x
    >>> minimal_polynomial(sqrt(2) + 3*Rational(1, 3), x, compose=False)
    x**2 - 2*x - 1
    """
    from sympy.polys.polytools import degree
    from sympy.core.function import expand_multinomial
    generator = numbered_symbols('a', cls=Dummy)
    # mapping: algebraic sub-expression -> its defining polynomial (in the
    # auxiliary symbol); symbols: sub-expression -> auxiliary symbol.
    mapping, symbols = {}, {}
    def update_mapping(ex, exp, base=None):
        # Register a fresh auxiliary symbol for `ex`.  When `base` is given
        # the defining relation is a**exp + base; otherwise `exp` is a
        # ready-made minimal polynomial (Poly) rewritten in the new symbol.
        a = next(generator)
        symbols[ex] = a
        if base is not None:
            mapping[ex] = a**exp + base
        else:
            mapping[ex] = exp.as_expr(a)
        return a
    def bottom_up_scan(ex):
        # Rewrite `ex` in terms of auxiliary symbols, recording defining
        # polynomials along the way.  Raises NotAlgebraic for anything that
        # is not built from rationals, I, radicals and algebraic numbers.
        if ex.is_Atom:
            if ex is S.ImaginaryUnit:
                # I satisfies a**2 + 1 = 0.
                if ex not in mapping:
                    return update_mapping(ex, 2, 1)
                else:
                    return symbols[ex]
            elif ex.is_Rational:
                return ex
        elif ex.is_Add:
            return Add(*[ bottom_up_scan(g) for g in ex.args ])
        elif ex.is_Mul:
            return Mul(*[ bottom_up_scan(g) for g in ex.args ])
        elif ex.is_Pow:
            if ex.exp.is_Rational:
                if ex.exp < 0 and ex.base.is_Add:
                    # Negative power of a sum: replace 1/(c + alg) by the
                    # inverse computed in the field generated by `terms`.
                    coeff, terms = ex.base.as_coeff_add()
                    elt, _ = primitive_element(terms, polys=True)
                    alg = ex.base - coeff
                    # XXX: turn this into eval()
                    inverse = invert(elt.gen + coeff, elt).as_expr()
                    base = inverse.subs(elt.gen, alg).expand()
                    if ex.exp == -1:
                        return bottom_up_scan(base)
                    else:
                        ex = base**(-ex.exp)
                if not ex.exp.is_Integer:
                    # Split p/q exponent into integer power and q-th root.
                    base, exp = (
                        ex.base**ex.exp.p).expand(), Rational(1, ex.exp.q)
                else:
                    base, exp = ex.base, ex.exp
                base = bottom_up_scan(base)
                expr = base**exp
                if expr not in mapping:
                    # expr satisfies a**(1/exp) - base = 0.
                    return update_mapping(expr, 1/exp, -base)
                else:
                    return symbols[expr]
        elif ex.is_AlgebraicNumber:
            if ex.root not in mapping:
                return update_mapping(ex.root, ex.minpoly)
            else:
                return symbols[ex.root]
        raise NotAlgebraic("%s doesn't seem to be an algebraic number" % ex)
    def simpler_inverse(ex):
        """
        Returns True if it is more likely that the minimal polynomial
        algorithm works better with the inverse
        """
        if ex.is_Pow:
            if (1/ex.exp).is_integer and ex.exp < 0:
                if ex.base.is_Add:
                    return True
        if ex.is_Mul:
            hit = True
            for p in ex.args:
                if p.is_Add:
                    return False
                if p.is_Pow:
                    if p.base.is_Add and p.exp > 0:
                        return False
            if hit:
                return True
        return False
    inverted = False
    ex = expand_multinomial(ex)
    if ex.is_AlgebraicNumber:
        return ex.minpoly.as_expr(x)
    elif ex.is_Rational:
        result = ex.q*x - ex.p
    else:
        inverted = simpler_inverse(ex)
        if inverted:
            ex = ex**-1
        res = None
        if ex.is_Pow and (1/ex.exp).is_Integer:
            # Pure root: handled directly by the sum-of-surds routine.
            n = 1/ex.exp
            res = _minimal_polynomial_sq(ex.base, n, x)
        elif _is_sum_surds(ex):
            res = _minimal_polynomial_sq(ex, S.One, x)
        if res is not None:
            result = res
        if res is None:
            # General case: Groebner basis of the defining relations.
            bus = bottom_up_scan(ex)
            F = [x - bus] + list(mapping.values())
            G = groebner(F, list(symbols.values()) + [x], order='lex')
            _, factors = factor_list(G[-1])
            # by construction G[-1] has root `ex`
            result = _choose_factor(factors, x, ex)
    if inverted:
        # Undo the inversion performed above and renormalize the sign of
        # the leading coefficient.
        result = _invertx(result, x)
        if result.coeff(x**degree(result, x)) < 0:
            result = expand_mul(-result)
    return result
# Short, backwards-compatible alias for minimal_polynomial.
minpoly = minimal_polynomial
def _coeffs_generator(n):
    """Generate coefficient vectors for `primitive_element()`.

    Yields length-``n`` lists over ``{1, -1, 2, -2, 3, -3}`` whose first
    entry is positive: negating every entry gives the opposite linear
    combination, so only one member of each +/- pair needs testing.
    """
    candidates = variations([1, -1, 2, -2, 3, -3], n, repetition=True)
    for combo in candidates:
        # Vectors with a non-positive leading entry are the mirror images
        # of vectors already covered.
        if combo[0] > 0:
            yield list(combo)
@public
def primitive_element(extension, x=None, **args):
    """Construct a common number field for all extensions.

    Returns the minimal polynomial of a primitive element together with
    the integer coefficients expressing it as a linear combination of the
    given extension elements (and, in the ``ex=True`` branch, the
    representations of each extension element in the primitive one).
    """
    if not extension:
        raise ValueError("can't compute primitive element for empty extension")
    if x is not None:
        x, cls = sympify(x), Poly
    else:
        x, cls = Dummy('x'), PurePoly
    if not args.get('ex', False):
        # Fast path: iteratively adjoin each extension using sqf_norm,
        # accumulating the shift coefficients `s`.
        gen, coeffs = extension[0], [1]
        # XXX when minimal_polynomial is extended to work
        # with AlgebraicNumbers this test can be removed
        if isinstance(gen, AlgebraicNumber):
            g = gen.minpoly.replace(x)
        else:
            g = minimal_polynomial(gen, x, polys=True)
        for ext in extension[1:]:
            _, factors = factor_list(g, extension=ext)
            g = _choose_factor(factors, x, gen)
            s, _, g = g.sqf_norm()
            gen += s*ext
            coeffs.append(s)
        if not args.get('polys', False):
            return g.as_expr(), coeffs
        else:
            return cls(g), coeffs
    # ex=True branch: solve for all elements simultaneously with a
    # Groebner basis, one auxiliary symbol per extension element.
    generator = numbered_symbols('y', cls=Dummy)
    F, Y = [], []
    for ext in extension:
        y = next(generator)
        if ext.is_Poly:
            if ext.is_univariate:
                f = ext.as_expr(y)
            else:
                raise ValueError("expected minimal polynomial, got %s" % ext)
        else:
            f = minpoly(ext, y)
        F.append(f)
        Y.append(y)
    coeffs_generator = args.get('coeffs', _coeffs_generator)
    for coeffs in coeffs_generator(len(Y)):
        # Candidate primitive element: sum(c*y); accept it when the
        # Groebner basis is triangular so each y is expressible in x.
        f = x - sum([ c*y for c, y in zip(coeffs, Y)])
        G = groebner(F + [f], Y + [x], order='lex', field=True)
        H, g = G[:-1], cls(G[-1], x, domain='QQ')
        for i, (h, y) in enumerate(zip(H, Y)):
            try:
                H[i] = Poly(y - h, x,
                            domain='QQ').all_coeffs()  # XXX: composite=False
            except CoercionFailed:  # pragma: no cover
                break  # G is not a triangular set
        else:
            break
    else:  # pragma: no cover
        raise RuntimeError("run out of coefficient configurations")
    _, g = g.clear_denoms()
    if not args.get('polys', False):
        return g.as_expr(), coeffs, H
    else:
        return g, coeffs, H
def is_isomorphism_possible(a, b):
    """Returns `True` if there is a chance for isomorphism.

    Cheap necessary-condition screen: the degree of ``a``'s minimal
    polynomial must divide that of ``b``'s, and the discriminants must be
    compatible.  A ``True`` answer does NOT guarantee an isomorphism.
    """
    n = a.minpoly.degree()
    m = b.minpoly.degree()
    if m % n != 0:
        return False
    if n == m:
        return True
    da = a.minpoly.discriminant()
    db = b.minpoly.discriminant()
    # NOTE(review): discriminant-based prime screen; presumably rules out
    # embeddings by ramification behaviour — confirm against the original
    # sympy numberfields implementation before changing.
    i, k, half = 1, m//n, db//2
    while True:
        # `sieve` yields the i-th prime (sympy's prime sieve).
        p = sieve[i]
        P = p**k
        if P > half:
            break
        if ((da % p) % 2) and not (db % P):
            return False
        i += 1
    return True
def field_isomorphism_pslq(a, b):
    """Construct field isomorphism using PSLQ algorithm.

    Looks numerically for an integer relation between powers of ``b.root``
    and ``a.root``, then verifies the candidate symbolically.  Returns the
    coefficient list expressing ``a`` in powers of ``b``'s generator, or
    None when no relation is found.  Real roots only.
    """
    if not a.root.is_real or not b.root.is_real:
        raise NotImplementedError("PSLQ doesn't support complex coefficients")
    f = a.minpoly
    g = b.minpoly.replace(f.gen)
    # n: working precision in digits, doubled on each failed attempt.
    n, m, prev = 100, b.minpoly.degree(), None
    for i in range(1, 5):
        A = a.root.evalf(n)
        B = b.root.evalf(n)
        # Relation basis: 1, B, B**2, ..., B**(m-1), A.
        basis = [1, B] + [ B**i for i in range(2, m) ] + [A]
        dps, mp.dps = mp.dps, n
        coeffs = pslq(basis, maxcoeff=int(1e10), maxsteps=1000)
        mp.dps = dps
        if coeffs is None:
            break
        if coeffs != prev:
            prev = coeffs
        else:
            # Same relation at higher precision: stop iterating.
            break
        # Normalize by the coefficient of A and strip trailing zeros.
        coeffs = [S(c)/coeffs[-1] for c in coeffs[:-1]]
        while not coeffs[-1]:
            coeffs.pop()
        coeffs = list(reversed(coeffs))
        h = Poly(coeffs, f.gen, domain='QQ')
        # Symbolic verification: f(h(x)) must vanish modulo g.
        if f.compose(h).rem(g).is_zero:
            d, approx = len(coeffs) - 1, 0
            for i, coeff in enumerate(coeffs):
                approx += coeff*B**(d - i)
            # Fix the overall sign using the numeric approximation.
            if A*approx < 0:
                return [ -c for c in coeffs ]
            else:
                return coeffs
        elif f.compose(-h).rem(g).is_zero:
            return [ -c for c in coeffs ]
        else:
            n *= 2
    return None
def field_isomorphism_factor(a, b):
    """Construct field isomorphism via factorization.

    Factors ``a``'s minimal polynomial over the field generated by ``b``;
    a linear factor gives a candidate image of ``a`` which is confirmed
    numerically.  Returns the coefficient list or None.
    """
    _, factors = factor_list(a.minpoly, extension=b)
    for f, _ in factors:
        if f.degree() == 1:
            # Linear factor x - r: read off r's coefficients in b's basis.
            coeffs = f.rep.TC().to_sympy_list()
            d, terms = len(coeffs) - 1, []
            for i, coeff in enumerate(coeffs):
                terms.append(coeff*b.root**(d - i))
            root = Add(*terms)
            # Numeric check resolves which conjugate root we actually hit.
            if (a.root - root).evalf(chop=True) == 0:
                return coeffs
            if (a.root + root).evalf(chop=True) == 0:
                return [-c for c in coeffs]
    return None
@public
def field_isomorphism(a, b, **args):
    """Construct an isomorphism between two number fields.

    Coerces ``a`` and ``b`` to ``AlgebraicNumber`` and returns the list
    of coefficients expressing ``a`` in powers of ``b``'s generator, or
    None when no embedding exists.  With ``fast=True`` (the default) a
    PSLQ-based search is attempted before falling back to factorization.
    """
    a, b = sympify(a), sympify(b)
    if not a.is_AlgebraicNumber:
        a = AlgebraicNumber(a)
    if not b.is_AlgebraicNumber:
        b = AlgebraicNumber(b)
    # Identical numbers: the coefficients are already known.
    if a == b:
        return a.coeffs()
    deg_a = a.minpoly.degree()
    deg_b = b.minpoly.degree()
    if deg_a == 1:
        # `a` is rational; it embeds into any field as itself.
        return [a.root]
    if deg_b % deg_a != 0:
        # Degree of a subfield must divide the degree of the field.
        return None
    if args.get('fast', True):
        try:
            coeffs = field_isomorphism_pslq(a, b)
        except NotImplementedError:
            # PSLQ path only handles real roots; fall through.
            coeffs = None
        if coeffs is not None:
            return coeffs
    return field_isomorphism_factor(a, b)
@public
def to_number_field(extension, theta=None, **args):
    """Express `extension` in the field generated by `theta`.

    With ``theta=None`` the primitive element of the extension(s) is
    returned as an ``AlgebraicNumber``; otherwise the primitive element
    is rewritten in terms of ``theta``, raising ``IsomorphismFailed``
    when it does not lie in that field.
    """
    gen = args.get('gen')
    if hasattr(extension, '__iter__'):
        exts = list(extension)
    else:
        exts = [extension]
    # A lone (minpoly, root) pair is taken verbatim.
    if len(exts) == 1 and type(exts[0]) is tuple:
        return AlgebraicNumber(exts[0])
    minpoly, coeffs = primitive_element(exts, gen, polys=True)
    root = sum(c*e for c, e in zip(coeffs, exts))
    if theta is None:
        return AlgebraicNumber((minpoly, root))
    theta = sympify(theta)
    if not theta.is_AlgebraicNumber:
        theta = AlgebraicNumber(theta, gen=gen)
    coeffs = field_isomorphism(root, theta)
    if coeffs is None:
        raise IsomorphismFailed(
            "%s is not in a subfield of %s" % (root, theta.root))
    return AlgebraicNumber(theta, coeffs)
class IntervalPrinter(MpmathPrinter, LambdaPrinter):
    """Use ``lambda`` printer but print numbers as ``mpi`` intervals. """
    # NOTE: the explicit super(...) targets deliberately skip parts of the
    # MRO so exact numbers print via the generic code printer and get
    # wrapped in mpmath interval constructors.
    def _print_Integer(self, expr):
        return "mpi('%s')" % super(PythonCodePrinter, self)._print_Integer(expr)
    def _print_Rational(self, expr):
        return "mpi('%s')" % super(PythonCodePrinter, self)._print_Rational(expr)
    def _print_Half(self, expr):
        # Halves are printed with the Rational printer for the same reason.
        return "mpi('%s')" % super(PythonCodePrinter, self)._print_Rational(expr)
    def _print_Pow(self, expr):
        # rational=True keeps exponents exact instead of floating point.
        return super(MpmathPrinter, self)._print_Pow(expr, rational=True)
@public
def isolate(alg, eps=None, fast=False):
    """Give a rational isolating interval for an algebraic number.

    Returns a pair ``(a, b)`` of rationals with the (real) algebraic
    number strictly inside; with ``eps`` the interval is refined to that
    width.  Complex numbers are not supported.
    """
    alg = sympify(alg)
    if alg.is_Rational:
        # A rational isolates itself: degenerate interval.
        return (alg, alg)
    elif not alg.is_real:
        raise NotImplementedError(
            "complex algebraic numbers are not supported")
    # Evaluate `alg` with mpmath interval arithmetic via IntervalPrinter.
    func = lambdify((), alg, modules="mpmath", printer=IntervalPrinter())
    poly = minpoly(alg, polys=True)
    intervals = poly.intervals(sqf=True)
    dps, done = mp.dps, False
    try:
        while not done:
            alg = func()
            for a, b in intervals:
                # Accept the first root interval containing the numeric
                # interval [alg.a, alg.b].
                if a <= alg.a and alg.b <= b:
                    done = True
                    break
            else:
                # Ambiguous: double the precision and retry.
                mp.dps *= 2
    finally:
        mp.dps = dps
    # NOTE(review): if `intervals` were empty, `a`/`b` would be unbound
    # here; presumably minpoly of a real algebraic always yields at least
    # one real root interval — confirm.
    if eps is not None:
        a, b = poly.refine_root(a, b, eps=eps, fast=fast)
    return (a, b)
| |
#*******************************************************************************
#Program : scrapping MVG Rad data to postgres database *
#Database : POSTGRES *
#Language : Python *
#Date : 01.05.2016 *
#*******************************************************************************
#*******************************************************************************
#******************************importing packages*******************************
#*******************************************************************************
import time #time
import psycopg2 #psycopg2 to connect to POSTGRES database
import json #json to load json
import urllib2 #urlib2 to open url
from datetime import datetime #datetime
import calendar #date
#*******************************************************************************
#*********************connecting to the postgres database***********************
#*******************************************************************************
try:
conn = psycopg2.connect("dbname='postgres' user='postgres' host='localhost'\
password='smohanta'")
cur = conn.cursor()
except:
print "Unable to connect to the database"
#*******************************************************************************
#***************************reading weather table*******************************
#*******************************************************************************
#selecting records from table weather
cur.execute("SELECT * from weather")
rows_weather = cur.fetchall()
#*******************************************************************************
#********************calculation for holiday using holidayapi*******************
#*******************************************************************************
# Date components reused later when building the insert payload.
year = time.strftime("%Y")
day = time.strftime("%d")
month_num = time.strftime("%m")
# The holidayapi.com lookup is currently disabled; only weekends are
# treated as non-working days.
#wd_url = "http://holidayapi.com/v1/holidays?country=DE&year=" + str(year) + \
#"&month=" + str(month_num) + "&day=" + str(day)
#dataHoliday = json.load(urllib2.urlopen(wd_url))
if time.strftime("%A") in ("Saturday", "Sunday"):
    workingday = "No"
else:
    workingday = "Yes"
#*******************************************************************************
#scraping data from MVG https://carsharing.mvg-mobil.de/json/stations.php (json)
#*******************************************************************************
data = json.load(urllib2.urlopen("https://carsharing.mvg-mobil.de/json/stations.php"))
#initializing variables
#*******************
i = 0 #counter for total iteration
j = 0 #counts total bike in a station
k = 0 #counter to get bike id
exists = 0 #bikes whose latest stored position is unchanged
new_bike = 0 #bikes inserted or moved since the last run
new = 0
zone_id = ""
date = ""
provider = "MVG Rad"
date_var = time.strftime("%d%m%Y") #e.g. 01052016
month = time.strftime("%B") #full month name
#Main logic starts
# For every station record of provider 11 (MVG Rad bikes): attach the
# current weather for its zone, then for each bike either insert a fresh
# row (new bike or new position) into mvgtodb or count it as unchanged.
counter = data["cnt"]#counter holds the total number of records scrapped
while i < counter:#looping through the whole data to process one by one
    #number 11 is the bike section
    if data["sta"][i]["prv"] == 11 :
        latitude = data["sta"][i]["loc"][0]
        longitude = data["sta"][i]["loc"][1]
        bikeorstations_Id = data["sta"][i]["id"]
        timestamp = time.time() # NOTE(review): unused below
        current_time = time.strftime("%H%M%S")
        # Derive meteorological season from the month name.
        if time.strftime("%B") == "March" or time.strftime("%B") == "April" or \
        time.strftime("%B") == "May":
            season = "Spring"
        elif time.strftime("%B") == "June" or time.strftime("%B") == "July" or \
        time.strftime("%B") == "August":
            season = "Summer"
        elif time.strftime("%B") == "September" or time.strftime("%B") == \
        "October" or time.strftime("%B") == "November":
            season = "Autumn"
        else:
            season = "Winter"
        # Reset per-station weather defaults before the zone lookup.
        location = ""
        weather = ""
        isdaytime = ""
        isday = ""
        temp = 0
        temp_unit = ""
        feels_like = 0
        feels_like_unit = ""
        wind_speed = 0
        wind_speed_unit = ""
        dew = 0
        dew_unit = 0
        location = ""
        easy_timestamp = 0 # NOTE(review): unused below
        if "zid" in data["sta"][i]:
            zone_id = data["sta"][i]["zid"]
        # NOTE(review): weather_timestamp is only bound when a weather row
        # matches this zone; the later inserts would raise NameError on the
        # first station with no match — confirm weather covers all zones.
        for row in rows_weather:
            if row[1] == str(zone_id) :
                weather = row[5]
                isday = row[6]
                temp = row[7]
                temp_unit = row[8]
                feels_like = row[9]
                feels_like_unit = row[10]
                wind_speed = row[11]
                wind_speed_unit = row[12]
                dew = row[13]
                dew_unit = row[14]
                location = row[2]
                weather_timestamp = row[4]
        if "vhc" in data["sta"][i]:
            countVch = len(data['sta'][i]['vhc'])
            numberofbike = countVch
            j = j + countVch
            while k < countVch :
                bike_id = data["sta"][i]["vhc"][k]["id"]
                k = k+1
                # Fetch the most recent stored position of this bike.
                try:
                    cur.execute("SELECT bike_id, latitude, longitude,timestamp_update \
                    FROM public.mvgtodb WHERE BIKE_ID = %(bike_id)s order by timestamp_update desc limit 1;",{'bike_id':bike_id})
                    rows_latest_loc = cur.fetchall()
                    conn.commit()
                except Exception, e:
                    conn.rollback()
                if cur.rowcount == 1:
                    # Bike is known: insert only if its position changed.
                    for rows in rows_latest_loc:
                        if rows[1] != latitude or rows[2] != longitude :
                            print 'new location'
                            print rows_latest_loc
                            new_bike = new_bike + 1
                            print latitude
                            print longitude
                            print bike_id
                            d = datetime.now()
                            std_timestamp = d #standard timestamp
                            s = str(d)
                            # Sortable numeric timestamp: YYYYMMDDhhmmss + microsecond prefix.
                            timestamp_update = int(s[0:4] + s[5:7] + s[8:10] + s[11:13] + s[14:16] + \
                            s[17:19]+ s[21:26]) #easy timestamp to work with
                            entry = ({"bike_id":bike_id, "latitude":latitude , "longitude": \
                            longitude, "timestamp_update":timestamp_update,\
                            "std_timestamp":std_timestamp,"weather_timestamp":weather_timestamp,\
                            "zone_id": zone_id ,"provider": provider , "year":\
                            year, "date": date_var,"month":month, "season":season,\
                            "bikeorstations_Id":bikeorstations_Id, "numberofbike":numberofbike,\
                            "workingday":workingday,\
                            "current_time":current_time,"local_name":location,\
                            "weather":weather,"isdaytime":isday,\
                            "temp_value":temp,"temp_unit":temp_unit,"feels_like":feels_like,\
                            "feels_like_unit":feels_like_unit,"wind_speed":wind_speed,"wind_unit":wind_speed_unit,\
                            "dew_point":dew,"dew_point_unit":dew_unit})
                            #inserting data into database
                            try:
                                cur.execute("""INSERT INTO mvgtodb VALUES (%(bike_id)s, %(latitude)s,\
                                %(longitude)s,%(timestamp_update)s,%(std_timestamp)s,%(weather_timestamp)s,\
                                %(zone_id)s,%(provider)s,%(year)s,%(date)s,%(month)s,\
                                %(season)s,%(bikeorstations_Id)s ,%(numberofbike)s,%(workingday)s,\
                                %(current_time)s,%(local_name)s,%(weather)s,\
                                %(isdaytime)s,%(temp_value)s,%(temp_unit)s,\
                                %(feels_like)s,%(feels_like_unit)s,%(wind_speed)s,%(wind_unit)s,\
                                %(dew_point)s,%(dew_point_unit)s) """, entry)
                                conn.commit()
                            except Exception, e:
                                # NOTE(review): committing after a failed INSERT looks
                                # wrong — a rollback is the usual recovery here; the
                                # sibling handler below does neither. Confirm intent.
                                conn.commit()
                                exists = exists + 1
                        else:
                            exists = exists + 1
                else:
                    # Bike never seen before: always insert.
                    new_bike = new_bike + 1
                    d = datetime.now()
                    std_timestamp = d #standard timestamp
                    s = str(d)
                    timestamp_update = int(s[0:4] + s[5:7] + s[8:10] + s[11:13] + s[14:16] + \
                    s[17:19]+ s[21:26]) #easy timestamp to work with
                    entry = ({"bike_id":bike_id, "latitude":latitude , "longitude": \
                    longitude, "timestamp_update":timestamp_update,\
                    "std_timestamp":std_timestamp,"weather_timestamp":weather_timestamp,\
                    "zone_id": zone_id ,"provider": provider , "year":\
                    year, "date": date_var,"month":month, "season":season,\
                    "bikeorstations_Id":bikeorstations_Id, "numberofbike":numberofbike,\
                    "workingday":workingday,\
                    "current_time":current_time,"local_name":location,\
                    "weather":weather,"isdaytime":isday,\
                    "temp_value":temp,"temp_unit":temp_unit,"feels_like":feels_like,\
                    "feels_like_unit":feels_like_unit,"wind_speed":wind_speed,"wind_unit":wind_speed_unit,\
                    "dew_point":dew,"dew_point_unit":dew_unit})
                    #inserting data into database
                    try:
                        cur.execute("""INSERT INTO mvgtodb VALUES (%(bike_id)s, %(latitude)s,\
                        %(longitude)s,%(timestamp_update)s,%(std_timestamp)s,%(weather_timestamp)s,\
                        %(zone_id)s,%(provider)s,%(year)s,%(date)s,%(month)s,\
                        %(season)s,%(bikeorstations_Id)s ,%(numberofbike)s,%(workingday)s,\
                        %(current_time)s,%(local_name)s,%(weather)s,\
                        %(isdaytime)s,%(temp_value)s,%(temp_unit)s,\
                        %(feels_like)s,%(feels_like_unit)s,%(wind_speed)s,%(wind_unit)s,\
                        %(dew_point)s,%(dew_point_unit)s)""", entry)
                        conn.commit()
                    except Exception, e:
                        exists = exists + 1
    i = i + 1
    k = 0 #clear
# Run summary, printed and appended to the mvglogdb audit table.
print "Number of Rad = %s" %j
print "Number of Rad already exists in db = %s" %exists
new = new_bike
print "Number of Rad update = %s" %new
d = datetime.now()
std_timestamp = str(d) #standard timestamp
entry = ({"timestamp":std_timestamp, "scanned_bike":j , "updated_bike": new})
cur.execute("""INSERT INTO mvglogdb VALUES (%(timestamp)s, %(scanned_bike)s,\
%(updated_bike)s) """, entry)
conn.commit()
| |
# -*- coding: utf-8 -*-
# Copyright: See the LICENSE file.
import datetime
import unittest
from factory import base
from factory import declarations
from factory import errors
from factory import helpers
from .compat import mock
from . import utils
class OrderedDeclarationTestCase(unittest.TestCase):
    """OrderedDeclaration is abstract and must refuse evaluation."""
    def test_errors(self):
        with self.assertRaises(NotImplementedError):
            utils.evaluate_declaration(declarations.OrderedDeclaration())
class DigTestCase(unittest.TestCase):
    """Tests for deepgetattr: dotted-path lookup with optional default."""
    class MyObj(object):
        # Minimal attribute holder used to build a.b.c chains.
        def __init__(self, n):
            self.n = n
    def test_chaining(self):
        obj = self.MyObj(1)
        obj.a = self.MyObj(2)
        obj.a.b = self.MyObj(3)
        obj.a.b.c = self.MyObj(4)
        self.assertEqual(2, declarations.deepgetattr(obj, 'a').n)
        # Missing attribute without a default raises AttributeError.
        self.assertRaises(AttributeError, declarations.deepgetattr, obj, 'b')
        self.assertEqual(2, declarations.deepgetattr(obj, 'a.n'))
        # Missing leaf with a default returns the default instead.
        self.assertEqual(3, declarations.deepgetattr(obj, 'a.c', 3))
        self.assertRaises(AttributeError, declarations.deepgetattr, obj, 'a.c.n')
        self.assertRaises(AttributeError, declarations.deepgetattr, obj, 'a.d')
        self.assertEqual(3, declarations.deepgetattr(obj, 'a.b').n)
        self.assertEqual(3, declarations.deepgetattr(obj, 'a.b.n'))
        self.assertEqual(4, declarations.deepgetattr(obj, 'a.b.c').n)
        self.assertEqual(4, declarations.deepgetattr(obj, 'a.b.c.n'))
        self.assertEqual(42, declarations.deepgetattr(obj, 'a.b.c.n.x', 42))
class SelfAttributeTestCase(unittest.TestCase):
    """SelfAttribute parsing: each leading dot adds one level of depth."""
    def test_standard(self):
        a = declarations.SelfAttribute('foo.bar.baz')
        self.assertEqual(0, a.depth)
        self.assertEqual('foo.bar.baz', a.attribute_name)
        self.assertEqual(declarations._UNSPECIFIED, a.default)
    def test_dot(self):
        a = declarations.SelfAttribute('.bar.baz')
        self.assertEqual(1, a.depth)
        self.assertEqual('bar.baz', a.attribute_name)
        self.assertEqual(declarations._UNSPECIFIED, a.default)
    def test_default(self):
        # Second positional argument becomes the default value.
        a = declarations.SelfAttribute('bar.baz', 42)
        self.assertEqual(0, a.depth)
        self.assertEqual('bar.baz', a.attribute_name)
        self.assertEqual(42, a.default)
    def test_parent(self):
        a = declarations.SelfAttribute('..bar.baz')
        self.assertEqual(2, a.depth)
        self.assertEqual('bar.baz', a.attribute_name)
        self.assertEqual(declarations._UNSPECIFIED, a.default)
    def test_grandparent(self):
        a = declarations.SelfAttribute('...bar.baz')
        self.assertEqual(3, a.depth)
        self.assertEqual('bar.baz', a.attribute_name)
        self.assertEqual(declarations._UNSPECIFIED, a.default)
class IteratorTestCase(unittest.TestCase):
    """Iterator declaration: cycling, reset, and the getter hook."""
    def test_cycle(self):
        it = declarations.Iterator([1, 2])
        self.assertEqual(1, utils.evaluate_declaration(it, force_sequence=0))
        self.assertEqual(2, utils.evaluate_declaration(it, force_sequence=1))
        self.assertEqual(1, utils.evaluate_declaration(it, force_sequence=2))
        self.assertEqual(2, utils.evaluate_declaration(it, force_sequence=3))
    def test_no_cycling(self):
        # cycle=False exhausts the source and then raises StopIteration.
        it = declarations.Iterator([1, 2], cycle=False)
        self.assertEqual(1, utils.evaluate_declaration(it, force_sequence=0))
        self.assertEqual(2, utils.evaluate_declaration(it, force_sequence=1))
        self.assertRaises(StopIteration, utils.evaluate_declaration, it, force_sequence=2)
    def test_initial_reset(self):
        # Resetting before any evaluation must not blow up.
        it = declarations.Iterator([1, 2])
        it.reset()
    def test_reset_cycle(self):
        it = declarations.Iterator([1, 2])
        self.assertEqual(1, utils.evaluate_declaration(it, force_sequence=0))
        self.assertEqual(2, utils.evaluate_declaration(it, force_sequence=1))
        self.assertEqual(1, utils.evaluate_declaration(it, force_sequence=2))
        self.assertEqual(2, utils.evaluate_declaration(it, force_sequence=3))
        self.assertEqual(1, utils.evaluate_declaration(it, force_sequence=4))
        it.reset()
        # After reset the stream starts over from the first element.
        self.assertEqual(1, utils.evaluate_declaration(it, force_sequence=5))
        self.assertEqual(2, utils.evaluate_declaration(it, force_sequence=6))
    def test_reset_no_cycling(self):
        it = declarations.Iterator([1, 2], cycle=False)
        self.assertEqual(1, utils.evaluate_declaration(it, force_sequence=0))
        self.assertEqual(2, utils.evaluate_declaration(it, force_sequence=1))
        self.assertRaises(StopIteration, utils.evaluate_declaration, it, force_sequence=2)
        it.reset()
        self.assertEqual(1, utils.evaluate_declaration(it, force_sequence=0))
        self.assertEqual(2, utils.evaluate_declaration(it, force_sequence=1))
        self.assertRaises(StopIteration, utils.evaluate_declaration, it, force_sequence=2)
    def test_getter(self):
        # getter extracts the value from each yielded item.
        it = declarations.Iterator([(1, 2), (1, 3)], getter=lambda p: p[1])
        self.assertEqual(2, utils.evaluate_declaration(it, force_sequence=0))
        self.assertEqual(3, utils.evaluate_declaration(it, force_sequence=1))
        self.assertEqual(2, utils.evaluate_declaration(it, force_sequence=2))
        self.assertEqual(3, utils.evaluate_declaration(it, force_sequence=3))
class PostGenerationDeclarationTestCase(unittest.TestCase):
    """PostGeneration hooks receive prefixed kwargs (foo__bar -> bar)."""
    def test_post_generation(self):
        call_params = []
        def foo(*args, **kwargs):
            call_params.append(args)
            call_params.append(kwargs)
        helpers.build(
            dict,
            foo=declarations.PostGeneration(foo),
            foo__bar=42,
            blah=42,
            blah__baz=1,
        )
        # One call recorded: positional args then keyword args.
        self.assertEqual(2, len(call_params))
        self.assertEqual(3, len(call_params[0]))  # instance, step, context.value
        self.assertEqual({'bar': 42}, call_params[1])
    def test_decorator_simple(self):
        # Same behaviour when the hook is declared via the decorator.
        call_params = []
        @helpers.post_generation
        def foo(*args, **kwargs):
            call_params.append(args)
            call_params.append(kwargs)
        helpers.build(
            dict,
            foo=foo,
            foo__bar=42,
            blah=42,
            blah__baz=1,
        )
        self.assertEqual(2, len(call_params))
        self.assertEqual(3, len(call_params[0]))  # instance, step, context.value
        self.assertEqual({'bar': 42}, call_params[1])
class FactoryWrapperTestCase(unittest.TestCase):
    """_FactoryWrapper: lazy, cached resolution of a class or dotted path."""
    def test_invalid_path(self):
        # A dotted path must contain at least one dot; other types rejected.
        self.assertRaises(ValueError, declarations._FactoryWrapper, 'UnqualifiedSymbol')
        self.assertRaises(ValueError, declarations._FactoryWrapper, 42)
    def test_class(self):
        w = declarations._FactoryWrapper(datetime.date)
        self.assertEqual(datetime.date, w.get())
    def test_path(self):
        w = declarations._FactoryWrapper('datetime.date')
        self.assertEqual(datetime.date, w.get())
    def test_lazyness(self):
        # The import is deferred until .get() is called.
        f = declarations._FactoryWrapper('factory.declarations.Sequence')
        self.assertEqual(None, f.factory)
        factory_class = f.get()
        self.assertEqual(declarations.Sequence, factory_class)
    def test_cache(self):
        """Ensure that _FactoryWrapper tries to import only once."""
        orig_date = datetime.date
        w = declarations._FactoryWrapper('datetime.date')
        self.assertEqual(None, w.factory)
        factory_class = w.get()
        self.assertEqual(orig_date, factory_class)
        try:
            # Modify original value
            datetime.date = None
            # Repeat import
            factory_class = w.get()
            self.assertEqual(orig_date, factory_class)
        finally:
            # IMPORTANT: restore attribute.
            datetime.date = orig_date
class PostGenerationMethodCallTestCase(unittest.TestCase):
    """PostGenerationMethodCall: argument forwarding to the named method."""
    def build(self, declaration, **params):
        # Build a MagicMock-backed factory whose `post` slot holds the
        # declaration under test; the mock records the method call.
        f = helpers.make_factory(mock.MagicMock, post=declaration)
        return f(**params)
    def test_simplest_setup_and_call(self):
        obj = self.build(
            declarations.PostGenerationMethodCall('method'),
        )
        obj.method.assert_called_once_with()
    def test_call_with_method_args(self):
        obj = self.build(
            declarations.PostGenerationMethodCall('method', 'data'),
        )
        obj.method.assert_called_once_with('data')
    def test_call_with_passed_extracted_string(self):
        # A value passed at build time overrides the declared argument.
        obj = self.build(
            declarations.PostGenerationMethodCall('method'),
            post='data',
        )
        obj.method.assert_called_once_with('data')
    def test_call_with_passed_extracted_int(self):
        obj = self.build(
            declarations.PostGenerationMethodCall('method'),
            post=1,
        )
        obj.method.assert_called_once_with(1)
    def test_call_with_passed_extracted_iterable(self):
        # Iterables are forwarded as a single positional argument.
        obj = self.build(
            declarations.PostGenerationMethodCall('method'),
            post=(1, 2, 3),
        )
        obj.method.assert_called_once_with((1, 2, 3))
    def test_call_with_method_kwargs(self):
        obj = self.build(
            declarations.PostGenerationMethodCall('method', data='data'),
        )
        obj.method.assert_called_once_with(data='data')
    def test_call_with_passed_kwargs(self):
        obj = self.build(
            declarations.PostGenerationMethodCall('method'),
            post__data='other',
        )
        obj.method.assert_called_once_with(data='other')
    def test_multi_call_with_multi_method_args(self):
        # More than one declared positional argument is rejected.
        with self.assertRaises(errors.InvalidDeclarationError):
            self.build(
                declarations.PostGenerationMethodCall('method', 'arg1', 'arg2'),
            )
class PostGenerationOrdering(unittest.TestCase):
    """Post-generation hooks run in declaration order, not alphabetically."""
    def test_post_generation_declaration_order(self):
        postgen_results = []
        class Related(base.Factory):
            class Meta:
                model = mock.MagicMock()
        class Ordered(base.Factory):
            class Meta:
                model = mock.MagicMock()
            a = declarations.RelatedFactory(Related)
            z = declarations.RelatedFactory(Related)
            @helpers.post_generation
            def a1(*args, **kwargs):
                postgen_results.append('a1')
            @helpers.post_generation
            def zz(*args, **kwargs):
                postgen_results.append('zz')
            @helpers.post_generation
            def aa(*args, **kwargs):
                postgen_results.append('aa')
        # Declaration order is preserved even though 'aa' < 'zz' etc.
        postgen_names = Ordered._meta.post_declarations.sorted()
        self.assertEqual(postgen_names, ['a', 'z', 'a1', 'zz', 'aa'])
        # Test generation happens in desired order
        Ordered()
        self.assertEqual(postgen_results, ['a1', 'zz', 'aa'])
if __name__ == '__main__':  # pragma: no cover
    unittest.main()
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_serialization import jsonutils
from oslo_utils import reflection
from heatclient._i18n import _
# Module-level verbosity flag; when truthy, HTTPException.__str__ also
# prints the server-provided traceback.
verbose = 0
class BaseException(Exception):
    """An error occurred."""
    # NOTE: the docstring above doubles as the default error text via
    # __class__.__doc__ — do not reword it casually.
    def __init__(self, message=None):
        self.message = message
    def __str__(self):
        # Fall back to the class docstring when no (truthy) message is set.
        if self.message:
            return self.message
        return self.__class__.__doc__
# Thin marker subclasses: each docstring is also the user-visible default
# message (see BaseException.__str__), so the text is load-bearing.
class CommandError(BaseException):
    """Invalid usage of CLI."""
class InvalidEndpoint(BaseException):
    """The provided endpoint is invalid."""
class CommunicationError(BaseException):
    """Unable to communicate with server."""
class HTTPException(BaseException):
    """Base exception for all HTTP-derived exceptions."""
    # Subclasses set their numeric status; 'N/A' means "unknown until a
    # code is supplied at construction time".
    code = 'N/A'
    def __init__(self, message=None, code=None):
        super(HTTPException, self).__init__(message)
        try:
            # `message` is expected to be a JSON body with an 'error' key.
            self.error = jsonutils.loads(message)
            if 'error' not in self.error:
                raise KeyError(_('Key "error" not exists'))
        except KeyError:
            # NOTE(jianingy): If key 'error' happens not exist,
            # self.message becomes no sense. In this case, we
            # return doc of current exception class instead.
            self.error = {'error':
                          {'message': self.__class__.__doc__}}
        except Exception:
            # Body was not JSON at all: fall back to the raw message or,
            # failing that, the class docstring.
            self.error = {'error':
                          {'message': self.message or self.__class__.__doc__}}
        if self.code == "N/A" and code is not None:
            self.code = code
    def __str__(self):
        message = self.error['error'].get('message', 'Internal Error')
        if verbose:
            # Verbose mode appends the server-side traceback, if any.
            traceback = self.error['error'].get('traceback', '')
            return (_('ERROR: %(message)s\n%(traceback)s') %
                    {'message': message, 'traceback': traceback})
        else:
            return _('ERROR: %s') % message
class HTTPMultipleChoices(HTTPException):
    # HTTP 300: treated here as "requested API version unavailable".
    code = 300
    def __str__(self):
        self.details = _("Requested version of Heat API is not available.")
        return (_("%(name)s (HTTP %(code)s) %(details)s") %
                {
                    'name': reflection.get_class_name(self, fully_qualified=False),
                    'code': self.code,
                    'details': self.details})
# One class per HTTP status code.  The bare names (BadRequest, ...) are
# deprecated aliases kept for backwards compatibility; the HTTP-prefixed
# subclasses are the supported spellings and are the ones picked up by
# the _code_map registry below.
class BadRequest(HTTPException):
    """DEPRECATED."""
    code = 400
class HTTPBadRequest(BadRequest):
    pass
class Unauthorized(HTTPException):
    """DEPRECATED."""
    code = 401
class HTTPUnauthorized(Unauthorized):
    pass
class Forbidden(HTTPException):
    """DEPRECATED."""
    code = 403
class HTTPForbidden(Forbidden):
    pass
class NotFound(HTTPException):
    """DEPRECATED."""
    code = 404
class HTTPNotFound(NotFound):
    pass
class NoUniqueMatch(HTTPException):
    # No dedicated status code; inherits 'N/A' from HTTPException.
    pass
class HTTPMethodNotAllowed(HTTPException):
    code = 405
class Conflict(HTTPException):
    """DEPRECATED."""
    code = 409
class HTTPConflict(Conflict):
    pass
class OverLimit(HTTPException):
    """DEPRECATED."""
    code = 413
class HTTPOverLimit(OverLimit):
    pass
class HTTPUnsupported(HTTPException):
    code = 415
class HTTPInternalServerError(HTTPException):
    code = 500
class HTTPNotImplemented(HTTPException):
    code = 501
class HTTPBadGateway(HTTPException):
    code = 502
class ServiceUnavailable(HTTPException):
    """DEPRECATED."""
    code = 503
class HTTPServiceUnavailable(ServiceUnavailable):
    pass
# NOTE(bcwaldon): Build a mapping of HTTP codes to corresponding exception
# classes
# Only names starting with 'HTTP' are registered; note that HTTPException
# itself matches and lands under its 'N/A' code.
_code_map = {}
for obj_name in dir(sys.modules[__name__]):
    if obj_name.startswith('HTTP'):
        obj = getattr(sys.modules[__name__], obj_name)
        _code_map[obj.code] = obj
def from_response(response):
    """Return an instance of an HTTPException based on requests response."""
    # Unknown status codes fall back to the generic HTTPException.
    try:
        exc_class = _code_map[response.status_code]
    except KeyError:
        exc_class = HTTPException
    return exc_class(response.content, response.status_code)
class NoTokenLookupException(Exception):
    """DEPRECATED."""
    pass
class EndpointNotFound(Exception):
    """DEPRECATED."""
    pass
# Raised when a stack operation fails; not an HTTP-level error.
class StackFailure(Exception):
    pass
| |
#!/usr/bin/env python
"""Script to read the libphonenumber per-prefix metadata and generate Python code.
Invocation:
buildprefixdata.py [options] indir outfile module_prefix
Processes all of the per-prefix data under the given input directory and emit
generated Python code.
Options:
--var XXX : use this prefix for variable names in generated code
--flat : don't do per-locale processing
--sep C : expect metadata to be a list with C as separator
"""
# Based on original metadata data files from libphonenumber:
# resources/geocoding/*/*.txt, resources/carrier/*/*.txt
# Copyright (C) 2011 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import glob
import re
import getopt
import datetime
# Use the local code in preference to any pre-installed version
sys.path.insert(0, '../../python')
from phonenumbers.util import prnt, rpr
# Metadata files carry one prefix per line: '<prefix>|<stringdata>',
# optionally preceded by '+'; blank and '#'-comment lines are skipped.
PREFIXDATA_SUFFIX = ".txt"
BLANK_LINE_RE = re.compile(r'^\s*$', re.UNICODE)
COMMENT_LINE_RE = re.compile(r'^\s*#.*$', re.UNICODE)
DATA_LINE_RE = re.compile(r'^\+?(?P<prefix>\d+)\|(?P<stringdata>.*)$', re.UNICODE)
# Boilerplate header
# Module prologs written at the top of generated files; the %(module)s
# placeholder is filled with the target package name.
PREFIXDATA_LOCALE_FILE_PROLOG = '''"""Per-prefix data, mapping each prefix to a dict of locale:name.
Auto-generated file, do not edit by hand.
"""
from %(module)s.util import u
'''
PREFIXDATA_FILE_PROLOG = '''"""Per-prefix data, mapping each prefix to a name.
Auto-generated file, do not edit by hand.
"""
from %(module)s.util import u
'''
# Copyright notice covering the XML metadata; include current year.
COPYRIGHT_NOTICE = """# Copyright (C) 2011-%s The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" % datetime.datetime.now().year
def load_locale_prefixdata_file(prefixdata, filename, locale=None, overall_prefix=None, separator=None):
    """Load per-prefix data from the given file, for the given locale and prefix.

    We assume that this file:
     - is encoded in UTF-8
     - may have comment lines (starting with #) and blank lines
     - has data lines of the form '<prefix>|<stringdata>'
     - contains only data for prefixes that are extensions of the filename.

    If overall_prefix is specified, lines are checked to ensure their prefix
    falls within this value.
    If locale is specified, prefixdata[prefix][locale] is filled in; otherwise,
    just prefixdata[prefix].
    If separator is specified, the string data will be split on this separator,
    and the output values in the dict will be tuples of strings rather than
    strings.
    """
    with open(filename, "rb") as infile:
        lineno = 0
        for line in infile:
            uline = line.decode('utf-8')
            lineno += 1
            dm = DATA_LINE_RE.match(uline)
            if dm:
                prefix = dm.group('prefix')
                stringdata = dm.group('stringdata')
                if stringdata != stringdata.rstrip():
                    print("%s:%d: Warning: stripping trailing whitespace" % (filename, lineno))
                    stringdata = stringdata.rstrip()
                if overall_prefix is not None and not prefix.startswith(overall_prefix):
                    raise Exception("%s:%d: Prefix %s is not within %s" %
                                    (filename, lineno, prefix, overall_prefix))
                if separator is not None:
                    stringdata = tuple(stringdata.split(separator))
                if locale is not None:
                    # Per-locale layout: prefix => {locale: description}.
                    prefixdata.setdefault(prefix, {})[locale] = stringdata
                else:
                    # Flat layout: prefix => description (or tuple thereof).
                    # (Previously an empty dict was created here and then
                    # immediately overwritten; store the value directly.)
                    prefixdata[prefix] = stringdata
            elif BLANK_LINE_RE.match(uline):
                pass
            elif COMMENT_LINE_RE.match(uline):
                pass
            else:
                # Report the decoded text rather than the raw bytes object,
                # which would render as b'...' in the message on Python 3.
                raise Exception("%s:%d: Unexpected line format: %s" %
                                (filename, lineno, uline))
def load_locale_prefixdata(indir, separator=None):
    """Load per-prefix data from the given top-level directory.

    Prefix data is assumed to be held in files <indir>/<locale>/<prefix>.txt.
    The same prefix may occur in multiple files, giving the prefix's
    description in different locales.
    """
    prefixdata = {}  # prefix => dict mapping locale to description
    for locale in os.listdir(indir):
        locale_dir = os.path.join(indir, locale)
        if not os.path.isdir(locale_dir):
            continue
        pattern = os.path.join(locale_dir, "*%s" % PREFIXDATA_SUFFIX)
        for filename in glob.glob(pattern):
            # The filename (sans extension) is the prefix all lines must extend.
            overall_prefix = os.path.splitext(os.path.basename(filename))[0]
            load_locale_prefixdata_file(prefixdata, filename, locale, overall_prefix, separator)
    return prefixdata
def _stable_dict_repr(strdict):
    """Return a repr() for a dict keyed by a string, in sorted key order"""
    entries = ("'%s': %s" % (key, rpr(strdict[key])) for key in sorted(strdict))
    return "{%s}" % ", ".join(entries)
def _tuple_repr(data):
    """Return a repr() for a list/tuple"""
    if len(data) == 1:
        # Single-element tuples need the trailing comma.
        return "(%s,)" % rpr(data[0])
    return "(%s)" % ", ".join(rpr(item) for item in data)
def output_prefixdata_code(prefixdata, outfilename, module_prefix, varprefix, per_locale):
    """Output the per-prefix data in Python form to the given file """
    with open(outfilename, "w") as outfile:
        if per_locale:
            prolog = PREFIXDATA_LOCALE_FILE_PROLOG
        else:
            prolog = PREFIXDATA_FILE_PROLOG
        prnt(prolog % {'module': module_prefix}, file=outfile)
        prnt(COPYRIGHT_NOTICE, file=outfile)
        prnt("%s_DATA = {" % varprefix, file=outfile)
        longest_prefix = 0
        for prefix in sorted(prefixdata.keys()):
            longest_prefix = max(longest_prefix, len(prefix))
            if per_locale:
                value_repr = _stable_dict_repr(prefixdata[prefix])
            else:
                value_repr = _tuple_repr(prefixdata[prefix])
            prnt(" '%s':%s," % (prefix, value_repr), file=outfile)
        prnt("}", file=outfile)
        # Emitted so lookup code knows how many digits to consider at most.
        prnt("%s_LONGEST_PREFIX = %d" % (varprefix, longest_prefix), file=outfile)
def _standalone(argv):
    """Parse the given input directory and emit generated code."""
    def _usage_exit():
        # Print the module docstring (usage text) and abort.
        prnt(__doc__, file=sys.stderr)
        sys.exit(1)

    varprefix = "GEOCODE"
    per_locale = True
    separator = None
    try:
        opts, args = getopt.getopt(argv, "hv:fs:", ("help", "var=", "flat", "sep="))
    except getopt.GetoptError:
        _usage_exit()
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            _usage_exit()
        elif opt in ("-v", "--var"):
            varprefix = arg
        elif opt in ("-f", "--flat"):
            per_locale = False
        elif opt in ("-s", "--sep"):
            separator = arg
        else:
            prnt("Unknown option %s" % opt, file=sys.stderr)
            _usage_exit()
    if len(args) != 3:
        _usage_exit()
    indir, outfilename, module_prefix = args
    if per_locale:
        prefixdata = load_locale_prefixdata(indir, separator=separator)
    else:
        prefixdata = {}
        load_locale_prefixdata_file(prefixdata, indir, separator=separator)
    output_prefixdata_code(prefixdata, outfilename, module_prefix, varprefix, per_locale)
# Script entry point: pass command-line arguments (minus the program name).
if __name__ == "__main__":
    _standalone(sys.argv[1:])
| |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Magnetic space groups.
"""
import os
import sqlite3
import textwrap
from array import array
from fractions import Fraction
import numpy as np
from monty.design_patterns import cached_class
from pymatgen.core.operations import MagSymmOp
from pymatgen.electronic_structure.core import Magmom
from pymatgen.symmetry.groups import SymmetryGroup, in_array_list
from pymatgen.symmetry.settings import JonesFaithfulTransformation
from pymatgen.util.string import transformation_to_string
__author__ = "Matthew Horton, Shyue Ping Ong"
# Bundled sqlite3 database holding the ISO-MAG magnetic space-group tables,
# shipped alongside this module.
MAGSYMM_DATA = os.path.join(os.path.dirname(__file__), "symm_data_magnetic.sqlite")
@cached_class
class MagneticSpaceGroup(SymmetryGroup):
    """
    Representation of a magnetic space group.
    """

    def __init__(self, id, setting_transformation="a,b,c;0,0,0"):
        """
        Initializes a MagneticSpaceGroup from its Belov, Neronova and
        Smirnova (BNS) number supplied as a list or its label supplied
        as a string. To create a magnetic structure in pymatgen, the
        Structure.from_magnetic_spacegroup() method can be used, which
        relies on this class.

        The main difference between magnetic space groups and normal
        crystallographic space groups is the inclusion of a time reversal
        operator that acts on an atom's magnetic moment. This is
        indicated by a prime symbol (') next to the respective symmetry
        operation in its label, e.g. the standard crystallographic
        space group Pnma has magnetic subgroups Pn'ma, Pnm'a, Pnma',
        Pn'm'a, Pnm'a', Pn'ma', Pn'm'a'.

        The magnetic space groups are classified as one of 4 types
        where G = magnetic space group, and F = parent crystallographic
        space group:

        1. G=F no time reversal, i.e. the same as corresponding
           crystallographic group
        2. G=F+F1', "grey" groups, where avg. magnetic moment is zero,
           e.g. a paramagnet in zero ext. mag. field
        3. G=D+(F-D)1', where D is an equi-translation subgroup of F of
           index 2, lattice translations do not include time reversal
        4. G=D+(F-D)1', where D is an equi-class subgroup of F of index 2

        There are two common settings for magnetic space groups, BNS
        and OG. In case 4, the BNS setting != OG setting, and so a
        transformation to go between the two settings is required:
        specifically, the BNS setting is derived from D, and the OG
        setting is derived from F.

        This means that the OG setting refers to the unit cell if magnetic
        order is neglected, and requires multiple unit cells to reproduce
        the full crystal periodicity when magnetic moments are present.
        This does not make the OG setting, in general, useful for
        electronic structure calculations and the BNS setting is preferred.
        However, this class does contain information on the OG setting and
        can be initialized from OG labels or numbers if required.

        Conventions: ITC monoclinic unique axis b, monoclinic cell choice 1,
        hexagonal axis for trigonal groups, origin choice 2 for groups with
        more than one origin choice (ISO-MAG).

        Raw data comes from ISO-MAG, ISOTROPY Software Suite, iso.byu.edu
        http://stokes.byu.edu/iso/magnetic_data.txt
        with kind permission from Professor Branton Campbell, BYU

        Data originally compiled from:
        (1) Daniel B. Litvin, Magnetic Group Tables (International Union
            of Crystallography, 2013) www.iucr.org/publ/978-0-9553602-2-0.
        (2) C. J. Bradley and A. P. Cracknell, The Mathematical Theory of
            Symmetry in Solids (Clarendon Press, Oxford, 1972).

        See http://stokes.byu.edu/iso/magneticspacegroupshelp.php for more
        information on magnetic symmetry.

        :param id: BNS number supplied as list of 2 ints or BNS label as
            str or index as int (1-1651) to iterate over all space groups
        :raises ValueError: if no space group matches the given id.
        """
        self._data = {}
        # Datafile is stored as sqlite3 database since (a) it can be easily
        # queried for various different indexes (BNS/OG number/labels) and (b)
        # allows binary data to be stored in a compact form similar to that in
        # the source data file, significantly reducing file size.
        # Note that a human-readable JSON format was tested first but was 20x
        # larger and required *much* longer initial loading times.

        # retrieve raw data
        db = sqlite3.connect(MAGSYMM_DATA)
        c = db.cursor()
        if isinstance(id, str):
            id = "".join(id.split())  # remove any white space
            c.execute("SELECT * FROM space_groups WHERE BNS_label=?;", (id,))
        elif isinstance(id, list):
            c.execute("SELECT * FROM space_groups WHERE BNS1=? AND BNS2=?;", (id[0], id[1]))
        elif isinstance(id, int):
            # OG3 index is a 'master' index, going from 1 to 1651
            c.execute("SELECT * FROM space_groups WHERE OG3=?;", (id,))
        row = c.fetchone()
        if row is None:
            # Previously an unknown id surfaced as an opaque TypeError when
            # list(None) was attempted; raise a meaningful error instead.
            db.close()
            raise ValueError("No magnetic space group found for id: {}".format(id))
        raw_data = list(row)

        # Jones Faithful transformation
        self.jf = JonesFaithfulTransformation.from_transformation_string("a,b,c;0,0,0")
        if isinstance(setting_transformation, str):
            if setting_transformation != "a,b,c;0,0,0":
                self.jf = JonesFaithfulTransformation.from_transformation_string(setting_transformation)
        elif isinstance(setting_transformation, JonesFaithfulTransformation):
            if setting_transformation != self.jf:
                self.jf = setting_transformation

        self._data["magtype"] = raw_data[0]  # int from 1 to 4
        self._data["bns_number"] = [raw_data[1], raw_data[2]]
        self._data["bns_label"] = raw_data[3]
        self._data["og_number"] = [raw_data[4], raw_data[5], raw_data[6]]
        self._data["og_label"] = raw_data[7]  # can differ from BNS_label

        def _get_point_operator(idx):
            """Retrieve information on point operator (rotation matrix and Seitz label)."""
            # Trigonal/hexagonal groups (BNS numbers 143-194) use the
            # hexagonal point-operator table.
            is_hex = self._data["bns_number"][0] >= 143 and self._data["bns_number"][0] <= 194
            c.execute(
                "SELECT symbol, matrix FROM point_operators WHERE idx=? AND hex=?;",
                (idx - 1, is_hex),
            )
            op = c.fetchone()
            op = {
                "symbol": op[0],
                "matrix": np.array(op[1].split(","), dtype="f").reshape(3, 3),
            }
            return op

        def _parse_operators(b):
            """Parses compact binary representation into list of MagSymmOps."""
            if len(b) == 0:  # e.g. if magtype != 4, OG setting == BNS setting, and b == [] for OG symmops
                return None
            # Each symop is encoded as 6 bytes:
            # point-op index, translation numerators (3), denominator, time reversal.
            raw_symops = [b[i : i + 6] for i in range(0, len(b), 6)]
            symops = []
            for r in raw_symops:
                point_operator = _get_point_operator(r[0])
                translation_vec = [r[1] / r[4], r[2] / r[4], r[3] / r[4]]
                time_reversal = r[5]
                op = MagSymmOp.from_rotation_and_translation_and_time_reversal(
                    rotation_matrix=point_operator["matrix"],
                    translation_vec=translation_vec,
                    time_reversal=time_reversal,
                )
                # store string representation, e.g. (2x|1/2,1/2,1/2)'
                seitz = "({}|{},{},{})".format(
                    point_operator["symbol"],
                    Fraction(translation_vec[0]),
                    Fraction(translation_vec[1]),
                    Fraction(translation_vec[2]),
                )
                if time_reversal == -1:
                    seitz += "'"
                symops.append({"op": op, "str": seitz})
            return symops

        def _parse_wyckoff(b):
            """Parses compact binary representation into list of Wyckoff sites."""
            if len(b) == 0:
                return None

            wyckoff_sites = []

            def get_label(idx):
                if idx <= 25:
                    return chr(97 + idx)  # returns a-z when idx 0-25
                return "alpha"  # when a-z labels exhausted, use alpha, only relevant for a few space groups

            o = 0  # offset
            n = 1  # nth Wyckoff site
            num_wyckoff = b[0]
            while len(wyckoff_sites) < num_wyckoff:
                m = b[1 + o]  # multiplicity
                label = str(b[2 + o] * m) + get_label(num_wyckoff - n)
                sites = []
                for j in range(m):
                    s = b[3 + o + (j * 22) : 3 + o + (j * 22) + 22]  # data corresponding to specific Wyckoff position
                    translation_vec = [s[0] / s[3], s[1] / s[3], s[2] / s[3]]
                    matrix = [
                        [s[4], s[7], s[10]],
                        [s[5], s[8], s[11]],
                        [s[6], s[9], s[12]],
                    ]
                    matrix_magmom = [
                        [s[13], s[16], s[19]],
                        [s[14], s[17], s[20]],
                        [s[15], s[18], s[21]],
                    ]
                    # store string representation, e.g. (x,y,z;mx,my,mz)
                    wyckoff_str = "({};{})".format(
                        transformation_to_string(matrix, translation_vec),
                        transformation_to_string(matrix_magmom, c="m"),
                    )
                    sites.append(
                        {
                            "translation_vec": translation_vec,
                            "matrix": matrix,
                            "matrix_magnetic": matrix_magmom,
                            "str": wyckoff_str,
                        }
                    )
                # only keeping string representation of Wyckoff sites for now
                # could do something else with these in future
                wyckoff_sites.append({"label": label, "str": " ".join([s["str"] for s in sites])})
                n += 1
                o += m * 22 + 2
            return wyckoff_sites

        def _parse_lattice(b):
            """Parses compact binary representation into list of lattice vectors/centerings."""
            if len(b) == 0:
                return None
            raw_lattice = [b[i : i + 4] for i in range(0, len(b), 4)]
            lattice = []
            for r in raw_lattice:
                lattice.append(
                    {
                        "vector": [r[0] / r[3], r[1] / r[3], r[2] / r[3]],
                        "str": "({},{},{})+".format(
                            Fraction(r[0] / r[3]).limit_denominator(),
                            Fraction(r[1] / r[3]).limit_denominator(),
                            Fraction(r[2] / r[3]).limit_denominator(),
                        ),
                    }
                )
            return lattice

        def _parse_transformation(b):
            """Parses compact binary representation into transformation between OG and BNS settings."""
            if len(b) == 0:
                return None
            # capital letters used here by convention,
            # IUCr defines P and p specifically
            P = [[b[0], b[3], b[6]], [b[1], b[4], b[7]], [b[2], b[5], b[8]]]
            p = [b[9] / b[12], b[10] / b[12], b[11] / b[12]]
            P = np.array(P).transpose()
            P_string = transformation_to_string(P, components=("a", "b", "c"))
            p_string = "{},{},{}".format(
                Fraction(p[0]).limit_denominator(),
                Fraction(p[1]).limit_denominator(),
                Fraction(p[2]).limit_denominator(),
            )
            return P_string + ";" + p_string

        for i in range(8, 15):
            try:
                raw_data[i] = array("b", raw_data[i])  # construct array from sql binary blobs
            except Exception:
                # array() behavior changed, need to explicitly convert buffer to str in earlier Python
                raw_data[i] = array("b", str(raw_data[i]))

        self._data["og_bns_transform"] = _parse_transformation(raw_data[8])
        self._data["bns_operators"] = _parse_operators(raw_data[9])
        self._data["bns_lattice"] = _parse_lattice(raw_data[10])
        self._data["bns_wyckoff"] = _parse_wyckoff(raw_data[11])
        self._data["og_operators"] = _parse_operators(raw_data[12])
        self._data["og_lattice"] = _parse_lattice(raw_data[13])
        self._data["og_wyckoff"] = _parse_wyckoff(raw_data[14])

        db.close()

    @classmethod
    def from_og(cls, id):
        """
        Initialize from Opechowski and Guccione (OG) label or number.

        :param id: OG number supplied as list of 3 ints or
            or OG label as str
        :return: MagneticSpaceGroup
        :raises ValueError: if no space group matches the given OG id.
        """
        db = sqlite3.connect(MAGSYMM_DATA)
        c = db.cursor()
        if isinstance(id, str):
            c.execute("SELECT BNS_label FROM space_groups WHERE OG_label=?", (id,))
        elif isinstance(id, list):
            c.execute(
                "SELECT BNS_label FROM space_groups WHERE OG1=? and OG2=? and OG3=?",
                (id[0], id[1], id[2]),
            )
        row = c.fetchone()
        db.close()
        if row is None:
            # Mirror the explicit error used in __init__ for unknown ids.
            raise ValueError("No magnetic space group found for OG id: {}".format(id))
        return cls(row[0])

    def __eq__(self, other):
        # Return NotImplemented (rather than raising AttributeError) when
        # compared against a foreign type, per the Python data model.
        if not isinstance(other, MagneticSpaceGroup):
            return NotImplemented
        return self._data == other._data

    def __hash__(self):
        # Defining __eq__ without __hash__ would make instances unhashable.
        # _data contains unhashable containers, but the BNS label uniquely
        # keys the stored data, so hashing it is consistent with __eq__.
        return hash(self._data["bns_label"])

    @property
    def crystal_system(self):
        """
        :return: Crystal system, e.g., cubic, hexagonal, etc.
        """
        # Ranges follow the International Tables space-group numbering.
        i = self._data["bns_number"][0]
        if i <= 2:
            return "triclinic"
        if i <= 15:
            return "monoclinic"
        if i <= 74:
            return "orthorhombic"
        if i <= 142:
            return "tetragonal"
        if i <= 167:
            return "trigonal"
        if i <= 194:
            return "hexagonal"
        return "cubic"

    @property
    def sg_symbol(self):
        """
        :return: Space group symbol
        """
        return self._data["bns_label"]

    @property
    def symmetry_ops(self):
        """
        Retrieve magnetic symmetry operations of the space group.

        :return: List of :class:`pymatgen.core.operations.MagSymmOp`
        """
        ops = [op_data["op"] for op_data in self._data["bns_operators"]]

        # add lattice centerings
        centered_ops = []
        lattice_vectors = [lat["vector"] for lat in self._data["bns_lattice"]]

        for vec in lattice_vectors:
            if not (np.array_equal(vec, [1, 0, 0]) or np.array_equal(vec, [0, 1, 0]) or np.array_equal(vec, [0, 0, 1])):
                for op in ops:
                    new_vec = op.translation_vector + vec
                    new_op = MagSymmOp.from_rotation_and_translation_and_time_reversal(
                        op.rotation_matrix,
                        translation_vec=new_vec,
                        time_reversal=op.time_reversal,
                    )
                    centered_ops.append(new_op)

        ops = ops + centered_ops

        # apply jones faithful transformation
        ops = [self.jf.transform_symmop(op) for op in ops]

        return ops

    def get_orbit(self, p, m, tol=1e-5):
        """
        Returns the orbit for a point and its associated magnetic moment.

        Args:
            p: Point as a 3x1 array.
            m: A magnetic moment, compatible with
                :class:`pymatgen.electronic_structure.core.Magmom`
            tol: Tolerance for determining if sites are the same. 1e-5 should
                be sufficient for most purposes. Set to 0 for exact matching
                (and also needed for symbolic orbits).

        Returns:
            (([array], [array])) Tuple of orbit for point and magnetic moments for orbit.
        """
        orbit = []
        orbit_magmoms = []
        m = Magmom(m)
        for o in self.symmetry_ops:
            pp = o.operate(p)
            # wrap into the unit cell; round first to avoid 0.999... artifacts
            pp = np.mod(np.round(pp, decimals=10), 1)
            mm = o.operate_magmom(m)
            if not in_array_list(orbit, pp, tol=tol):
                orbit.append(pp)
                orbit_magmoms.append(mm)
        return orbit, orbit_magmoms

    def is_compatible(self, lattice, tol=1e-5, angle_tol=5):
        """
        Checks whether a particular lattice is compatible with the
        *conventional* unit cell.

        Args:
            lattice (Lattice): A Lattice.
            tol (float): The tolerance to check for equality of lengths.
            angle_tol (float): The tolerance to check for equality of angles
                in degrees.
        """
        # function from pymatgen.symmetry.groups.SpaceGroup
        abc = lattice.lengths
        angles = lattice.angles
        crys_system = self.crystal_system

        def check(param, ref, tolerance):
            # None entries in ref mean "unconstrained"
            return all(abs(i - j) < tolerance for i, j in zip(param, ref) if j is not None)

        if crys_system == "cubic":
            a = abc[0]
            return check(abc, [a, a, a], tol) and check(angles, [90, 90, 90], angle_tol)
        if crys_system == "hexagonal" or (crys_system == "trigonal" and self.sg_symbol.endswith("H")):
            a = abc[0]
            return check(abc, [a, a, None], tol) and check(angles, [90, 90, 120], angle_tol)
        if crys_system == "trigonal":
            a = abc[0]
            return check(abc, [a, a, a], tol)
        if crys_system == "tetragonal":
            a = abc[0]
            return check(abc, [a, a, None], tol) and check(angles, [90, 90, 90], angle_tol)
        if crys_system == "orthorhombic":
            return check(angles, [90, 90, 90], angle_tol)
        if crys_system == "monoclinic":
            return check(angles, [90, None, 90], angle_tol)
        return True

    def data_str(self, include_og=True):
        """
        Get description of all data, including information for OG setting.

        :return: str
        """
        # __str__() omits information on OG setting to reduce confusion
        # as to which set of symops are active, this property gives
        # all stored data including OG setting

        desc = {}  # dictionary to hold description strings
        description = ""

        # parse data into strings
        # indicate if non-standard setting specified
        if self.jf != JonesFaithfulTransformation.from_transformation_string("a,b,c;0,0,0"):
            description += "Non-standard setting: .....\n"
            description += self.jf.__repr__()
            description += "\n\nStandard setting information: \n"
        desc["magtype"] = self._data["magtype"]
        desc["bns_number"] = ".".join(map(str, self._data["bns_number"]))
        desc["bns_label"] = self._data["bns_label"]
        desc["og_id"] = (
            "\t\tOG: " + ".".join(map(str, self._data["og_number"])) + " " + self._data["og_label"]
            if include_og
            else ""
        )
        desc["bns_operators"] = " ".join([op_data["str"] for op_data in self._data["bns_operators"]])

        desc["bns_lattice"] = (
            " ".join([lattice_data["str"] for lattice_data in self._data["bns_lattice"][3:]])
            if len(self._data["bns_lattice"]) > 3
            else ""
        )  # don't show (1,0,0)+ (0,1,0)+ (0,0,1)+

        desc["bns_wyckoff"] = "\n".join(
            [
                textwrap.fill(
                    wyckoff_data["str"],
                    initial_indent=wyckoff_data["label"] + " ",
                    subsequent_indent=" " * len(wyckoff_data["label"] + " "),
                    break_long_words=False,
                    break_on_hyphens=False,
                )
                for wyckoff_data in self._data["bns_wyckoff"]
            ]
        )

        desc["og_bns_transformation"] = (
            "OG-BNS Transform: ({})\n".format(self._data["og_bns_transform"])
            if desc["magtype"] == 4 and include_og
            else ""
        )

        bns_operators_prefix = "Operators{}: ".format(" (BNS)" if desc["magtype"] == 4 and include_og else "")
        bns_wyckoff_prefix = "Wyckoff Positions{}: ".format(" (BNS)" if desc["magtype"] == 4 and include_og else "")

        # apply textwrap on long lines
        desc["bns_operators"] = textwrap.fill(
            desc["bns_operators"],
            initial_indent=bns_operators_prefix,
            subsequent_indent=" " * len(bns_operators_prefix),
            break_long_words=False,
            break_on_hyphens=False,
        )

        description += (
            "BNS: {d[bns_number]} {d[bns_label]}{d[og_id]}\n"
            "{d[og_bns_transformation]}"
            "{d[bns_operators]}\n"
            "{bns_wyckoff_prefix}{d[bns_lattice]}\n"
            "{d[bns_wyckoff]}"
        ).format(d=desc, bns_wyckoff_prefix=bns_wyckoff_prefix)

        if desc["magtype"] == 4 and include_og:
            desc["og_operators"] = " ".join([op_data["str"] for op_data in self._data["og_operators"]])

            # include all lattice vectors because (1,0,0)+ (0,1,0)+ (0,0,1)+
            # not always present in OG setting
            desc["og_lattice"] = " ".join([lattice_data["str"] for lattice_data in self._data["og_lattice"]])

            desc["og_wyckoff"] = "\n".join(
                [
                    textwrap.fill(
                        wyckoff_data["str"],
                        initial_indent=wyckoff_data["label"] + " ",
                        subsequent_indent=" " * len(wyckoff_data["label"] + " "),
                        break_long_words=False,
                        break_on_hyphens=False,
                    )
                    for wyckoff_data in self._data["og_wyckoff"]
                ]
            )

            og_operators_prefix = "Operators (OG): "

            # apply textwrap on long lines
            desc["og_operators"] = textwrap.fill(
                desc["og_operators"],
                initial_indent=og_operators_prefix,
                subsequent_indent=" " * len(og_operators_prefix),
                break_long_words=False,
                break_on_hyphens=False,
            )

            description += ("\n{d[og_operators]}\nWyckoff Positions (OG): {d[og_lattice]}\n" "{d[og_wyckoff]}").format(
                d=desc
            )
        elif desc["magtype"] == 4:
            description += "\nAlternative OG setting exists for this space group."

        return description

    def __str__(self):
        """
        String representation of the space group, specifying the setting
        of the space group, its magnetic symmetry operators and Wyckoff
        positions.

        :return: str
        """
        return self.data_str(include_og=False)
def _write_all_magnetic_space_groups_to_file(filename):
    """
    Write all magnetic space groups to a human-readable text file.
    Should contain same information as text files provided by ISO-MAG.

    :param filename: path of the output text file
    :return: None
    """
    parts = [
        "Data parsed from raw data from:\n"
        "ISO-MAG, ISOTROPY Software Suite, iso.byu.edu\n"
        "http://stokes.byu.edu/iso/magnetic_data.txt\n"
        "Used with kind permission from Professor Branton Campbell, BYU\n\n"
    ]
    # Collect pieces and join once: the previous `s += ...` loop over all
    # 1651 groups rebuilt an ever-growing string (quadratic behavior).
    # Also stream the groups instead of materializing them all in a list.
    for i in range(1, 1652):
        msg = MagneticSpaceGroup(i)
        parts.append(f"\n{msg.data_str()}\n\n--------\n")
    with open(filename, "w") as f:
        f.write("".join(parts))
| |
# Generated from pascal.g4 by ANTLR 4.5.3
from antlr4 import *
from io import StringIO
def serializedATN():
    """Return the serialized ATN string for the pascal lexer.

    Machine-generated by ANTLR 4.5.3 from pascal.g4 -- do not edit by
    hand; regenerate from the grammar instead. The escaped string data
    is an opaque encoding consumed by ATNDeserializer.
    """
    with StringIO() as buf:
        buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2O")
        buf.write("\u0289\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
        buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
        buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
        buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
        buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
        buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
        buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
        buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
        buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
        buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
        buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
        buf.write("L\4M\tM\4N\tN\4O\tO\4P\tP\4Q\tQ\4R\tR\4S\tS\4T\tT\4U\t")
        buf.write("U\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\tZ\4[\t[\4\\\t\\\4]\t]\4")
        buf.write("^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4c\tc\4d\td\4e\te\4f\tf\4")
        buf.write("g\tg\4h\th\4i\ti\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3")
        buf.write("\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13\3\f\3\f\3")
        buf.write("\r\3\r\3\16\3\16\3\17\3\17\3\20\3\20\3\21\3\21\3\22\3")
        buf.write("\22\3\23\3\23\3\24\3\24\3\25\3\25\3\26\3\26\3\27\3\27")
        buf.write("\3\30\3\30\3\31\3\31\3\32\3\32\3\33\3\33\3\34\3\34\3\34")
        buf.write("\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3\36")
        buf.write("\3\36\3\36\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3 ")
        buf.write("\3 \3 \3 \3 \3!\3!\3!\3!\3!\3\"\3\"\3\"\3\"\3#\3#\3#\3")
        buf.write("#\3#\3#\3$\3$\3$\3$\3%\3%\3%\3&\3&\3&\3&\3&\3&\3&\3\'")
        buf.write("\3\'\3\'\3\'\3\'\3(\3(\3(\3(\3)\3)\3)\3)\3)\3*\3*\3*\3")
        buf.write("*\3+\3+\3+\3+\3+\3+\3+\3+\3+\3,\3,\3,\3,\3,\3-\3-\3-\3")
        buf.write(".\3.\3.\3/\3/\3/\3/\3/\3/\3/\3/\3\60\3\60\3\60\3\60\3")
        buf.write("\60\3\60\3\61\3\61\3\61\3\61\3\62\3\62\3\62\3\62\3\63")
        buf.write("\3\63\3\63\3\63\3\64\3\64\3\64\3\65\3\65\3\65\3\66\3\66")
        buf.write("\3\66\3\66\3\66\3\66\3\66\3\67\3\67\3\67\3\67\3\67\3\67")
        buf.write("\3\67\3\67\3\67\3\67\38\38\38\38\38\38\38\38\39\39\39")
        buf.write("\39\39\3:\3:\3:\3:\3:\3:\3:\3;\3;\3;\3;\3;\3;\3;\3<\3")
        buf.write("<\3<\3<\3=\3=\3=\3=\3=\3>\3>\3>\3?\3?\3?\3?\3?\3@\3@\3")
        buf.write("@\3@\3@\3@\3A\3A\3A\3A\3B\3B\3B\3B\3B\3B\3C\3C\3C\3C\3")
        buf.write("C\3D\3D\3E\3E\3F\3F\3G\3G\3H\3H\3H\3I\3I\3J\3J\3K\3K\3")
        buf.write("L\3L\3M\3M\3M\3N\3N\3O\3O\3O\3P\3P\3P\3Q\3Q\3R\3R\3S\3")
        buf.write("S\3T\3T\3U\3U\3U\3V\3V\3W\3W\3W\3X\3X\3Y\3Y\3Z\3Z\3[\3")
        buf.write("[\3[\3\\\3\\\3]\3]\3^\3^\3^\3^\3^\3_\3_\3_\3_\3_\3_\3")
        buf.write("_\3_\3_\3_\3`\3`\3`\3`\3`\3a\3a\3a\3a\3a\3a\3a\3b\3b\3")
        buf.write("b\3b\3b\3b\3b\3b\3b\3b\3b\3b\3b\3b\3b\3c\3c\3c\3c\3d\3")
        buf.write("d\3d\3d\7d\u0247\nd\fd\16d\u024a\13d\3d\3d\3d\3d\3d\3")
        buf.write("e\3e\7e\u0253\ne\fe\16e\u0256\13e\3e\3e\3e\3e\3f\3f\7")
        buf.write("f\u025e\nf\ff\16f\u0261\13f\3g\3g\3g\3g\7g\u0267\ng\f")
        buf.write("g\16g\u026a\13g\3g\3g\3h\6h\u026f\nh\rh\16h\u0270\3h\3")
        buf.write("h\6h\u0275\nh\rh\16h\u0276\3h\5h\u027a\nh\5h\u027c\nh")
        buf.write("\3h\5h\u027f\nh\3i\3i\5i\u0283\ni\3i\6i\u0286\ni\ri\16")
        buf.write("i\u0287\4\u0248\u0254\2j\3\2\5\2\7\2\t\2\13\2\r\2\17\2")
        buf.write("\21\2\23\2\25\2\27\2\31\2\33\2\35\2\37\2!\2#\2%\2\'\2")
        buf.write(")\2+\2-\2/\2\61\2\63\2\65\2\67\39\4;\5=\6?\7A\bC\tE\n")
        buf.write("G\13I\fK\rM\16O\17Q\20S\21U\22W\23Y\24[\25]\26_\27a\30")
        buf.write("c\31e\32g\33i\34k\35m\36o\37q s!u\"w#y${%}&\177\'\u0081")
        buf.write("(\u0083)\u0085*\u0087+\u0089,\u008b-\u008d.\u008f/\u0091")
        buf.write("\60\u0093\61\u0095\62\u0097\63\u0099\64\u009b\65\u009d")
        buf.write("\66\u009f\67\u00a18\u00a39\u00a5:\u00a7;\u00a9<\u00ab")
        buf.write("=\u00ad>\u00af?\u00b1@\u00b3A\u00b5B\u00b7C\u00b9D\u00bb")
        buf.write("E\u00bdF\u00bfG\u00c1H\u00c3I\u00c5J\u00c7K\u00c9L\u00cb")
        buf.write("M\u00cdN\u00cfO\u00d1\2\3\2!\4\2CCcc\4\2DDdd\4\2EEee\4")
        buf.write("\2FFff\4\2GGgg\4\2HHhh\4\2IIii\4\2JJjj\4\2KKkk\4\2LLl")
        buf.write("l\4\2MMmm\4\2NNnn\4\2OOoo\4\2PPpp\4\2QQqq\4\2RRrr\4\2")
        buf.write("SSss\4\2TTtt\4\2UUuu\4\2VVvv\4\2WWww\4\2XXxx\4\2YYyy\4")
        buf.write("\2ZZzz\4\2[[{{\4\2\\\\||\5\2\13\f\17\17\"\"\4\2C\\c|\6")
        buf.write("\2\62;C\\aac|\3\2))\4\2--//\u0279\2\67\3\2\2\2\29\3\2")
        buf.write("\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3")
        buf.write("\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M")
        buf.write("\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2U\3\2\2\2\2")
        buf.write("W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2\2_\3\2\2\2")
        buf.write("\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2\2\2i\3\2\2")
        buf.write("\2\2k\3\2\2\2\2m\3\2\2\2\2o\3\2\2\2\2q\3\2\2\2\2s\3\2")
        buf.write("\2\2\2u\3\2\2\2\2w\3\2\2\2\2y\3\2\2\2\2{\3\2\2\2\2}\3")
        buf.write("\2\2\2\2\177\3\2\2\2\2\u0081\3\2\2\2\2\u0083\3\2\2\2\2")
        buf.write("\u0085\3\2\2\2\2\u0087\3\2\2\2\2\u0089\3\2\2\2\2\u008b")
        buf.write("\3\2\2\2\2\u008d\3\2\2\2\2\u008f\3\2\2\2\2\u0091\3\2\2")
        buf.write("\2\2\u0093\3\2\2\2\2\u0095\3\2\2\2\2\u0097\3\2\2\2\2\u0099")
        buf.write("\3\2\2\2\2\u009b\3\2\2\2\2\u009d\3\2\2\2\2\u009f\3\2\2")
        buf.write("\2\2\u00a1\3\2\2\2\2\u00a3\3\2\2\2\2\u00a5\3\2\2\2\2\u00a7")
        buf.write("\3\2\2\2\2\u00a9\3\2\2\2\2\u00ab\3\2\2\2\2\u00ad\3\2\2")
        buf.write("\2\2\u00af\3\2\2\2\2\u00b1\3\2\2\2\2\u00b3\3\2\2\2\2\u00b5")
        buf.write("\3\2\2\2\2\u00b7\3\2\2\2\2\u00b9\3\2\2\2\2\u00bb\3\2\2")
        buf.write("\2\2\u00bd\3\2\2\2\2\u00bf\3\2\2\2\2\u00c1\3\2\2\2\2\u00c3")
        buf.write("\3\2\2\2\2\u00c5\3\2\2\2\2\u00c7\3\2\2\2\2\u00c9\3\2\2")
        buf.write("\2\2\u00cb\3\2\2\2\2\u00cd\3\2\2\2\2\u00cf\3\2\2\2\3\u00d3")
        buf.write("\3\2\2\2\5\u00d5\3\2\2\2\7\u00d7\3\2\2\2\t\u00d9\3\2\2")
        buf.write("\2\13\u00db\3\2\2\2\r\u00dd\3\2\2\2\17\u00df\3\2\2\2\21")
        buf.write("\u00e1\3\2\2\2\23\u00e3\3\2\2\2\25\u00e5\3\2\2\2\27\u00e7")
        buf.write("\3\2\2\2\31\u00e9\3\2\2\2\33\u00eb\3\2\2\2\35\u00ed\3")
        buf.write("\2\2\2\37\u00ef\3\2\2\2!\u00f1\3\2\2\2#\u00f3\3\2\2\2")
        buf.write("%\u00f5\3\2\2\2\'\u00f7\3\2\2\2)\u00f9\3\2\2\2+\u00fb")
        buf.write("\3\2\2\2-\u00fd\3\2\2\2/\u00ff\3\2\2\2\61\u0101\3\2\2")
        buf.write("\2\63\u0103\3\2\2\2\65\u0105\3\2\2\2\67\u0107\3\2\2\2")
        buf.write("9\u010b\3\2\2\2;\u0111\3\2\2\2=\u0117\3\2\2\2?\u011f\3")
        buf.write("\2\2\2A\u0124\3\2\2\2C\u0129\3\2\2\2E\u012d\3\2\2\2G\u0133")
        buf.write("\3\2\2\2I\u0137\3\2\2\2K\u013a\3\2\2\2M\u0141\3\2\2\2")
        buf.write("O\u0146\3\2\2\2Q\u014a\3\2\2\2S\u014f\3\2\2\2U\u0153\3")
        buf.write("\2\2\2W\u015c\3\2\2\2Y\u0161\3\2\2\2[\u0164\3\2\2\2]\u0167")
        buf.write("\3\2\2\2_\u016f\3\2\2\2a\u0175\3\2\2\2c\u0179\3\2\2\2")
        buf.write("e\u017d\3\2\2\2g\u0181\3\2\2\2i\u0184\3\2\2\2k\u0187\3")
        buf.write("\2\2\2m\u018e\3\2\2\2o\u0198\3\2\2\2q\u01a0\3\2\2\2s\u01a5")
        buf.write("\3\2\2\2u\u01ac\3\2\2\2w\u01b3\3\2\2\2y\u01b7\3\2\2\2")
        buf.write("{\u01bc\3\2\2\2}\u01bf\3\2\2\2\177\u01c4\3\2\2\2\u0081")
        buf.write("\u01ca\3\2\2\2\u0083\u01ce\3\2\2\2\u0085\u01d4\3\2\2\2")
        buf.write("\u0087\u01d9\3\2\2\2\u0089\u01db\3\2\2\2\u008b\u01dd\3")
        buf.write("\2\2\2\u008d\u01df\3\2\2\2\u008f\u01e1\3\2\2\2\u0091\u01e4")
        buf.write("\3\2\2\2\u0093\u01e6\3\2\2\2\u0095\u01e8\3\2\2\2\u0097")
        buf.write("\u01ea\3\2\2\2\u0099\u01ec\3\2\2\2\u009b\u01ef\3\2\2\2")
        buf.write("\u009d\u01f1\3\2\2\2\u009f\u01f4\3\2\2\2\u00a1\u01f7\3")
        buf.write("\2\2\2\u00a3\u01f9\3\2\2\2\u00a5\u01fb\3\2\2\2\u00a7\u01fd")
        buf.write("\3\2\2\2\u00a9\u01ff\3\2\2\2\u00ab\u0202\3\2\2\2\u00ad")
        buf.write("\u0204\3\2\2\2\u00af\u0207\3\2\2\2\u00b1\u0209\3\2\2\2")
        buf.write("\u00b3\u020b\3\2\2\2\u00b5\u020d\3\2\2\2\u00b7\u0210\3")
        buf.write("\2\2\2\u00b9\u0212\3\2\2\2\u00bb\u0214\3\2\2\2\u00bd\u0219")
        buf.write("\3\2\2\2\u00bf\u0223\3\2\2\2\u00c1\u0228\3\2\2\2\u00c3")
        buf.write("\u022f\3\2\2\2\u00c5\u023e\3\2\2\2\u00c7\u0242\3\2\2\2")
        buf.write("\u00c9\u0250\3\2\2\2\u00cb\u025b\3\2\2\2\u00cd\u0262\3")
        buf.write("\2\2\2\u00cf\u026e\3\2\2\2\u00d1\u0280\3\2\2\2\u00d3\u00d4")
        buf.write("\t\2\2\2\u00d4\4\3\2\2\2\u00d5\u00d6\t\3\2\2\u00d6\6\3")
        buf.write("\2\2\2\u00d7\u00d8\t\4\2\2\u00d8\b\3\2\2\2\u00d9\u00da")
        buf.write("\t\5\2\2\u00da\n\3\2\2\2\u00db\u00dc\t\6\2\2\u00dc\f\3")
        buf.write("\2\2\2\u00dd\u00de\t\7\2\2\u00de\16\3\2\2\2\u00df\u00e0")
        buf.write("\t\b\2\2\u00e0\20\3\2\2\2\u00e1\u00e2\t\t\2\2\u00e2\22")
        buf.write("\3\2\2\2\u00e3\u00e4\t\n\2\2\u00e4\24\3\2\2\2\u00e5\u00e6")
        buf.write("\t\13\2\2\u00e6\26\3\2\2\2\u00e7\u00e8\t\f\2\2\u00e8\30")
        buf.write("\3\2\2\2\u00e9\u00ea\t\r\2\2\u00ea\32\3\2\2\2\u00eb\u00ec")
        buf.write("\t\16\2\2\u00ec\34\3\2\2\2\u00ed\u00ee\t\17\2\2\u00ee")
        buf.write("\36\3\2\2\2\u00ef\u00f0\t\20\2\2\u00f0 \3\2\2\2\u00f1")
        buf.write("\u00f2\t\21\2\2\u00f2\"\3\2\2\2\u00f3\u00f4\t\22\2\2\u00f4")
        buf.write("$\3\2\2\2\u00f5\u00f6\t\23\2\2\u00f6&\3\2\2\2\u00f7\u00f8")
        buf.write("\t\24\2\2\u00f8(\3\2\2\2\u00f9\u00fa\t\25\2\2\u00fa*\3")
        buf.write("\2\2\2\u00fb\u00fc\t\26\2\2\u00fc,\3\2\2\2\u00fd\u00fe")
        buf.write("\t\27\2\2\u00fe.\3\2\2\2\u00ff\u0100\t\30\2\2\u0100\60")
        buf.write("\3\2\2\2\u0101\u0102\t\31\2\2\u0102\62\3\2\2\2\u0103\u0104")
        buf.write("\t\32\2\2\u0104\64\3\2\2\2\u0105\u0106\t\33\2\2\u0106")
        buf.write("\66\3\2\2\2\u0107\u0108\5\3\2\2\u0108\u0109\5\35\17\2")
        buf.write("\u0109\u010a\5\t\5\2\u010a8\3\2\2\2\u010b\u010c\5\3\2")
        buf.write("\2\u010c\u010d\5%\23\2\u010d\u010e\5%\23\2\u010e\u010f")
        buf.write("\5\3\2\2\u010f\u0110\5\63\32\2\u0110:\3\2\2\2\u0111\u0112")
        buf.write("\5\5\3\2\u0112\u0113\5\13\6\2\u0113\u0114\5\17\b\2\u0114")
        buf.write("\u0115\5\23\n\2\u0115\u0116\5\35\17\2\u0116<\3\2\2\2\u0117")
        buf.write("\u0118\5\5\3\2\u0118\u0119\5\37\20\2\u0119\u011a\5\37")
        buf.write("\20\2\u011a\u011b\5\31\r\2\u011b\u011c\5\13\6\2\u011c")
        buf.write("\u011d\5\3\2\2\u011d\u011e\5\35\17\2\u011e>\3\2\2\2\u011f")
        buf.write("\u0120\5\7\4\2\u0120\u0121\5\3\2\2\u0121\u0122\5\'\24")
        buf.write("\2\u0122\u0123\5\13\6\2\u0123@\3\2\2\2\u0124\u0125\5\7")
        buf.write("\4\2\u0125\u0126\5\21\t\2\u0126\u0127\5\3\2\2\u0127\u0128")
        buf.write("\5%\23\2\u0128B\3\2\2\2\u0129\u012a\5\7\4\2\u012a\u012b")
        buf.write("\5\21\t\2\u012b\u012c\5%\23\2\u012cD\3\2\2\2\u012d\u012e")
        buf.write("\5\7\4\2\u012e\u012f\5\37\20\2\u012f\u0130\5\35\17\2\u0130")
        buf.write("\u0131\5\'\24\2\u0131\u0132\5)\25\2\u0132F\3\2\2\2\u0133")
        buf.write("\u0134\5\t\5\2\u0134\u0135\5\23\n\2\u0135\u0136\5-\27")
        buf.write("\2\u0136H\3\2\2\2\u0137\u0138\5\t\5\2\u0138\u0139\5\37")
        buf.write("\20\2\u0139J\3\2\2\2\u013a\u013b\5\t\5\2\u013b\u013c\5")
        buf.write("\37\20\2\u013c\u013d\5/\30\2\u013d\u013e\5\35\17\2\u013e")
        buf.write("\u013f\5)\25\2\u013f\u0140\5\37\20\2\u0140L\3\2\2\2\u0141")
        buf.write("\u0142\5\13\6\2\u0142\u0143\5\31\r\2\u0143\u0144\5\'\24")
        buf.write("\2\u0144\u0145\5\13\6\2\u0145N\3\2\2\2\u0146\u0147\5\13")
        buf.write("\6\2\u0147\u0148\5\35\17\2\u0148\u0149\5\t\5\2\u0149P")
        buf.write("\3\2\2\2\u014a\u014b\5\r\7\2\u014b\u014c\5\23\n\2\u014c")
        buf.write("\u014d\5\31\r\2\u014d\u014e\5\13\6\2\u014eR\3\2\2\2\u014f")
        buf.write("\u0150\5\r\7\2\u0150\u0151\5\37\20\2\u0151\u0152\5%\23")
        buf.write("\2\u0152T\3\2\2\2\u0153\u0154\5\r\7\2\u0154\u0155\5+\26")
        buf.write("\2\u0155\u0156\5\35\17\2\u0156\u0157\5\7\4\2\u0157\u0158")
        buf.write("\5)\25\2\u0158\u0159\5\23\n\2\u0159\u015a\5\37\20\2\u015a")
        buf.write("\u015b\5\35\17\2\u015bV\3\2\2\2\u015c\u015d\5\17\b\2\u015d")
        buf.write("\u015e\5\37\20\2\u015e\u015f\5)\25\2\u015f\u0160\5\37")
        buf.write("\20\2\u0160X\3\2\2\2\u0161\u0162\5\23\n\2\u0162\u0163")
        buf.write("\5\r\7\2\u0163Z\3\2\2\2\u0164\u0165\5\23\n\2\u0165\u0166")
        buf.write("\5\35\17\2\u0166\\\3\2\2\2\u0167\u0168\5\23\n\2\u0168")
        buf.write("\u0169\5\35\17\2\u0169\u016a\5)\25\2\u016a\u016b\5\13")
        buf.write("\6\2\u016b\u016c\5\17\b\2\u016c\u016d\5\13\6\2\u016d\u016e")
        buf.write("\5%\23\2\u016e^\3\2\2\2\u016f\u0170\5\31\r\2\u0170\u0171")
        buf.write("\5\3\2\2\u0171\u0172\5\5\3\2\u0172\u0173\5\13\6\2\u0173")
        buf.write("\u0174\5\31\r\2\u0174`\3\2\2\2\u0175\u0176\5\33\16\2\u0176")
        buf.write("\u0177\5\37\20\2\u0177\u0178\5\t\5\2\u0178b\3\2\2\2\u0179")
        buf.write("\u017a\5\35\17\2\u017a\u017b\5\23\n\2\u017b\u017c\5\31")
        buf.write("\r\2\u017cd\3\2\2\2\u017d\u017e\5\35\17\2\u017e\u017f")
        buf.write("\5\37\20\2\u017f\u0180\5)\25\2\u0180f\3\2\2\2\u0181\u0182")
        buf.write("\5\37\20\2\u0182\u0183\5\r\7\2\u0183h\3\2\2\2\u0184\u0185")
        buf.write("\5\37\20\2\u0185\u0186\5%\23\2\u0186j\3\2\2\2\u0187\u0188")
        buf.write("\5!\21\2\u0188\u0189\5\3\2\2\u0189\u018a\5\7\4\2\u018a")
        buf.write("\u018b\5\27\f\2\u018b\u018c\5\13\6\2\u018c\u018d\5\t\5")
        buf.write("\2\u018dl\3\2\2\2\u018e\u018f\5!\21\2\u018f\u0190\5%\23")
        buf.write("\2\u0190\u0191\5\37\20\2\u0191\u0192\5\7\4\2\u0192\u0193")
        buf.write("\5\13\6\2\u0193\u0194\5\t\5\2\u0194\u0195\5+\26\2\u0195")
        buf.write("\u0196\5%\23\2\u0196\u0197\5\13\6\2\u0197n\3\2\2\2\u0198")
        buf.write("\u0199\5!\21\2\u0199\u019a\5%\23\2\u019a\u019b\5\37\20")
        buf.write("\2\u019b\u019c\5\17\b\2\u019c\u019d\5%\23\2\u019d\u019e")
        buf.write("\5\3\2\2\u019e\u019f\5\33\16\2\u019fp\3\2\2\2\u01a0\u01a1")
        buf.write("\5%\23\2\u01a1\u01a2\5\13\6\2\u01a2\u01a3\5\3\2\2\u01a3")
        buf.write("\u01a4\5\31\r\2\u01a4r\3\2\2\2\u01a5\u01a6\5%\23\2\u01a6")
        buf.write("\u01a7\5\13\6\2\u01a7\u01a8\5\7\4\2\u01a8\u01a9\5\37\20")
        buf.write("\2\u01a9\u01aa\5%\23\2\u01aa\u01ab\5\t\5\2\u01abt\3\2")
        buf.write("\2\2\u01ac\u01ad\5%\23\2\u01ad\u01ae\5\13\6\2\u01ae\u01af")
        buf.write("\5!\21\2\u01af\u01b0\5\13\6\2\u01b0\u01b1\5\3\2\2\u01b1")
        buf.write("\u01b2\5)\25\2\u01b2v\3\2\2\2\u01b3\u01b4\5\'\24\2\u01b4")
        buf.write("\u01b5\5\13\6\2\u01b5\u01b6\5)\25\2\u01b6x\3\2\2\2\u01b7")
        buf.write("\u01b8\5)\25\2\u01b8\u01b9\5\21\t\2\u01b9\u01ba\5\13\6")
        buf.write("\2\u01ba\u01bb\5\35\17\2\u01bbz\3\2\2\2\u01bc\u01bd\5")
        buf.write(")\25\2\u01bd\u01be\5\37\20\2\u01be|\3\2\2\2\u01bf\u01c0")
        buf.write("\5)\25\2\u01c0\u01c1\5\63\32\2\u01c1\u01c2\5!\21\2\u01c2")
        buf.write("\u01c3\5\13\6\2\u01c3~\3\2\2\2\u01c4\u01c5\5+\26\2\u01c5")
        buf.write("\u01c6\5\35\17\2\u01c6\u01c7\5)\25\2\u01c7\u01c8\5\23")
        buf.write("\n\2\u01c8\u01c9\5\31\r\2\u01c9\u0080\3\2\2\2\u01ca\u01cb")
        buf.write("\5-\27\2\u01cb\u01cc\5\3\2\2\u01cc\u01cd\5%\23\2\u01cd")
        buf.write("\u0082\3\2\2\2\u01ce\u01cf\5/\30\2\u01cf\u01d0\5\21\t")
        buf.write("\2\u01d0\u01d1\5\23\n\2\u01d1\u01d2\5\31\r\2\u01d2\u01d3")
        buf.write("\5\13\6\2\u01d3\u0084\3\2\2\2\u01d4\u01d5\5/\30\2\u01d5")
        buf.write("\u01d6\5\23\n\2\u01d6\u01d7\5)\25\2\u01d7\u01d8\5\21\t")
        buf.write("\2\u01d8\u0086\3\2\2\2\u01d9\u01da\7-\2\2\u01da\u0088")
        buf.write("\3\2\2\2\u01db\u01dc\7/\2\2\u01dc\u008a\3\2\2\2\u01dd")
        buf.write("\u01de\7,\2\2\u01de\u008c\3\2\2\2\u01df\u01e0\7\61\2\2")
        buf.write("\u01e0\u008e\3\2\2\2\u01e1\u01e2\7<\2\2\u01e2\u01e3\7")
        buf.write("?\2\2\u01e3\u0090\3\2\2\2\u01e4\u01e5\7.\2\2\u01e5\u0092")
        buf.write("\3\2\2\2\u01e6\u01e7\7=\2\2\u01e7\u0094\3\2\2\2\u01e8")
        buf.write("\u01e9\7<\2\2\u01e9\u0096\3\2\2\2\u01ea\u01eb\7?\2\2\u01eb")
        buf.write("\u0098\3\2\2\2\u01ec\u01ed\7>\2\2\u01ed\u01ee\7@\2\2\u01ee")
        buf.write("\u009a\3\2\2\2\u01ef\u01f0\7>\2\2\u01f0\u009c\3\2\2\2")
        buf.write("\u01f1\u01f2\7>\2\2\u01f2\u01f3\7?\2\2\u01f3\u009e\3\2")
        buf.write("\2\2\u01f4\u01f5\7@\2\2\u01f5\u01f6\7?\2\2\u01f6\u00a0")
        buf.write("\3\2\2\2\u01f7\u01f8\7@\2\2\u01f8\u00a2\3\2\2\2\u01f9")
        buf.write("\u01fa\7*\2\2\u01fa\u00a4\3\2\2\2\u01fb\u01fc\7+\2\2\u01fc")
        buf.write("\u00a6\3\2\2\2\u01fd\u01fe\7]\2\2\u01fe\u00a8\3\2\2\2")
        buf.write("\u01ff\u0200\7*\2\2\u0200\u0201\7\60\2\2\u0201\u00aa\3")
        buf.write("\2\2\2\u0202\u0203\7_\2\2\u0203\u00ac\3\2\2\2\u0204\u0205")
        buf.write("\7\60\2\2\u0205\u0206\7+\2\2\u0206\u00ae\3\2\2\2\u0207")
        buf.write("\u0208\7`\2\2\u0208\u00b0\3\2\2\2\u0209\u020a\7B\2\2\u020a")
        buf.write("\u00b2\3\2\2\2\u020b\u020c\7\60\2\2\u020c\u00b4\3\2\2")
        buf.write("\2\u020d\u020e\7\60\2\2\u020e\u020f\7\60\2\2\u020f\u00b6")
        buf.write("\3\2\2\2\u0210\u0211\7}\2\2\u0211\u00b8\3\2\2\2\u0212")
        buf.write("\u0213\7\177\2\2\u0213\u00ba\3\2\2\2\u0214\u0215\5+\26")
        buf.write("\2\u0215\u0216\5\35\17\2\u0216\u0217\5\23\n\2\u0217\u0218")
        buf.write("\5)\25\2\u0218\u00bc\3\2\2\2\u0219\u021a\5\23\n\2\u021a")
        buf.write("\u021b\5\35\17\2\u021b\u021c\5)\25\2\u021c\u021d\5\13")
        buf.write("\6\2\u021d\u021e\5%\23\2\u021e\u021f\5\r\7\2\u021f\u0220")
        buf.write("\5\3\2\2\u0220\u0221\5\7\4\2\u0221\u0222\5\13\6\2\u0222")
        buf.write("\u00be\3\2\2\2\u0223\u0224\5+\26\2\u0224\u0225\5\'\24")
        buf.write("\2\u0225\u0226\5\13\6\2\u0226\u0227\5\'\24\2\u0227\u00c0")
        buf.write("\3\2\2\2\u0228\u0229\5\'\24\2\u0229\u022a\5)\25\2\u022a")
        buf.write("\u022b\5%\23\2\u022b\u022c\5\23\n\2\u022c\u022d\5\35\17")
        buf.write("\2\u022d\u022e\5\17\b\2\u022e\u00c2\3\2\2\2\u022f\u0230")
        buf.write("\5\23\n\2\u0230\u0231\5\33\16\2\u0231\u0232\5!\21\2\u0232")
        buf.write("\u0233\5\31\r\2\u0233\u0234\5\13\6\2\u0234\u0235\5\33")
        buf.write("\16\2\u0235\u0236\5\13\6\2\u0236\u0237\5\35\17\2\u0237")
        buf.write("\u0238\5)\25\2\u0238\u0239\5\3\2\2\u0239\u023a\5)\25\2")
        buf.write("\u023a\u023b\5\23\n\2\u023b\u023c\5\37\20\2\u023c\u023d")
        buf.write("\5\35\17\2\u023d\u00c4\3\2\2\2\u023e\u023f\t\34\2\2\u023f")
        buf.write("\u0240\3\2\2\2\u0240\u0241\bc\2\2\u0241\u00c6\3\2\2\2")
        buf.write("\u0242\u0243\7*\2\2\u0243\u0244\7,\2\2\u0244\u0248\3\2")
        buf.write("\2\2\u0245\u0247\13\2\2\2\u0246\u0245\3\2\2\2\u0247\u024a")
        buf.write("\3\2\2\2\u0248\u0249\3\2\2\2\u0248\u0246\3\2\2\2\u0249")
        buf.write("\u024b\3\2\2\2\u024a\u0248\3\2\2\2\u024b\u024c\7,\2\2")
        buf.write("\u024c\u024d\7+\2\2\u024d\u024e\3\2\2\2\u024e\u024f\b")
        buf.write("d\2\2\u024f\u00c8\3\2\2\2\u0250\u0254\7}\2\2\u0251\u0253")
        buf.write("\13\2\2\2\u0252\u0251\3\2\2\2\u0253\u0256\3\2\2\2\u0254")
        buf.write("\u0255\3\2\2\2\u0254\u0252\3\2\2\2\u0255\u0257\3\2\2\2")
        buf.write("\u0256\u0254\3\2\2\2\u0257\u0258\7\177\2\2\u0258\u0259")
        buf.write("\3\2\2\2\u0259\u025a\be\2\2\u025a\u00ca\3\2\2\2\u025b")
        buf.write("\u025f\t\35\2\2\u025c\u025e\t\36\2\2\u025d\u025c\3\2\2")
        buf.write("\2\u025e\u0261\3\2\2\2\u025f\u025d\3\2\2\2\u025f\u0260")
        buf.write("\3\2\2\2\u0260\u00cc\3\2\2\2\u0261\u025f\3\2\2\2\u0262")
        buf.write("\u0268\7)\2\2\u0263\u0264\7)\2\2\u0264\u0267\7)\2\2\u0265")
        buf.write("\u0267\n\37\2\2\u0266\u0263\3\2\2\2\u0266\u0265\3\2\2")
        buf.write("\2\u0267\u026a\3\2\2\2\u0268\u0266\3\2\2\2\u0268\u0269")
        buf.write("\3\2\2\2\u0269\u026b\3\2\2\2\u026a\u0268\3\2\2\2\u026b")
        buf.write("\u026c\7)\2\2\u026c\u00ce\3\2\2\2\u026d\u026f\4\62;\2")
        buf.write("\u026e\u026d\3\2\2\2\u026f\u0270\3\2\2\2\u0270\u026e\3")
        buf.write("\2\2\2\u0270\u0271\3\2\2\2\u0271\u027e\3\2\2\2\u0272\u0274")
        buf.write("\7\60\2\2\u0273\u0275\4\62;\2\u0274\u0273\3\2\2\2\u0275")
        buf.write("\u0276\3\2\2\2\u0276\u0274\3\2\2\2\u0276\u0277\3\2\2\2")
        buf.write("\u0277\u0279\3\2\2\2\u0278\u027a\5\u00d1i\2\u0279\u0278")
        buf.write("\3\2\2\2\u0279\u027a\3\2\2\2\u027a\u027c\3\2\2\2\u027b")
        buf.write("\u0272\3\2\2\2\u027b\u027c\3\2\2\2\u027c\u027f\3\2\2\2")
        buf.write("\u027d\u027f\5\u00d1i\2\u027e\u027b\3\2\2\2\u027e\u027d")
        buf.write("\3\2\2\2\u027f\u00d0\3\2\2\2\u0280\u0282\7g\2\2\u0281")
        buf.write("\u0283\t \2\2\u0282\u0281\3\2\2\2\u0282\u0283\3\2\2\2")
        buf.write("\u0283\u0285\3\2\2\2\u0284\u0286\4\62;\2\u0285\u0284\3")
        buf.write("\2\2\2\u0286\u0287\3\2\2\2\u0287\u0285\3\2\2\2\u0287\u0288")
        buf.write("\3\2\2\2\u0288\u00d2\3\2\2\2\17\2\u0248\u0254\u025f\u0266")
        buf.write("\u0268\u0270\u0276\u0279\u027b\u027e\u0282\u0287\3\b\2")
        buf.write("\2")
        return buf.getvalue()
class pascalLexer(Lexer):
    """Lexer for the pascal grammar.

    Machine-generated by ANTLR 4.5.3 from pascal.g4 -- do not edit by
    hand; regenerate from the grammar instead.
    """

    # Deserialized ATN shared by all lexer instances.
    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants (one per lexer rule, in grammar order).
    AND = 1
    ARRAY = 2
    BEGIN = 3
    BOOLEAN = 4
    CASE = 5
    CHAR = 6
    CHR = 7
    CONST = 8
    DIV = 9
    DO = 10
    DOWNTO = 11
    ELSE = 12
    END = 13
    FILE = 14
    FOR = 15
    FUNCTION = 16
    GOTO = 17
    IF = 18
    IN = 19
    INTEGER = 20
    LABEL = 21
    MOD = 22
    NIL = 23
    NOT = 24
    OF = 25
    OR = 26
    PACKED = 27
    PROCEDURE = 28
    PROGRAM = 29
    REAL = 30
    RECORD = 31
    REPEAT = 32
    SET = 33
    THEN = 34
    TO = 35
    TYPE = 36
    UNTIL = 37
    VAR = 38
    WHILE = 39
    WITH = 40
    PLUS = 41
    MINUS = 42
    STAR = 43
    SLASH = 44
    ASSIGN = 45
    COMMA = 46
    SEMI = 47
    COLON = 48
    EQUAL = 49
    NOT_EQUAL = 50
    LT = 51
    LE = 52
    GE = 53
    GT = 54
    LPAREN = 55
    RPAREN = 56
    LBRACK = 57
    LBRACK2 = 58
    RBRACK = 59
    RBRACK2 = 60
    POINTER = 61
    AT = 62
    DOT = 63
    DOTDOT = 64
    LCURLY = 65
    RCURLY = 66
    UNIT = 67
    INTERFACE = 68
    USES = 69
    STRING = 70
    IMPLEMENTATION = 71
    WS = 72
    COMMENT_1 = 73
    COMMENT_2 = 74
    IDENT = 75
    STRING_LITERAL = 76
    NUM_INT = 77

    modeNames = [ "DEFAULT_MODE" ]

    # Literal spellings of tokens with a fixed text, indexed by token type.
    literalNames = [ "<INVALID>",
            "'+'", "'-'", "'*'", "'/'", "':='", "','", "';'", "':'", "'='",
            "'<>'", "'<'", "'<='", "'>='", "'>'", "'('", "')'", "'['", "'(.'",
            "']'", "'.)'", "'^'", "'@'", "'.'", "'..'", "'{'", "'}'" ]

    # Symbolic token names, indexed by token type.
    symbolicNames = [ "<INVALID>",
            "AND", "ARRAY", "BEGIN", "BOOLEAN", "CASE", "CHAR", "CHR", "CONST",
            "DIV", "DO", "DOWNTO", "ELSE", "END", "FILE", "FOR", "FUNCTION",
            "GOTO", "IF", "IN", "INTEGER", "LABEL", "MOD", "NIL", "NOT",
            "OF", "OR", "PACKED", "PROCEDURE", "PROGRAM", "REAL", "RECORD",
            "REPEAT", "SET", "THEN", "TO", "TYPE", "UNTIL", "VAR", "WHILE",
            "WITH", "PLUS", "MINUS", "STAR", "SLASH", "ASSIGN", "COMMA",
            "SEMI", "COLON", "EQUAL", "NOT_EQUAL", "LT", "LE", "GE", "GT",
            "LPAREN", "RPAREN", "LBRACK", "LBRACK2", "RBRACK", "RBRACK2",
            "POINTER", "AT", "DOT", "DOTDOT", "LCURLY", "RCURLY", "UNIT",
            "INTERFACE", "USES", "STRING", "IMPLEMENTATION", "WS", "COMMENT_1",
            "COMMENT_2", "IDENT", "STRING_LITERAL", "NUM_INT" ]

    # Lexer rule names (includes fragment rules such as A-Z and EXPONENT).
    ruleNames = [ "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K",
                  "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V",
                  "W", "X", "Y", "Z", "AND", "ARRAY", "BEGIN", "BOOLEAN",
                  "CASE", "CHAR", "CHR", "CONST", "DIV", "DO", "DOWNTO",
                  "ELSE", "END", "FILE", "FOR", "FUNCTION", "GOTO", "IF",
                  "IN", "INTEGER", "LABEL", "MOD", "NIL", "NOT", "OF", "OR",
                  "PACKED", "PROCEDURE", "PROGRAM", "REAL", "RECORD", "REPEAT",
                  "SET", "THEN", "TO", "TYPE", "UNTIL", "VAR", "WHILE",
                  "WITH", "PLUS", "MINUS", "STAR", "SLASH", "ASSIGN", "COMMA",
                  "SEMI", "COLON", "EQUAL", "NOT_EQUAL", "LT", "LE", "GE",
                  "GT", "LPAREN", "RPAREN", "LBRACK", "LBRACK2", "RBRACK",
                  "RBRACK2", "POINTER", "AT", "DOT", "DOTDOT", "LCURLY",
                  "RCURLY", "UNIT", "INTERFACE", "USES", "STRING", "IMPLEMENTATION",
                  "WS", "COMMENT_1", "COMMENT_2", "IDENT", "STRING_LITERAL",
                  "NUM_INT", "EXPONENT" ]

    grammarFileName = "pascal.g4"

    def __init__(self, input=None):
        # Standard ANTLR lexer setup: pin the runtime version and build
        # the ATN simulator that drives tokenization.
        super().__init__(input)
        self.checkVersion("4.5.3")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
| |
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import str
from builtins import *
from past.utils import old_div
import pandas as pd
import folium.folium as folium
import itertools
import numpy as np
import logging
import geojson as gj
import copy
import attrdict as ad
# import emission.analysis.classification.cleaning.location_smoothing as ls
import bson.json_util as bju
import emission.storage.decorations.location_queries as lq
import emission.storage.decorations.trip_queries as esdt
import emission.storage.decorations.place_queries as esdp
import emission.storage.decorations.stop_queries as esds
import emission.storage.decorations.section_queries as esdsc
import emission.storage.timeseries.abstract_timeseries as esta
import emission.core.wrapper.stop as ecws
import emission.core.wrapper.section as ecwsc
import emission.analysis.plotting.geojson.geojson_feature_converter as gfc
import folium
import emission.storage.timeseries.timequery as estt
import emission.net.api.usercache as enau
# Palettes used to color lines/sections on the generated maps; entries are
# picked by index (e.g. sel_color_list[trip_element.sensed_mode.value]).
# NOTE(review): 'fuschia' looks like a misspelling of the CSS color
# 'fuchsia' -- confirm whether the map renderer accepts it before changing,
# since fixing the string would shift rendered colors.
all_color_list = ['black', 'brown', 'blue', 'chocolate', 'cyan', 'fuschia', 'green', 'lime', 'magenta', 'navy', 'pink', 'purple', 'red', 'snow', 'yellow']
sel_color_list = ['black', 'blue', 'chocolate', 'cyan', 'fuschia', 'green', 'lime', 'magenta', 'pink', 'purple', 'red', 'yellow']
def df_to_string_list(df):
    """
    Convert the input df into a list of strings, suitable for using as popups in a map.
    This is a utility function: each row becomes the str() of its record dict.
    """
    return [str(record) for record in df.to_dict(orient='records')]
def get_maps_for_range(user_id, start_ts, end_ts):
    """Return folium maps for the user's trips in [start_ts, end_ts].

    Fetches the trip geojson for the timestamp range and delegates map
    construction to get_maps_for_geojson_list.
    """
    # Removed the unused `map_list = []` local; the result comes entirely
    # from get_maps_for_geojson_list.
    geojson_list = gfc.get_geojson_for_ts(user_id, start_ts, end_ts)
    return get_maps_for_geojson_list(geojson_list)
def get_maps_for_usercache(user_id):
    """Build one list of maps per diary/trips document queued for the phone."""
    from functional import seq
    phone_entries = seq(enau.sync_server_to_phone(user_id))
    logging.debug("Before pipeline, trips to phone list has length %d" % len(phone_entries.to_list()))
    logging.debug("keys are %s" % phone_entries.map(lambda entry: ad.AttrDict(entry).metadata.key))
    trip_docs = (phone_entries
                 .map(lambda entry: ad.AttrDict(entry))
                 .filter(lambda entry: entry.metadata.key.startswith("diary/trips"))
                 .map(lambda entry: entry.data))
    logging.debug("After pipeline, trips to phone list has length %d" % len(trip_docs.to_list()))
    return [get_maps_for_geojson_list(day) for day in trip_docs]
def get_maps_for_geojson_trip_list(trip_geojson_list):
    """Build one folium map per trip geojson document."""
    return [get_map_for_geojson_trip(trip_doc) for trip_doc in trip_geojson_list]
def get_map_for_geojson_trip(geojson_trip):
    """Render a single trip geojson document as a folium map.

    Start/end places become green/red flag markers, stops become pairs of
    circle markers, and each "section" feature is drawn as a polyline; the
    map is finally fit to the bounds of all section points.

    Raises NotImplementedError for non-section features found inside a
    FeatureCollection.
    """
    m = folium.Map()
    location_points = []
    for f in geojson_trip["features"]:
        if f["type"] == "Feature":
            print(f["properties"]["feature_type"], f["id"])
            if f["properties"]["feature_type"] == 'start_place':
                place_marker = get_place_ui(f)
                ic = folium.features.Icon(color='green', icon="flag")
                place_marker.add_child(ic)
                place_marker.add_to(m)
            if f["properties"]["feature_type"] == 'end_place':
                place_marker = get_place_ui(f)
                ic = folium.features.Icon(color='red', icon="flag")
                place_marker.add_child(ic)
                place_marker.add_to(m)
            if f["properties"]["feature_type"] == 'stop':
                (start_marker, end_marker) = get_stop_ui(f)
                start_marker.add_to(m)
                end_marker.add_to(m)
        if f["type"] == "FeatureCollection":
            for section in f["features"]:
                print(section["properties"]["feature_type"], section["id"])
                if (section["properties"]["feature_type"] == "section"):
                    section_line = get_section_ui(section)
                    location_points.extend(section_line.locations)
                    section_line.add_to(m)
                else:
                    # Bug fix: NotImplementedException is not a defined name
                    # (hitting this branch raised NameError); use the builtin.
                    raise NotImplementedError()
    temp_polyline = folium.PolyLine(location_points)
    m.fit_bounds(temp_polyline.get_bounds())
    return m
def get_place_ui(place):
    """Marker at the place's location (geojson lon/lat flipped to lat/lon)."""
    lat_lng = place["geometry"]["coordinates"][::-1]
    return folium.Marker(lat_lng, popup=bju.dumps(place["properties"]))
def get_section_ui(section):
    """PolyLine for a section, with its properties JSON as the popup."""
    lat_lng_points = [point[::-1] for point in section["geometry"]["coordinates"]]
    return folium.PolyLine(lat_lng_points, popup=bju.dumps(section["properties"]))
def get_stop_ui(stop):
    """Pair of circle markers for a stop: (entry marker green, exit marker red)."""
    lat_lng_points = [point[::-1] for point in stop["geometry"]["coordinates"]]
    entry_marker = folium.CircleMarker(lat_lng_points[0], popup=bju.dumps(stop["properties"]),
                                       color="green", fill_color="green", fill=True)
    exit_marker = folium.CircleMarker(lat_lng_points[1], popup=bju.dumps(stop["properties"]),
                                      color="red", fill_color="red", fill=True)
    return (entry_marker, exit_marker)
def flipped(coord):
    """Swap a (lon, lat) pair into (lat, lon); extra trailing entries are dropped."""
    lon, lat = coord[0], coord[1]
    return (lat, lon)
def get_center_for_map(coords):
    """Return a (lat, lon) center for a list of (lon, lat) coordinates.

    Empty input yields None; a single coordinate is returned flipped;
    otherwise the midpoint of the first and last coordinates is used.
    """
    # logging.debug(trip_geojson)
    midpoint = lambda p1_p21: [old_div((p1_p21[0][0] + p1_p21[1][0]),2),
                               old_div((p1_p21[0][1] + p1_p21[1][1]),2)]
    if len(coords) == 0:
        return None
    if len(coords) == 1:
        # Bug fix: flip the single coordinate, not the list containing it
        # (flipped(coords) indexed the list itself and was wrong/crashed).
        return flipped(coords[0])
    if len(coords) > 0:
        logging.debug("Getting midpoint of %s and %s" % (coords[0], coords[-1]))
        return flipped(midpoint((coords[0], coords[-1])))
def get_maps_for_geojson_unsectioned(geojson_list):
    """Build one unsectioned map per geojson document."""
    return [get_map_for_geojson_unsectioned(doc) for doc in geojson_list]
def get_map_for_geojson_unsectioned(geojson):
    """Map an unsectioned geojson document: one circle marker per point
    (features[0] is the point collection) plus a polyline for the overall
    track (features[1] is the LineString). The map is fit to the polyline
    bounds.
    """
    # Removed an unused folium.DivIcon local (and the dead commented-out
    # marker code that referenced it).
    all_div_markers = [folium.CircleMarker(p["geometry"]["coordinates"][::-1],
                                           popup=bju.dumps(p["properties"]),
                                           radius=5)
                       for p in geojson["features"][0]["features"]]
    print("Points are ", [m.location for m in all_div_markers[:5]], "...")
    geojson_line_string = geojson["features"][1]["geometry"]
    # geojson is (lon, lat); folium wants (lat, lon)
    polyline_coords = [c[::-1] for c in geojson_line_string["coordinates"]]
    print("Linestring is", polyline_coords[:5], "...")
    polyline = folium.PolyLine(polyline_coords)
    bounds = polyline.get_bounds()
    m = folium.Map(tiles='Stamen Terrain')
    m.fit_bounds(bounds)
    for marker in all_div_markers:
        marker.add_to(m)
    polyline.add_to(m)
    return m
def get_coords(feature):
    """Collect all coordinates from a feature, recursing into FeatureCollections."""
    # logging.debug("Getting coordinates for feature %s" % bju.dumps(feature))
    if feature["type"] != "FeatureCollection":
        return gj.utils.coords(feature)
    collected = []
    for child in feature["features"]:
        collected.extend(get_coords(child))
    return collected
def get_maps_for_range_old(user_id, start_ts, end_ts):
    """Build one folium map per trip in [start_ts, end_ts] directly from the
    raw timeline (trips, places, stops, sections) rather than from geojson.

    Trips with neither stops nor sections are skipped. Stops and sections
    are drawn as lines from their filtered-location points; places are
    drawn via update_place (green=start, red=end).
    """
    # First, get the timeline for that range.
    ts = esta.TimeSeries.get_time_series(user_id)
    trip_list = esdt.get_trips(user_id, estt.TimeQuery("data.start_ts", start_ts, end_ts))
    # TODO: Should the timeline support random access as well?
    # If it did, we wouldn't need this additional map
    # I think that it would be good to support a doubly linked list, i.e. prev and next in addition
    # to the iteration interface
    # Places are looked up by id later, so collect both exited and entered
    # places for the range into one id -> place map.
    place_list = esdp.get_places(user_id, estt.TimeQuery("data.exit_ts", start_ts, end_ts))
    place_list = place_list + (esdp.get_places(user_id, estt.TimeQuery("data.enter_ts", start_ts, end_ts)))
    place_map = dict([(p.get_id(), p) for p in place_list])
    map_list = []
    # Center of two (lon, lat) points, returned flipped to (lat, lon).
    flipped_midpoint = lambda p1_p22: [old_div((p1_p22[0].coordinates[1] + p1_p22[1].coordinates[1]),2),
                                       old_div((p1_p22[0].coordinates[0] + p1_p22[1].coordinates[0]),2)]
    for i, trip in enumerate(trip_list):
        logging.debug("-" * 20 + trip.start_fmt_time + "=>" + trip.end_fmt_time
                      + "(" + str(trip.end_ts - trip.start_ts) + ")")
        if (len(esdt.get_raw_sections_for_trip(user_id, trip.get_id())) == 0 and
            len(esdt.get_raw_stops_for_trip(user_id, trip.get_id())) == 0):
            logging.debug("Skipping trip because it has no stops and no sections")
            continue
        start_point = gj.GeoJSON.to_instance(trip.start_loc)
        end_point = gj.GeoJSON.to_instance(trip.end_loc)
        curr_map = folium.Map(flipped_midpoint((start_point, end_point)))
        map_list.append(curr_map)
        logging.debug("About to display places %s and %s" % (trip.start_place, trip.end_place))
        update_place(curr_map, trip.start_place, place_map, marker_color='green')
        update_place(curr_map, trip.end_place, place_map, marker_color='red')
        # TODO: Should get_timeline_for_trip work on a trip_id or on a trip object
        # it seems stupid to convert trip object -> id -> trip object
        curr_trip_timeline = esdt.get_raw_timeline_for_trip(user_id, trip.get_id())
        for i, trip_element in enumerate(curr_trip_timeline):
            # logging.debug("Examining element %s of type %s" % (trip_element, type(trip_element)))
            if type(trip_element) == ecws.Stop:
                time_query = esds.get_time_query_for_stop(trip_element.get_id())
                logging.debug("time_query for stop %s = %s" % (trip_element, time_query))
                stop_points_df = ts.get_data_df("background/filtered_location", time_query)
                # logging.debug("stop_points_df.head() = %s" % stop_points_df.head())
                if len(stop_points_df) > 0:
                    # Stops always use the last palette color.
                    update_line(curr_map, stop_points_df, line_color = sel_color_list[-1],
                                popup="%s -> %s" % (trip_element.enter_fmt_time, trip_element.exit_fmt_time))
            else:
                # Timeline elements are only stops or sections.
                assert(type(trip_element) == ecwsc.Section)
                time_query = esdsc.get_time_query_for_section(trip_element.get_id())
                logging.debug("time_query for section %s = %s" %
                              (trip_element, "[%s,%s,%s]" % (time_query.timeType, time_query.startTs, time_query.endTs)))
                section_points_df = ts.get_data_df("background/filtered_location", time_query)
                logging.debug("section_points_df.tail() = %s" % section_points_df.tail())
                if len(section_points_df) > 0:
                    # Section color encodes the sensed travel mode.
                    update_line(curr_map, section_points_df, line_color = sel_color_list[trip_element.sensed_mode.value],
                                popup="%s (%s -> %s)" % (trip_element.sensed_mode, trip_element.start_fmt_time,
                                                         trip_element.end_fmt_time))
                else:
                    logging.warning("found no points for section %s" % trip_element)
    return map_list
def update_place(curr_map, place_id, place_map, marker_color='blue'):
    """Drop a marker on curr_map for the place identified by place_id.

    Looks place_id up in place_map. Logs and returns without drawing when the
    id is None/unmapped, or when the place object has no location attribute.
    """
    if place_id is None or place_id not in place_map:
        logging.warning("place not mapped because place_id = %s and place_id in place_map = %s" % (place_id, place_id in place_map))
        return
    place = place_map[place_id]
    logging.debug("Retrieved place %s" % place)
    if not hasattr(place, "location"):
        logging.debug("starting place has no location, skipping")
        return
    # Coordinates are stored in the opposite order from what the map marker
    # expects (presumably lon/lat vs lat/lon — confirm against the geojson
    # source), so flip a copy rather than mutating the stored list.
    latlng = copy.copy(place.location.coordinates)
    latlng.reverse()
    logging.debug("Displaying place at %s" % latlng)
    curr_map.simple_marker(location=latlng, popup=str(place), marker_color=marker_color)
def update_line(currMap, line_points, line_color = None, popup=None):
    """Draw one div marker per point plus a connecting polyline on currMap.

    line_points is a dataframe with 'latitude'/'longitude' columns; each
    point's marker popup text comes from df_to_string_list.
    """
    def _coords():
        # Fresh list each call so the two map layers never share state.
        return line_points[['latitude', 'longitude']].to_numpy().tolist()
    currMap.div_markers(_coords(), df_to_string_list(line_points), marker_size=5)
    currMap.line(_coords(), line_color=line_color, popup=popup)
##########################
# Everything below this line dates from our earlier segmentation-evaluation
# work and is a candidate for deletion; it has also likely bitrotted.
# Let's hold off on deleting it until the replacement is in place, though.
##########################
def get_map_list(df, potential_splits):
    """Build one map per candidate trip delimited by consecutive split indices.

    Consecutive entries of potential_splits bound candidate trips in df.
    Trips spanning fewer than 4 rows (i.e. at most one point besides the two
    endpoints) are logged and skipped.
    """
    maps = []
    splits = list(potential_splits)
    for begin, finish in zip(splits, splits[1:]):
        segment = df[begin:finish]
        print("Considering trip from %s to %s because start = %d and end = %d" % (df.formatted_time.loc[begin], df.formatted_time.loc[finish], begin, finish))
        if finish - begin < 4:
            # Only the endpoints plus at most one interior point: not a trip.
            print("Ignoring trip from %s to %s because start = %d and end = %d" % (df.formatted_time.loc[begin], df.formatted_time.loc[finish], begin, finish))
            continue
        maps.append(get_map(segment))
    return maps
def get_map_list_after_segmentation(section_map, outlier_algo = None, filter_algo = None):
    """Build one map per trip, drawing each segmented section in its own color.

    section_map yields (trip, section_list) pairs. For each trip, the trip's
    points are fetched once, each section's slice is filtered with the given
    outlier/filter algorithms, drawn via update_map, and consecutive sections
    are joined with a connector line. Empty (fully filtered) sections are
    skipped without breaking the connector chain.
    """
    mapList = []
    for trip, section_list in section_map:
        logging.debug("%s %s -> %s %s" % ("=" * 20, trip.start_time, trip.end_time, "=" * 20))
        trip_df = lq.get_points_for_section(trip)
        # Center the map on the trip's mean coordinates.
        curr_map = folium.Map([trip_df.mLatitude.mean(), trip_df.mLongitude.mean()])
        last_section_end = None
        for (i, section) in enumerate(section_list):
            logging.debug("%s %s: %s -> %s %s" %
                          ("-" * 20, i, section.start_time, section.end_time, "-" * 20))
            # Slice this section's points out of the trip by timestamp range.
            raw_section_df = trip_df[np.logical_and(trip_df.mTime >= section.start_ts,
                                                    trip_df.mTime <= section.end_ts)]
            section_df = ls.filter_points(raw_section_df, outlier_algo, filter_algo)
            if section_df.shape[0] == 0:
                logging.info("Found empty df! skipping...")
                continue
            logging.debug("for section %s, section_df.shape = %s, formatted_time.head() = %s" %
                          (section, section_df.shape, section_df["formatted_time"].head()))
            update_map(curr_map, section_df, line_color = sel_color_list[section.activity.value],
                       popup = "%s" % (section.activity))
            if section_df.shape[0] > 0:
                curr_section_start = section_df.iloc[0]
                if i != 0 and last_section_end is not None:
                    # We want to join this to the previous section.
                    # sel_color_list[-1] is the reserved "connector" color.
                    curr_map.line([[last_section_end.mLatitude, last_section_end.mLongitude],
                                   [curr_section_start.mLatitude, curr_section_start.mLongitude]],
                                  line_color = sel_color_list[-1],
                                  popup = "%s -> %s" % (section_list[i-1].activity, section.activity))
                last_section_end = section_df.iloc[-1]
        mapList.append(curr_map)
    return mapList
def get_map(section_points, line_color = None, popup=None):
    """Create a folium map centered on section_points and draw the points on it."""
    center = [section_points.mLatitude.mean(), section_points.mLongitude.mean()]
    new_map = folium.Map(center)
    update_map(new_map, section_points, line_color, popup)
    return new_map
def update_map(currMap, section_points, line_color = None, popup=None):
    """Add per-point div markers and a connecting polyline for section_points.

    Mirrors update_line but for dataframes using the mLatitude/mLongitude
    column naming convention.
    """
    def _coords():
        # Fresh list each call so the two map layers never share state.
        return section_points[['mLatitude', 'mLongitude']].to_numpy().tolist()
    currMap.div_markers(_coords(), df_to_string_list(section_points), marker_size=5)
    currMap.line(_coords(), line_color=line_color, popup=popup)
def evaluate_filtering(section_list, outlier_algos, filtering_algos):
    """Build a grid of maps comparing filtering algorithms section by section.

    Each row corresponds to one section and holds: the raw points, the points
    run through the default (None, None) filter, and one map per
    (outlier_algo, filtering_algo) combination, in itertools.product order.

    TODO: Is this the best place for this? If not, what is?
    It almost seems like we need to have a separate evaluation module that is
    separate from the plotting and the calculation modules.
    But then, what is the purpose of this module?
    """
    expected_cols = 2 + len(outlier_algos) * len(filtering_algos)
    expected_rows = len(section_list)
    grid = []
    for section in section_list:
        row = []
        section_df = ls.get_section_points(section)
        row.append(get_map(section_df))
        row.append(get_map(ls.filter_points(section_df, None, None)))
        for oa, fa in itertools.product(outlier_algos, filtering_algos):
            curr_filtered_df = ls.filter_points(section_df, oa, fa)
            print ("After filtering with %s, %s, size is %s" % (oa, fa, curr_filtered_df.shape))
            if "activity" in section:
                row.append(get_map(curr_filtered_df,
                                   line_color = sel_color_list[section.activity.value],
                                   popup = "%s" % (section.activity)))
            else:
                row.append(get_map(curr_filtered_df))
        assert(len(row) == expected_cols)
        grid.append(row)
    assert(len(grid) == expected_rows)
    return grid
| |
import os
import time
import io
import math
import re
try:
from urllib.parse import urlparse
from urllib.request import urlopen, Request
from urllib.error import HTTPError
except ImportError:
from urllib2 import urlopen, urlparse, Request, HTTPError
class UnknownContentLengthException(Exception):
    """Raised when a response carries no Content-Length header."""

class InvalidChecksumException(Exception):
    """Raised when downloaded data does not match the origin's checksum."""

class UnsupportedHTTPCodeException(Exception):
    """Raised for HTTP status codes this downloader does not handle."""

class InvalidOffsetException(Exception):
    """Raised when a resume offset is not satisfiable (HTTP 416)."""

class MissingChecksumHeader(Exception):
    """Raised when checksum verification is requested but the origin sends none."""

# Read/write granularity for streaming downloads: 16 KiB.
CHUNK_SIZE = 16 * 1024
class RateSampler(object):
    """Measure transfer rate over fixed-length sampling windows.

    Use as a context manager around each chunk of work, calling update() with
    the number of bytes handled. Once at least `period` seconds have elapsed
    since the window opened, __exit__ records bytes/second in `rate` and arms
    a new window for the next __enter__.
    """

    def __init__(self, period=1):
        self.rate = None      # bytes/second of the last complete window
        self.reset = True     # open a new window on the next __enter__
        self.period = period  # minimum window length, in seconds

    def __enter__(self):
        if not self.reset:
            return
        self.reset = False
        self.start = time.time()
        self.counter = 0

    def __exit__(self, type, value, traceback):
        elapsed = time.time() - self.start
        if elapsed >= self.period:
            self.reset = True
            self.rate = float(self.counter) / elapsed

    def update(self, value):
        self.counter += value

    def format(self, unit="MB"):
        """Render the current rate as e.g. '1.25MB/s', or None before the first sample."""
        if self.rate is None:
            return None
        scale = {'MB': 1048576, 'kB': 1024}[unit]
        return "%0.2f%s/s" % (self.rate / scale, unit)
class TimeEstimator(object):
    """Estimate remaining transfer time by linear extrapolation of progress."""

    def __init__(self, cooldown=1):
        self.cooldown = cooldown   # seconds to wait before producing estimates
        self.start = time.time()
        self.time_left = None      # estimated seconds remaining, or None

    def update(self, bytes_read, total_size):
        elapsed = time.time() - self.start
        if elapsed > self.cooldown:
            # Projected total time is elapsed * total/read; subtract what
            # has already passed to get the remainder.
            self.time_left = math.ceil(elapsed * total_size /
                                       bytes_read - elapsed)

    def format(self):
        """Render as 'eta [Nm ]Ss', or None before the first estimate."""
        if self.time_left is None:
            return None
        minutes = self.time_left // 60 if self.time_left >= 0 else int(self.time_left / 60)
        text = "eta "
        if self.time_left / 60 >= 1:
            text += "%dm " % (self.time_left / 60)
        return text + "%ds" % (self.time_left % 60)
def format_bytes_read(bytes_read, unit="MB"):
    """Render a byte count as '<value><unit>' with two decimals, e.g. '1.50MB'.

    unit must be 'MB' or 'kB'.
    """
    scale = {'MB': 1048576, 'kB': 1024}[unit]
    return "%0.2f%s" % (float(bytes_read) / scale, unit)
def format_percent(bytes_read, total_size):
    """Render progress as a percentage string with two decimals, e.g. '42.00%'."""
    return "%0.2f%%" % round(bytes_read * 100.0 / total_size, 2)
def get_content_range(response):
    """Parse a Content-Range header into [start, end, total].

    Returns None when the header is absent, empty, or not of the form
    'bytes <start>-<end>/<total>'.
    """
    header = response.headers.get('Content-Range', "").strip()
    if not header:
        return None
    match = re.match(r"bytes (\d+)-(\d+)/(\d+)", header)
    if match is None:
        return None
    return [int(part) for part in match.groups()]
def get_content_length(response):
    """Return the response's Content-Length header as an int.

    Raises:
        UnknownContentLengthException: the response has no Content-Length.
    """
    headers = response.headers
    # Deliberately LBYL: for HTTPMessage-style headers, subscripting a
    # missing key returns None rather than raising, so `in` is the reliable
    # presence test here.
    if 'Content-Length' not in headers:
        raise UnknownContentLengthException
    return int(headers.get('Content-Length').strip())
def get_url_meta(url, checksum_header=None):
    """Issue a HEAD request and return {'size': ..., [ 'checksum': ...]}.

    'checksum' is only present when checksum_header is given and the server
    actually returns that header.
    """
    class HeadRequest(Request):
        # urllib sends whatever verb get_method() returns; force HEAD so we
        # fetch metadata without downloading the body.
        def get_method(self):
            return "HEAD"

    response = urlopen(HeadRequest(url))
    meta = {'size': get_content_length(response)}
    if checksum_header:
        checksum_value = response.headers.get(checksum_header)
        if checksum_value:
            meta['checksum'] = checksum_value
    response.close()
    return meta
def progress(console, bytes_read, total_size, transfer_rate, eta):
    """Write a one-line, carriage-return-terminated progress report to console.

    Fields whose formatters return None (rate/eta before their first sample)
    are simply omitted.
    """
    fields = [
        format_bytes_read(bytes_read),
        format_percent(bytes_read, total_size),
        transfer_rate.format(),
        eta.format(),
        " " * 10,  # trailing spaces wipe leftovers from a longer prior line
    ]
    console.write("Downloaded %s\r" % " ".join(filter(None, fields)))
    console.flush()
def read_request(request, offset=0, console=None,
                 progress_func=None, write_func=None):
    """Stream the response for `request`, optionally resuming at `offset`.

    Each chunk is passed to write_func (if given); progress_func (if given,
    and a console is available) is invoked with running totals. Returns the
    (closed) response object. Raises InvalidOffsetException for HTTP 416 and
    UnsupportedHTTPCodeException for other errors / unexpected status codes.
    """
    # support partial downloads
    if offset > 0:
        request.add_header('Range', "bytes=%s-" % offset)
    try:
        response = urlopen(request)
    except HTTPError as e:
        if e.code == 416: # Requested Range Not Satisfiable
            raise InvalidOffsetException
        # TODO add http error handling here
        raise UnsupportedHTTPCodeException(e.code)
    # Content-Length covers only the remaining bytes when resuming, so add
    # the offset back to get the full object size.
    total_size = get_content_length(response) + offset
    bytes_read = offset
    # sanity checks
    if response.code == 200: # OK
        # A 200 means the server ignored/never saw a Range header.
        assert offset == 0
    elif response.code == 206: # Partial content
        # Cross-check the server's Content-Range against our own bookkeeping.
        range_start, range_end, range_total = get_content_range(response)
        assert range_start == offset
        assert range_total == total_size
        assert range_end + 1 - range_start == total_size - bytes_read
    else:
        raise UnsupportedHTTPCodeException(response.code)
    eta = TimeEstimator()
    transfer_rate = RateSampler()
    if console:
        if offset > 0:
            console.write("Continue downloading...\n")
        else:
            console.write("Downloading...\n")
    while True:
        # Each chunk read is wrapped in the sampler so __exit__ can refresh
        # the rate once a full sampling period has elapsed.
        with transfer_rate:
            chunk = response.read(CHUNK_SIZE)
            if not chunk:
                if progress_func and console:
                    console.write('\n')
                break
            bytes_read += len(chunk)
            transfer_rate.update(len(chunk))
            # ETA is computed over this session's bytes only, not the resumed
            # prefix, hence the offset subtraction on both terms.
            eta.update(bytes_read - offset, total_size - offset)
            if progress_func and console:
                progress_func(console, bytes_read, total_size, transfer_rate, eta)
            if write_func:
                write_func(chunk)
    response.close()
    assert bytes_read == total_size
    return response
def download(url, path=".",
             checksum=None, checksum_header=None,
             headers=None, console=None):
    """Download url to path, resuming a partial file and verifying a checksum.

    path may be a directory (the last URL segment becomes the filename).
    `checksum` is a hashlib-style object; when given together with
    checksum_header, the final digest is compared against the server's header.
    Raises MissingChecksumHeader / InvalidChecksumException on verification
    failure. Returns the absolute path of the downloaded file.
    """
    if os.path.isdir(path):
        path = os.path.join(path, url.rsplit('/', 1)[1])
    path = os.path.abspath(path)
    # "a+b" both creates the file if needed and positions us at the end,
    # so f.tell() doubles as the resume offset.
    with io.open(path, "a+b") as f:
        size = f.tell()
        # update checksum of partially downloaded file
        if checksum:
            f.seek(0, os.SEEK_SET)
            for chunk in iter(lambda: f.read(CHUNK_SIZE), b""):
                checksum.update(chunk)
        # Closure handed to read_request; appends each chunk and folds it
        # into the running checksum.
        def write(chunk):
            if checksum:
                checksum.update(chunk)
            f.write(chunk)
        request = Request(url)
        # request headers
        if headers:
            for key, value in headers.items():
                request.add_header(key, value)
        try:
            response = read_request(request,
                                    offset=size,
                                    console=console,
                                    progress_func=progress,
                                    write_func=write)
        except InvalidOffsetException:
            # 416: our offset is at/past EOF — likely already fully downloaded.
            response = None
        if checksum:
            if response:
                origin_checksum = response.headers.get(checksum_header)
            else:
                # check whether file is already complete
                meta = get_url_meta(url, checksum_header)
                origin_checksum = meta.get('checksum')
            if origin_checksum is None:
                raise MissingChecksumHeader
            if checksum.hexdigest() != origin_checksum:
                raise InvalidChecksumException
            if console:
                # NOTE(review): message hard-codes "sha256" even though the
                # checksum object's algorithm is caller-chosen — confirm.
                console.write("checksum/sha256 OK\n")
    return path
| |
import functools
import re
from itertools import chain
from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.utils import (
COMPILED_REGEX_TYPE, RegexObject, get_migration_name_timestamp,
)
from .topological_sort import stable_topological_sort
class MigrationAutodetector:
"""
Take a pair of ProjectStates and compare them to see what the first would
need doing to make it match the second (the second usually being the
project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
self.existing_apps = {app for app, model in from_state.models}
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
"""
Main entry point to produce a list of applicable changes.
Take a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph, migration_name)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
    def deep_deconstruct(self, obj):
        """
        Recursive deconstruction for a field and its arguments.
        Used for full comparison for rename/alter; sometimes a single-level
        deconstruction will not compare correctly.
        """
        # NOTE: the order of these checks is load-bearing — e.g. the `type`
        # check must run before the hasattr('deconstruct') check below.
        if isinstance(obj, list):
            return [self.deep_deconstruct(value) for value in obj]
        elif isinstance(obj, tuple):
            return tuple(self.deep_deconstruct(value) for value in obj)
        elif isinstance(obj, dict):
            return {
                key: self.deep_deconstruct(value)
                for key, value in obj.items()
            }
        elif isinstance(obj, functools.partial):
            # Compare partials by their function plus deconstructed args/kwargs.
            return (obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords))
        elif isinstance(obj, COMPILED_REGEX_TYPE):
            # Compiled regexes don't compare equal by identity; wrap them in a
            # comparable proxy.
            return RegexObject(obj)
        elif isinstance(obj, type):
            # If this is a type that implements 'deconstruct' as an instance method,
            # avoid treating this as being deconstructible itself - see #22951
            return obj
        elif hasattr(obj, 'deconstruct'):
            deconstructed = obj.deconstruct()
            if isinstance(obj, models.Field):
                # we have a field which also returns a name
                deconstructed = deconstructed[1:]
            path, args, kwargs = deconstructed
            return (
                path,
                [self.deep_deconstruct(value) for value in args],
                {
                    key: self.deep_deconstruct(value)
                    for key, value in kwargs.items()
                },
            )
        else:
            # Plain value (str, int, etc.) — returned as-is for comparison.
            return obj
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to. Used for detecting renames (as,
of course, the related fields change during renames).
"""
fields_def = []
for name, field in sorted(fields):
deconstruction = self.deep_deconstruct(field)
if field.remote_field and field.remote_field.model:
del deconstruction[2]['to']
fields_def.append(deconstruction)
return fields_def
    def _detect_changes(self, convert_apps=None, graph=None):
        """
        Return a dict of migration plans which will achieve the
        change from from_state to to_state. The dict has app labels
        as keys and a list of migrations as values.

        The resulting migrations aren't specially named, but the names
        do matter for dependencies inside the set.

        convert_apps is the list of apps to convert to use migrations
        (i.e. to make initial migrations for, in the usual case)

        graph is an optional argument that, if provided, can help improve
        dependency generation and avoid potential circular dependencies.
        """
        # The first phase is generating all the operations for each app
        # and gathering them into a big per-app list.
        # Then go through that list, order it, and split into migrations to
        # resolve dependencies caused by M2Ms and FKs.
        self.generated_operations = {}
        self.altered_indexes = {}

        # Prepare some old/new state and model lists, separating
        # proxy models and ignoring unmigrated apps.
        self.old_apps = self.from_state.concrete_apps
        self.new_apps = self.to_state.apps
        self.old_model_keys = []
        self.old_proxy_keys = []
        self.old_unmanaged_keys = []
        self.new_model_keys = []
        self.new_proxy_keys = []
        self.new_unmanaged_keys = []
        # Bucket every (app_label, model_name) pair: unmanaged models get
        # their own list, real_apps (apps without migrations) are excluded,
        # and proxies are tracked separately from concrete models.
        for al, mn in sorted(self.from_state.models):
            model = self.old_apps.get_model(al, mn)
            if not model._meta.managed:
                self.old_unmanaged_keys.append((al, mn))
            elif al not in self.from_state.real_apps:
                if model._meta.proxy:
                    self.old_proxy_keys.append((al, mn))
                else:
                    self.old_model_keys.append((al, mn))
        for al, mn in sorted(self.to_state.models):
            model = self.new_apps.get_model(al, mn)
            if not model._meta.managed:
                self.new_unmanaged_keys.append((al, mn))
            elif (
                al not in self.from_state.real_apps or
                (convert_apps and al in convert_apps)
            ):
                if model._meta.proxy:
                    self.new_proxy_keys.append((al, mn))
                else:
                    self.new_model_keys.append((al, mn))

        # NOTE: the generate_* calls below are order-dependent; renames must
        # run first so later phases see the post-rename key lists.
        # Renames have to come first
        self.generate_renamed_models()

        # Prepare lists of fields and generate through model map
        self._prepare_field_lists()
        self._generate_through_model_map()

        # Generate non-rename model operations
        self.generate_deleted_models()
        self.generate_created_models()
        self.generate_deleted_proxies()
        self.generate_created_proxies()
        self.generate_altered_options()
        self.generate_altered_managers()

        # Create the altered indexes and store them in self.altered_indexes.
        # This avoids the same computation in generate_removed_indexes()
        # and generate_added_indexes().
        self.create_altered_indexes()
        # Generate index removal operations before field is removed
        self.generate_removed_indexes()
        # Generate field operations
        self.generate_renamed_fields()
        self.generate_removed_fields()
        self.generate_added_fields()
        self.generate_altered_fields()
        self.generate_altered_unique_together()
        self.generate_altered_index_together()
        self.generate_added_indexes()
        self.generate_altered_db_table()
        self.generate_altered_order_with_respect_to()

        # Finally, order the per-app operation lists, chop them into
        # Migration instances, and run the optimizer over each.
        self._sort_migrations()
        self._build_migration_list(graph)
        self._optimize_migrations()

        return self.migrations
def _prepare_field_lists(self):
"""
Prepare field lists and a list of the fields that used through models
in the old state so dependencies can be made from the through model
deletion to the field that uses it.
"""
self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys)
self.kept_proxy_keys = set(self.old_proxy_keys).intersection(self.new_proxy_keys)
self.kept_unmanaged_keys = set(self.old_unmanaged_keys).intersection(self.new_unmanaged_keys)
self.through_users = {}
self.old_field_keys = set()
self.new_field_keys = set()
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
self.old_field_keys.update((app_label, model_name, x) for x, y in old_model_state.fields)
self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields)
    def _generate_through_model_map(self):
        """Through model map generation.

        Populates self.through_users, mapping each explicit (non-auto-created)
        M2M through model's (app_label, model_name) to the
        (app_label, old_model_name, field_name) of the field that uses it, so
        deleting the through model can depend on removing that field.
        """
        for app_label, model_name in sorted(self.old_model_keys):
            # Resolve renames so we look the model up under its old name.
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            for field_name, field in old_model_state.fields:
                old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name)
                # Only explicit through models matter; auto-created through
                # tables are managed implicitly with the M2M field itself.
                if (hasattr(old_field, "remote_field") and getattr(old_field.remote_field, "through", None) and
                        not old_field.remote_field.through._meta.auto_created):
                    through_key = (
                        old_field.remote_field.through._meta.app_label,
                        old_field.remote_field.through._meta.model_name,
                    )
                    self.through_users[through_key] = (app_label, old_model_name, field_name)
    def _build_migration_list(self, graph=None):
        """
        Chop the lists of operations up into migrations with dependencies on
        each other. Do this by going through an app's list of operations until
        one is found that has an outgoing dependency that isn't in another
        app's migration yet (hasn't been chopped off its list). Then chop off
        the operations before it into a migration and move onto the next app.
        If the loops completes without doing anything, there's a circular
        dependency (which _should_ be impossible as the operations are
        all split at this point so they can't depend and be depended on).
        """
        self.migrations = {}
        num_ops = sum(len(x) for x in self.generated_operations.values())
        chop_mode = False
        while num_ops:
            # On every iteration, we step through all the apps and see if there
            # is a completed set of operations.
            # If we find that a subset of the operations are complete we can
            # try to chop it off from the rest and continue, but we only
            # do this if we've already been through the list once before
            # without any chopping and nothing has changed.
            for app_label in sorted(self.generated_operations):
                chopped = []
                dependencies = set()
                for operation in list(self.generated_operations[app_label]):
                    deps_satisfied = True
                    operation_dependencies = set()
                    for dep in operation._auto_deps:
                        is_swappable_dep = False
                        if dep[0] == "__setting__":
                            # We need to temporarily resolve the swappable dependency to prevent
                            # circular references. While keeping the dependency checks on the
                            # resolved model we still add the swappable dependencies.
                            # See #23322
                            resolved_app_label, resolved_object_name = getattr(settings, dep[1]).split('.')
                            original_dep = dep
                            dep = (resolved_app_label, resolved_object_name.lower(), dep[2], dep[3])
                            is_swappable_dep = True
                        if dep[0] != app_label and dep[0] != "__setting__":
                            # External app dependency. See if it's not yet
                            # satisfied.
                            for other_operation in self.generated_operations.get(dep[0], []):
                                if self.check_dependency(other_operation, dep):
                                    deps_satisfied = False
                                    break
                            if not deps_satisfied:
                                break
                            else:
                                if is_swappable_dep:
                                    operation_dependencies.add((original_dep[0], original_dep[1]))
                                elif dep[0] in self.migrations:
                                    # Depend on the app's most recently chopped migration.
                                    operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
                                else:
                                    # If we can't find the other app, we add a first/last dependency,
                                    # but only if we've already been through once and checked everything
                                    if chop_mode:
                                        # If the app already exists, we add a dependency on the last migration,
                                        # as we don't know which migration contains the target field.
                                        # If it's not yet migrated or has no migrations, we use __first__
                                        if graph and graph.leaf_nodes(dep[0]):
                                            operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
                                        else:
                                            operation_dependencies.add((dep[0], "__first__"))
                                    else:
                                        deps_satisfied = False
                    if deps_satisfied:
                        # Operation is eligible: move it into the current chop
                        # and carry its dependencies along.
                        chopped.append(operation)
                        dependencies.update(operation_dependencies)
                        self.generated_operations[app_label] = self.generated_operations[app_label][1:]
                    else:
                        break
                # Make a migration! Well, only if there's stuff to put in it
                if dependencies or chopped:
                    if not self.generated_operations[app_label] or chop_mode:
                        subclass = type("Migration", (Migration,), {"operations": [], "dependencies": []})
                        instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
                        instance.dependencies = list(dependencies)
                        instance.operations = chopped
                        instance.initial = app_label not in self.existing_apps
                        self.migrations.setdefault(app_label, []).append(instance)
                        chop_mode = False
                    else:
                        # Not ready to commit a partial migration yet; put the
                        # chopped operations back at the front of the queue.
                        self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
            new_num_ops = sum(len(x) for x in self.generated_operations.values())
            if new_num_ops == num_ops:
                # No progress this pass: first enable chop mode; if even that
                # makes no progress, the dependencies are circular.
                if not chop_mode:
                    chop_mode = True
                else:
                    raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations)
            num_ops = new_num_ops
def _sort_migrations(self):
"""
Reorder to make things possible. Reordering may be needed so FKs work
nicely inside the same app.
"""
for app_label, ops in sorted(self.generated_operations.items()):
# construct a dependency graph for intra-app dependencies
dependency_graph = {op: set() for op in ops}
for op in ops:
for dep in op._auto_deps:
if dep[0] == app_label:
for op2 in ops:
if self.check_dependency(op2, dep):
dependency_graph[op].add(op2)
# we use a stable sort for deterministic tests & general behavior
self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph)
def _optimize_migrations(self):
# Add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)
    def check_dependency(self, operation, dependency):
        """
        Return True if the given operation depends on the given dependency,
        False otherwise.

        `dependency` is an (app_label, model_name, field_name, flag) tuple;
        the (field_name, flag) pair selects which kind of dependency this is.
        NOTE: the `is True` / `is False` identity checks are deliberate —
        the flag slot may also hold the strings "alter",
        "order_wrt_unset", or "foo_together_change", which must not match
        the boolean branches.
        """
        # Created model
        if dependency[2] is None and dependency[3] is True:
            return (
                isinstance(operation, operations.CreateModel) and
                operation.name_lower == dependency[1].lower()
            )
        # Created field
        elif dependency[2] is not None and dependency[3] is True:
            return (
                (
                    # The field may be created inline with its model...
                    isinstance(operation, operations.CreateModel) and
                    operation.name_lower == dependency[1].lower() and
                    any(dependency[2] == x for x, y in operation.fields)
                ) or
                (
                    # ...or added afterwards by a separate AddField.
                    isinstance(operation, operations.AddField) and
                    operation.model_name_lower == dependency[1].lower() and
                    operation.name_lower == dependency[2].lower()
                )
            )
        # Removed field
        elif dependency[2] is not None and dependency[3] is False:
            return (
                isinstance(operation, operations.RemoveField) and
                operation.model_name_lower == dependency[1].lower() and
                operation.name_lower == dependency[2].lower()
            )
        # Removed model
        elif dependency[2] is None and dependency[3] is False:
            return (
                isinstance(operation, operations.DeleteModel) and
                operation.name_lower == dependency[1].lower()
            )
        # Field being altered
        elif dependency[2] is not None and dependency[3] == "alter":
            return (
                isinstance(operation, operations.AlterField) and
                operation.model_name_lower == dependency[1].lower() and
                operation.name_lower == dependency[2].lower()
            )
        # order_with_respect_to being unset for a field
        elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
            return (
                isinstance(operation, operations.AlterOrderWithRespectTo) and
                operation.name_lower == dependency[1].lower() and
                (operation.order_with_respect_to or "").lower() != dependency[2].lower()
            )
        # Field is removed and part of an index/unique_together
        elif dependency[2] is not None and dependency[3] == "foo_together_change":
            return (
                isinstance(operation, (operations.AlterUniqueTogether,
                                       operations.AlterIndexTogether)) and
                operation.name_lower == dependency[1].lower()
            )
        # Unknown dependency. Raise an error.
        else:
            raise ValueError("Can't handle dependency %r" % (dependency, ))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
# Dependencies are (app_label, model_name, field_name, create/delete as True/False)
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
def swappable_first_key(self, item):
"""
Place potential swappable models first in lists of created models (only
real way to solve #22783).
"""
try:
model = self.new_apps.get_model(item[0], item[1])
base_names = [base.__name__ for base in model.__bases__]
string_version = "%s.%s" % (item[0], item[1])
if (
model._meta.swappable or
"AbstractUser" in base_names or
"AbstractBaseUser" in base_names or
settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
    def generate_renamed_models(self):
        """
        Find any renamed models, generate the operations for them, and remove
        the old entry from the model lists. Must be run before other
        model-level generation.

        A rename is detected when an added model's relation-agnostic field
        definition exactly matches a removed model's in the same app, and the
        questioner confirms. Side effects: populates self.renamed_models and
        self.renamed_models_rel, and patches self.old_model_keys so later
        phases treat the pair as one kept model.
        """
        self.renamed_models = {}
        self.renamed_models_rel = {}
        added_models = set(self.new_model_keys) - set(self.old_model_keys)
        for app_label, model_name in sorted(added_models):
            model_state = self.to_state.models[app_label, model_name]
            model_fields_def = self.only_relation_agnostic_fields(model_state.fields)

            removed_models = set(self.old_model_keys) - set(self.new_model_keys)
            for rem_app_label, rem_model_name in removed_models:
                # Only consider renames within the same app.
                if rem_app_label == app_label:
                    rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
                    rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
                    if model_fields_def == rem_model_fields_def:
                        if self.questioner.ask_rename_model(rem_model_state, model_state):
                            self.add_operation(
                                app_label,
                                operations.RenameModel(
                                    old_name=rem_model_state.name,
                                    new_name=model_state.name,
                                )
                            )
                            self.renamed_models[app_label, model_name] = rem_model_name
                            renamed_models_rel_key = '%s.%s' % (rem_model_state.app_label, rem_model_state.name)
                            self.renamed_models_rel[renamed_models_rel_key] = '%s.%s' % (
                                model_state.app_label,
                                model_state.name,
                            )
                            # Rewrite the old key list so this pair is now
                            # "kept" rather than deleted+added.
                            self.old_model_keys.remove((rem_app_label, rem_model_name))
                            self.old_model_keys.append((app_label, model_name))
                            break
    def generate_created_models(self):
        """
        Find all new models (both managed and unmanaged) and make create
        operations for them as well as separate operations to create any
        foreign key or M2M relationships (these are optimized later, if
        possible).

        Defer any model options that refer to collections of fields that might
        be deferred (e.g. unique_together, index_together).
        """
        old_keys = set(self.old_model_keys).union(self.old_unmanaged_keys)
        added_models = set(self.new_model_keys) - old_keys
        added_unmanaged_models = set(self.new_unmanaged_keys) - old_keys
        # Swappable candidates are sorted first (reverse of the "___" key)
        # so operations referencing them are generated in a safe order.
        all_added_models = chain(
            sorted(added_models, key=self.swappable_first_key, reverse=True),
            sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True)
        )
        for app_label, model_name in all_added_models:
            model_state = self.to_state.models[app_label, model_name]
            model_opts = self.new_apps.get_model(app_label, model_name)._meta
            # Gather related fields
            related_fields = {}
            primary_key_rel = None
            for field in model_opts.local_fields:
                if field.remote_field:
                    if field.remote_field.model:
                        if field.primary_key:
                            primary_key_rel = field.remote_field.model
                        elif not field.remote_field.parent_link:
                            related_fields[field.name] = field
                    # through will be none on M2Ms on swapped-out models;
                    # we can treat lack of through as auto_created=True, though.
                    if (getattr(field.remote_field, "through", None) and
                            not field.remote_field.through._meta.auto_created):
                        related_fields[field.name] = field
            for field in model_opts.local_many_to_many:
                if field.remote_field.model:
                    related_fields[field.name] = field
                if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
                    related_fields[field.name] = field
            # Are there indexes/unique|index_together to defer?
            # (pop() removes them from options so CreateModel doesn't see
            # field collections that may reference deferred related fields)
            indexes = model_state.options.pop('indexes')
            unique_together = model_state.options.pop('unique_together', None)
            index_together = model_state.options.pop('index_together', None)
            order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
            # Depend on the deletion of any possible proxy version of us
            dependencies = [
                (app_label, model_name, None, False),
            ]
            # Depend on all bases
            for base in model_state.bases:
                if isinstance(base, str) and "." in base:
                    base_app_label, base_name = base.split(".", 1)
                    dependencies.append((base_app_label, base_name, None, True))
            # Depend on the other end of the primary key if it's a relation
            if primary_key_rel:
                dependencies.append((
                    primary_key_rel._meta.app_label,
                    primary_key_rel._meta.object_name,
                    None,
                    True
                ))
            # Generate creation operation
            self.add_operation(
                app_label,
                operations.CreateModel(
                    name=model_state.name,
                    # Related fields are stripped here and re-added below as
                    # separate AddField operations with proper dependencies.
                    fields=[d for d in model_state.fields if d[0] not in related_fields],
                    options=model_state.options,
                    bases=model_state.bases,
                    managers=model_state.managers,
                ),
                dependencies=dependencies,
                beginning=True,
            )

            # Don't add operations which modify the database for unmanaged models
            if not model_opts.managed:
                continue

            # Generate operations for each related field
            for name, field in sorted(related_fields.items()):
                dependencies = self._get_dependencies_for_foreign_key(field)
                # Depend on our own model being created
                dependencies.append((app_label, model_name, None, True))
                # Make operation
                self.add_operation(
                    app_label,
                    operations.AddField(
                        model_name=model_name,
                        name=name,
                        field=field,
                    ),
                    dependencies=list(set(dependencies)),
                )
            # Generate other opns
            related_dependencies = [
                (app_label, model_name, name, True)
                for name, field in sorted(related_fields.items())
            ]
            related_dependencies.append((app_label, model_name, None, True))
            for index in indexes:
                self.add_operation(
                    app_label,
                    operations.AddIndex(
                        model_name=model_name,
                        index=index,
                    ),
                    dependencies=related_dependencies,
                )
            if unique_together:
                self.add_operation(
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=unique_together,
                    ),
                    dependencies=related_dependencies
                )
            if index_together:
                self.add_operation(
                    app_label,
                    operations.AlterIndexTogether(
                        name=model_name,
                        index_together=index_together,
                    ),
                    dependencies=related_dependencies
                )
            if order_with_respect_to:
                self.add_operation(
                    app_label,
                    operations.AlterOrderWithRespectTo(
                        name=model_name,
                        order_with_respect_to=order_with_respect_to,
                    ),
                    dependencies=[
                        (app_label, model_name, order_with_respect_to, True),
                        (app_label, model_name, None, True),
                    ]
                )

            # Fix relationships if the model changed from a proxy model to a
            # concrete model.
            if (app_label, model_name) in self.old_proxy_keys:
                for related_object in model_opts.related_objects:
                    self.add_operation(
                        related_object.related_model._meta.app_label,
                        operations.AlterField(
                            model_name=related_object.related_model._meta.object_name,
                            name=related_object.field.name,
                            field=related_object.field,
                        ),
                        dependencies=[(app_label, model_name, None, True)],
                    )
def generate_created_proxies(self):
    """
    Make CreateModel statements for proxy models. Use the same statements
    as that way there's less code duplication, but of course for proxy
    models it's safe to skip all the pointless field stuff and just chuck
    out an operation.
    """
    added = set(self.new_proxy_keys) - set(self.old_proxy_keys)
    for app_label, model_name in sorted(added):
        model_state = self.to_state.models[app_label, model_name]
        # Sanity check: only proxy model states may reach this generator.
        assert model_state.options.get("proxy")
        # Depend on the deletion of any possible non-proxy version of us
        dependencies = [
            (app_label, model_name, None, False),
        ]
        # Depend on all bases
        for base in model_state.bases:
            if isinstance(base, str) and "." in base:
                base_app_label, base_name = base.split(".", 1)
                dependencies.append((base_app_label, base_name, None, True))
        # Generate creation operation
        self.add_operation(
            app_label,
            operations.CreateModel(
                name=model_state.name,
                fields=[],
                options=model_state.options,
                bases=model_state.bases,
                managers=model_state.managers,
            ),
            # Depend on the deletion of any possible non-proxy version of us
            dependencies=dependencies,
        )
def generate_deleted_models(self):
    """
    Find all deleted models (managed and unmanaged) and make delete
    operations for them as well as separate operations to delete any
    foreign key or M2M relationships (these are optimized later, if
    possible).
    Also bring forward removal of any model options that refer to
    collections of fields - the inverse of generate_created_models().
    """
    new_keys = set(self.new_model_keys).union(self.new_unmanaged_keys)
    deleted_models = set(self.old_model_keys) - new_keys
    deleted_unmanaged_models = set(self.old_unmanaged_keys) - new_keys
    all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models))
    for app_label, model_name in all_deleted_models:
        model_state = self.from_state.models[app_label, model_name]
        model = self.old_apps.get_model(app_label, model_name)
        if not model._meta.managed:
            # Skip here, no need to handle fields for unmanaged models
            continue
        # Gather related fields: FKs/M2Ms that must be removed before the model.
        related_fields = {}
        for field in model._meta.local_fields:
            if field.remote_field:
                if field.remote_field.model:
                    related_fields[field.name] = field
                # through will be none on M2Ms on swapped-out models;
                # we can treat lack of through as auto_created=True, though.
                if (getattr(field.remote_field, "through", None) and
                        not field.remote_field.through._meta.auto_created):
                    related_fields[field.name] = field
        for field in model._meta.local_many_to_many:
            if field.remote_field.model:
                related_fields[field.name] = field
            if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
                related_fields[field.name] = field
        # Generate option removal first
        # (pop() also strips the options from the in-memory model state).
        unique_together = model_state.options.pop('unique_together', None)
        index_together = model_state.options.pop('index_together', None)
        if unique_together:
            self.add_operation(
                app_label,
                operations.AlterUniqueTogether(
                    name=model_name,
                    unique_together=None,
                )
            )
        if index_together:
            self.add_operation(
                app_label,
                operations.AlterIndexTogether(
                    name=model_name,
                    index_together=None,
                )
            )
        # Then remove each related field
        for name, field in sorted(related_fields.items()):
            self.add_operation(
                app_label,
                operations.RemoveField(
                    model_name=model_name,
                    name=name,
                )
            )
        # Finally, remove the model.
        # This depends on both the removal/alteration of all incoming fields
        # and the removal of all its own related fields, and if it's
        # a through model the field that references it.
        dependencies = []
        for related_object in model._meta.related_objects:
            related_object_app_label = related_object.related_model._meta.app_label
            object_name = related_object.related_model._meta.object_name
            field_name = related_object.field.name
            dependencies.append((related_object_app_label, object_name, field_name, False))
            if not related_object.many_to_many:
                dependencies.append((related_object_app_label, object_name, field_name, "alter"))
        for name, field in sorted(related_fields.items()):
            dependencies.append((app_label, model_name, name, False))
        # We're referenced in another field's through=
        through_user = self.through_users.get((app_label, model_state.name_lower))
        if through_user:
            dependencies.append((through_user[0], through_user[1], through_user[2], False))
        # Finally, make the operation, deduping any dependencies
        self.add_operation(
            app_label,
            operations.DeleteModel(
                name=model_state.name,
            ),
            dependencies=list(set(dependencies)),
        )
def generate_deleted_proxies(self):
    """Make DeleteModel options for proxy models."""
    removed_proxies = set(self.old_proxy_keys) - set(self.new_proxy_keys)
    for app_label, model_name in sorted(removed_proxies):
        proxy_state = self.from_state.models[app_label, model_name]
        # Only proxy model states may reach this generator.
        assert proxy_state.options.get("proxy")
        delete_op = operations.DeleteModel(name=proxy_state.name)
        self.add_operation(app_label, delete_op)
def generate_renamed_fields(self):
    """Work out renamed fields."""
    self.renamed_fields = {}
    # Every field that appears only in the new state is a candidate rename
    # target; every field only in the old state is a candidate source.
    for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_model_state = self.from_state.models[app_label, old_model_name]
        field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
        # Scan to see if this is actually a rename!
        field_dec = self.deep_deconstruct(field)
        for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
            if rem_app_label == app_label and rem_model_name == model_name:
                old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name))
                # Rewrite the old field's 'to' target through any model
                # renames so the deconstructed forms compare equal when
                # only the relation target moved.
                if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]:
                    old_rel_to = old_field_dec[2]['to']
                    if old_rel_to in self.renamed_models_rel:
                        old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
                if old_field_dec == field_dec:
                    if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
                        self.add_operation(
                            app_label,
                            operations.RenameField(
                                model_name=model_name,
                                old_name=rem_field_name,
                                new_name=field_name,
                            )
                        )
                        # Record the rename so later phases diff against the
                        # right keys; mutating old_field_keys is intentional.
                        self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
                        self.old_field_keys.add((app_label, model_name, field_name))
                        self.renamed_fields[app_label, model_name, field_name] = rem_field_name
                        break
def generate_added_fields(self):
    """Make AddField operations."""
    added = self.new_field_keys - self.old_field_keys
    for app_label, model_name, field_name in sorted(added):
        self._generate_added_field(app_label, model_name, field_name)
def _generate_added_field(self, app_label, model_name, field_name):
    """Emit an AddField for one field, asking the questioner for a one-off
    default when the new field cannot be added without one."""
    field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
    # Fields that are foreignkeys/m2ms depend on stuff
    dependencies = []
    if field.remote_field and field.remote_field.model:
        dependencies.extend(self._get_dependencies_for_foreign_key(field))
    # You can't just add NOT NULL fields with no default or fields
    # which don't allow empty strings as default.
    preserve_default = True
    time_fields = (models.DateField, models.DateTimeField, models.TimeField)
    if (not field.null and not field.has_default() and
            not field.many_to_many and
            not (field.blank and field.empty_strings_allowed) and
            not (isinstance(field, time_fields) and field.auto_now)):
        # Clone before mutating so the real model field stays untouched.
        field = field.clone()
        if isinstance(field, time_fields) and field.auto_now_add:
            field.default = self.questioner.ask_auto_now_add_addition(field_name, model_name)
        else:
            field.default = self.questioner.ask_not_null_addition(field_name, model_name)
        preserve_default = False
    self.add_operation(
        app_label,
        operations.AddField(
            model_name=model_name,
            name=field_name,
            field=field,
            preserve_default=preserve_default,
        ),
        dependencies=dependencies,
    )
def generate_removed_fields(self):
    """Make RemoveField operations."""
    removed = self.old_field_keys - self.new_field_keys
    for app_label, model_name, field_name in sorted(removed):
        self._generate_removed_field(app_label, model_name, field_name)
def _generate_removed_field(self, app_label, model_name, field_name):
    """Emit a RemoveField operation for a single field."""
    self.add_operation(
        app_label,
        operations.RemoveField(
            model_name=model_name,
            name=field_name,
        ),
        # We might need to depend on the removal of an
        # order_with_respect_to or index/unique_together operation;
        # this is safely ignored if there isn't one
        dependencies=[
            (app_label, model_name, field_name, "order_wrt_unset"),
            (app_label, model_name, field_name, "foo_together_change"),
        ],
    )
def generate_altered_fields(self):
    """
    Make AlterField operations, or possibly RemovedField/AddField if alter
    isn't possible.
    """
    for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)):
        # Did the field change?
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
        old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name)
        new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
        # Implement any model renames on relations; these are handled by RenameModel
        # so we need to exclude them from the comparison
        if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None):
            rename_key = (
                new_field.remote_field.model._meta.app_label,
                new_field.remote_field.model._meta.model_name,
            )
            if rename_key in self.renamed_models:
                new_field.remote_field.model = old_field.remote_field.model
        if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "through", None):
            rename_key = (
                new_field.remote_field.through._meta.app_label,
                new_field.remote_field.through._meta.model_name,
            )
            if rename_key in self.renamed_models:
                new_field.remote_field.through = old_field.remote_field.through
        old_field_dec = self.deep_deconstruct(old_field)
        new_field_dec = self.deep_deconstruct(new_field)
        if old_field_dec != new_field_dec:
            both_m2m = old_field.many_to_many and new_field.many_to_many
            neither_m2m = not old_field.many_to_many and not new_field.many_to_many
            if both_m2m or neither_m2m:
                # Either both fields are m2m or neither is
                preserve_default = True
                # Going null -> NOT NULL without a default needs a one-off
                # default from the user for the existing rows.
                if (old_field.null and not new_field.null and not new_field.has_default() and
                        not new_field.many_to_many):
                    field = new_field.clone()
                    new_default = self.questioner.ask_not_null_alteration(field_name, model_name)
                    if new_default is not models.NOT_PROVIDED:
                        field.default = new_default
                        preserve_default = False
                else:
                    field = new_field
                self.add_operation(
                    app_label,
                    operations.AlterField(
                        model_name=model_name,
                        name=field_name,
                        field=field,
                        preserve_default=preserve_default,
                    )
                )
            else:
                # We cannot alter between m2m and concrete fields
                self._generate_removed_field(app_label, model_name, field_name)
                self._generate_added_field(app_label, model_name, field_name)
def create_altered_indexes(self):
    """Record, per kept model, which indexes were added/removed between states."""
    option_name = operations.AddIndex.option_name
    for app_label, model_name in sorted(self.kept_model_keys):
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_indexes = self.from_state.models[app_label, old_model_name].options[option_name]
        new_indexes = self.to_state.models[app_label, model_name].options[option_name]
        self.altered_indexes.update({
            (app_label, model_name): {
                'added_indexes': [idx for idx in new_indexes if idx not in old_indexes],
                'removed_indexes': [idx for idx in old_indexes if idx not in new_indexes],
            }
        })
def generate_added_indexes(self):
    """Emit AddIndex operations recorded by create_altered_indexes()."""
    for (app_label, model_name), alt_indexes in self.altered_indexes.items():
        for new_index in alt_indexes['added_indexes']:
            add_op = operations.AddIndex(model_name=model_name, index=new_index)
            self.add_operation(app_label, add_op)
def generate_removed_indexes(self):
    """Emit RemoveIndex operations recorded by create_altered_indexes()."""
    for (app_label, model_name), alt_indexes in self.altered_indexes.items():
        for old_index in alt_indexes['removed_indexes']:
            remove_op = operations.RemoveIndex(model_name=model_name, name=old_index.name)
            self.add_operation(app_label, remove_op)
def _get_dependencies_for_foreign_key(self, field):
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.remote_field.model._meta.app_label
dep_object_name = field.remote_field.model._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
dependencies.append((
field.remote_field.through._meta.app_label,
field.remote_field.through._meta.object_name,
None,
True,
))
return dependencies
def _generate_altered_foo_together(self, operation):
    """Shared driver for unique_together/index_together changes.

    ``operation`` is AlterUniqueTogether or AlterIndexTogether; its
    ``option_name`` attribute selects which model option to diff.
    """
    option_name = operation.option_name
    for app_label, model_name in sorted(self.kept_model_keys):
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_model_state = self.from_state.models[app_label, old_model_name]
        new_model_state = self.to_state.models[app_label, model_name]
        # We run the old version through the field renames to account for those
        old_value = old_model_state.options.get(option_name) or set()
        if old_value:
            old_value = {
                tuple(
                    self.renamed_fields.get((app_label, model_name, n), n)
                    for n in unique
                )
                for unique in old_value
            }
        new_value = new_model_state.options.get(option_name) or set()
        if new_value:
            new_value = set(new_value)
        if old_value != new_value:
            dependencies = []
            for foo_togethers in new_value:
                for field_name in foo_togethers:
                    field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
                    if field.remote_field and field.remote_field.model:
                        # FK columns used by the constraint must exist first.
                        dependencies.extend(self._get_dependencies_for_foreign_key(field))
            self.add_operation(
                app_label,
                operation(
                    name=model_name,
                    **{option_name: new_value}
                ),
                dependencies=dependencies,
            )
def generate_altered_unique_together(self):
    """Emit AlterUniqueTogether operations for changed unique_together options."""
    self._generate_altered_foo_together(operations.AlterUniqueTogether)
def generate_altered_index_together(self):
    """Emit AlterIndexTogether operations for changed index_together options."""
    self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_db_table(self):
    """Emit AlterModelTable when a kept model's db_table option changed."""
    models_to_check = self.kept_model_keys.union(self.kept_proxy_keys).union(self.kept_unmanaged_keys)
    for app_label, model_name in sorted(models_to_check):
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_table = self.from_state.models[app_label, old_model_name].options.get('db_table')
        new_table = self.to_state.models[app_label, model_name].options.get('db_table')
        if old_table == new_table:
            continue
        self.add_operation(
            app_label,
            operations.AlterModelTable(name=model_name, table=new_table),
        )
def generate_altered_options(self):
    """
    Work out if any non-schema-affecting options have changed and make an
    operation to represent them in state changes (in case Python code in
    migrations needs them).
    """
    # Includes models that flipped managed <-> unmanaged, since their
    # options may have changed along with the managed flag.
    models_to_check = self.kept_model_keys.union(
        self.kept_proxy_keys
    ).union(
        self.kept_unmanaged_keys
    ).union(
        # unmanaged converted to managed
        set(self.old_unmanaged_keys).intersection(self.new_model_keys)
    ).union(
        # managed converted to unmanaged
        set(self.old_model_keys).intersection(self.new_unmanaged_keys)
    )
    for app_label, model_name in sorted(models_to_check):
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_model_state = self.from_state.models[app_label, old_model_name]
        new_model_state = self.to_state.models[app_label, model_name]
        # Only compare options AlterModelOptions knows how to carry.
        old_options = dict(
            option for option in old_model_state.options.items()
            if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
        )
        new_options = dict(
            option for option in new_model_state.options.items()
            if option[0] in AlterModelOptions.ALTER_OPTION_KEYS
        )
        if old_options != new_options:
            self.add_operation(
                app_label,
                operations.AlterModelOptions(
                    name=model_name,
                    options=new_options,
                )
            )
def generate_altered_order_with_respect_to(self):
    """Emit AlterOrderWithRespectTo for kept models whose option changed."""
    for app_label, model_name in sorted(self.kept_model_keys):
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_model_state = self.from_state.models[app_label, old_model_name]
        new_model_state = self.to_state.models[app_label, model_name]
        if (old_model_state.options.get("order_with_respect_to") !=
                new_model_state.options.get("order_with_respect_to")):
            # Make sure it comes second if we're adding
            # (removal dependency is part of RemoveField)
            dependencies = []
            if new_model_state.options.get("order_with_respect_to"):
                dependencies.append((
                    app_label,
                    model_name,
                    new_model_state.options["order_with_respect_to"],
                    True,
                ))
            # Actually generate the operation
            self.add_operation(
                app_label,
                operations.AlterOrderWithRespectTo(
                    name=model_name,
                    order_with_respect_to=new_model_state.options.get('order_with_respect_to'),
                ),
                dependencies=dependencies,
            )
def generate_altered_managers(self):
    """Emit AlterModelManagers when a kept model's managers changed."""
    for app_label, model_name in sorted(self.kept_model_keys):
        old_model_name = self.renamed_models.get((app_label, model_name), model_name)
        old_managers = self.from_state.models[app_label, old_model_name].managers
        new_managers = self.to_state.models[app_label, model_name].managers
        if old_managers == new_managers:
            continue
        self.add_operation(
            app_label,
            operations.AlterModelManagers(name=model_name, managers=new_managers),
        )
def arrange_for_graph(self, changes, graph, migration_name=None):
    """
    Take a result from changes() and a MigrationGraph, and fix the names
    and dependencies of the changes so they extend the graph from the leaf
    nodes for each app.
    """
    leaves = graph.leaf_nodes()
    name_map = {}
    for app_label, migrations in list(changes.items()):
        if not migrations:
            continue
        # Find the app label's current leaf node
        app_leaf = None
        for leaf in leaves:
            if leaf[0] == app_label:
                app_leaf = leaf
                break
        # Do they want an initial migration for this app?
        if app_leaf is None and not self.questioner.ask_initial(app_label):
            # They don't.
            for migration in migrations:
                name_map[(app_label, migration.name)] = (app_label, "__first__")
            del changes[app_label]
            continue
        # Work out the next number in the sequence
        if app_leaf is None:
            next_number = 1
        else:
            next_number = (self.parse_number(app_leaf[1]) or 0) + 1
        # Name each migration
        for i, migration in enumerate(migrations):
            if i == 0 and app_leaf:
                # First new migration continues on from the current leaf.
                migration.dependencies.append(app_leaf)
            if i == 0 and not app_leaf:
                new_name = "0001_%s" % migration_name if migration_name else "0001_initial"
            else:
                new_name = "%04i_%s" % (
                    next_number,
                    migration_name or self.suggest_name(migration.operations)[:100],
                )
            name_map[(app_label, migration.name)] = (app_label, new_name)
            next_number += 1
            migration.name = new_name
    # Now fix dependencies: rewrite them through the old-name -> new-name map.
    for app_label, migrations in changes.items():
        for migration in migrations:
            migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
    return changes
def _trim_to_apps(self, changes, app_labels):
"""
Take changes from arrange_for_graph() and set of app labels, and return
a modified set of changes which trims out as many migrations that are
not in app_labels as possible. Note that some other migrations may
still be present as they may be required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
    """
    Given a set of operations, suggest a name for the migration they might
    represent. Names are not guaranteed to be unique, but put some effort
    into the fallback name to avoid VCS conflicts if possible.
    """
    if len(ops) == 1:
        op = ops[0]
        if isinstance(op, operations.CreateModel):
            return op.name_lower
        if isinstance(op, operations.DeleteModel):
            return "delete_%s" % op.name_lower
        if isinstance(op, operations.AddField):
            return "%s_%s" % (op.model_name_lower, op.name_lower)
        if isinstance(op, operations.RemoveField):
            return "remove_%s_%s" % (op.model_name_lower, op.name_lower)
    elif len(ops) > 1 and all(isinstance(op, operations.CreateModel) for op in ops):
        return "_".join(sorted(op.name_lower for op in ops))
    # Fallback: timestamped name to avoid VCS conflicts between branches.
    return "auto_%s" % get_migration_name_timestamp()
@classmethod
def parse_number(cls, name):
    """
    Given a migration name, try to extract a number from the beginning of
    it. If no number is found, return None.
    """
    match = re.match(r'^\d+', name)
    return int(match.group()) if match else None
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Matplotlib (inline) plotter
===========================
This utility is a pure matplotlib visualizer/plotter (no interaction!, no
widgets). It is intended to be used in Jupyter notebooks (%inline or %notebook).
"""
from itertools import cycle
from copy import deepcopy
from os import path
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sloth.utils.logging import getLogger
class Plotter(object):
    """Pure-matplotlib, non-interactive multi-panel plotter.

    Builds an ``nrows`` x ``ncols`` grid of subplots and exposes a small
    plot/legend/savefig API, intended for Jupyter %inline/%notebook use
    (no widgets, no interaction).
    """

    #: default colors palette
    _DEFAULT_COLORS = (
        "#1F77B4",
        "#AEC7E8",
        "#FF7F0E",
        "#FFBB78",
        "#2CA02C",
        "#98DF8A",
        "#D62728",
        "#FF9896",
        "#9467BD",
        "#C5B0D5",
        "#8C564B",
        "#C49C94",
        "#E377C2",
        "#F7B6D2",
        "#7F7F7F",
        "#C7C7C7",
        "#BCBD22",
        "#DBDB8D",
        "#17BECF",
        "#9EDAE5",
    )

    #: default plot labels
    #: (not referenced by the methods below; presumably for subclasses or
    #: external callers -- TODO confirm)
    _labels = dict(x="X", y="Y")

    def __init__(
        self,
        name="plotter",
        dpi=150,
        figsize=(10, 10),
        ncols=1,
        nrows=1,
        fontsize=6,
        axes_linewidth=1,
        lines_linewidth=1.5,
        style="seaborn-paper",
        usetex=False,
        title=None,
        titles=None,
        logger=None,
        outdir=None,
        sharex=False,
        sharey=False,
    ):
        """Plotter constructor

        Parameters
        ----------
        name : str
            name of the figure ['plotter']
        dpi : int
            figure dpi [150]
        figsize : tuple
            figure size [(10, 10)]
        ncols, nrows : int
            subplot grid shape [1, 1]
        fontsize : int
            base font size [6]
        axes_linewidth : float
            base axes width [1]
        lines_linewidth : float
            base lines width [1.5]
        style : str
            https://matplotlib.org/gallery/style_sheets/style_sheets_reference.html
        usetex : bool
            use TeX
        title : None or str
            figure main title [None -> use self._name]
        titles : None or list
            list of titles for the subplots [None -> 'win=#']
        logger : instance of getLogger
        outdir : str
            path for saving figures [None]
        sharex, sharey : bool
            share x/y axes between subplots [False]
        """
        #: general
        self._name = name
        self._logger = logger or getLogger(self._name)
        self._title = title or self._name
        self._suptitle = title or f"Fig: {self._title}"
        self._titles = titles
        #: matplotlib rcParams
        self._usetex = usetex
        self._fontsize = fontsize
        self._axes_linewidth = axes_linewidth
        self._lines_linewidth = lines_linewidth
        self._style = style
        #: figure/axes parameters
        self._dpi = dpi
        self._figsize = figsize
        self._ncols = ncols
        self._nrows = nrows
        self._nplots = self._nrows * self._ncols
        #: input/output
        self._outdir = outdir
        #: plotting -- order matters: style must be set before rcParams and
        #: the figure are created, or the style would override them.
        self._initColors()
        self.set_style(self._style)
        self._init_matplotlib()
        self._init_subplots(sharex=sharex, sharey=sharey)

    def _init_matplotlib(self, **kws):
        """init default Matplotlib parameters"""
        plt.ion()
        self._rc = {
            "text.usetex": self._usetex,
            "figure.dpi": self._dpi,
            "figure.figsize": self._figsize,
            "font.size": self._fontsize,
            "axes.titlesize": "medium",
            "axes.linewidth": self._axes_linewidth,
            "xtick.major.width": self._axes_linewidth,
            "ytick.major.width": self._axes_linewidth,
            "lines.linewidth": self._lines_linewidth,
            "grid.alpha": 0.5,
        }
        rcParams.update(self._rc)
        # Keep a snapshot of the full effective configuration.
        self._rc = deepcopy(rcParams)

    def set_style(self, style=None):
        """Set matplotlib style (reset to default if not given)"""
        plt.rcdefaults()
        if style is not None:
            plt.style.use(style)

    def _update_matplotlib(self, rcpars):
        """Update matplotlib base settings

        Parameters
        ----------
        rcpars : dict
            dictionary to update matplotlib.rcParams
        """
        self._init_matplotlib()  #: first reset to defaults
        if rcpars is not None:
            assert type(rcpars) is dict, "'rcpars' should be a dictionary"
            rcParams.update(rcpars)
            #: store updated parameters
            self._rc = deepcopy(rcParams)

    def _init_subplots(self, sharex=False, sharey=False):
        """instantiate figure and subplots"""
        # Close any previous figure registered under the same name first.
        plt.close(self._name)
        self._fig, _axs = plt.subplots(
            num=self._name,
            ncols=self._ncols,
            nrows=self._nrows,
            dpi=self._dpi,
            figsize=self._figsize,
            sharex=sharex,
            sharey=sharey,
        )
        #: reshape Axes as a flat 1-D list, whatever the grid shape
        self._axs = np.array(_axs).reshape(self._nplots)
        # self._axs2 = np.full_like(self._axs, None)
        self._fig.suptitle(self._suptitle, fontsize=self._fontsize + 4)
        if self._titles is not None:
            self._initPlotsTitle(self._titles)

    def _initPlotsTitle(self, titles=None):
        """init title for all subplots"""
        if titles is None:
            self._titles = ["win={0}".format(i) for i in range(self._nplots)]
        else:
            assert type(titles) is list, "titles should be a list"
            assert (
                len(titles) == self._nplots
            ), "titles length should correspond to number of plot windows"
            self._titles = titles
        for iax, ax in enumerate(self._axs):
            ax.set_title(self._titles[iax])

    def _initColors(self):
        """(re)start the infinite color cycle from the default palette"""
        self._colors = cycle(self._DEFAULT_COLORS)

    def _getNextColor(self):
        """return the next color of the palette cycle"""
        return next(self._colors)

    def getAxis(self, win):
        """get the matplotlib.axes._subplots.AxesSubplot at given index"""
        try:
            return self._axs[win]
        except IndexError:
            self._logger.error("Wrong plot index")
            return None

    def subplots_adjust(self, **kws):
        """thin wrapper around Figure.subplots_adjust"""
        return self._fig.subplots_adjust(**kws)

    def newplot(self, *args, **kwargs):
        """Plot command with forced replace=True"""
        _ = kwargs.pop("replace", True)
        return self.plot(*args, replace=True, **kwargs)

    def plot(
        self,
        x,
        y,
        label=None,
        win=0,
        color=None,
        side="left",
        show_legend=None,
        replace=False,
        xscale="linear",
        yscale="linear",
        xlabel=None,
        ylabel=None,
        xlim=None,
        ylim=None,
        title=None,
        **plotkws,
    ):
        """plot in given axis

        Parameters
        ==========
        x, y : arrays to plot
        label : str
            label for the legend [None]
        win : int
            index of self._axs (subplot to use for plot) [0]
        color : str
            line color [cycle(self._colors)]
        side : str
            ['left']
            'right' -> sharex
            'top' -> sharey (not implemented)
        show_legend : None or dict
            if given, it should be a dictionary of parameters for ax.legend()
        **plotkws
            keyword arguments for ax.plot()
        replace : bool
            if True, forces axis update
        xscale, yscale : str
            "linear", "log", "symlog", "logit", ...
            The axis scale type to apply. -> https://matplotlib.org/api/axes_api.html
        xlabel, ylabel : str
            x, y labels
        xlim, ylim: tuples
            tuple for x, y limits [(None, None)]
        title : None or str
            subplot title; only applied when replace=True
        """
        ax = self.getAxis(win)
        #: override axis settings
        if replace:
            ax.set_xscale(xscale)
            ax.set_yscale(yscale)
            ax.set_xlabel(xlabel)
            ax.set_ylabel(ylabel)
            if xlim is not None:
                ax.set_xlim(xlim)
            if ylim is not None:
                ax.set_ylim(ylim)
            if title is not None:
                ax.set_title(title)
            else:
                ax.set_title(f"win={win}")
        if color is None:
            color = self._getNextColor()
        if side == "right":
            # Secondary y axis sharing the same x (note: not tracked in self._axs).
            ax = ax.twinx()
            # np.append(self._axs, ax)
        if side == "top":
            raise NotImplementedError()
        #: main wrapped method
        ax.plot(x, y, label=label, color=color, **plotkws)
        if show_legend is not None:
            assert type(show_legend) is dict, "show_legend: None or dict"
            ax.legend(**show_legend)
        if replace:
            self._fig.tight_layout()

    def legend(
        self,
        win=0,
        loc="upper right",
        bbox_to_anchor=(1.3, 0.95),
        borderaxespad=0.1,
        title="Legend",
        frameon=True,
        fancybox=True,
    ):
        """add a common figure legend

        Collects handles/labels from every subplot into one figure-level
        legend (the ``win`` parameter is currently unused -- TODO confirm).
        """
        handlers, labels = [], []
        for ax in self._axs:
            _handlers, _labels = ax.get_legend_handles_labels()
            handlers.extend(_handlers)
            labels.extend(_labels)
        self._fig.legend(
            handlers,
            labels,
            loc=loc,
            bbox_to_anchor=bbox_to_anchor,
            borderaxespad=borderaxespad,
            title=title,
            frameon=frameon,
            fancybox=fancybox,
        )

    def savefig(self, fig_out=None, dpi_out=300):
        """Save figure to .pdf/.png/.svg files

        Parameters
        ----------
        fig_out : str or None
            output basename (no extension); None is a no-op
        dpi_out : int
            dpi for the rasterized outputs [300]
        """
        if fig_out is None:
            return None
        if self._outdir is not None:
            fig_out = path.join(self._outdir, fig_out)
        self._fig.savefig("{0}.pdf".format(fig_out), dpi=dpi_out, bbox_inches="tight")
        self._fig.savefig("{0}.png".format(fig_out), dpi=dpi_out, bbox_inches="tight")
        self._fig.savefig("{0}.svg".format(fig_out), dpi=dpi_out, bbox_inches="tight")
        self._logger.info("Saved figures .pdf/.png/.svg figures to: %s", fig_out)
# Import-only module: no CLI/demo behavior when executed directly.
if __name__ == "__main__":
    pass
| |
# $Id: __init__.py 7061 2011-06-29 16:24:09Z milde $
# Author: Mariano Guerra <luismarianoguerra@gmail.com>
# Copyright: This module has been placed in the public domain.
"""
Simple HyperText Markup Language document tree Writer.
The output conforms to the HTML version 5
The css is based on twitter bootstrap:
http://twitter.github.com/bootstrap/
this code is based on html4css1
"""
from __future__ import absolute_import
__docformat__ = 'reStructuredText'
import os
import re
import json
import os.path
try:
import Image # check for the Python Imaging Library
except ImportError:
Image = None
from docutils import frontend, nodes, utils, writers, languages
from . import html
from .html import *
# import default post processors so they register
from . import postprocessors
# Python 2/3 compatibility: Python 3 has no `basestring`, so alias it to str.
if IS_PY3:
    basestring = str
def parse_param_value(value):
    """Decode *value* as JSON if possible; otherwise return it unchanged.

    Turns strings like ``"3"``, ``"true"`` or ``"[1,2]"`` into the
    corresponding Python objects, leaving non-JSON text as the raw string.
    """
    try:
        parsed = json.loads(value)
    except ValueError:
        return value
    return parsed
# Directory of this module; used to locate the bundled default stylesheet.
DIR_NAME = os.path.dirname(__file__)
class Writer(writers.Writer):
default_stylesheet = os.path.join(DIR_NAME, 'rst2html5.css')
default_stylesheet_path = utils.relative_path(
os.path.join(os.getcwd(), 'dummy'),
os.path.join(DIR_NAME, default_stylesheet))
default_stylesheet_dirs = ['.']
default_template = 'template.txt'
default_template_path = "."
settings_spec = (
'HTML-Specific Options',
None,
[('Specify the template file (UTF-8 encoded). Default is "%s".'
% default_template_path,
['--template'],
{'default': default_template_path, 'metavar': '<file>'}),
('Comma separated list of stylesheet URLs. '
'Overrides previous --stylesheet and --stylesheet-path settings.',
['--stylesheet'],
{'metavar': '<URL[,URL,...]>', 'overrides': 'stylesheet_path',
'validator': frontend.validate_comma_separated_list}),
('Comma separated list of stylesheet paths. '
'Relative paths are expanded if a matching file is found in '
'the --stylesheet-dirs. With --link-stylesheet, '
'the path is rewritten relative to the output HTML file. '
'Default: "%s"' % default_stylesheet,
['--stylesheet-path'],
{'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
'validator': frontend.validate_comma_separated_list,
'default': [default_stylesheet]}),
('Embed the stylesheet(s) in the output HTML file. The stylesheet '
'files must be accessible during processing. This is the default.',
['--embed-stylesheet'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Link to the stylesheet(s) in the output HTML file. '
'Default: embed stylesheets.',
['--link-stylesheet'],
{'dest': 'embed_stylesheet', 'action': 'store_false'}),
('Comma-separated list of directories where stylesheets are found. '
'Used by --stylesheet-path when expanding relative path arguments. '
'Default: "%s"' % default_stylesheet_dirs,
['--stylesheet-dirs'],
{'metavar': '<dir[,dir,...]>',
'validator': frontend.validate_comma_separated_list,
'default': default_stylesheet_dirs}),
('Specify the initial header level. Default is 1 for "<h1>". '
'Does not affect document title & subtitle (see --no-doc-title).',
['--initial-header-level'],
{'choices': '1 2 3 4 5 6'.split(), 'default': '1',
'metavar': '<level>'}),
('Specify the maximum width (in characters) for one-column field '
'names. Longer field names will span an entire row of the table '
'used to render the field list. Default is 14 characters. '
'Use 0 for "no limit".',
['--field-name-limit'],
{'default': 14, 'metavar': '<level>',
'validator': frontend.validate_nonnegative_int}),
('Specify the maximum width (in characters) for options in option '
'lists. Longer options will span an entire row of the table used '
'to render the option list. Default is 14 characters. '
'Use 0 for "no limit".',
['--option-limit'],
{'default': 14, 'metavar': '<level>',
'validator': frontend.validate_nonnegative_int}),
('Format for footnote references: one of "superscript" or '
'"brackets". Default is "brackets".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'brackets',
'metavar': '<format>',
'overrides': 'trim_footnote_reference_space'}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
('Remove extra vertical whitespace between items of "simple" bullet '
'lists and enumerated lists. Default: enabled.',
['--compact-lists'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple bullet and enumerated lists.',
['--no-compact-lists'],
{'dest': 'compact_lists', 'action': 'store_false'}),
('Remove extra vertical whitespace between items of simple field '
'lists. Default: enabled.',
['--compact-field-lists'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple field lists.',
['--no-compact-field-lists'],
{'dest': 'compact_field_lists', 'action': 'store_false'}),
('Added to standard table classes. '
'Defined styles: "borderless". Default: ""',
['--table-style'],
{'default': ''}),
('Math output format, one of "MathML", "HTML", "MathJax" '
'or "LaTeX". Default: "HTML math.css"',
['--math-output'],
{'default': 'HTML math.css'}),
('Omit the XML declaration. Use with caution.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Obfuscate email addresses to confuse harvesters while still '
'keeping email links usable with standards-compliant browsers.',
['--cloak-email-addresses'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Embed the content (css, js, etc) in the output HTML file. The content '
'files must be accessible during processing. This is the default.',
['--embed-content'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Add a favicon to the generated page',
['--favicon'],
{'default': None}),])
settings_defaults = {
'output_encoding_error_handler': 'xmlcharrefreplace'
}
post_processors = {}
def __init__(self):
    """Initialize the base docutils Writer and select our HTML translator."""
    writers.Writer.__init__(self)
    self.translator_class = HTMLTranslator
@classmethod
def add_postprocessor(cls, name, opt_name, processor):
    """Register *processor* under *opt_name* and expose two command-line
    switches for it: a boolean on/off flag and a companion "-opts" string
    option whose value is parsed into keyword arguments by translate()."""
    flag = '--' + opt_name.replace("_", "-")
    toggle_entry = (name, [flag],
                    {'dest': opt_name,
                     'action': 'store_true',
                     'validator': frontend.validate_boolean})
    params_entry = ("set " + name + " params",
                    [flag + "-opts"],
                    {'dest': opt_name + "_opts"})
    cls.settings_spec[2].append(toggle_entry)
    cls.settings_spec[2].append(params_entry)
    cls.post_processors[opt_name] = processor
def translate(self):
    """Render self.document into an HTML tree, run the enabled
    post-processors over it, and store the serialized markup in
    self.output.

    Fix: the original reused the name ``key`` for the processor-registry
    loop variable AND both parameter-parsing loops; the code only worked
    because of evaluation order. Distinct names make it robust.
    """
    visitor = self.translator_class(self.document)
    self.document.walkabout(visitor)
    tree = visitor.get_tree()
    settings = self.document.settings
    embed = settings.embed_content
    favicon_path = settings.favicon
    if favicon_path:
        # tree[0] is the <head> element built by the translator
        tree[0].append(Link(href=favicon_path, rel="shortcut icon"))
    for (opt_name, processor) in Writer.post_processors.items():
        if not getattr(settings, opt_name):
            continue
        params_str = getattr(settings, opt_name + "_opts") or ""
        pairs = []
        for keyval in params_str.split(","):
            if "=" not in keyval:
                continue
            param, raw_val = keyval.split("=", 1)
            pairs.append((param, parse_param_value(raw_val)))
        params = {}
        # a parameter that appears more than once collects its values
        # into a list
        for param, val in pairs:
            if param in params:
                existing = params[param]
                if isinstance(existing, list):
                    existing.append(val)
                else:
                    params[param] = [existing, val]
            else:
                params[param] = val
        processor(tree, embed, **params)
    # append the default stylesheets AFTER the post-processors so that
    # they take precedence over anything a processor inserted
    visitor.append_default_stylesheets()
    self.output = DOCTYPE
    self.output += str(tree)
# Register every bundled post-processor at import time so its command-line
# switches exist before the option parser reads Writer.settings_spec.
for (key, data) in postprocessors.PROCESSORS.items():
    Writer.add_postprocessor(data["name"], key, data["processor"])
def docinfo_address(node, translator):
    """Render the docinfo "address" field inside <pre> so line breaks survive."""
    return docinfo_item(node, translator, lambda: Pre(class_="address"))
def docinfo_authors(node, translator):
    """Render the docinfo "authors" field as a <ul>; visit_author adds <li>s."""
    return docinfo_item(node, translator, lambda: Ul(class_="authors"))
def docinfo_item(node, translator, inner=None):
    """Emit one docinfo table row: a label cell plus a value cell.

    The label is looked up in the document language (falling back to the
    raw tagname).  When *inner* is given, it is called to build a wrapper
    element placed inside the value cell; content is then appended to that
    wrapper.  Returns the element child content should be appended to.
    """
    name = node.tagname
    label = translator.language.labels.get(name, name)
    value_cell = Td()
    if inner is None:
        target = value_cell
    else:
        target = inner()
        value_cell.append(target)
    row = Tr(Td(label, class_="field-label"), value_cell)
    translator._append(row, node)
    return target
def problematic(node, translator):
    """Render a "problematic" node: a span wrapped in a link that points
    back (via the node's 'refid') to the system message describing the
    problem.  Returns the inner span so the node's children land inside it.

    Fix: the original computed an unused language label (dead code copied
    from the other handlers); removed.
    """
    span = Span(class_="problematic")
    wrapper = A(span, href="#" + node['refid'])
    translator._append(wrapper, node)
    return span
def classifier(node, translator):
    """Append a definition-list classifier (" : classifier") to the term
    element that was just emitted, and return the span that will receive
    the classifier text."""
    term = translator.current[-1]
    delimiter = Span(" :", class_="classifier-delimiter")
    body = Span(class_="classifier")
    term.append(delimiter)
    term.append(body)
    return body
def admonition(node, translator):
    """Render an admonition node as an alert <div>.

    The specific admonition type is mapped onto an alert severity class;
    any extra docutils classes are appended.  A title paragraph is added
    for every type except the generic "admonition" directive.
    """
    tagname = node.tagname.lower()
    if tagname in ('note', 'tip', 'hint'):
        severity = 'info'
    elif tagname in ('attention', 'caution', 'important', 'warning'):
        severity = 'warning'
    elif tagname in ('error', 'danger'):
        severity = 'error'
    else:
        severity = tagname
    css = 'alert-message block-message ' + severity
    extra = " ".join(node.get('classes', []))
    if extra:
        css += " " + extra
    title = "" if tagname == "admonition" else tagname.title()
    div = Div(P(title, class_="admonition-title"), class_=css)
    translator._append(div, node)
    return div
def skip(node, translator):
    """Emit no element for this node; children attach to the current parent."""
    return translator.current
def swallow_childs(node, translator):
    """Render children into a throwaway placeholder element.
    (Name kept as-is -- "childs" -- because NODES references it.)"""
    return Span(class_="remove-me")
def raw(node, translator):
    """Pass the node's text through to the output without escaping."""
    result = html.raw(node.astext())
    translator._append(result, node)
    return result
# Dispatch table used by HTMLTranslator.unknown_visit: maps a docutils node
# tagname to its HTML handler.  A value may be:
#   * a tag class                  -- instantiate it and descend into it,
#   * a (tag class, css) pair      -- instantiate with that CSS class,
#   * a callable(node, translator) -- custom handler returning the new
#                                     "current" element (already inserted),
#   * skip                         -- emit nothing, keep the current parent,
#   * None                         -- handled by a dedicated visit_* method
#                                     (or intentionally falls back to <div>).
NODES = {
    "abbreviation": Abbr,
    "acronym": Abbr,
    # docinfo
    "address": docinfo_address,
    "organization": docinfo_item,
    "revision": docinfo_item,
    "status": docinfo_item,
    "version": docinfo_item,
    "author": docinfo_item,
    "authors": docinfo_authors,
    "contact": docinfo_item,
    "copyright": docinfo_item,
    "date": docinfo_item,
    "docinfo": Table,
    "docinfo_item": None,
    "admonition": admonition,
    "note": admonition,
    "tip": admonition,
    "hint": admonition,
    "attention": admonition,
    "caution": admonition,
    "important": admonition,
    "warning": admonition,
    "error": admonition,
    "danger": admonition,
    "attribution": (P, "attribution"),
    "block_quote": Blockquote,
    "bullet_list": Ul,
    "caption": Figcaption,
    "citation": (Div, "cite"),
    "citation_reference": None,
    "classifier": classifier,
    "colspec": skip,
    "comment": lambda node, _: Comment(node),
    "compound": None,
    "container": None,
    "decoration": skip,
    "definition": Dd,
    "definition_list": Dl,
    "definition_list_item": skip,
    "description": Td,
    "doctest_block": (Pre, "prettyprint lang-python"),
    "document": None,
    "emphasis": Em,
    "field": Tr,
    "field_body": Td,
    "field_list": Table,
    "field_name": (Td, "field-label"),
    "figure": Figure,
    "footer": skip,  # TODO temporary skip
    "footnote": None,
    "footnote_reference": None,
    "generated": skip,
    "header": skip,  # TODO temporary skip
    "image": Img,
    "inline": Span,
    "label": (Div, "du-label"),
    "legend": skip,
    "line": None,
    "line_block": None,
    "list_item": Li,
    "literal": Code,  # inline literal markup use the <code> tag in HTML5. inline code uses <code class="code">
    "math": None,
    "math_block": None,
    "meta": Meta,
    "option": (P, "option"),
    "option_argument": Var,
    "option_group": Td,
    "option_list": (Table, "option-list"),
    "option_list_item": Tr,
    "option_string": skip,
    "paragraph": P,
    "problematic": problematic,
    "raw": raw,
    "reference": None,
    "row": Tr,
    "rubric": None,
    "sidebar": Aside,
    "strong": Strong,
    "subscript": Sub,
    "substitution_definition": swallow_childs,
    "substitution_reference": None,
    "superscript": Sup,
    "table": Table,
    "tbody": Tbody,
    "term": Dt,
    "tgroup": skip,
    "thead": Thead,
    "title_reference": Cite,
    "transition": Hr,
    # handled in visit_*
    "entry": None,
    "enumerated_list": None,
    "literal_block": None,
    "target": None,
    "text": None,
    "title": None,
    "topic": None,
    "section": None,
    "subtitle": None,
    "system_message": None,
}
class HTMLTranslator(nodes.NodeVisitor):
    """Docutils visitor that builds an HTML element tree (rather than
    emitting strings) while walking the document.  The finished tree is
    available via get_tree(); astext() serializes it.

    Internal state: ``self.current`` is the element new content is
    appended to, and ``self.parents`` is the stack of open ancestors
    (pushed/popped by _stack()/pop_parent()).
    """

    def __init__(self, document):
        nodes.NodeVisitor.__init__(self, document)
        self.root = Body()
        self.indent = 1
        self.parents = []          # stack of enclosing open elements
        self.current = self.root   # element receiving new content
        self.settings = document.settings
        self.title = self.settings.title or ""
        self.title_level = int(self.settings.initial_header_level)
        lcode = document.settings.language_code
        try:
            self.language = languages.get_language(lcode)
        except TypeError:
            # older docutils releases require the reporter argument
            self.language = languages.get_language(lcode, document.reporter)
        # NOTE(review): the output encoding name doubles as the <meta>
        # charset value -- confirm this is intended for all encodings
        self.content_type = self.settings.output_encoding
        self.head = Head(
            Meta(charset=self.content_type),
            Title(self.title))

    def append_default_stylesheets(self):
        """ Appends the default styles defined on the translator settings. """
        styles = utils.get_stylesheet_list(self.settings)
        for style in styles:
            self.head.append(self.css(style))

    def css(self, path):
        """Return an inline <style> when embedding content, else a <link>."""
        if self.settings.embed_content:
            content = open(path).read()
            return Style(content, type="text/css")
        else:
            return Link(href=path, rel="stylesheet", type_="text/css")

    def js(self, path):
        """Return an inline <script> containing the file at *path*."""
        content = open(path).read().decode('utf-8')
        return Script(content)

    def get_tree(self):
        """Return the complete <html> tree built so far."""
        return Html(self.head, self.root)

    def astext(self):
        """Serialize the tree to markup using the configured indent."""
        return self.get_tree().format(0, self.indent)

    def _stack(self, tag, node, append_tag=True):
        # Push the current element and make *tag* the new insertion point.
        # append_tag=False means *tag* was already inserted elsewhere.
        self.parents.append(self.current)
        if append_tag:
            self._append(tag, node)
        self.current = tag

    def _append(self, tag, node):
        # Append *tag* to the current element, copying the docutils node's
        # classes/ids onto it.  Plain strings carry no attributes.
        self.current.append(tag)
        if isinstance(tag, basestring):
            return
        atts = {}
        ids = []
        classes = node.get('classes', [])
        cls = node.get("class", None)
        if cls is not None:
            classes.append(cls)
        # move language specification to 'lang' attribute
        languages = [cls for cls in classes
                     if cls.startswith('language-')]
        if languages:
            # attribute name is 'lang' in XHTML 1.0 but 'xml:lang' in 1.1
            atts['lang'] = languages[0][9:]
            classes.pop(classes.index(languages[0]))
        classes = ' '.join(classes).strip()
        if classes:
            atts['class'] = classes
        assert 'id' not in atts
        ids.extend(node.get('ids', []))
        if 'ids' in atts:
            ids.extend(atts['ids'])
            del atts['ids']
        if ids:
            atts['id'] = ids[0]
        # ids must be appended as first children to the tag
        for id in ids[1:]:
            tag.append(Span(id=id))
        tag.attrib.update(atts)

    def pop_parent(self, node):
        # Close the current element; its parent becomes current again.
        self.current = self.parents.pop()

    def visit_Text(self, node):
        self._append(unicode(node.astext()), node)

    def visit_entry(self, node):
        # Table cell: <th> inside a <thead> row, <td> otherwise.
        atts = {}
        if isinstance(node.parent.parent, nodes.thead):
            tag = Th()
        else:
            tag = Td()
        if 'morerows' in node:
            atts['rowspan'] = node['morerows'] + 1
        if 'morecols' in node:
            atts['colspan'] = node['morecols'] + 1
        tag.attrib.update(atts)
        if len(node) == 0:  # empty cell
            tag.append(".")
        self._stack(tag, node)

    def depart_Text(self, node):
        pass

    def visit_literal_block(self, node):
        pre = Pre()
        self._stack(pre, node, True)
        if 'code' in node.get('classes', []):
            # "code" literal blocks render as <pre><code>; the class
            # attribute lands on the inner <code>, so remove it from <pre>
            code = Code()
            self._stack(code, node)
            del pre.attrib['class']

    def depart_literal_block(self, node):
        # pop the optional inner <code> first, then the <pre>
        if isinstance(self.current, Code):
            self.current = self.parents.pop()
        self.current = self.parents.pop()

    def visit_title(self, node, sub=0):
        if isinstance(self.current, Table):
            self._stack(Caption(), node)
        else:
            heading = HEADINGS.get(self.title_level + sub, H6)()
            current = heading
            insert_current = True
            # only wrap in header tags if the <title> is a child of section
            # this excludes the main page title, subtitles and topics
            if self.current.tag == "section":
                self._stack(Header(), node, True)
            if node.hasattr('refid'):
                # self-link the heading text to its target
                current = A(href='#' + node['refid'])
                heading.append(current)
                insert_current = False
                self._append(heading, node)
            self._stack(current, node, insert_current)

    def depart_title(self, node):
        self.current = self.parents.pop()
        if self.current.tag == "header":
            # also close the <header> wrapper opened in visit_title
            self.current = self.parents.pop()

    def visit_subtitle(self, node):
        # a subtitle renders one heading level below its parent title
        self.visit_title(node, 1)

    def visit_topic(self, node):
        self.title_level += 1
        self._stack(Div(class_="topic"), node)

    def depart_topic(self, node):
        self.title_level -= 1
        self.pop_parent(node)

    def visit_section(self, node):
        self.title_level += 1
        self._stack(Section(), node)

    depart_section = depart_topic

    def visit_document(self, node):
        #self.head[1].text = node.get('title', 'document')
        pass

    def depart_document(self, node):
        pass

    def visit_reference(self, node):
        tag = A()
        atts = {"class": "reference"}
        if 'refuri' in node:
            atts['href'] = node['refuri']
            atts['class'] += ' external'
        else:
            assert 'refid' in node, \
                'References must have "refuri" or "refid" attribute.'
            atts['href'] = '#' + node['refid']
            atts['class'] += ' internal'
        if not isinstance(node.parent, nodes.TextElement):
            assert len(node) == 1 and isinstance(node[0], nodes.image)
            atts['class'] += ' image-reference'
        tag.attrib.update(atts)
        self._stack(tag, node)

    def visit_citation_reference(self, node):
        tag = A(href='#' + node['refid'], class_="citation-reference")
        self._stack(tag, node)

    def visit_footnote_reference(self, node):
        href = '#' + node['refid']
        tag = A(class_="footnote-reference", href=href)
        self._stack(tag, node)

    def visit_target(self, node):
        # only explicit inline targets produce an element; link-defining
        # targets ('refuri'/'refid'/'refname') emit nothing
        append_tag = not ('refuri' in node or 'refid' in node or 'refname' in node)
        self._stack(Span(class_="target"), node, append_tag)

    def visit_author(self, node):
        # inside an "authors" list each author becomes an <li>; a lone
        # author becomes a regular docinfo row
        if isinstance(self.current, Ul):
            tag = Li(class_="author")
            self._append(tag, node)
        else:
            tag = docinfo_item(node, self)
        self.parents.append(self.current)
        self.current = tag

    def visit_enumerated_list(self, node):
        atts = {}
        if 'start' in node:
            atts['start'] = node['start']
        if 'enumtype' in node:
            atts['class'] = node['enumtype']
        self._stack(Ol(**atts), node)

    def visit_system_message(self, node):
        msg_type = node['type']
        cont = Div(class_='alert-message block-message system-message ' +
                   msg_type.lower())
        text = P("System Message: %s/%s" % (msg_type, node['level']),
                 class_='system-message-title admonition-title')
        cont.append(text)
        backlinks = ''
        if len(node['backrefs']):
            backrefs = node['backrefs']
            if len(backrefs) == 1:
                backlinks = Em(A("backlink", href="#" + backrefs[0]))
            else:
                backlinks = Div(P("backlinks"), class_="backrefs")
                for (i, backref) in enumerate(backrefs):
                    backlinks.append(A(str(i), href="#" + backref))
                    backlinks.append(" ")
        if node.hasattr('line'):
            line = 'line %s ' % node['line']
        else:
            line = ' '
        cont.append(Span(quote(node['source']), class_="literal"))
        cont.append(line)
        cont.append(backlinks)
        self._stack(cont, node)

    def visit_image(self, node):
        atts = {}
        uri = node['uri']
        # place SVG and SWF images in an <object> element
        types = {
            '.svg': 'image/svg+xml',
            '.swf': 'application/x-shockwave-flash'
        }
        ext = os.path.splitext(uri)[1].lower()
        if ext in ('.svg', '.swf'):
            atts['data'] = uri
            atts['type'] = types[ext]
        else:
            atts['src'] = uri
            atts['alt'] = node.get('alt', uri)
        # image size
        if 'width' in node:
            atts['width'] = node['width']
        if 'height' in node:
            atts['height'] = node['height']
        if 'scale' in node:
            if Image and not ('width' in node and 'height' in node):
                try:
                    im = Image.open(str(uri))
                except (IOError,  # Source image can't be found or opened
                        UnicodeError):  # PIL doesn't like Unicode paths.
                    pass
                else:
                    if 'width' not in atts:
                        atts['width'] = str(im.size[0])
                    if 'height' not in atts:
                        atts['height'] = str(im.size[1])
                    del im
            for att_name in 'width', 'height':
                if att_name in atts:
                    match = re.match(r'([0-9.]+)(\S*)$', atts[att_name])
                    assert match
                    atts[att_name] = '%s%s' % (
                        float(match.group(1)) * (float(node['scale']) / 100),
                        match.group(2))
        style = []
        for att_name in 'width', 'height':
            if att_name in atts:
                if re.match(r'^[0-9.]+$', atts[att_name]):
                    # Interpret unitless values as pixels.
                    atts[att_name] += 'px'
                style.append('%s: %s;' % (att_name, atts[att_name]))
                del atts[att_name]
        if style:
            atts['style'] = ' '.join(style)
        if 'align' in node:
            atts['class'] = 'align-%s' % node['align']
        if ext in ('.svg', '.swf'):  # place in an object element,
            tag = Object(node.get('alt', uri))
        else:
            tag = Img()
        tag.attrib.update(atts)
        self._stack(tag, node)

    def unknown_visit(self, node):
        # Generic dispatch through the NODES table for node types that
        # have no dedicated visit_* method.
        nodename = node.__class__.__name__
        handler = NODES.get(nodename, None)
        already_inserted = False
        if isinstance(handler, tuple):
            tag_class, cls = handler
            new_current = tag_class(class_=cls)
        elif type(handler) == type and issubclass(handler, TagBase):
            new_current = handler()
        elif callable(handler):
            new_current = handler(node, self)
            already_inserted = True
        else:
            # no handler registered: fall back to a <div> carrying only
            # the attributes safe to forward verbatim
            known_attributes = self.get_known_attributes(node)
            new_current = Div(**known_attributes)
        self._stack(new_current, node, not already_inserted)

    def get_known_attributes(self, node):
        """Return the subset of node attributes forwarded verbatim to HTML."""
        attrs = {}
        for attr, value in node.attributes.items():
            if attr.startswith("data-") or attr in set(['title', 'class', 'id']):
                attrs[attr] = value
        return attrs

    unknown_departure = pop_parent
    depart_reference = pop_parent
| |
# Generated by Snowball 2.1.0 - https://snowballstem.org/
from .basestemmer import BaseStemmer
from .among import Among
class RussianStemmer(BaseStemmer):
    '''
    This class implements the stemming algorithm defined by a snowball script.
    Generated by Snowball 2.1.0 - https://snowballstem.org/

    NOTE: generated code -- regenerate from the Snowball source instead of
    editing by hand.  The lab* exceptions (defined at module level) emulate
    the compiler's structured-jump labels via raise/except pairs.
    '''

    # Suffix tables: Among(ending, link-to-shorter-entry, rule-number),
    # with the Cyrillic endings written as unicode escapes.
    a_0 = [
        Among(u"\u0432", -1, 1),
        Among(u"\u0438\u0432", 0, 2),
        Among(u"\u044B\u0432", 0, 2),
        Among(u"\u0432\u0448\u0438", -1, 1),
        Among(u"\u0438\u0432\u0448\u0438", 3, 2),
        Among(u"\u044B\u0432\u0448\u0438", 3, 2),
        Among(u"\u0432\u0448\u0438\u0441\u044C", -1, 1),
        Among(u"\u0438\u0432\u0448\u0438\u0441\u044C", 6, 2),
        Among(u"\u044B\u0432\u0448\u0438\u0441\u044C", 6, 2)
    ]

    a_1 = [
        Among(u"\u0435\u0435", -1, 1),
        Among(u"\u0438\u0435", -1, 1),
        Among(u"\u043E\u0435", -1, 1),
        Among(u"\u044B\u0435", -1, 1),
        Among(u"\u0438\u043C\u0438", -1, 1),
        Among(u"\u044B\u043C\u0438", -1, 1),
        Among(u"\u0435\u0439", -1, 1),
        Among(u"\u0438\u0439", -1, 1),
        Among(u"\u043E\u0439", -1, 1),
        Among(u"\u044B\u0439", -1, 1),
        Among(u"\u0435\u043C", -1, 1),
        Among(u"\u0438\u043C", -1, 1),
        Among(u"\u043E\u043C", -1, 1),
        Among(u"\u044B\u043C", -1, 1),
        Among(u"\u0435\u0433\u043E", -1, 1),
        Among(u"\u043E\u0433\u043E", -1, 1),
        Among(u"\u0435\u043C\u0443", -1, 1),
        Among(u"\u043E\u043C\u0443", -1, 1),
        Among(u"\u0438\u0445", -1, 1),
        Among(u"\u044B\u0445", -1, 1),
        Among(u"\u0435\u044E", -1, 1),
        Among(u"\u043E\u044E", -1, 1),
        Among(u"\u0443\u044E", -1, 1),
        Among(u"\u044E\u044E", -1, 1),
        Among(u"\u0430\u044F", -1, 1),
        Among(u"\u044F\u044F", -1, 1)
    ]

    a_2 = [
        Among(u"\u0435\u043C", -1, 1),
        Among(u"\u043D\u043D", -1, 1),
        Among(u"\u0432\u0448", -1, 1),
        Among(u"\u0438\u0432\u0448", 2, 2),
        Among(u"\u044B\u0432\u0448", 2, 2),
        Among(u"\u0449", -1, 1),
        Among(u"\u044E\u0449", 5, 1),
        Among(u"\u0443\u044E\u0449", 6, 2)
    ]

    a_3 = [
        Among(u"\u0441\u044C", -1, 1),
        Among(u"\u0441\u044F", -1, 1)
    ]

    a_4 = [
        Among(u"\u043B\u0430", -1, 1),
        Among(u"\u0438\u043B\u0430", 0, 2),
        Among(u"\u044B\u043B\u0430", 0, 2),
        Among(u"\u043D\u0430", -1, 1),
        Among(u"\u0435\u043D\u0430", 3, 2),
        Among(u"\u0435\u0442\u0435", -1, 1),
        Among(u"\u0438\u0442\u0435", -1, 2),
        Among(u"\u0439\u0442\u0435", -1, 1),
        Among(u"\u0435\u0439\u0442\u0435", 7, 2),
        Among(u"\u0443\u0439\u0442\u0435", 7, 2),
        Among(u"\u043B\u0438", -1, 1),
        Among(u"\u0438\u043B\u0438", 10, 2),
        Among(u"\u044B\u043B\u0438", 10, 2),
        Among(u"\u0439", -1, 1),
        Among(u"\u0435\u0439", 13, 2),
        Among(u"\u0443\u0439", 13, 2),
        Among(u"\u043B", -1, 1),
        Among(u"\u0438\u043B", 16, 2),
        Among(u"\u044B\u043B", 16, 2),
        Among(u"\u0435\u043C", -1, 1),
        Among(u"\u0438\u043C", -1, 2),
        Among(u"\u044B\u043C", -1, 2),
        Among(u"\u043D", -1, 1),
        Among(u"\u0435\u043D", 22, 2),
        Among(u"\u043B\u043E", -1, 1),
        Among(u"\u0438\u043B\u043E", 24, 2),
        Among(u"\u044B\u043B\u043E", 24, 2),
        Among(u"\u043D\u043E", -1, 1),
        Among(u"\u0435\u043D\u043E", 27, 2),
        Among(u"\u043D\u043D\u043E", 27, 1),
        Among(u"\u0435\u0442", -1, 1),
        Among(u"\u0443\u0435\u0442", 30, 2),
        Among(u"\u0438\u0442", -1, 2),
        Among(u"\u044B\u0442", -1, 2),
        Among(u"\u044E\u0442", -1, 1),
        Among(u"\u0443\u044E\u0442", 34, 2),
        Among(u"\u044F\u0442", -1, 2),
        Among(u"\u043D\u044B", -1, 1),
        Among(u"\u0435\u043D\u044B", 37, 2),
        Among(u"\u0442\u044C", -1, 1),
        Among(u"\u0438\u0442\u044C", 39, 2),
        Among(u"\u044B\u0442\u044C", 39, 2),
        Among(u"\u0435\u0448\u044C", -1, 1),
        Among(u"\u0438\u0448\u044C", -1, 2),
        Among(u"\u044E", -1, 2),
        Among(u"\u0443\u044E", 44, 2)
    ]

    a_5 = [
        Among(u"\u0430", -1, 1),
        Among(u"\u0435\u0432", -1, 1),
        Among(u"\u043E\u0432", -1, 1),
        Among(u"\u0435", -1, 1),
        Among(u"\u0438\u0435", 3, 1),
        Among(u"\u044C\u0435", 3, 1),
        Among(u"\u0438", -1, 1),
        Among(u"\u0435\u0438", 6, 1),
        Among(u"\u0438\u0438", 6, 1),
        Among(u"\u0430\u043C\u0438", 6, 1),
        Among(u"\u044F\u043C\u0438", 6, 1),
        Among(u"\u0438\u044F\u043C\u0438", 10, 1),
        Among(u"\u0439", -1, 1),
        Among(u"\u0435\u0439", 12, 1),
        Among(u"\u0438\u0435\u0439", 13, 1),
        Among(u"\u0438\u0439", 12, 1),
        Among(u"\u043E\u0439", 12, 1),
        Among(u"\u0430\u043C", -1, 1),
        Among(u"\u0435\u043C", -1, 1),
        Among(u"\u0438\u0435\u043C", 18, 1),
        Among(u"\u043E\u043C", -1, 1),
        Among(u"\u044F\u043C", -1, 1),
        Among(u"\u0438\u044F\u043C", 21, 1),
        Among(u"\u043E", -1, 1),
        Among(u"\u0443", -1, 1),
        Among(u"\u0430\u0445", -1, 1),
        Among(u"\u044F\u0445", -1, 1),
        Among(u"\u0438\u044F\u0445", 26, 1),
        Among(u"\u044B", -1, 1),
        Among(u"\u044C", -1, 1),
        Among(u"\u044E", -1, 1),
        Among(u"\u0438\u044E", 30, 1),
        Among(u"\u044C\u044E", 30, 1),
        Among(u"\u044F", -1, 1),
        Among(u"\u0438\u044F", 33, 1),
        Among(u"\u044C\u044F", 33, 1)
    ]

    a_6 = [
        Among(u"\u043E\u0441\u0442", -1, 1),
        Among(u"\u043E\u0441\u0442\u044C", -1, 1)
    ]

    a_7 = [
        Among(u"\u0435\u0439\u0448\u0435", -1, 1),
        Among(u"\u043D", -1, 2),
        Among(u"\u0435\u0439\u0448", -1, 1),
        Among(u"\u044C", -1, 3)
    ]

    # Grouping bitmap used with go_in/out_grouping over the code-point
    # range U+0430..U+044F (see the 1072/1103 bounds below).
    g_v = [33, 65, 8, 232]

    # Region boundaries (cursor positions) computed by __r_mark_regions.
    I_p2 = 0
    I_pV = 0

    def __r_mark_regions(self):
        """Compute the pV and p2 region boundaries used by the suffix rules."""
        self.I_pV = self.limit
        self.I_p2 = self.limit
        v_1 = self.cursor
        try:
            if not self.go_out_grouping(RussianStemmer.g_v, 1072, 1103):
                raise lab0()
            self.cursor += 1
            self.I_pV = self.cursor
            if not self.go_in_grouping(RussianStemmer.g_v, 1072, 1103):
                raise lab0()
            self.cursor += 1
            if not self.go_out_grouping(RussianStemmer.g_v, 1072, 1103):
                raise lab0()
            self.cursor += 1
            if not self.go_in_grouping(RussianStemmer.g_v, 1072, 1103):
                raise lab0()
            self.cursor += 1
            self.I_p2 = self.cursor
        except lab0: pass
        self.cursor = v_1
        return True

    def __r_R2(self):
        """True when the cursor lies at or beyond the p2 boundary."""
        if not self.I_p2 <= self.cursor:
            return False
        return True

    def __r_perfective_gerund(self):
        """Strip a perfective gerund ending (table a_0); rule 1 also
        requires a preceding \u0430 or \u044F."""
        self.ket = self.cursor
        among_var = self.find_among_b(RussianStemmer.a_0)
        if among_var == 0:
            return False
        self.bra = self.cursor
        if among_var == 1:
            try:
                v_1 = self.limit - self.cursor
                try:
                    if not self.eq_s_b(u"\u0430"):
                        raise lab1()
                    raise lab0()
                except lab1: pass
                self.cursor = self.limit - v_1
                if not self.eq_s_b(u"\u044F"):
                    return False
            except lab0: pass
            if not self.slice_del():
                return False
        else:
            if not self.slice_del():
                return False
        return True

    def __r_adjective(self):
        """Strip an adjectival ending (table a_1)."""
        self.ket = self.cursor
        if self.find_among_b(RussianStemmer.a_1) == 0:
            return False
        self.bra = self.cursor
        if not self.slice_del():
            return False
        return True

    def __r_adjectival(self):
        """Strip an adjective ending, then optionally a participle (a_2)."""
        if not self.__r_adjective():
            return False
        v_1 = self.limit - self.cursor
        try:
            self.ket = self.cursor
            among_var = self.find_among_b(RussianStemmer.a_2)
            if among_var == 0:
                self.cursor = self.limit - v_1
                raise lab0()
            self.bra = self.cursor
            if among_var == 1:
                try:
                    v_2 = self.limit - self.cursor
                    try:
                        if not self.eq_s_b(u"\u0430"):
                            raise lab2()
                        raise lab1()
                    except lab2: pass
                    self.cursor = self.limit - v_2
                    if not self.eq_s_b(u"\u044F"):
                        self.cursor = self.limit - v_1
                        raise lab0()
                except lab1: pass
                if not self.slice_del():
                    return False
            else:
                if not self.slice_del():
                    return False
        except lab0: pass
        return True

    def __r_reflexive(self):
        """Strip a reflexive ending (table a_3)."""
        self.ket = self.cursor
        if self.find_among_b(RussianStemmer.a_3) == 0:
            return False
        self.bra = self.cursor
        if not self.slice_del():
            return False
        return True

    def __r_verb(self):
        """Strip a verb ending (table a_4); rule 1 also requires a
        preceding \u0430 or \u044F."""
        self.ket = self.cursor
        among_var = self.find_among_b(RussianStemmer.a_4)
        if among_var == 0:
            return False
        self.bra = self.cursor
        if among_var == 1:
            try:
                v_1 = self.limit - self.cursor
                try:
                    if not self.eq_s_b(u"\u0430"):
                        raise lab1()
                    raise lab0()
                except lab1: pass
                self.cursor = self.limit - v_1
                if not self.eq_s_b(u"\u044F"):
                    return False
            except lab0: pass
            if not self.slice_del():
                return False
        else:
            if not self.slice_del():
                return False
        return True

    def __r_noun(self):
        """Strip a noun ending (table a_5)."""
        self.ket = self.cursor
        if self.find_among_b(RussianStemmer.a_5) == 0:
            return False
        self.bra = self.cursor
        if not self.slice_del():
            return False
        return True

    def __r_derivational(self):
        """Strip a derivational suffix (table a_6) when inside region p2."""
        self.ket = self.cursor
        if self.find_among_b(RussianStemmer.a_6) == 0:
            return False
        self.bra = self.cursor
        if not self.__r_R2():
            return False
        if not self.slice_del():
            return False
        return True

    def __r_tidy_up(self):
        """Final cleanup: superlative endings, double \u043D, soft sign (a_7)."""
        self.ket = self.cursor
        among_var = self.find_among_b(RussianStemmer.a_7)
        if among_var == 0:
            return False
        self.bra = self.cursor
        if among_var == 1:
            if not self.slice_del():
                return False
            self.ket = self.cursor
            if not self.eq_s_b(u"\u043D"):
                return False
            self.bra = self.cursor
            if not self.eq_s_b(u"\u043D"):
                return False
            if not self.slice_del():
                return False
        elif among_var == 2:
            if not self.eq_s_b(u"\u043D"):
                return False
            if not self.slice_del():
                return False
        else:
            if not self.slice_del():
                return False
        return True

    def _stem(self):
        """Apply the full algorithm to the current word buffer."""
        # Phase 1 (forward): rewrite every \u0451 to \u0435 so the suffix
        # tables need only one spelling.
        v_1 = self.cursor
        try:
            while True:
                v_2 = self.cursor
                try:
                    try:
                        while True:
                            v_3 = self.cursor
                            try:
                                self.bra = self.cursor
                                if not self.eq_s(u"\u0451"):
                                    raise lab3()
                                self.ket = self.cursor
                                self.cursor = v_3
                                raise lab2()
                            except lab3: pass
                            self.cursor = v_3
                            if self.cursor >= self.limit:
                                raise lab1()
                            self.cursor += 1
                    except lab2: pass
                    if not self.slice_from(u"\u0435"):
                        return False
                    continue
                except lab1: pass
                self.cursor = v_2
                break
        except lab0: pass
        self.cursor = v_1
        self.__r_mark_regions()
        # Phase 2 (backward): strip suffixes, restricted to region pV.
        self.limit_backward = self.cursor
        self.cursor = self.limit
        if self.cursor < self.I_pV:
            return False
        v_6 = self.limit_backward
        self.limit_backward = self.I_pV
        v_7 = self.limit - self.cursor
        try:
            try:
                v_8 = self.limit - self.cursor
                try:
                    if not self.__r_perfective_gerund():
                        raise lab6()
                    raise lab5()
                except lab6: pass
                self.cursor = self.limit - v_8
                v_9 = self.limit - self.cursor
                try:
                    if not self.__r_reflexive():
                        self.cursor = self.limit - v_9
                        raise lab7()
                except lab7: pass
                try:
                    v_10 = self.limit - self.cursor
                    try:
                        if not self.__r_adjectival():
                            raise lab9()
                        raise lab8()
                    except lab9: pass
                    self.cursor = self.limit - v_10
                    try:
                        if not self.__r_verb():
                            raise lab10()
                        raise lab8()
                    except lab10: pass
                    self.cursor = self.limit - v_10
                    if not self.__r_noun():
                        raise lab4()
                except lab8: pass
            except lab5: pass
        except lab4: pass
        self.cursor = self.limit - v_7
        # optional trailing \u0438
        v_11 = self.limit - self.cursor
        try:
            self.ket = self.cursor
            if not self.eq_s_b(u"\u0438"):
                self.cursor = self.limit - v_11
                raise lab11()
            self.bra = self.cursor
            if not self.slice_del():
                return False
        except lab11: pass
        v_12 = self.limit - self.cursor
        self.__r_derivational()
        self.cursor = self.limit - v_12
        v_13 = self.limit - self.cursor
        self.__r_tidy_up()
        self.cursor = self.limit - v_13
        self.limit_backward = v_6
        self.cursor = self.limit_backward
        return True
# One exception class per Snowball "lab" label: the generated code raises
# and catches these to emulate the compiler's structured jumps (goto).
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
class lab10(BaseException): pass
class lab11(BaseException): pass
| |
import numpy as np
try:
from malpi.im2col_cython import col2im_cython, im2col_cython
from malpi.im2col_cython import col2im_6d_cython
except ImportError:
print 'run the following from the malpi directory and try again:'
print 'python setup.py build_ext --inplace'
print 'You may also need to restart your iPython kernel'
from malpi.im2col import *
def conv_forward_im2col(x, w, b, conv_param):
    """
    A fast implementation of the forward pass for a convolutional layer
    based on im2col and col2im.

    Inputs:
    - x: input data, shape (N, C, H, W)
    - w: filter weights, shape (num_filters, C, filter_height, filter_width)
    - b: biases, shape (num_filters,)
    - conv_param: dict with integer keys 'stride' and 'pad'

    Returns (out, cache); out has shape (N, num_filters, out_height, out_width).

    Fixes: use floor division so the output dims stay ints under Python 3,
    and drop the np.zeros allocation that was immediately overwritten.
    """
    N, C, H, W = x.shape
    num_filters, _, filter_height, filter_width = w.shape
    stride, pad = conv_param['stride'], conv_param['pad']
    # Check dimensions
    assert (W + 2 * pad - filter_width) % stride == 0, 'width does not work'
    assert (H + 2 * pad - filter_height) % stride == 0, 'height does not work'
    # Output spatial size
    out_height = (H + 2 * pad - filter_height) // stride + 1
    out_width = (W + 2 * pad - filter_width) // stride + 1
    # x_cols = im2col_indices(x, w.shape[2], w.shape[3], pad, stride)
    x_cols = im2col_cython(x, filter_height, filter_width, pad, stride)
    # Every convolution becomes one big matrix multiply.
    res = w.reshape((num_filters, -1)).dot(x_cols) + b.reshape(-1, 1)
    out = res.reshape(num_filters, out_height, out_width, N)
    out = out.transpose(3, 0, 1, 2)
    cache = (x, w, b, conv_param, x_cols)
    return out, cache
def conv_forward_strides(x, w, b, conv_param, mode='train'):
    """
    Forward pass for a conv layer implemented with a stride-trick im2col.

    Inputs:
    - x: input data, shape (N, C, H, W)
    - w: filter weights, shape (F, C, HH, WW)
    - b: biases, shape (F,)
    - conv_param: dict with integer keys 'stride' and 'pad'
    - mode: 'train' keeps the full backward cache; anything else returns ()

    Returns (out, cache); out has shape (N, F, out_h, out_w).

    Fix: floor division keeps out_h/out_w ints under Python 3 (a float
    here breaks the later reshape).
    """
    N, C, H, W = x.shape
    F, _, HH, WW = w.shape
    stride, pad = conv_param['stride'], conv_param['pad']
    # Check dimensions
    assert (W + 2 * pad - WW) % stride == 0, 'width does not work'
    assert (H + 2 * pad - HH) % stride == 0, 'height does not work'
    # Pad the input
    p = pad
    x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
    # Figure out output dimensions (H/W become the padded sizes)
    H += 2 * pad
    W += 2 * pad
    out_h = (H - HH) // stride + 1
    out_w = (W - WW) // stride + 1
    # Perform an im2col operation by picking clever strides
    shape = (C, HH, WW, N, out_h, out_w)
    strides = (H * W, W, 1, C * H * W, stride * W, stride)
    strides = x.itemsize * np.array(strides)
    x_stride = np.lib.stride_tricks.as_strided(x_padded,
                                               shape=shape, strides=strides)
    x_cols = np.ascontiguousarray(x_stride)
    x_cols.shape = (C * HH * WW, N * out_h * out_w)
    # Now all our convolutions are a big matrix multiply
    res = w.reshape(F, -1).dot(x_cols) + b.reshape(-1, 1)
    # Reshape the output
    res.shape = (F, N, out_h, out_w)
    out = res.transpose(1, 0, 2, 3)
    # Return a contiguous array for parity with the other fast paths.
    out = np.ascontiguousarray(out)
    if 'train' == mode:
        cache = (x, w, b, conv_param, x_cols)
    else:
        cache = ()
    return out, cache
def conv_backward_strides(dout, cache):
    """
    Backward pass matching conv_forward_strides.

    Inputs:
    - dout: upstream gradient, shape (N, F, out_h, out_w)
    - cache: (x, w, b, conv_param, x_cols) from the forward pass

    Returns (dx, dw, db).
    """
    x, w, b, conv_param, x_cols = cache
    stride, pad = conv_param['stride'], conv_param['pad']
    N, C, H, W = x.shape
    F, _, HH, WW = w.shape
    out_h, out_w = dout.shape[2], dout.shape[3]
    # Bias gradient: sum over everything except the filter dimension.
    db = dout.sum(axis=(0, 2, 3))
    dout_mat = dout.transpose(1, 0, 2, 3).reshape(F, -1)
    dw = dout_mat.dot(x_cols.T).reshape(w.shape)
    # Scatter the column gradient back onto the (padded) input.
    dx_cols = w.reshape(F, -1).T.dot(dout_mat)
    dx_cols.shape = (C, HH, WW, N, out_h, out_w)
    dx = col2im_6d_cython(dx_cols, N, C, H, W, HH, WW, pad, stride)
    return dx, dw, db
def conv_backward_im2col(dout, cache):
    """
    A fast implementation of the backward pass for a convolutional layer
    based on im2col and col2im.  Returns (dx, dw, db).
    """
    x, w, b, conv_param, x_cols = cache
    stride, pad = conv_param['stride'], conv_param['pad']
    num_filters, _, filter_height, filter_width = w.shape
    # Bias gradient: sum over everything except the filter dimension.
    db = dout.sum(axis=(0, 2, 3))
    dout_mat = dout.transpose(1, 2, 3, 0).reshape(num_filters, -1)
    dw = dout_mat.dot(x_cols.T).reshape(w.shape)
    dx_cols = w.reshape(num_filters, -1).T.dot(dout_mat)
    # dx = col2im_indices(dx_cols, x.shape, filter_height, filter_width, pad, stride)
    N, C, H, W = x.shape
    dx = col2im_cython(dx_cols, N, C, H, W,
                       filter_height, filter_width, pad, stride)
    return dx, dw, db
# Select the im2col-based implementations as the public "fast" conv ops.
# The stride-trick variants above are a drop-in alternative:
#conv_forward_fast = conv_forward_strides
#conv_backward_fast = conv_backward_strides
conv_forward_fast = conv_forward_im2col
conv_backward_fast = conv_backward_im2col
def max_pool_forward_fast(x, pool_param, mode='train'):
    """
    A fast implementation of the forward pass for a max pooling layer.

    Chooses between the reshape method and the im2col method: when the
    pooling regions are square and tile the input exactly, the very fast
    reshape method applies; otherwise we fall back on im2col.

    Returns (out, cache) where cache is tagged with the method used so
    max_pool_backward_fast can dispatch.
    """
    N, C, H, W = x.shape
    pool_height = pool_param['pool_height']
    pool_width = pool_param['pool_width']
    if 'pool_stride' in pool_param:
        stride = pool_param['pool_stride']
        # NOTE: intentionally written back so the helper functions, which
        # read 'stride', see the same value.
        pool_param['stride'] = stride
    else:
        stride = pool_param['stride']
    square = pool_height == pool_width == stride
    tiles_exactly = H % pool_height == 0 and W % pool_width == 0
    if square and tiles_exactly:
        out, inner_cache = max_pool_forward_reshape(x, pool_param, mode=mode)
        return out, ('reshape', inner_cache)
    out, inner_cache = max_pool_forward_im2col(x, pool_param, mode=mode)
    return out, ('im2col', inner_cache)
def max_pool_backward_fast(dout, cache):
    """
    A fast implementation of the backward pass for a max pooling layer.

    Dispatches on the method tag stored in *cache* by
    max_pool_forward_fast; raises ValueError for an unknown tag.
    """
    method, inner = cache
    if method == 'reshape':
        return max_pool_backward_reshape(dout, inner)
    if method == 'im2col':
        return max_pool_backward_im2col(dout, inner)
    raise ValueError('Unrecognized method "%s"' % method)
def max_pool_forward_reshape(x, pool_param, mode='train'):
    """
    A fast implementation of the forward pass for the max pooling layer
    that uses some clever reshaping.

    This can only be used for square pooling regions that tile the input
    (pool_height == pool_width == stride, dims divide exactly).

    Returns (out, cache); cache is () unless mode == 'train'.

    Fix: floor division keeps the reshape dims ints under Python 3; the
    second tiling assert now checks W against pool_width (equivalent under
    the squareness assert, but consistent).
    """
    N, C, H, W = x.shape
    pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
    stride = pool_param['stride']
    assert pool_height == pool_width == stride, 'Invalid pool params'
    assert H % pool_height == 0
    assert W % pool_width == 0
    x_reshaped = x.reshape(N, C, H // pool_height, pool_height,
                           W // pool_width, pool_width)
    # Max over the two window axes leaves shape (N, C, H//p, W//p).
    out = x_reshaped.max(axis=3).max(axis=4)
    if 'train' == mode:
        cache = (x, x_reshaped, out)
    else:
        cache = ()
    return out, cache
def max_pool_backward_reshape(dout, cache):
    """
    A fast implementation of the backward pass for the max pooling layer that
    uses some clever broadcasting and reshaping.

    This can only be used if the forward pass was computed using
    max_pool_forward_reshape (with mode='train', so the cache is populated).

    NOTE: If a pooling region contains several equal maxima, the mask below
    selects ALL of them. The division by np.sum(mask, ...) then splits the
    upstream gradient equally among the ties, which yields a valid
    subgradient. Removing that division would be faster (ties are rare in
    practice) but would over-count the gradient whenever a region holds
    duplicated maxima.
    """
    x, x_reshaped, out = cache

    # Gradient with respect to the 6-D reshaped view of x.
    dx_reshaped = np.zeros_like(x_reshaped)
    # Broadcast the pooled output back against the 6-D view to locate, for
    # each pooling region, which positions attained the maximum.
    out_newaxis = out[:, :, :, np.newaxis, :, np.newaxis]
    mask = (x_reshaped == out_newaxis)
    dout_newaxis = dout[:, :, :, np.newaxis, :, np.newaxis]
    dout_broadcast, _ = np.broadcast_arrays(dout_newaxis, dx_reshaped)
    # Route each upstream gradient to every argmax position of its region...
    dx_reshaped[mask] = dout_broadcast[mask]
    # ...then split it equally among ties so the total gradient is preserved.
    dx_reshaped /= np.sum(mask, axis=(3, 5), keepdims=True)
    dx = dx_reshaped.reshape(x.shape)
    return dx
def max_pool_forward_im2col(x, pool_param, mode='train'):
    """
    An implementation of the forward pass for max pooling based on im2col.

    This isn't much faster than the naive version, so it should be avoided if
    possible.

    Inputs:
    - x: Input data of shape (N, C, H, W).
    - pool_param: dict with 'pool_height', 'pool_width' and 'stride'; the
      window must tile the input exactly for the given stride.
    - mode: 'train' keeps the intermediates for the backward pass in the cache.

    Returns a tuple (out, cache).
    """
    N, C, H, W = x.shape
    pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
    stride = pool_param['stride']

    assert (H - pool_height) % stride == 0, 'Invalid height'
    assert (W - pool_width) % stride == 0, 'Invalid width'

    # Floor division keeps the output dimensions integral: under Python 3 the
    # old '/' produced floats and made the reshape below raise; '//' is
    # identical on Python 2 int operands.
    out_height = (H - pool_height) // stride + 1
    out_width = (W - pool_width) // stride + 1

    # Treat every channel as an independent single-channel image so that each
    # im2col column holds exactly one pooling window.
    x_split = x.reshape(N * C, 1, H, W)
    x_cols = im2col(x_split, pool_height, pool_width, padding=0, stride=stride)
    x_cols_argmax = np.argmax(x_cols, axis=0)
    x_cols_max = x_cols[x_cols_argmax, np.arange(x_cols.shape[1])]
    out = x_cols_max.reshape(out_height, out_width, N, C).transpose(2, 3, 0, 1)

    if mode == 'train':
        cache = (x, x_cols, x_cols_argmax, pool_param)
    else:
        cache = ()
    return out, cache
def max_pool_backward_im2col(dout, cache):
    """
    An implementation of the backward pass for max pooling based on im2col.

    This isn't much faster than the naive version, so it should be avoided if
    possible.
    """
    x, x_cols, x_cols_argmax, pool_param = cache
    N, C, H, W = x.shape
    pool_height = pool_param['pool_height']
    pool_width = pool_param['pool_width']
    stride = pool_param['stride']

    # Flatten the upstream gradient into the same column ordering that the
    # forward pass used for the pooled output.
    upstream = dout.transpose(2, 3, 0, 1).flatten()
    dx_cols = np.zeros_like(x_cols)
    col_index = np.arange(dx_cols.shape[1])
    # Scatter each gradient entry onto the argmax row of its column.
    dx_cols[x_cols_argmax, col_index] = upstream
    dx = col2im_indices(dx_cols, (N * C, 1, H, W), pool_height, pool_width,
                        padding=0, stride=stride)
    return dx.reshape(x.shape)
| |
# Copyright 2000-2004 Michael Hudson-Doyle <micahel@gmail.com>
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# the pygame console is currently thoroughly broken.
# there's a fundamental difference from the UnixConsole: here we're
# the terminal emulator too, in effect. This means, e.g., for pythoni
# we really need a separate process (or thread) to monitor for ^C
# during command execution and zap the executor process. Making this
# work on non-Unix is expected to be even more entertaining.
from pygame.locals import *
from pyrepl.console import Console, Event
from pyrepl import pygame_keymap
import pygame
import types
# Pixel widths of the blank border painted around the 800x600 text area.
lmargin = 5
rmargin = 5
tmargin = 5
bmargin = 5
# Ancient-Python compatibility shim: provide bool() when the builtin is
# missing (pre-2.2 interpreters).
try:
    bool
except NameError:
    def bool(x):
        # Coerce any value to 0/1 truthiness, mirroring the builtin.
        return not not x
# Keys that are pure modifiers; presses of these never complete a key
# sequence and are skipped when translating pygame events.
modcolors = {K_LCTRL:1,
             K_RCTRL:1,
             K_LMETA:1,
             K_RMETA:1,
             K_LALT:1,
             K_RALT:1,
             K_LSHIFT:1,
             K_RSHIFT:1}
class colors:
    """Color palette for the pygame console, as RGB tuples."""
    fg = (250, 240, 230)       # default text color
    bg = (5, 5, 5)             # screen background
    cursor = (230, 0, 230)     # cursor block
    margin = (5, 5, 15)        # border strips around the text area
class FakeStdout:
    """File-like stdout replacement that forwards all writes to a console."""

    def __init__(self, con):
        # The console object everything is delegated to.
        self.con = con

    def write(self, text):
        # Hand the text straight to the console's own write().
        self.con.write(text)

    def flush(self):
        # Nothing is buffered here, so flushing is a no-op.
        pass
class FakeStdin:
    """File-like stdin replacement that reads lines via a pyrepl Reader."""

    def __init__(self, con):
        # The console object a Reader will be built around on demand.
        self.con = con

    def read(self, n=None):
        # Raw block reads are not supported for an interactive console.
        raise NotImplementedError

    def readline(self, n=None):
        from reader import Reader
        try:
            # Caveat: this will clobber any prompt that has already been
            # printed; no obvious way around that here.
            line_reader = Reader(self.con)
            return line_reader.readline()
        except EOFError:
            return ''
class PyGameConsole(Console):
"""Attributes:
(keymap),
(fd),
screen,
height,
width,
"""
def __init__(self):
self.pygame_screen = pygame.display.set_mode((800, 600))
pygame.font.init()
pygame.key.set_repeat(500, 30)
self.font = pygame.font.Font(
"/usr/X11R6/lib/X11/fonts/TTF/luximr.ttf", 15)
self.fw, self.fh = self.fontsize = self.font.size("X")
self.cursor = pygame.Surface(self.fontsize)
self.cursor.fill(colors.cursor)
self.clear()
self.curs_vis = 1
self.height, self.width = self.getheightwidth()
pygame.display.update()
pygame.event.set_allowed(None)
pygame.event.set_allowed(KEYDOWN)
def install_keymap(self, keymap):
"""Install a given keymap.
keymap is a tuple of 2-element tuples; each small tuple is a
pair (keyspec, event-name). The format for keyspec is
modelled on that used by readline (so read that manual for
now!)."""
self.k = self.keymap = pygame_keymap.compile_keymap(keymap)
def char_rect(self, x, y):
return self.char_pos(x, y), self.fontsize
def char_pos(self, x, y):
return (lmargin + x*self.fw,
tmargin + y*self.fh + self.cur_top + self.scroll)
def paint_margin(self):
s = self.pygame_screen
c = colors.margin
s.fill(c, [0, 0, 800, tmargin])
s.fill(c, [0, 0, lmargin, 600])
s.fill(c, [0, 600 - bmargin, 800, bmargin])
s.fill(c, [800 - rmargin, 0, lmargin, 600])
def refresh(self, screen, cxy):
self.screen = screen
self.pygame_screen.fill(colors.bg,
[0, tmargin + self.cur_top + self.scroll,
800, 600])
self.paint_margin()
line_top = self.cur_top
width, height = self.fontsize
self.cxy = cxy
cp = self.char_pos(*cxy)
if cp[1] < tmargin:
self.scroll = - (cy*self.fh + self.cur_top)
self.repaint()
elif cp[1] + self.fh > 600 - bmargin:
self.scroll += (600 - bmargin) - (cp[1] + self.fh)
self.repaint()
if self.curs_vis:
self.pygame_screen.blit(self.cursor, self.char_pos(*cxy))
for line in screen:
if 0 <= line_top + self.scroll <= (600 - bmargin - tmargin - self.fh):
if line:
ren = self.font.render(line, 1, colors.fg)
self.pygame_screen.blit(ren, (lmargin,
tmargin + line_top + self.scroll))
line_top += self.fh
pygame.display.update()
def prepare(self):
self.cmd_buf = ''
self.k = self.keymap
self.height, self.width = self.getheightwidth()
self.curs_vis = 1
self.cur_top = self.pos[0]
self.event_queue = []
def restore(self):
pass
def blit_a_char(self, linen, charn):
line = self.screen[linen]
if charn < len(line):
text = self.font.render(line[charn], 1, colors.fg)
self.pygame_screen.blit(text, self.char_pos(charn, linen))
def move_cursor(self, x, y):
cp = self.char_pos(x, y)
if cp[1] < tmargin or cp[1] + self.fh > 600 - bmargin:
self.event_queue.append(Event('refresh', '', ''))
else:
if self.curs_vis:
cx, cy = self.cxy
self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy))
self.blit_a_char(cy, cx)
self.pygame_screen.blit(self.cursor, cp)
self.blit_a_char(y, x)
pygame.display.update()
self.cxy = (x, y)
def set_cursor_vis(self, vis):
self.curs_vis = vis
if vis:
self.move_cursor(*self.cxy)
else:
cx, cy = self.cxy
self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy))
self.blit_a_char(cy, cx)
pygame.display.update()
def getheightwidth(self):
"""Return (height, width) where height and width are the height
and width of the terminal window in characters."""
return ((600 - tmargin - bmargin)/self.fh,
(800 - lmargin - rmargin)/self.fw)
def tr_event(self, pyg_event):
shift = bool(pyg_event.mod & KMOD_SHIFT)
ctrl = bool(pyg_event.mod & KMOD_CTRL)
meta = bool(pyg_event.mod & (KMOD_ALT|KMOD_META))
try:
return self.k[(pyg_event.unicode, meta, ctrl)], pyg_event.unicode
except KeyError:
try:
return self.k[(pyg_event.key, meta, ctrl)], pyg_event.unicode
except KeyError:
return "invalid-key", pyg_event.unicode
def get_event(self, block=1):
"""Return an Event instance. Returns None if |block| is false
and there is no event pending, otherwise waits for the
completion of an event."""
while 1:
if self.event_queue:
return self.event_queue.pop(0)
elif block:
pyg_event = pygame.event.wait()
else:
pyg_event = pygame.event.poll()
if pyg_event.type == NOEVENT:
return
if pyg_event.key in modcolors:
continue
k, c = self.tr_event(pyg_event)
self.cmd_buf += c.encode('ascii', 'replace')
self.k = k
if not isinstance(k, types.DictType):
e = Event(k, self.cmd_buf, [])
self.k = self.keymap
self.cmd_buf = ''
return e
def beep(self):
# uhh, can't be bothered now.
# pygame.sound.something, I guess.
pass
def clear(self):
"""Wipe the screen"""
self.pygame_screen.fill(colors.bg)
#self.screen = []
self.pos = [0, 0]
self.grobs = []
self.cur_top = 0
self.scroll = 0
def finish(self):
"""Move the cursor to the end of the display and otherwise get
ready for end. XXX could be merged with restore? Hmm."""
if self.curs_vis:
cx, cy = self.cxy
self.pygame_screen.fill(colors.bg, self.char_rect(cx, cy))
self.blit_a_char(cy, cx)
for line in self.screen:
self.write_line(line, 1)
if self.curs_vis:
self.pygame_screen.blit(self.cursor,
(lmargin + self.pos[1],
tmargin + self.pos[0] + self.scroll))
pygame.display.update()
def flushoutput(self):
"""Flush all output to the screen (assuming there's some
buffering going on somewhere)"""
# no buffering here, ma'am (though perhaps there should be!)
pass
def forgetinput(self):
"""Forget all pending, but not yet processed input."""
while pygame.event.poll().type <> NOEVENT:
pass
def getpending(self):
"""Return the characters that have been typed but not yet
processed."""
events = []
while 1:
event = pygame.event.poll()
if event.type == NOEVENT:
break
events.append(event)
return events
def wait(self):
"""Wait for an event."""
raise Exception, "erp!"
def repaint(self):
# perhaps we should consolidate grobs?
self.pygame_screen.fill(colors.bg)
self.paint_margin()
for (y, x), surf, text in self.grobs:
if surf and 0 < y + self.scroll:
self.pygame_screen.blit(surf, (lmargin + x,
tmargin + y + self.scroll))
pygame.display.update()
def write_line(self, line, ret):
charsleft = (self.width*self.fw - self.pos[1])/self.fw
while len(line) > charsleft:
self.write_line(line[:charsleft], 1)
line = line[charsleft:]
if line:
ren = self.font.render(line, 1, colors.fg, colors.bg)
self.grobs.append((self.pos[:], ren, line))
self.pygame_screen.blit(ren,
(lmargin + self.pos[1],
tmargin + self.pos[0] + self.scroll))
else:
self.grobs.append((self.pos[:], None, line))
if ret:
self.pos[0] += self.fh
if tmargin + self.pos[0] + self.scroll + self.fh > 600 - bmargin:
self.scroll = 600 - bmargin - self.pos[0] - self.fh - tmargin
self.repaint()
self.pos[1] = 0
else:
self.pos[1] += self.fw*len(line)
def write(self, text):
lines = text.split("\n")
if self.curs_vis:
self.pygame_screen.fill(colors.bg,
(lmargin + self.pos[1],
tmargin + self.pos[0] + self.scroll,
self.fw, self.fh))
for line in lines[:-1]:
self.write_line(line, 1)
self.write_line(lines[-1], 0)
if self.curs_vis:
self.pygame_screen.blit(self.cursor,
(lmargin + self.pos[1],
tmargin + self.pos[0] + self.scroll))
pygame.display.update()
def flush(self):
pass
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Provides support to auto-tuning networks using AutoTVM.
"""
import os.path
import logging
import time
from copy import deepcopy
from typing import Any, Optional, Dict, List, Union
from urllib.parse import urlparse
import tvm
from tvm import autotvm, auto_scheduler
from tvm.auto_scheduler.search_task import HardwareParams
from tvm.autotvm.tuner import GATuner
from tvm.autotvm.tuner import GridSearchTuner
from tvm.autotvm.tuner import RandomTuner
from tvm.autotvm.tuner import XGBTuner
from tvm.target import Target
from . import TVMCException, composite_target, frontends
from .main import register_parser
from .model import TVMCModel
from .target import target_from_cli, generate_target_args, reconstruct_target_args
from .shape_parser import parse_shape_string
from .transform import convert_graph_layout
# pylint: disable=invalid-name
# Module-wide logger shared by all tuning helpers in this file.
logger = logging.getLogger("TVMC")
@register_parser
def add_tune_parser(subparsers, _):
    """Include parser for 'tune' subcommand"""
    parser = subparsers.add_parser("tune", help="auto-tune a model")
    parser.set_defaults(func=drive_tune)
    # Arguments shared by both tuning backends (autotvm and autoscheduler).
    parser.add_argument(
        "--early-stopping",
        type=int,
        help="minimum number of trials before early stopping",
    )
    # There is some extra processing required to define the actual default value
    # for --min-repeat-ms. This is done in `tune_model`.
    parser.add_argument(
        "--min-repeat-ms",
        default=None,
        type=int,
        help="minimum time to run each trial, in milliseconds. "
        "Defaults to 0 on x86 and 1000 on all other targets",
    )
    parser.add_argument(
        "--model-format",
        choices=frontends.get_frontend_names(),
        help="specify input model format",
    )
    parser.add_argument(
        "--number",
        default=10,
        type=int,
        help="number of runs a single repeat is made of. "
        "The final number of tuning executions is: "
        "(1 + number * repeat)",
    )
    parser.add_argument(
        "-o",
        "--output",
        required=True,
        help="output file to store the tuning records for the tuning process",
    )
    parser.add_argument(
        "--parallel",
        default=4,
        type=int,
        help="the maximum number of parallel devices to use when tuning",
    )
    parser.add_argument(
        "--repeat",
        type=int,
        default=1,
        help="how many times to repeat each measurement",
    )
    parser.add_argument(
        "--rpc-key",
        help="the RPC tracker key of the target device. "
        "Required when --rpc-tracker is provided.",
    )
    parser.add_argument(
        "--rpc-tracker",
        help="hostname (required) and port (optional, defaults to 9090) of the RPC tracker, "
        "e.g. '192.168.0.100:9999'",
    )
    # Registers the --target family of arguments.
    generate_target_args(parser)
    parser.add_argument(
        "--target-host",
        help="the host compilation target, defaults to 'llvm'",
        default="llvm",
    )
    parser.add_argument("--timeout", type=int, default=10, help="compilation timeout, in seconds")
    parser.add_argument(
        "--trials",
        type=int,
        default=1000,
        help="the maximum number of tuning trials to perform",
    )
    parser.add_argument(
        "--tuning-records",
        metavar="PATH",
        help="path to an auto-tuning log file by AutoTVM.",
    )
    parser.add_argument(
        "--desired-layout",
        choices=["NCHW", "NHWC"],
        default=None,
        help="change the data layout of the whole graph",
    )
    parser.add_argument(
        "--enable-autoscheduler",
        help="enable tuning the graph through the autoscheduler",
        action="store_true",
    )
    # Options that only apply when --enable-autoscheduler is passed.
    auto_scheduler_group = parser.add_argument_group(
        "Autoscheduler options",
        "Autoscheduler options, used when --enable-autoscheduler is provided",
    )
    auto_scheduler_group.add_argument(
        "--cache-line-bytes",
        type=int,
        help="the size of cache line in bytes. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--num-cores",
        type=int,
        help="the number of device cores. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--vector-unit-bytes",
        type=int,
        help="the width of vector units in bytes. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--max-shared-memory-per-block",
        type=int,
        help="the max shared memory per block in bytes. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--max-local-memory-per-block",
        type=int,
        help="the max local memory per block in bytes. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--max-threads-per-block",
        type=int,
        help="the max number of threads per block. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--max-vthread-extent",
        type=int,
        help="the max vthread extent. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--warp-size",
        type=int,
        help="the thread numbers of a warp. "
        "If not specified, it will be autoset for the current machine.",
    )
    auto_scheduler_group.add_argument(
        "--include-simple-tasks",
        help="whether to extract simple tasks that do not include complicated ops",
        action="store_true",
    )
    auto_scheduler_group.add_argument(
        "--log-estimated-latency",
        help="whether to log the estimated latency to the file after tuning a task",
        action="store_true",
    )
    # Options that only apply when the autoscheduler is NOT enabled.
    autotvm_group = parser.add_argument_group(
        "autotvm options",
        "autotvm options, used when the autoscheduler is not enabled",
    )
    autotvm_group.add_argument(
        "--tuner",
        choices=["ga", "gridsearch", "random", "xgb", "xgb_knob", "xgb-rank"],
        default="xgb",
        help="type of tuner to use when tuning with autotvm.",
    )
    # TODO (@leandron) This is a path to a physical file, but
    # can be improved in future to add integration with a modelzoo
    # or URL, for example.
    parser.add_argument("FILE", help="path to the input model file")
    parser.add_argument(
        "--input-shapes",
        help="specify non-generic shapes for model to run, format is "
        '"input_name:[dim1,dim2,...,dimn] input_name2:[dim1,dim2]"',
        type=parse_shape_string,
    )
def drive_tune(args):
    """Invoke auto-tuning with command line arguments

    Parameters
    ----------
    args: argparse.Namespace
        Arguments from command line parser.
    """
    tvmc_model = frontends.load_model(args.FILE, args.model_format, shape_dict=args.input_shapes)

    # Specify hardware parameters, although they'll only be used if autoscheduling.
    hardware_params = auto_scheduler.HardwareParams(
        num_cores=args.num_cores,
        vector_unit_bytes=args.vector_unit_bytes,
        cache_line_bytes=args.cache_line_bytes,
        max_shared_memory_per_block=args.max_shared_memory_per_block,
        max_local_memory_per_block=args.max_local_memory_per_block,
        max_threads_per_block=args.max_threads_per_block,
        max_vthread_extent=args.max_vthread_extent,
        warp_size=args.warp_size,
        target=args.target,
        target_host=args.target_host,
    )

    # Local tuning unless an RPC tracker was given on the command line.
    tracker_host = None
    tracker_port = None
    if args.rpc_tracker:
        parsed_url = urlparse("//%s" % args.rpc_tracker)
        tracker_host = parsed_url.hostname
        tracker_port = parsed_url.port or 9090
        logger.info("RPC tracker hostname: %s", tracker_host)
        logger.info("RPC tracker port: %s", tracker_port)

        if not args.rpc_key:
            raise TVMCException("need to provide an RPC tracker key (--rpc-key) for remote tuning")

    tune_model(
        tvmc_model,
        args.target,
        tuning_records=args.output,
        prior_records=args.tuning_records,
        enable_autoscheduler=args.enable_autoscheduler,
        rpc_key=args.rpc_key,
        hostname=tracker_host,
        port=tracker_port,
        trials=args.trials,
        target_host=args.target_host,
        tuner=args.tuner,
        min_repeat_ms=args.min_repeat_ms,
        early_stopping=args.early_stopping,
        desired_layout=args.desired_layout,
        timeout=args.timeout,
        repeat=args.repeat,
        number=args.number,
        parallel=args.parallel,
        hardware_params=hardware_params,
        include_simple_tasks=args.include_simple_tasks,
        log_estimated_latency=args.log_estimated_latency,
        additional_target_options=reconstruct_target_args(args),
    )
def tune_model(
    tvmc_model: TVMCModel,
    target: str,
    tuning_records: Optional[str] = None,
    prior_records: Optional[str] = None,
    enable_autoscheduler: bool = False,
    rpc_key: Optional[str] = None,
    hostname: Optional[str] = None,
    port: Optional[Union[int, str]] = 9090,
    trials: int = 10000,
    target_host: Optional[str] = None,
    tuner: str = "xgb",
    min_repeat_ms: Optional[int] = None,
    early_stopping: Optional[int] = None,
    desired_layout: Optional[str] = None,
    timeout: int = 10,
    repeat: int = 1,
    number: int = 10,
    parallel: int = 4,
    hardware_params: Optional[HardwareParams] = None,
    include_simple_tasks: bool = False,
    log_estimated_latency: bool = False,
    additional_target_options: Optional[Dict[str, Dict[str, Any]]] = None,
):
    """Use tuning to automatically optimize the functions in a model.

    Parameters
    ----------
    tvmc_model : TVMCModel
        The model to be optimized.
    target : str
        Compilation target as plain string, inline JSON or path to a JSON file.
    tuning_records: str, optional
        The path to a file that tuning results will be saved to. If not specified,
        a temporary file will be used.
    prior_records: str, optional
        A path to previous tuning results that will be used to hot-start the tuning
        cost model if provided.
    enable_autoscheduler : bool, optional
        When true, use autoscheduling rather than autotvm. This should produce
        faster kernels for compatible model-target pairs.
    rpc_key : str, optional
        The RPC tracker key of the target device. Required when rpc_tracker is provided.
    hostname : str, optional
        The IP address of an RPC tracker, used when benchmarking remotely.
    port : int or str, optional
        The port of the RPC tracker to connect to. Defaults to 9090.
    trials : int, optional
        The number of schedules to try out for the entire model. Note that the default
        value is chosen as a decent average for most models, but larger models may need
        more trials to reach a good result while smaller models will converge with fewer
        trials.
    tuner : str, optional
        The type of tuner to use when tuning with autotvm. Can be one of
        "ga", "gridsearch", "random", "xgb", "xgb_knob", and "xgb-rank".
    min_repeat_ms : int, optional
        Minimum time to run each trial. Defaults to 0 on x86 and 1000 on other targets.
    early_stopping : int, optional
        When specified, stop tuning after this number of trials if results aren't improving.
    desired_layout : str, optional
        Can be one of "NCHW" or "NHWC". When specified, compatible operations in the graph
        will have their layout set to this format. Tasks will then be tuned using this
        specified layout.
    timeout : int, optional,
        If a kernel trial lasts longer than this duration in seconds, it will be
        considered a failure.
    repeat : int, optional
        How many times each measurement should be repeated.
    number : int, optional
        The number of runs a single repeat is made of.
    parallel : int, optional
        The maximum number of parallel devices to use when tuning.
    hardware_params : auto_scheduler.HardwareParams, optional
        When using the autoscheduler, this object defines the configuration of the target hardware.
    include_simple_tasks : bool, optional
        Whether to extract simple operations or only computationally intensive ones when using
        the autoscheduler.
    log_estimated_latency : bool, optional
        If using the autoscheduler, write the estimated latency at each step of tuning to file.
    additional_target_options: Optional[Dict[str, Dict[str, Any]]]
        Additional target options in a dictionary to combine with initial Target arguments

    Returns
    -------
    tuning_records : str
        The path to the produced tuning log file.
    """
    target, extra_targets = target_from_cli(target, additional_target_options)
    target, target_host = Target.check_and_update_host_consist(target, target_host)
    # TODO(jwfromm) Remove this deepcopy once AlterOpLayout bug that mutates source
    # model is fixed. For now, creating a clone avoids the issue.
    mod = deepcopy(tvmc_model.mod)
    params = tvmc_model.params
    if tuning_records is None:
        tuning_records = tvmc_model.default_tuning_records_path()

    # Partition the graph for any composite targets (e.g. BYOC codegens)
    # requested on the command line.
    for codegen_from_cli in extra_targets:
        codegen = composite_target.get_codegen_by_target(codegen_from_cli["name"])
        partition_function = codegen["pass_pipeline"]
        mod = partition_function(mod, params, **codegen_from_cli["opts"])

    # min_repeat_ms should be:
    # a. the value provided by the user, if any, or
    # b. 0ms in case target is "cpu"; otherwise 1000ms
    if min_repeat_ms is None:
        min_repeat_ms = 0 if target.keys[0] == "cpu" else 1000
        logger.info("Default --min-repeat-ms for this target is %s", min_repeat_ms)

    if rpc_key:
        if hostname is None or port is None:
            raise TVMCException(
                "You must provide a hostname and port to connect to a remote RPC device."
            )
        if isinstance(port, str):
            port = int(port)

        logger.info("Tuning will be performed on device %s at %s:%d.", rpc_key, hostname, port)

        runner_ctor = auto_scheduler.RPCRunner if enable_autoscheduler else autotvm.RPCRunner
        runner = runner_ctor(
            key=rpc_key,
            host=hostname,
            port=port,
            number=number,
            repeat=repeat,
            n_parallel=parallel,
            timeout=timeout,
            min_repeat_ms=min_repeat_ms,
        )
    else:
        logger.info("Starting localhost tuning.")
        runner_ctor = (
            auto_scheduler.LocalRPCMeasureContext if enable_autoscheduler else autotvm.LocalRunner
        )
        local_server = runner_ctor(
            number=number,
            repeat=repeat,
            timeout=timeout,
            min_repeat_ms=min_repeat_ms,
        )

        # For autoscheduling on some devices, we need to maintain a LocalRPCMeasureContext object.
        if enable_autoscheduler:
            runner = local_server.runner
        else:
            runner = local_server

    if enable_autoscheduler:
        tasks, weights = autoscheduler_get_tuning_tasks(
            mod=mod,
            params=params,
            target=target,
            alter_layout=desired_layout,
            hardware_params=hardware_params,
            include_simple_tasks=include_simple_tasks,
        )

        # Create the autoscheduler tuning options
        tuning_options = auto_scheduler.TuningOptions(
            num_measure_trials=trials,
            measure_callbacks=[auto_scheduler.RecordToFile(tuning_records)],
            runner=runner,
            early_stopping=early_stopping,
        )
        logger.info("Autoscheduling with configuration: %s", tuning_options)

        # Schedule the tasks (i.e., produce a schedule for each task)
        schedule_tasks(tasks, weights, tuning_options, prior_records, log_estimated_latency)
    else:
        tasks = autotvm_get_tuning_tasks(
            mod=mod,
            params=params,
            target=target,
            alter_layout=desired_layout,
        )

        # In autotvm, trials is specified per task. We can convert the per-model input
        # provided to per-task trials by dividing by the number of tasks.
        # FIX: guard against an empty task list (a model with no tunable ops),
        # which previously raised ZeroDivisionError here before tune_tasks
        # could emit its "no tasks" warning.
        if tasks:
            trials = int(trials / len(tasks))
        logger.info("Autotuning with %d trials per task.", trials)

        tuning_options = {
            "tuner": tuner,
            "trials": trials,
            "early_stopping": early_stopping,
            "measure_option": autotvm.measure_option(
                builder=autotvm.LocalBuilder(build_func="default"), runner=runner
            ),
            "tuning_records": prior_records,
        }
        logger.info("Autotuning with configuration: %s", tuning_options)

        tune_tasks(tasks, tuning_records, **tuning_options)

    return tuning_records
def autotvm_get_tuning_tasks(
    mod: tvm.IRModule,
    params: Dict[str, tvm.nd.NDArray],
    target: str,
    target_host: Optional[str] = None,
    alter_layout: Optional[str] = None,
):
    """Extract the autotvm tuning tasks from a relay module.

    Parameters
    ----------
    mod : tvm.IRModule
        The relay module to extract tuning tasks from.
    params : dict
        The params for the relay module.
    target : tvm.target.Target
        The compilation target.
    target_host : str, optional
        The compilation target for the host.
    alter_layout : str, optional
        The layout to convert the graph to. Note, the convert layout
        pass doesn't currently guarantee the whole of the graph will
        be converted to the chosen layout.

    Returns
    -------
    tasks : list of autotvm.Tasks
        list of tasks to be tuned
    """
    # Fold the host target into the device target for consistency.
    target, target_host = Target.check_and_update_host_consist(target, target_host)

    # Optionally rewrite the graph layout before task extraction.
    if alter_layout:
        mod = convert_graph_layout(mod, alter_layout)

    return autotvm.task.extract_from_program(
        mod["main"],
        target=target,
        params=params,
    )
def autoscheduler_get_tuning_tasks(
    mod: tvm.IRModule,
    params: Dict[str, tvm.nd.NDArray],
    target: str,
    target_host: Optional[str] = None,
    alter_layout: Optional[str] = None,
    hardware_params: Optional[HardwareParams] = None,
    include_simple_tasks: bool = False,
):
    """Extract the autoscheduler search tasks from a relay module.

    Parameters
    ----------
    mod : tvm.IRModule
        The relay module to extract tuning tasks from.
    params : dict
        The params for the relay module.
    target : tvm.target.Target
        The compilation target.
    target_host : str, optional
        The compilation target for the host.
    alter_layout : str, optional
        The layout to convert the graph to. Note, the convert layout
        pass doesn't currently guarantee the whole of the graph will
        be converted to the chosen layout.
    hardware_params : Optional[HardwareParams]
        Hardware parameters used for the search tasks
    include_simple_tasks : bool, optional
        Whether to also extract simple tasks without complicated ops.

    Returns
    -------
    tasks : list of autotvm.Tasks
        list of tasks to be tuned
    weights : List[int]
        the weight (i.e. the number of appearance) of extracted tasks
    """
    # Fold the host target into the device target for consistency.
    target, target_host = Target.check_and_update_host_consist(target, target_host)

    # Optionally rewrite the graph layout before task extraction.
    if alter_layout:
        mod = convert_graph_layout(mod, alter_layout)

    # extract_tasks returns the (tasks, weights) pair directly.
    return auto_scheduler.extract_tasks(
        mod["main"],
        params,
        target=target,
        hardware_params=hardware_params,
        include_simple_tasks=include_simple_tasks,
    )
def schedule_tasks(
    tasks: List[auto_scheduler.SearchTask],
    task_weights: List[float],
    tuning_options: auto_scheduler.TuningOptions,
    prior_records: Optional[str] = None,
    log_estimated_latency: bool = False,
):
    """Generate the schedules for the different tasks (i.e., subgraphs) contained in the module.
    Store the schedules in a json file that will be used later by the compiler.

    Parameters
    ----------
    tasks : list
        A list of auto_scheduler.SearchTask to tune.
    task_weights : list
        The weight (i.e. the number of appearance) of extracted tasks
    tuning_options: auto_scheduler.TuningOptions
        The options of tuning
    prior_records : str, optional
        The json file used to preload the autoscheduler
    log_estimated_latency : bool, optional
        If true, writes the estimated runtime of the model during each step of tuning to file.
    """
    # Progress is always printed; optionally also log the estimated latency.
    callbacks = [auto_scheduler.task_scheduler.PrintTableInfo()]
    if log_estimated_latency:
        callbacks.append(
            auto_scheduler.task_scheduler.LogEstimatedLatency("total_latency.tsv")
        )

    # Create the scheduler and tune all tasks, hot-starting from any prior log.
    scheduler = auto_scheduler.TaskScheduler(
        tasks, task_weights, load_log_file=prior_records, callbacks=callbacks
    )
    scheduler.tune(tuning_options)
def tune_tasks(
    tasks: List[autotvm.task.Task],
    log_file: str,
    measure_option: autotvm.measure_option,
    tuner: str,
    trials: int,
    early_stopping: Optional[int] = None,
    tuning_records: Optional[str] = None,
):
    """Tune a list of tasks and output the history to a log file.

    Parameters
    ----------
    tasks : list
        A list of autotvm.Tasks to tune.
    log_file : str
        A file to output the tuning history, in JSON.
    measure_option : autotvm.measure_option
        Options to build and run a tuning task.
    tuner : str
        Which tuner to use.
    trials : int
        The maximum number of tuning trials to perform.
    early_stopping : int, optional
        The minimum number of tuning trials to perform.
        This will be equal to 'trials' if not specified.
    tuning_records: str, optional
        Path to the file produced by the tuning, to be used during
        tuning.

    Raises
    ------
    TVMCException
        If an unknown tuner name is given.
    """
    if not tasks:
        logger.warning("there were no tasks found to be tuned")
        return

    if not early_stopping:
        early_stopping = trials

    for i, tsk in enumerate(tasks):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))

        # Create a tuner
        if tuner in ("xgb", "xgb-rank"):
            tuner_obj = XGBTuner(tsk, loss_type="rank")
        elif tuner == "xgb_knob":
            tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob")
        elif tuner == "ga":
            tuner_obj = GATuner(tsk, pop_size=50)
        elif tuner == "random":
            tuner_obj = RandomTuner(tsk)
        elif tuner == "gridsearch":
            tuner_obj = GridSearchTuner(tsk)
        else:
            raise TVMCException("invalid tuner: %s " % tuner)

        # If transfer learning is being used, load the existing results
        if tuning_records and os.path.exists(tuning_records):
            logger.info("loading tuning records from %s", tuning_records)
            start_time = time.time()
            tuner_obj.load_history(autotvm.record.load_from_file(tuning_records))
            # FIX: use the module-level `logger` (was the root `logging`
            # module), consistent with every other message in this file and
            # so it respects the TVMC logger configuration.
            logger.info("loaded history in %.2f sec(s)", time.time() - start_time)

        tuner_obj.tune(
            n_trial=min(trials, len(tsk.config_space)),
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(trials, prefix=prefix),
                autotvm.callback.log_to_file(log_file),
            ],
        )
| |
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Deuscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test addressindex generation and fetching
#
import time
from test_framework.test_framework import DeuscoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
import binascii
class AddressIndexTest(DeuscoinTestFramework):
    """Functional test for the -addressindex feature.

    Exercises the address-index RPCs (getaddresstxids, getaddressbalance,
    getaddressdeltas, getaddressutxos, getaddressmempool) across confirmed
    blocks, mempool transactions, spends, and a chain reorg.

    NOTE(review): written for Python 2 (print statements) per the shebang.
    """
    def setup_chain(self):
        """Create a fresh 4-node chain directory with no pre-mined blocks."""
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)
    def setup_network(self):
        """Start 4 nodes (1-3 with -addressindex) and connect them in a star around node 0."""
        self.nodes = []
        # Nodes 0/1 are "wallet" nodes
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-relaypriority=0"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-addressindex"]))
        # Nodes 2/3 are used for testing
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-addressindex", "-relaypriority=0"]))
        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-addressindex", "-relaypriority=0"]))
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)
        self.is_network_split = False
        self.sync_all()
    def run_test(self):
        """Run the full address-index scenario; ordering of steps is significant."""
        # Mine past coinbase maturity so node 0 has spendable funds.
        print "Mining blocks..."
        self.nodes[0].generate(105)
        self.sync_all()
        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)
        # Check that balances are correct
        balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(balance0["balance"], 0)
        # Check p2pkh and p2sh address indexes
        print "Testing p2pkh and p2sh address index..."
        # Interleave sends to a p2pkh and a p2sh address, one block each,
        # so txids land in a known, height-ordered sequence.
        txid0 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 10)
        self.nodes[0].generate(1)
        txidb0 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 10)
        self.nodes[0].generate(1)
        txid1 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 15)
        self.nodes[0].generate(1)
        txidb1 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 15)
        self.nodes[0].generate(1)
        txid2 = self.nodes[0].sendtoaddress("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs", 20)
        self.nodes[0].generate(1)
        txidb2 = self.nodes[0].sendtoaddress("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", 20)
        self.nodes[0].generate(1)
        self.sync_all()
        # Txids are expected back in confirmation order.
        txids = self.nodes[1].getaddresstxids("mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs")
        assert_equal(len(txids), 3)
        assert_equal(txids[0], txid0)
        assert_equal(txids[1], txid1)
        assert_equal(txids[2], txid2)
        txidsb = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(len(txidsb), 3)
        assert_equal(txidsb[0], txidb0)
        assert_equal(txidsb[1], txidb1)
        assert_equal(txidsb[2], txidb2)
        # Check that limiting by height works
        print "Testing querying txids by range of block heights.."
        height_txids = self.nodes[1].getaddresstxids({
            "addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br"],
            "start": 105,
            "end": 110
        })
        assert_equal(len(height_txids), 2)
        assert_equal(height_txids[0], txidb0)
        assert_equal(height_txids[1], txidb1)
        # Check that multiple addresses works
        multitxids = self.nodes[1].getaddresstxids({"addresses": ["2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br", "mo9ncXisMeAoXwqcV5EWuyncbmCcQN4rVs"]})
        assert_equal(len(multitxids), 6)
        assert_equal(multitxids[0], txid0)
        assert_equal(multitxids[1], txidb0)
        assert_equal(multitxids[2], txid1)
        assert_equal(multitxids[3], txidb1)
        assert_equal(multitxids[4], txid2)
        assert_equal(multitxids[5], txidb2)
        # Check that balances are correct
        # 10 + 15 + 20 coins received, expressed in satoshis.
        balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(balance0["balance"], 45 * 100000000)
        # Check that outputs with the same address will only return one txid
        print "Testing for txid uniqueness..."
        # Build a raw tx with two outputs paying the same p2sh script.
        addressHash = "6349a418fc4578d10a372b54b45c280cc8c4382f".decode("hex")
        scriptPubKey = CScript([OP_HASH160, addressHash, OP_EQUAL])
        unspent = self.nodes[0].listunspent()
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        tx.vout = [CTxOut(10, scriptPubKey), CTxOut(11, scriptPubKey)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        sent_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        # Only one new txid despite two outputs to the same address.
        txidsmany = self.nodes[1].getaddresstxids("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(len(txidsmany), 4)
        assert_equal(txidsmany[3], sent_txid)
        # Check that balances are correct
        print "Testing balances..."
        # Previous 45 coins plus the 10 + 11 satoshi outputs above.
        balance0 = self.nodes[1].getaddressbalance("2N2JD6wb56AfK4tfmM6PwdVmoYk2dCKf4Br")
        assert_equal(balance0["balance"], 45 * 100000000 + 21)
        # Check that balances are correct after spending
        print "Testing balances after spending..."
        privkey2 = "cSdkPxkAjA4HDr5VHgsebAPDEh9Gyub4HK8UJr2DFGGqKKy4K5sG"
        address2 = "mgY65WSfEmsyYaYPQaXhmXMeBhwp4EcsQW"
        addressHash2 = "0b2f0a0c31bfe0406b0ccc1381fdbe311946dadc".decode("hex")
        scriptPubKey2 = CScript([OP_DUP, OP_HASH160, addressHash2, OP_EQUALVERIFY, OP_CHECKSIG])
        self.nodes[0].importprivkey(privkey2)
        unspent = self.nodes[0].listunspent()
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        # Amounts from listunspent are in coins; convert to satoshis.
        amount = unspent[0]["amount"] * 100000000
        tx.vout = [CTxOut(amount, scriptPubKey2)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        spending_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        balance1 = self.nodes[1].getaddressbalance(address2)
        assert_equal(balance1["balance"], amount)
        # Spend from address2 back: change to itself plus a payment out,
        # leaving a 10000-satoshi fee.
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(spending_txid, 16), 0))]
        send_amount = 1 * 100000000 + 12840
        change_amount = amount - send_amount - 10000
        tx.vout = [CTxOut(change_amount, scriptPubKey2), CTxOut(send_amount, scriptPubKey)]
        tx.rehash()
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        sent_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.nodes[0].generate(1)
        self.sync_all()
        balance2 = self.nodes[1].getaddressbalance(address2)
        assert_equal(balance2["balance"], change_amount)
        # Check that deltas are returned correctly
        # Deltas over the full range must sum to the current balance.
        deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 1, "end": 200})
        balance3 = 0
        for delta in deltas:
            balance3 += delta["satoshis"]
        assert_equal(balance3, change_amount)
        assert_equal(deltas[0]["address"], address2)
        assert_equal(deltas[0]["blockindex"], 1)
        # Check that entire range will be queried
        deltasAll = self.nodes[1].getaddressdeltas({"addresses": [address2]})
        assert_equal(len(deltasAll), len(deltas))
        # Check that deltas can be returned from range of block heights
        deltas = self.nodes[1].getaddressdeltas({"addresses": [address2], "start": 113, "end": 113})
        assert_equal(len(deltas), 1)
        # Check that unspent outputs can be queried
        print "Testing utxos..."
        utxos = self.nodes[1].getaddressutxos({"addresses": [address2]})
        assert_equal(len(utxos), 1)
        assert_equal(utxos[0]["satoshis"], change_amount)
        # Check that indexes will be updated with a reorg
        print "Testing reorg..."
        # Invalidate the tip on every node so the index must roll back
        # the last spend, restoring the pre-spend balance/utxo.
        best_hash = self.nodes[0].getbestblockhash()
        self.nodes[0].invalidateblock(best_hash)
        self.nodes[1].invalidateblock(best_hash)
        self.nodes[2].invalidateblock(best_hash)
        self.nodes[3].invalidateblock(best_hash)
        self.sync_all()
        balance4 = self.nodes[1].getaddressbalance(address2)
        assert_equal(balance4, balance1)
        utxos2 = self.nodes[1].getaddressutxos({"addresses": [address2]})
        assert_equal(len(utxos2), 1)
        assert_equal(utxos2[0]["satoshis"], amount)
        # Check sorting of utxos
        # Mine on node 2 past maturity, then add two more utxos in later
        # blocks; results should come back ordered by height.
        self.nodes[2].generate(150)
        txidsort1 = self.nodes[2].sendtoaddress(address2, 50)
        self.nodes[2].generate(1)
        txidsort2 = self.nodes[2].sendtoaddress(address2, 50)
        self.nodes[2].generate(1)
        self.sync_all()
        utxos3 = self.nodes[1].getaddressutxos({"addresses": [address2]})
        assert_equal(len(utxos3), 3)
        assert_equal(utxos3[0]["height"], 114)
        assert_equal(utxos3[1]["height"], 264)
        assert_equal(utxos3[2]["height"], 265)
        # Check mempool indexing
        print "Testing mempool indexing..."
        privKey3 = "cVfUn53hAbRrDEuMexyfgDpZPhF7KqXpS8UZevsyTDaugB7HZ3CD"
        address3 = "mw4ynwhS7MmrQ27hr82kgqu7zryNDK26JB"
        addressHash3 = "aa9872b5bbcdb511d89e0e11aa27da73fd2c3f50".decode("hex")
        scriptPubKey3 = CScript([OP_DUP, OP_HASH160, addressHash3, OP_EQUALVERIFY, OP_CHECKSIG])
        address4 = "2N8oFVB2vThAKury4vnLquW2zVjsYjjAkYQ"
        scriptPubKey4 = CScript([OP_HASH160, addressHash3, OP_EQUAL])
        unspent = self.nodes[2].listunspent()
        # First unconfirmed tx: single output to address3.
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent[0]["txid"], 16), unspent[0]["vout"]))]
        amount = unspent[0]["amount"] * 100000000
        tx.vout = [CTxOut(amount, scriptPubKey3)]
        tx.rehash()
        signed_tx = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        memtxid1 = self.nodes[2].sendrawtransaction(signed_tx["hex"], True)
        # Sleep so the two mempool entries get distinct timestamps.
        time.sleep(2)
        # Second unconfirmed tx: splits an input across p2pkh/p2sh outputs.
        tx2 = CTransaction()
        tx2.vin = [CTxIn(COutPoint(int(unspent[1]["txid"], 16), unspent[1]["vout"]))]
        amount = unspent[1]["amount"] * 100000000
        tx2.vout = [
            CTxOut(amount / 4, scriptPubKey3),
            CTxOut(amount / 4, scriptPubKey3),
            CTxOut(amount / 4, scriptPubKey4),
            CTxOut(amount / 4, scriptPubKey4)
        ]
        tx2.rehash()
        signed_tx2 = self.nodes[2].signrawtransaction(binascii.hexlify(tx2.serialize()).decode("utf-8"))
        memtxid2 = self.nodes[2].sendrawtransaction(signed_tx2["hex"], True)
        time.sleep(2)
        # address3 should see memtxid1 once and memtxid2 twice (indexes 0/1).
        mempool = self.nodes[2].getaddressmempool({"addresses": [address3]})
        assert_equal(len(mempool), 3)
        assert_equal(mempool[0]["txid"], memtxid1)
        assert_equal(mempool[0]["address"], address3)
        assert_equal(mempool[0]["index"], 0)
        assert_equal(mempool[1]["txid"], memtxid2)
        assert_equal(mempool[1]["index"], 0)
        assert_equal(mempool[2]["txid"], memtxid2)
        assert_equal(mempool[2]["index"], 1)
        # Confirming the txs should empty the mempool index for address3.
        self.nodes[2].generate(1);
        self.sync_all();
        mempool2 = self.nodes[2].getaddressmempool({"addresses": [address3]})
        assert_equal(len(mempool2), 0)
        # Spend the two confirmed address3 outputs; the mempool index should
        # now report the spends with prevtxid/prevout set.
        tx = CTransaction()
        tx.vin = [
            CTxIn(COutPoint(int(memtxid2, 16), 0)),
            CTxIn(COutPoint(int(memtxid2, 16), 1))
        ]
        tx.vout = [CTxOut(amount / 2 - 10000, scriptPubKey2)]
        tx.rehash()
        self.nodes[2].importprivkey(privKey3)
        signed_tx3 = self.nodes[2].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        memtxid3 = self.nodes[2].sendrawtransaction(signed_tx3["hex"], True)
        time.sleep(2)
        mempool3 = self.nodes[2].getaddressmempool({"addresses": [address3]})
        assert_equal(len(mempool3), 2)
        assert_equal(mempool3[0]["prevtxid"], memtxid2)
        assert_equal(mempool3[0]["prevout"], 0)
        assert_equal(mempool3[1]["prevtxid"], memtxid2)
        assert_equal(mempool3[1]["prevout"], 1)
        # sending and receiving to the same address
        privkey1 = "cQY2s58LhzUCmEXN8jtAp1Etnijx78YRZ466w4ikX1V4UpTpbsf8"
        address1 = "myAUWSHnwsQrhuMWv4Br6QsCnpB41vFwHn"
        address1hash = "c192bff751af8efec15135d42bfeedf91a6f3e34".decode("hex")
        address1script = CScript([OP_DUP, OP_HASH160, address1hash, OP_EQUALVERIFY, OP_CHECKSIG])
        self.nodes[0].sendtoaddress(address1, 10)
        self.nodes[0].generate(1)
        self.sync_all()
        utxos = self.nodes[1].getaddressutxos({"addresses": [address1]})
        assert_equal(len(utxos), 1)
        # Self-spend: input and output are both address1, so the mempool
        # index should contain two deltas (spend + receive) for one tx.
        tx = CTransaction()
        tx.vin = [
            CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["outputIndex"]))
        ]
        amount = utxos[0]["satoshis"] - 1000
        tx.vout = [CTxOut(amount, address1script)]
        tx.rehash()
        self.nodes[0].importprivkey(privkey1)
        signed_tx = self.nodes[0].signrawtransaction(binascii.hexlify(tx.serialize()).decode("utf-8"))
        mem_txid = self.nodes[0].sendrawtransaction(signed_tx["hex"], True)
        self.sync_all()
        mempool_deltas = self.nodes[2].getaddressmempool({"addresses": [address1]})
        assert_equal(len(mempool_deltas), 2)
        # Include chaininfo in results
        print "Testing results with chain info..."
        deltas_with_info = self.nodes[1].getaddressdeltas({
            "addresses": [address2],
            "start": 1,
            "end": 200,
            "chainInfo": True
        })
        start_block_hash = self.nodes[1].getblockhash(1);
        end_block_hash = self.nodes[1].getblockhash(200);
        assert_equal(deltas_with_info["start"]["height"], 1)
        assert_equal(deltas_with_info["start"]["hash"], start_block_hash)
        assert_equal(deltas_with_info["end"]["height"], 200)
        assert_equal(deltas_with_info["end"]["hash"], end_block_hash)
        # With chainInfo, utxo results also carry the tip height/hash.
        utxos_with_info = self.nodes[1].getaddressutxos({"addresses": [address2], "chainInfo": True})
        expected_tip_block_hash = self.nodes[1].getblockhash(267);
        assert_equal(utxos_with_info["height"], 267)
        assert_equal(utxos_with_info["hash"], expected_tip_block_hash)
        print "Passed\n"
if __name__ == '__main__':
    # Script entry point: run the address-index functional test.
    AddressIndexTest().main()
| |
"""
Renderers are used to serialize a response into specific media types.
They give us a generic way of being able to handle various media types
on the response, such as JSON encoded data or HTML output.
REST framework also provides an HTML renderer that renders the browsable API.
"""
from __future__ import unicode_literals
import base64
import json
from collections import OrderedDict
from django import forms
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.paginator import Page
from django.http.multipartparser import parse_header
from django.template import Template, loader
from django.test.client import encode_multipart
from django.utils import six
from django.utils.html import mark_safe
from rest_framework import VERSION, exceptions, serializers, status
from rest_framework.compat import (
INDENT_SEPARATORS, LONG_SEPARATORS, SHORT_SEPARATORS, coreapi,
pygments_css, template_render
)
from rest_framework.exceptions import ParseError
from rest_framework.request import is_form_media_type, override_method
from rest_framework.settings import api_settings
from rest_framework.utils import encoders
from rest_framework.utils.breadcrumbs import get_breadcrumbs
from rest_framework.utils.field_mapping import ClassLookupDict
def zero_as_none(value):
    """Map a value of zero to ``None``; return any other value unchanged."""
    if value == 0:
        return None
    return value
class BaseRenderer(object):
    """
    Abstract base class for all renderers.

    Concrete subclasses are expected to declare the `media_type` and
    `format` attributes and to supply an implementation of `.render()`.
    """
    media_type = None
    format = None
    charset = 'utf-8'
    render_style = 'text'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        # Subclasses must serialize `data` themselves; the base class
        # deliberately provides no default behavior.
        raise NotImplementedError('Renderer class requires .render() to be implemented')
class JSONRenderer(BaseRenderer):
    """
    Renderer which serializes to JSON.
    """
    media_type = 'application/json'
    format = 'json'
    encoder_class = encoders.JSONEncoder
    ensure_ascii = not api_settings.UNICODE_JSON
    compact = api_settings.COMPACT_JSON

    # We don't set a charset because JSON is a binary encoding,
    # that can be encoded as utf-8, utf-16 or utf-32.
    # See: http://www.ietf.org/rfc/rfc4627.txt
    # Also: http://lucumr.pocoo.org/2013/7/19/application-mimetypes-and-encodings/
    charset = None

    def get_indent(self, accepted_media_type, renderer_context):
        """Return the indent level to use, or None for compact output."""
        if accepted_media_type:
            # A media type such as 'application/json; indent=4' requests
            # pretty-printed output. `indent=0` is coerced into `indent=None`.
            base_media_type, params = parse_header(accepted_media_type.encode('ascii'))
            try:
                requested = int(params['indent'])
            except (KeyError, ValueError, TypeError):
                pass
            else:
                return zero_as_none(max(min(requested, 8), 0))

        # Otherwise honour an 'indent' supplied via the context, e.g. when
        # we are being invoked by the BrowsableAPIRenderer.
        return renderer_context.get('indent', None)

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Render `data` into JSON, returning a bytestring.
        """
        if data is None:
            return bytes()

        renderer_context = renderer_context or {}
        indent = self.get_indent(accepted_media_type, renderer_context)
        if indent is not None:
            separators = INDENT_SEPARATORS
        elif self.compact:
            separators = SHORT_SEPARATORS
        else:
            separators = LONG_SEPARATORS

        rendered = json.dumps(
            data, cls=self.encoder_class,
            indent=indent, ensure_ascii=self.ensure_ascii,
            separators=separators
        )

        # On python 2.x json.dumps() returns bytestrings if ensure_ascii=True,
        # but if ensure_ascii=False, the return type is underspecified,
        # and may (or may not) be unicode.
        # On python 3.x json.dumps() returns unicode strings.
        if isinstance(rendered, six.text_type):
            # We always fully escape \u2028 and \u2029 to ensure we output JSON
            # that is a strict javascript subset. If bytes were returned
            # by json.dumps() then we don't have these characters in any case.
            # See: http://timelessrepo.com/json-isnt-a-javascript-subset
            rendered = rendered.replace('\u2028', '\\u2028').replace('\u2029', '\\u2029')
            return bytes(rendered.encode('utf-8'))
        return rendered
class TemplateHTMLRenderer(BaseRenderer):
    """
    An HTML renderer for use with templates.

    The data supplied to the Response object should be a dictionary that will
    be used as context for the template.

    The template name is determined by (in order of preference):

    1. An explicit `.template_name` attribute set on the response.
    2. An explicit `.template_name` attribute set on this class.
    3. The return result of calling `view.get_template_names()`.

    For example:
        data = {'users': User.objects.all()}
        return Response(data, template_name='users.html')

    For pre-rendered HTML, see StaticHTMLRenderer.
    """
    media_type = 'text/html'
    format = 'html'
    template_name = None
    exception_template_names = [
        '%(status_code)s.html',
        'api_exception.html'
    ]
    charset = 'utf-8'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Renders data to HTML, using Django's standard template rendering.

        The template name is determined by (in order of preference):
        1. An explicit .template_name set on the response.
        2. An explicit .template_name set on this class.
        3. The return result of calling view.get_template_names().
        """
        renderer_context = renderer_context or {}
        view = renderer_context['view']
        request = renderer_context['request']
        response = renderer_context['response']

        # Error responses render a dedicated exception template.
        if response.exception:
            template = self.get_exception_template(response)
        else:
            template = self.resolve_template(
                self.get_template_names(response, view)
            )

        if hasattr(self, 'resolve_context'):
            # Fallback for older versions.
            context = self.resolve_context(data, request, response)
        else:
            context = self.get_template_context(data, renderer_context)
        return template_render(template, context, request=request)

    def resolve_template(self, template_names):
        """Return the first template that exists from the candidate names."""
        return loader.select_template(template_names)

    def get_template_context(self, data, renderer_context):
        """Build the template context; error responses also expose status_code."""
        response = renderer_context['response']
        if response.exception:
            data['status_code'] = response.status_code
        return data

    def get_template_names(self, response, view):
        """Resolve candidate template names from response, class, then view."""
        for candidate in (response.template_name, self.template_name):
            if candidate:
                return [candidate]
        if hasattr(view, 'get_template_names'):
            return view.get_template_names()
        if hasattr(view, 'template_name'):
            return [view.template_name]
        raise ImproperlyConfigured(
            'Returned a template response with no `template_name` attribute set on either the view or response'
        )

    def get_exception_template(self, response):
        """Return an error template for the response's status code."""
        template_names = [
            name % {'status_code': response.status_code}
            for name in self.exception_template_names
        ]
        try:
            # Try to find an appropriate error template
            return self.resolve_template(template_names)
        except Exception:
            # Fall back to using eg '404 Not Found'
            return Template('%d %s' % (response.status_code,
                                       response.status_text.title()))
# Note, subclass TemplateHTMLRenderer simply for the exception behavior
class StaticHTMLRenderer(TemplateHTMLRenderer):
    """
    An HTML renderer class that simply returns pre-rendered HTML.

    The data supplied to the Response object should be a string representing
    the pre-rendered HTML content.

    For example:
        data = '<html><body>example</body></html>'
        return Response(data)

    For template rendered HTML, see TemplateHTMLRenderer.
    """
    media_type = 'text/html'
    format = 'html'
    charset = 'utf-8'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """Pass pre-rendered HTML through, unless rendering an exception."""
        renderer_context = renderer_context or {}
        response = renderer_context.get('response')

        # Non-exception responses are returned untouched.
        if not (response and response.exception):
            return data

        request = renderer_context['request']
        template = self.get_exception_template(response)
        if hasattr(self, 'resolve_context'):
            # Fallback for older versions.
            context = self.resolve_context(data, request, response)
        else:
            context = self.get_template_context(data, renderer_context)
        return template_render(template, context, request=request)
class HTMLFormRenderer(BaseRenderer):
    """
    Renders serializer data into an HTML form.

    If the serializer was instantiated without an object then this will
    return an HTML form not bound to any object,
    otherwise it will return an HTML form with the appropriate initial data
    populated from the object.

    Note that rendering of field and form errors is not currently supported.
    """
    media_type = 'text/html'
    format = 'form'
    charset = 'utf-8'
    template_pack = 'rest_framework/vertical/'
    base_template = 'form.html'
    # Per-field rendering defaults, keyed by serializer field class.
    # ClassLookupDict resolves lookups through the field's class hierarchy,
    # so unlisted subclasses fall back to their parent's style.
    default_style = ClassLookupDict({
        serializers.Field: {
            'base_template': 'input.html',
            'input_type': 'text'
        },
        serializers.EmailField: {
            'base_template': 'input.html',
            'input_type': 'email'
        },
        serializers.URLField: {
            'base_template': 'input.html',
            'input_type': 'url'
        },
        serializers.IntegerField: {
            'base_template': 'input.html',
            'input_type': 'number'
        },
        serializers.FloatField: {
            'base_template': 'input.html',
            'input_type': 'number'
        },
        serializers.DateTimeField: {
            'base_template': 'input.html',
            'input_type': 'datetime-local'
        },
        serializers.DateField: {
            'base_template': 'input.html',
            'input_type': 'date'
        },
        serializers.TimeField: {
            'base_template': 'input.html',
            'input_type': 'time'
        },
        serializers.FileField: {
            'base_template': 'input.html',
            'input_type': 'file'
        },
        serializers.BooleanField: {
            'base_template': 'checkbox.html'
        },
        serializers.ChoiceField: {
            'base_template': 'select.html', # Also valid: 'radio.html'
        },
        serializers.MultipleChoiceField: {
            'base_template': 'select_multiple.html', # Also valid: 'checkbox_multiple.html'
        },
        serializers.RelatedField: {
            'base_template': 'select.html', # Also valid: 'radio.html'
        },
        serializers.ManyRelatedField: {
            'base_template': 'select_multiple.html', # Also valid: 'checkbox_multiple.html'
        },
        serializers.Serializer: {
            'base_template': 'fieldset.html'
        },
        serializers.ListSerializer: {
            'base_template': 'list_fieldset.html'
        },
        serializers.FilePathField: {
            'base_template': 'select.html',
        },
    })
    def render_field(self, field, parent_style):
        """
        Render a single bound serializer field to an HTML string.

        Style resolution order: class defaults from `default_style`,
        overridden by the field's own `style` dict, with the template pack
        inherited from the parent style when not set explicitly.
        """
        # Hidden fields produce no markup at all.
        if isinstance(field._field, serializers.HiddenField):
            return ''
        style = dict(self.default_style[field])
        style.update(field.style)
        if 'template_pack' not in style:
            style['template_pack'] = parent_style.get('template_pack', self.template_pack)
        style['renderer'] = self
        # Get a clone of the field with text-only value representation.
        field = field.as_form_field()
        # HTML datetime-local inputs reject a trailing 'Z' suffix.
        if style.get('input_type') == 'datetime-local' and isinstance(field.value, six.text_type):
            field.value = field.value.rstrip('Z')
        # An explicit 'template' wins over the pack/base_template pair.
        if 'template' in style:
            template_name = style['template']
        else:
            template_name = style['template_pack'].strip('/') + '/' + style['base_template']
        template = loader.get_template(template_name)
        context = {'field': field, 'style': style}
        return template_render(template, context)
    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Render serializer data and return an HTML form, as a string.
        """
        renderer_context = renderer_context or {}
        # `data` is expected to carry a `.serializer` attribute
        # (serializer output does in DRF).
        form = data.serializer
        style = renderer_context.get('style', {})
        if 'template_pack' not in style:
            style['template_pack'] = self.template_pack
        style['renderer'] = self
        template_pack = style['template_pack'].strip('/')
        template_name = template_pack + '/' + self.base_template
        template = loader.get_template(template_name)
        context = {
            'form': form,
            'style': style
        }
        return template_render(template, context)
class BrowsableAPIRenderer(BaseRenderer):
    """
    HTML renderer used to self-document the API.
    """
    media_type = 'text/html'
    format = 'api'
    template = 'rest_framework/api.html'
    filter_template = 'rest_framework/filters/base.html'
    charset = 'utf-8'
    form_renderer_class = HTMLFormRenderer
    def get_default_renderer(self, view):
        """
        Return an instance of the first valid renderer.
        (Don't use another documenting renderer.)
        """
        renderers = [renderer for renderer in view.renderer_classes
                     if not issubclass(renderer, BrowsableAPIRenderer)]
        # Prefer renderers that don't render via templates.
        non_template_renderers = [renderer for renderer in renderers
                                  if not hasattr(renderer, 'get_template_names')]
        if not renderers:
            return None
        elif non_template_renderers:
            return non_template_renderers[0]()
        return renderers[0]()
    def get_content(self, renderer, data,
                    accepted_media_type, renderer_context):
        """
        Get the content as if it had been rendered by the default
        non-documenting renderer.
        """
        if not renderer:
            return '[No renderers were found]'
        # Always pretty-print the embedded content.
        renderer_context['indent'] = 4
        content = renderer.render(data, accepted_media_type, renderer_context)
        render_style = getattr(renderer, 'render_style', 'text')
        assert render_style in ['text', 'binary'], 'Expected .render_style ' \
            '"text" or "binary", but got "%s"' % render_style
        if render_style == 'binary':
            # Binary payloads are summarised rather than embedded.
            return '[%d bytes of binary content]' % len(content)
        return content
    def show_form_for_method(self, view, method, request, obj):
        """
        Returns True if a form should be shown for this method.
        """
        if method not in view.allowed_methods:
            return # Not a valid method
        try:
            view.check_permissions(request)
            if obj is not None:
                view.check_object_permissions(request, obj)
        except exceptions.APIException:
            return False # Doesn't have permissions
        return True
    def _get_serializer(self, serializer_class, view_instance, request, *args, **kwargs):
        """Instantiate `serializer_class` with the standard DRF context injected."""
        kwargs['context'] = {
            'request': request,
            'format': self.format,
            'view': view_instance
        }
        return serializer_class(*args, **kwargs)
    def get_rendered_html_form(self, data, view, method, request):
        """
        Return a string representing a rendered HTML form, possibly bound to
        either the input or output data.
        In the absence of the View having an associated form then return None.
        """
        # See issue #2089 for refactoring this.
        serializer = getattr(data, 'serializer', None)
        if serializer and not getattr(serializer, 'many', False):
            instance = getattr(serializer, 'instance', None)
            if isinstance(instance, Page):
                # Paginated results are not a single bindable instance.
                instance = None
        else:
            instance = None
        # If this is valid serializer data, and the form is for the same
        # HTTP method as was used in the request then use the existing
        # serializer instance, rather than dynamically creating a new one.
        if request.method == method and serializer is not None:
            try:
                kwargs = {'data': request.data}
            except ParseError:
                kwargs = {}
            existing_serializer = serializer
        else:
            kwargs = {}
            existing_serializer = None
        with override_method(view, request, method) as request:
            if not self.show_form_for_method(view, method, request, instance):
                return
            if method in ('DELETE', 'OPTIONS'):
                return True # Don't actually need to return a form
            has_serializer = getattr(view, 'get_serializer', None)
            has_serializer_class = getattr(view, 'serializer_class', None)
            # No serializer at all, or no form-capable parser: no form.
            if (
                (not has_serializer and not has_serializer_class) or
                not any(is_form_media_type(parser.media_type) for parser in view.parser_classes)
            ):
                return
            if existing_serializer is not None:
                try:
                    return self.render_form_for_serializer(existing_serializer)
                except TypeError:
                    pass
            if has_serializer:
                if method in ('PUT', 'PATCH'):
                    serializer = view.get_serializer(instance=instance, **kwargs)
                else:
                    serializer = view.get_serializer(**kwargs)
            else:
                # at this point we must have a serializer_class
                if method in ('PUT', 'PATCH'):
                    serializer = self._get_serializer(view.serializer_class, view,
                                                      request, instance=instance, **kwargs)
                else:
                    serializer = self._get_serializer(view.serializer_class, view,
                                                      request, **kwargs)
            return self.render_form_for_serializer(serializer)
    def render_form_for_serializer(self, serializer):
        """Render `serializer` as an HTML form using the configured form renderer."""
        if hasattr(serializer, 'initial_data'):
            # Trigger validation so bound forms display their data/errors.
            serializer.is_valid()
        form_renderer = self.form_renderer_class()
        return form_renderer.render(
            serializer.data,
            self.accepted_media_type,
            {'style': {'template_pack': 'rest_framework/horizontal'}}
        )
    def get_raw_data_form(self, data, view, method, request):
        """
        Returns a form that allows for arbitrary content types to be tunneled
        via standard HTML forms.
        (Which are typically application/x-www-form-urlencoded)
        """
        # See issue #2089 for refactoring this.
        serializer = getattr(data, 'serializer', None)
        if serializer and not getattr(serializer, 'many', False):
            instance = getattr(serializer, 'instance', None)
            if isinstance(instance, Page):
                instance = None
        else:
            instance = None
        with override_method(view, request, method) as request:
            # Check permissions
            if not self.show_form_for_method(view, method, request, instance):
                return
            # If possible, serialize the initial content for the generic form
            default_parser = view.parser_classes[0]
            renderer_class = getattr(default_parser, 'renderer_class', None)
            if hasattr(view, 'get_serializer') and renderer_class:
                # View has a serializer defined and parser class has a
                # corresponding renderer that can be used to render the data.
                if method in ('PUT', 'PATCH'):
                    serializer = view.get_serializer(instance=instance)
                else:
                    serializer = view.get_serializer()
                # Render the raw data content
                renderer = renderer_class()
                accepted = self.accepted_media_type
                context = self.renderer_context.copy()
                context['indent'] = 4
                content = renderer.render(serializer.data, accepted, context)
            else:
                content = None
            # Generate a generic form that includes a content type field,
            # and a content field.
            media_types = [parser.media_type for parser in view.parser_classes]
            choices = [(media_type, media_type) for media_type in media_types]
            initial = media_types[0]
            class GenericContentForm(forms.Form):
                _content_type = forms.ChoiceField(
                    label='Media type',
                    choices=choices,
                    initial=initial,
                    widget=forms.Select(attrs={'data-override': 'content-type'})
                )
                _content = forms.CharField(
                    label='Content',
                    widget=forms.Textarea(attrs={'data-override': 'content'}),
                    initial=content
                )
            return GenericContentForm()
    def get_name(self, view):
        """Return the display name of the view."""
        return view.get_view_name()
    def get_description(self, view, status_code):
        """Return the view description, suppressed for 401/403 responses."""
        if status_code in (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN):
            return ''
        return view.get_view_description(html=True)
    def get_breadcrumbs(self, request):
        """Return the breadcrumb trail for the current request path."""
        return get_breadcrumbs(request.path, request)
    def get_filter_form(self, data, view, request):
        """Render the combined HTML of all filter backends, or None."""
        if not hasattr(view, 'get_queryset') or not hasattr(view, 'filter_backends'):
            return
        # Infer if this is a list view or not.
        paginator = getattr(view, 'paginator', None)
        if isinstance(data, list):
            pass
        elif paginator is not None and data is not None:
            try:
                paginator.get_results(data)
            except (TypeError, KeyError):
                return
        elif not isinstance(data, list):
            # Neither a plain list nor paginated data: not a list view.
            return
        queryset = view.get_queryset()
        elements = []
        for backend in view.filter_backends:
            if hasattr(backend, 'to_html'):
                html = backend().to_html(request, queryset, view)
                if html:
                    elements.append(html)
        if not elements:
            return
        template = loader.get_template(self.filter_template)
        context = {'elements': elements}
        return template_render(template, context)
    def get_context(self, data, accepted_media_type, renderer_context):
        """
        Returns the context used to render.
        """
        view = renderer_context['view']
        request = renderer_context['request']
        response = renderer_context['response']
        renderer = self.get_default_renderer(view)
        raw_data_post_form = self.get_raw_data_form(data, view, 'POST', request)
        raw_data_put_form = self.get_raw_data_form(data, view, 'PUT', request)
        raw_data_patch_form = self.get_raw_data_form(data, view, 'PATCH', request)
        raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form
        response_headers = OrderedDict(sorted(response.items()))
        renderer_content_type = ''
        if renderer:
            renderer_content_type = '%s' % renderer.media_type
            if renderer.charset:
                renderer_content_type += ' ;%s' % renderer.charset
        response_headers['Content-Type'] = renderer_content_type
        if getattr(view, 'paginator', None) and view.paginator.display_page_controls:
            paginator = view.paginator
        else:
            paginator = None
        csrf_cookie_name = settings.CSRF_COOKIE_NAME
        csrf_header_name = getattr(settings, 'CSRF_HEADER_NAME', 'HTTP_X_CSRFToken') # Fallback for Django 1.8
        # Convert the WSGI-style header key to its HTTP header form.
        if csrf_header_name.startswith('HTTP_'):
            csrf_header_name = csrf_header_name[5:]
        csrf_header_name = csrf_header_name.replace('_', '-')
        context = {
            'content': self.get_content(renderer, data, accepted_media_type, renderer_context),
            'view': view,
            'request': request,
            'response': response,
            'user': request.user,
            'description': self.get_description(view, response.status_code),
            'name': self.get_name(view),
            'version': VERSION,
            'paginator': paginator,
            'breadcrumblist': self.get_breadcrumbs(request),
            'allowed_methods': view.allowed_methods,
            'available_formats': [renderer_cls.format for renderer_cls in view.renderer_classes],
            'response_headers': response_headers,
            'put_form': self.get_rendered_html_form(data, view, 'PUT', request),
            'post_form': self.get_rendered_html_form(data, view, 'POST', request),
            'delete_form': self.get_rendered_html_form(data, view, 'DELETE', request),
            'options_form': self.get_rendered_html_form(data, view, 'OPTIONS', request),
            'filter_form': self.get_filter_form(data, view, request),
            'raw_data_put_form': raw_data_put_form,
            'raw_data_post_form': raw_data_post_form,
            'raw_data_patch_form': raw_data_patch_form,
            'raw_data_put_or_patch_form': raw_data_put_or_patch_form,
            'display_edit_forms': bool(response.status_code != 403),
            'api_settings': api_settings,
            'csrf_cookie_name': csrf_cookie_name,
            'csrf_header_name': csrf_header_name
        }
        return context
    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Render the HTML for the browsable API representation.
        """
        self.accepted_media_type = accepted_media_type or ''
        self.renderer_context = renderer_context or {}
        template = loader.get_template(self.template)
        context = self.get_context(data, accepted_media_type, renderer_context)
        ret = template_render(template, context, request=renderer_context['request'])
        # Munge DELETE Response code to allow us to return content
        # (Do this *after* we've rendered the template so that we include
        # the normal deletion response code in the output)
        response = renderer_context['response']
        if response.status_code == status.HTTP_204_NO_CONTENT:
            response.status_code = status.HTTP_200_OK
        return ret
class AdminRenderer(BrowsableAPIRenderer):
    """Renders an admin-style HTML interface for list and detail views."""
    template = 'rest_framework/admin.html'
    format = 'admin'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """
        Render the admin page, munging status codes so create/delete follow
        the redirect style of the Django admin.
        """
        self.accepted_media_type = accepted_media_type or ''
        self.renderer_context = renderer_context or {}

        response = renderer_context['response']
        request = renderer_context['request']
        view = self.renderer_context['view']

        if response.status_code == status.HTTP_400_BAD_REQUEST:
            # Errors still need to display the list or detail information.
            # The only way we can get at that is to simulate a GET request.
            self.error_form = self.get_rendered_html_form(data, view, request.method, request)
            self.error_title = {'POST': 'Create', 'PUT': 'Edit'}.get(request.method, 'Errors')

            with override_method(view, request, 'GET') as request:
                response = view.get(request, *view.args, **view.kwargs)
            data = response.data

        template = loader.get_template(self.template)
        context = self.get_context(data, accepted_media_type, renderer_context)
        ret = template_render(template, context, request=renderer_context['request'])

        # Creation and deletion should use redirects in the admin style.
        if response.status_code == status.HTTP_201_CREATED and 'Location' in response:
            response.status_code = status.HTTP_303_SEE_OTHER
            response['Location'] = request.build_absolute_uri()
            ret = ''

        if response.status_code == status.HTTP_204_NO_CONTENT:
            response.status_code = status.HTTP_303_SEE_OTHER
            try:
                # Attempt to get the parent breadcrumb URL.
                response['Location'] = self.get_breadcrumbs(request)[-2][1]
            except (KeyError, IndexError):
                # BUGFIX: indexing a list with [-2] raises IndexError, not
                # KeyError, so the original `except KeyError` never fired and
                # short breadcrumb lists crashed the renderer. Fall back to
                # reloading the current URL to get a 'Not Found' page.
                # Also: HttpRequest exposes get_full_path(), not `full_path`.
                response['Location'] = request.get_full_path()
            ret = ''

        return ret

    def get_context(self, data, accepted_media_type, renderer_context):
        """
        Build the template context, deriving list/detail style, the columns
        to display, and any pending error form from the response data.
        """
        context = super(AdminRenderer, self).get_context(
            data, accepted_media_type, renderer_context
        )

        paginator = getattr(context['view'], 'paginator', None)
        if paginator is not None and data is not None:
            try:
                results = paginator.get_results(data)
            except (TypeError, KeyError):
                # Non-paginated or unexpectedly-shaped data: use it as-is.
                results = data
        else:
            results = data

        if results is None:
            header = {}
            style = 'detail'
        elif isinstance(results, list):
            # Use the first item's keys as the table header, if any.
            header = results[0] if results else {}
            style = 'list'
        else:
            header = results
            style = 'detail'

        columns = [key for key in header.keys() if key != 'url']
        details = [key for key in header.keys() if key != 'url']

        context['style'] = style
        context['columns'] = columns
        context['details'] = details
        context['results'] = results
        context['error_form'] = getattr(self, 'error_form', None)
        context['error_title'] = getattr(self, 'error_title', None)
        return context
class DocumentationRenderer(BaseRenderer):
    """Renders a coreapi document as interactive HTML API documentation."""
    media_type = 'text/html'
    format = 'html'
    charset = 'utf-8'
    template = 'rest_framework/docs/index.html'
    code_style = 'emacs'
    languages = ['shell', 'javascript', 'python']

    def get_context(self, data, request):
        """Assemble the template context for the documentation page."""
        return {
            'document': data,
            'langs': self.languages,
            'code_style': pygments_css(self.code_style),
            'request': request,
        }

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """Render the documentation template for the given document."""
        request = renderer_context['request']
        template = loader.get_template(self.template)
        return template_render(template, self.get_context(data, request),
                               request=request)
class SchemaJSRenderer(BaseRenderer):
    """
    Renders a coreapi schema as a JavaScript file embedding the
    base64-encoded Core JSON document.
    """
    media_type = 'application/javascript'
    format = 'javascript'
    charset = 'utf-8'
    template = 'rest_framework/schema.js'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        codec = coreapi.codecs.CoreJSONCodec()
        # BUGFIX: b64encode() returns bytes on Python 3, so without decoding
        # the template would embed the literal text "b'...'" and produce
        # invalid JavaScript. decode('ascii') is a no-op-safe on Python 2.
        schema = base64.b64encode(codec.encode(data)).decode('ascii')

        template = loader.get_template(self.template)
        context = {'schema': mark_safe(schema)}
        request = renderer_context['request']
        return template_render(template, context, request=request)
class MultiPartRenderer(BaseRenderer):
    """Encodes request data as a multipart body (used by the test client)."""
    media_type = 'multipart/form-data; boundary=BoUnDaRyStRiNg'
    format = 'multipart'
    charset = 'utf-8'
    BOUNDARY = 'BoUnDaRyStRiNg'

    def render(self, data, accepted_media_type=None, renderer_context=None):
        """Encode `data` as multipart, rejecting nested dictionaries."""
        if hasattr(data, 'items'):
            # Multipart cannot represent nested structures.
            bad_keys = [key for key, value in data.items()
                        if isinstance(value, dict)]
            assert not bad_keys, (
                "Test data contained a dictionary value for key '%s', "
                "but multipart uploads do not support nested data. "
                "You may want to consider using format='json' in this "
                "test case." % bad_keys[0]
            )
        return encode_multipart(self.BOUNDARY, data)
class CoreJSONRenderer(BaseRenderer):
    """Renders coreapi Document objects to the Core JSON format."""
    media_type = 'application/coreapi+json'
    charset = None
    format = 'corejson'

    def __init__(self):
        assert coreapi, 'Using CoreJSONRenderer, but `coreapi` is not installed.'

    def render(self, data, media_type=None, renderer_context=None):
        """Serialise `data` with the Core JSON codec."""
        use_indent = bool(renderer_context.get('indent', 0))
        return coreapi.codecs.CoreJSONCodec().dump(data, indent=use_indent)
| |
"""
MySQL database backend for Django.
Requires MySQLdb: http://sourceforge.net/projects/mysql-python
"""
import re
try:
import MySQLdb as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
# We want version (1, 2, 1, 'final', 2) or later. We can't just use
# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
# inadvertently passes the version test.
version = Database.version_info
if (version < (1,2,1) or (version[:3] == (1, 2, 1) and
(len(version) < 5 or version[3] != 'final' or version[4] < 2))):
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
from MySQLdb.converters import conversions
from MySQLdb.constants import FIELD_TYPE, FLAG
from django.db.backends import *
from django.db.backends.mysql.client import DatabaseClient
from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.safestring import SafeString, SafeUnicode
# Raise exceptions for database warnings if DEBUG is on
from django.conf import settings
if settings.DEBUG:
from warnings import filterwarnings
filterwarnings("error", category=Database.Warning)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
# timedelta in terms of actual behavior as they are signed and include days --
# and Django expects time, so we still need to override that. We also need to
# add special handling for SafeUnicode and SafeString as MySQLdb's type
# checking is too tight to catch those (see Django ticket #6052).
django_conversions = conversions.copy()
django_conversions.update({
FIELD_TYPE.TIME: util.typecast_time,
FIELD_TYPE.DECIMAL: util.typecast_decimal,
FIELD_TYPE.NEWDECIMAL: util.typecast_decimal,
})
# This should match the numerical portion of the version numbers (we can treat
# versions like 5.0.24 and 5.0.24a as the same). Based on the list of version
# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
# http://dev.mysql.com/doc/refman/5.0/en/news.html .
server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
# point is to raise Warnings as exceptions, this can be done with the Python
# warning module, and this is setup when the connection is created, and the
# standard util.CursorDebugWrapper can be used. Also, using sql_mode
# TRADITIONAL will automatically cause most warnings to be treated as errors.
class CursorWrapper(object):
    """
    A thin wrapper around MySQLdb's normal cursor class so that we can catch
    particular exception instances and reraise them with the right types.

    Implemented as a wrapper, rather than a subclass, so that we aren't stuck
    to the particular underlying representation returned by Connection.cursor().
    """
    # MySQL error codes that actually indicate integrity violations
    # (1048: "Column cannot be null") but are raised as OperationalError.
    codes_for_integrityerror = (1048,)

    def __init__(self, cursor):
        # The real MySQLdb cursor being delegated to.
        self.cursor = cursor

    def execute(self, query, args=None):
        """Execute a single statement, remapping misclassified errors."""
        try:
            return self.cursor.execute(query, args)
        except Database.OperationalError, e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e[0] in self.codes_for_integrityerror:
                raise Database.IntegrityError(tuple(e))
            raise

    def executemany(self, query, args):
        """Execute `query` for each parameter sequence in `args`."""
        try:
            return self.cursor.executemany(query, args)
        except Database.OperationalError, e:
            # Map some error codes to IntegrityError, since they seem to be
            # misclassified and Django would prefer the more logical place.
            if e[0] in self.codes_for_integrityerror:
                raise Database.IntegrityError(tuple(e))
            raise

    def __getattr__(self, attr):
        # Delegate any other attribute access to the wrapped cursor.
        if attr in self.__dict__:
            return self.__dict__[attr]
        else:
            return getattr(self.cursor, attr)

    def __iter__(self):
        # Iterating the wrapper iterates the underlying cursor's rows.
        return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
    """MySQL-specific feature flags overriding the backend defaults."""
    # MySQLdb's fetchmany() yields an empty tuple (not list) when exhausted.
    empty_fetchmany_value = ()
    # NOTE(review): presumably because MySQL cannot reference the table
    # being updated in a subquery of the same UPDATE -- confirm.
    update_can_self_select = False
    related_fields_match_type = True
class DatabaseOperations(BaseDatabaseOperations):
    """MySQL-specific SQL generation helpers."""

    def date_extract_sql(self, lookup_type, field_name):
        """Return SQL extracting `lookup_type` (year/month/...) from a column."""
        # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
        return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)

    def date_trunc_sql(self, lookup_type, field_name):
        """Return SQL truncating a datetime column to `lookup_type` precision."""
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            # Unknown granularity: fall back to the untruncated column.
            sql = field_name
        else:
            # Keep real components up to position i; pad the rest with defaults.
            format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql

    def drop_foreignkey_sql(self):
        return "DROP FOREIGN KEY"

    def fulltext_search_sql(self, field_name):
        # Doubled %% keeps a literal %s placeholder for the search term.
        return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name

    def no_limit_value(self):
        # 2**64 - 1, as recommended by the MySQL documentation
        return 18446744073709551615L

    def quote_name(self, name):
        """Backtick-quote an identifier, avoiding double quoting."""
        if name.startswith("`") and name.endswith("`"):
            return name # Quoting once is enough.
        return "`%s`" % name

    def random_function_sql(self):
        return 'RAND()'

    def sql_flush(self, style, tables, sequences):
        """Return statements emptying `tables` and resetting AUTO_INCREMENTs."""
        # NB: The generated SQL below is specific to MySQL
        # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
        # to clear all tables of all data
        if tables:
            # FK checks are disabled so TRUNCATE order doesn't matter.
            sql = ['SET FOREIGN_KEY_CHECKS = 0;']
            for table in tables:
                sql.append('%s %s;' % (style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table))))
            sql.append('SET FOREIGN_KEY_CHECKS = 1;')

            # 'ALTER TABLE table AUTO_INCREMENT = 1;'... style SQL statements
            # to reset sequence indices
            sql.extend(["%s %s %s %s %s;" % \
                (style.SQL_KEYWORD('ALTER'),
                 style.SQL_KEYWORD('TABLE'),
                 style.SQL_TABLE(self.quote_name(sequence['table'])),
                 style.SQL_KEYWORD('AUTO_INCREMENT'),
                 style.SQL_FIELD('= 1'),
                ) for sequence in sequences])
            return sql
        else:
            return []

    def value_to_db_datetime(self, value):
        """Convert a datetime to its database string form (or None)."""
        if value is None:
            return None

        # MySQL doesn't support tz-aware datetimes
        if value.tzinfo is not None:
            raise ValueError("MySQL backend does not support timezone-aware datetimes.")

        # MySQL doesn't support microseconds
        return unicode(value.replace(microsecond=0))

    def value_to_db_time(self, value):
        """Convert a time to its database string form (or None)."""
        if value is None:
            return None

        # MySQL doesn't support tz-aware datetimes
        if value.tzinfo is not None:
            raise ValueError("MySQL backend does not support timezone-aware datetimes.")

        # MySQL doesn't support microseconds
        return unicode(value.replace(microsecond=0))

    def year_lookup_bounds(self, value):
        """Return the inclusive [start, end] datetime strings for a year lookup."""
        # Again, no microseconds
        first = '%s-01-01 00:00:00'
        second = '%s-12-31 23:59:59.99'
        return [first % value, second % value]
class DatabaseWrapper(BaseDatabaseWrapper):
    """MySQL connection wrapper wiring together the backend helper classes."""

    # Django lookup type -> MySQL operator SQL. The BINARY keyword forces
    # case-sensitive comparison for the non-'i' variants.
    operators = {
        'exact': '= %s',
        'iexact': 'LIKE %s',
        'contains': 'LIKE BINARY %s',
        'icontains': 'LIKE %s',
        'regex': 'REGEXP BINARY %s',
        'iregex': 'REGEXP %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE BINARY %s',
        'endswith': 'LIKE BINARY %s',
        'istartswith': 'LIKE %s',
        'iendswith': 'LIKE %s',
    }

    def __init__(self, **kwargs):
        super(DatabaseWrapper, self).__init__(**kwargs)
        # Cached (major, minor, patch) tuple, filled lazily by
        # get_server_version().
        self.server_version = None

        self.features = DatabaseFeatures()
        self.ops = DatabaseOperations()
        self.client = DatabaseClient()
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = DatabaseValidation()

    def _valid_connection(self):
        """Return True if the cached connection still answers a ping."""
        if self.connection is not None:
            try:
                self.connection.ping()
                return True
            except DatabaseError:
                # Server went away -- drop the stale connection so the next
                # cursor request reconnects.
                self.connection.close()
                self.connection = None
        return False

    def _cursor(self, settings):
        """Return a wrapped cursor, opening a new connection if needed."""
        # NOTE(review): the `settings` parameter shadows the module-level
        # django.conf.settings import; callers pass the same settings object.
        if not self._valid_connection():
            kwargs = {
                # Use Django's converters and always talk utf8/unicode.
                'conv': django_conversions,
                'charset': 'utf8',
                'use_unicode': True,
            }
            if settings.DATABASE_USER:
                kwargs['user'] = settings.DATABASE_USER
            if settings.DATABASE_NAME:
                kwargs['db'] = settings.DATABASE_NAME
            if settings.DATABASE_PASSWORD:
                kwargs['passwd'] = settings.DATABASE_PASSWORD
            if settings.DATABASE_HOST.startswith('/'):
                # A leading slash means a local Unix socket path.
                kwargs['unix_socket'] = settings.DATABASE_HOST
            elif settings.DATABASE_HOST:
                kwargs['host'] = settings.DATABASE_HOST
            if settings.DATABASE_PORT:
                kwargs['port'] = int(settings.DATABASE_PORT)
            kwargs.update(self.options)
            self.connection = Database.connect(**kwargs)
            # Teach MySQLdb to encode Safe* strings like their base types
            # (its type checking is too strict to catch them -- see #6052).
            self.connection.encoders[SafeUnicode] = self.connection.encoders[unicode]
            self.connection.encoders[SafeString] = self.connection.encoders[str]
        cursor = CursorWrapper(self.connection.cursor())
        return cursor

    def _rollback(self):
        try:
            BaseDatabaseWrapper._rollback(self)
        except Database.NotSupportedError:
            # e.g. MyISAM tables don't support transactions; ignore.
            pass

    def get_server_version(self):
        """Return the server version as an int tuple, caching the result."""
        if not self.server_version:
            if not self._valid_connection():
                self.cursor()
            m = server_version_re.match(self.connection.get_server_info())
            if not m:
                raise Exception('Unable to determine MySQL version from version string %r' % self.connection.get_server_info())
            self.server_version = tuple([int(x) for x in m.groups()])
        return self.server_version
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import json
except ImportError:
import simplejson as json
import logging
from ambari_client.model.base_model import BaseModel, ModelList
from ambari_client.model import status, component, paths, utils
LOG = logging.getLogger(__name__)
def _get_host(root_resource, host_name):
    """
    Look a host up by name.
    @param root_resource: The root Resource object.
    @param host_name: Host name
    @return: A HostModel object
    """
    dic = root_resource.get(paths.HOST_PATH % (host_name))
    return utils.ModelUtils.create_model(
        HostModel, dic, root_resource, "Hosts")
def _get_cluster_host(root_resource, cluster_name, host_name):
    """
    Look a cluster host up by cluster and host name.
    @param root_resource: The root Resource object.
    @param cluster_name: Cluster name
    @param host_name: Host name
    @return: A HostModel object
    """
    dic = root_resource.get(paths.CLUSTER_HOST_PATH % (cluster_name, host_name))
    return utils.ModelUtils.create_model(
        HostModel, dic, root_resource, "Hosts")
def _create_hosts(root_resource, host_list):
    """
    Create hosts from a list of HostModel objects.
    @param root_resource: The root Resource.
    @param host_list: list of HostModel objects to create
    @return: A StatusModel object
    """
    payload = []
    for host in host_list:
        payload.append({"Hosts": {"host_name": host.host_name,
                                  "ip": host.ip,
                                  "rack_info": host.rack_info}})
    resp = root_resource.post(paths.HOSTS_PATH, payload=payload)
    return utils.ModelUtils.create_model(
        status.StatusModel, resp, root_resource, "NO_KEY")
def _create_host(root_resource, host_name, ip, rack_info=None):
    """
    Create a single host.
    @param root_resource: The root Resource.
    @param host_name: Host name
    @param ip: IP address
    @param rack_info: Rack id. Default None
    @return: A StatusModel object
    """
    host = HostModel(root_resource, host_name, ip, rack_info)
    return _create_hosts(root_resource, ModelList([host]))
def _add_hosts(root_resource, cluster_name, host_list):
    """
    Add a list of hosts to a cluster.
    @param root_resource: The root Resource object.
    @param cluster_name: Cluster name
    @param host_list: list of HostModel objects
    @return: A StatusModel object
    """
    payload = []
    for host in host_list:
        payload.append({"Hosts": {"host_name": host.host_name,
                                  "ip": host.ip,
                                  "rack_info": host.rack_info}})
    cpath = paths.HOSTS_CREATE_PATH % (cluster_name)
    resp = root_resource.post(path=cpath, payload=payload)
    return utils.ModelUtils.create_model(
        status.StatusModel, resp, root_resource, "NO_KEY")
def _add_host(root_resource, cluster_name, host_name, ip, rack_info=None):
    """
    Add a single host to a cluster.
    @param root_resource: The root Resource object.
    @param cluster_name: Cluster name
    @param host_name: Host name
    @param ip: ip of Host
    @param rack_info: rack information
    @return: StatusModel.
    """
    host = HostModel(root_resource, host_name, ip, rack_info)
    return _add_hosts(root_resource, cluster_name, ModelList([host]))
def _assign_role(root_resource, cluster_name, host_name, component_name):
    """
    Add a new component to a node.
    @param root_resource: The root Resource object.
    @param cluster_name: Cluster name
    @param host_name: name of host
    @param component_name: name of component.
    @return: StatusModel
    """
    payload = {"host_components": [
        {"HostRoles": {"component_name": component_name}}]}
    resp = root_resource.post(
        path=paths.HOSTS_ASSIGN_ROLE % (cluster_name, host_name),
        payload=payload)
    return utils.ModelUtils.create_model(
        status.StatusModel, resp, root_resource, "NO_KEY")
def _get_all_hosts(root_resource):
    """
    Get all hosts known to the server.
    @param root_resource: The root Resource.
    @return: A ModelList of HostModel objects.
    """
    dic = root_resource.get(paths.HOSTS_PATH)
    return utils.ModelUtils.get_model_list(
        ModelList, HostModel, dic, root_resource, "Hosts")
def _get_all_cluster_hosts(root_resource, cluster_name):
    """
    Get all hosts in the cluster, requesting every field.
    @param root_resource: The root Resource.
    @param cluster_name: The name of the cluster.
    @return: A ModelList of HostModel objects.
    """
    path = (paths.CLUSTER_HOSTS_PATH % (cluster_name)) + '?fields=*'
    dic = root_resource.get(path)
    return utils.ModelUtils.get_model_list(
        ModelList, HostModel, dic, root_resource, "Hosts")
def _delete_host(root_resource, host_name):
    """
    Delete a host by name.
    @param root_resource: The root Resource object.
    @param host_name: Host name
    @return: StatusModel object
    """
    resp = root_resource.delete(paths.HOST_PATH % (host_name))
    return utils.ModelUtils.create_model(
        status.StatusModel, resp, root_resource, "NO_KEY")
def _delete_cluster_host(root_resource, cluster_name, host_name):
    """
    Delete a host from a cluster.
    @param root_resource: The root Resource object.
    @param cluster_name: cluster name
    @param host_name: Host name
    @return: StatusModel object
    """
    resp = root_resource.delete(
        paths.CLUSTER_HOST_PATH % (cluster_name, host_name))
    return utils.ModelUtils.create_model(
        status.StatusModel, resp, root_resource, "NO_KEY")
def _bootstrap_hosts(root_resource, hosts_list, ssh_key, ssh_user):
    """
    Bootstrap hosts over SSH.
    @param root_resource: The root Resource object.
    @param hosts_list: list of host_names.
    @param ssh_key: private SSH key used to reach the hosts.
    @param ssh_user: SSH user name.
    @return: A StatusModel object.
    """
    payload = {
        "verbose": True,
        "sshKey": ssh_key,
        "hosts": hosts_list,
        "user": ssh_user}
    resp = root_resource.post(
        paths.BOOTSTRAP_PATH,
        payload,
        content_type="application/json")
    return utils.ModelUtils.create_model(
        status.StatusModel,
        _bootstrap_resp_to_status_dict(resp),
        root_resource,
        "NO_KEY")
def _bootstrap_resp_to_status_dict(resp):
"""
Bootstrap response has a little odd format
that's why we have to convert it to the normal
format to handle it properly later.
"""
# if we got other response, like an error 400 happened on higher level
if isinstance(resp['status'], int):
return resp
new_resp = {}
if resp['status'] == "OK":
new_resp['status'] = 201
else: # ERROR
new_resp['status'] = 500
new_resp['message'] = resp['log']
new_resp['requestId'] = resp['requestId']
return new_resp
class HostModel(BaseModel):
    """
    The HostModel class. Represents one host, optionally attached to a
    cluster through `clusterRef`.
    """
    RO_ATTR = ('host_state', 'public_host_name')
    RW_ATTR = ('host_name', 'ip', 'rack_info')
    REF_ATTR = ('cluster_name',)

    def __init__(
            self,
            resource_root,
            host_name,
            ip=None,
            rack_info='/default-rack'):
        utils.retain_self_helper(BaseModel, **locals())

    def __str__(self):
        return "<<HostModel>> hostname = %s; ip = %s ; rack_info = %s" % (
            self.host_name, self.ip, self.rack_info)

    def _get_cluster_name(self):
        """Return the owning cluster's name, or None when unattached."""
        if self.clusterRef:
            return self.clusterRef.cluster_name
        return None

    def _path(self):
        """Return this host's resource path."""
        return paths.HOSTS_PATH + '/' + self.host_name

    def get_host_components(self, detail=None):
        """
        Get this host's components.
        @return: A ModelList containing ComponentModel objects.
        """
        return component.get_host_components(
            self._get_resource_root(),
            self._get_cluster_name(),
            self.host_name)

    def get_host_component(self, component_name, detail=None):
        """
        Get a specific component of this host.
        @param component_name: name of component.
        @return: A ComponentModel object.
        """
        return component.get_host_component(
            self._get_resource_root(),
            self._get_cluster_name(),
            self.host_name,
            component_name)

    def assign_role(self, component_name, detail=None):
        """
        Assign a component role to the host.
        @param component_name: name of component.
        @return: StatusModel.
        """
        return _assign_role(
            self._get_resource_root(),
            self._get_cluster_name(),
            self.host_name,
            component_name)

    def _put_host_roles(self, context, host_roles):
        """
        PUT a bulk HostRoles update for every component on this host.
        Shared implementation for the install/start/stop/maintenance helpers
        below, which previously repeated this request five times verbatim.
        @param context: request context string shown by Ambari.
        @param host_roles: dict placed under "Body"/"HostRoles".
        @return: StatusModel.
        """
        root_resource = self._get_resource_root()
        path = paths.HOSTS_COMPONENTS_PATH % (self._get_cluster_name(),
                                              self.host_name)
        data = {
            "RequestInfo": {
                "context": context,
            },
            "Body": {
                "HostRoles": host_roles,
            },
        }
        resp = root_resource.put(path=path, payload=data)
        return utils.ModelUtils.create_model(status.StatusModel, resp,
                                             root_resource, "NO_KEY")

    def install_all_components(self):
        """Move every component on this host to the INSTALLED state."""
        return self._put_host_roles("Install All Components",
                                    {"state": "INSTALLED"})

    def start_all_components(self):
        """Move every component on this host to the STARTED state."""
        return self._put_host_roles("Start All Components",
                                    {"state": "STARTED"})

    def stop_all_components(self):
        """Stop every component on this host (state back to INSTALLED)."""
        return self._put_host_roles("Stop All Components",
                                    {"state": "INSTALLED"})

    def enable_maintenance_mode(self):
        """Turn maintenance mode ON for every component on this host."""
        # NOTE: context string keeps the original (misspelled) wording so the
        # request text Ambari displays is unchanged.
        return self._put_host_roles("Start Maintanence Mode",
                                    {"maintenance_state": "ON"})

    def disable_maintenance_mode(self):
        """Turn maintenance mode OFF for every component on this host."""
        return self._put_host_roles("Stop Maintanence Mode",
                                    {"maintenance_state": "OFF"})
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import numpy as np
from paddle.fluid.framework import default_main_program, default_startup_program, program_guard
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid import unique_name
from paddle.fluid import core
from paddle.fluid.initializer import Constant
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.nn import autoincreased_step_counter
from paddle.fluid.framework import Variable
from paddle.fluid.executor import global_scope
from paddle.fluid.transpiler.inference_transpiler import InferenceTranspiler
__all__ = ['QuantizeTranspiler']
_QUANTIZABLE_OP_TYPES = ['conv2d', 'depthwise_conv2d', 'mul']
def _quantized_var_name(var_name):
"""
Return quantized variable name for the input `var_name`.
"""
return "%s.quantized" % (var_name)
def _dequantized_var_name(var_name):
"""
Return dequantized variable name for the input `var_name`.
"""
return "%s.dequantized" % (var_name)
def _quantized_scale_name(var_name):
"""
Return quantized variable name for the input `var_name`.
"""
return "%s.scale" % (var_name)
def _original_var_name(var_name):
"""
Return the original variable name.
"""
if var_name.endswith('.quantized.dequantized'):
return var_name[:-len('.quantized.dequantized')]
if var_name.endswith('.quantized'):
return var_name[:-len('.quantized')]
if var_name.endswith('.dequantized'):
return var_name[:-len('.dequantized')]
if var_name.endswith('.scale'):
return var_name[:-len('.scale')]
else:
return var_name
def _is_float(v):
return isinstance(v, float) or isinstance(v, np.float32)
def quant(x, scale, num_bits):
    """Quantize `x` onto the signed `num_bits` integer grid using `scale`."""
    bound = (1 << (num_bits - 1)) - 1
    return np.round(x / scale * bound)
class QuantizeTranspiler(object):
def __init__(self,
weight_bits=8,
activation_bits=8,
activation_quantize_type='abs_max',
weight_quantize_type='abs_max',
window_size=10000):
"""
Convert and rewrite the fluid Program according to weight and
activation quantization type.
Args:
weight_bits (int): quantization bit number for weights,
the bias is not quantized.
activation_bits (int): quantization bit number for activation.
activation_quantize_type (str): quantization type for activation,
now support 'abs_max', 'range_abs_max'. If use 'abs_max' mode,
the quantization scale will be calculated dynamically each step
in both training and testing period. If use 'range_abs_max',
a static quantization scale will be calculated during training
and used in inference.
weight_quantize_type (str): quantization type for weights,
support 'abs_max'. The 'range_abs_max' usually is not used for
weight, since weights are fixed once the model is well trained.
window_size (int): the window size for 'range_abs_max' quantization.
Examples:
.. code-block:: python
# the original program will be rewrite, if you don't want to
# change it, please clone at first.
# quantize_program = program.clone()
t = fluid.QuantizeTranspiler()
t.transpile(quantize_program)
"""
self.weight_bits = weight_bits
self.activation_bits = activation_bits
quant_type = ['abs_max', 'range_abs_max']
if weight_quantize_type not in quant_type:
raise ValueError(
"Unknown weight_quantize_type: '%s'. It can only be ",
"'abs_max' or 'range_abs_max'.", str(weight_quantize_type))
if activation_quantize_type not in quant_type:
raise ValueError(
"Unknown activation_quantize_type : '%s'. It can only be ",
"'abs_max' or 'range_abs_max'.", str(activation_quantize_type))
self.weight_quantize_type = weight_quantize_type
self.activation_quantize_type = activation_quantize_type
self.window_size = window_size
self.helper = LayerHelper(self.__class__.__name__)
self.fake_quant_op_types = [
'fake_quantize_abs_max', 'fake_quantize_range_abs_max'
]
self.fake_dequant_op_types = ['fake_dequantize_max_abs']
self.is_test = None
self.global_step = None
    def training_transpile(self, program=None, startup_program=None):
        """Rewrites a training input program in place for simulated
        quantization. Inserts fake quantization and de-quantization ops into
        the program to simulate the error introduced by quantization, and
        changes the gradient ops' inputs to use the faked quantization
        weights and activation. Since the program is transformed in place,
        the graph connections will change.

        Args:
            program (Program): the input program to be transpiled; defaults
                to the default main program.
            startup_program (Program): defaults to the default startup
                program.
        """
        self.is_test = False
        program = default_main_program() if program is None else program
        startup_program = default_startup_program() if startup_program is \
            None else startup_program

        # marked the variable which has been quantized and dequantized.
        # One map per block, keyed by original var name.
        dequanted_vars = [
            collections.OrderedDict() for _ in range(len(program.blocks))
        ]
        grad_op_types = ['%s_grad' % (type) for type in _QUANTIZABLE_OP_TYPES]

        # Parameter names get weight_bits/weight_quantize_type; everything
        # else is treated as an activation.
        params = [p.name for p in program.global_block().iter_parameters()]

        def _transpile_forward(block, op):
            # Insert quant+dequant ops ahead of `op` for each input, then
            # repoint the input at the dequantized variable.
            idx = block.ops.index(op)
            block_id = block.idx
            # insert quant op and dequant op
            for name in op.input_arg_names:
                if name in dequanted_vars[block_id]:
                    # Already quantized earlier in this block: reuse it.
                    dequant_var = dequanted_vars[block_id][name]
                else:
                    var = block.var(name)
                    quant_bits = self.weight_bits if var.name in params \
                        else self.activation_bits
                    quant_type = self.weight_quantize_type if var.name \
                        in params else self.activation_quantize_type

                    quant_var, scale_var = self._insert_quant_op(
                        block, idx, var, quant_bits, quant_type)
                    dequant_var = self._insert_dequant_op(
                        block, idx + 1, quant_var, scale_var, quant_bits)
                    dequanted_vars[block_id][name] = dequant_var
                # rename the forward op inputs
                op._rename_input(name, dequant_var.name)

        def _transpile_backward(block, op):
            # Gradient ops only *reuse* dequantized vars created by the
            # forward pass; they never insert new quant ops.
            block_id = block.idx
            no_dequanted_input_vars = True
            for name in op.input_arg_names:
                if name in dequanted_vars[block_id]:
                    dequant_var = dequanted_vars[block_id][name]
                    op._rename_input(name, dequant_var.name)
                    no_dequanted_input_vars = False
            if no_dequanted_input_vars:
                raise ValueError("There is no dequanted inputs for op %s." %
                                 (op.type))

        with program_guard(program, startup_program):
            self._create_global_step()
            for block in program.blocks:
                # Snapshot the op list: the loop body inserts new ops.
                ops = list(block.ops)
                block_id = block.idx
                for op in ops:
                    # rewrite the forward ProgramDes
                    if op.type in _QUANTIZABLE_OP_TYPES:
                        _transpile_forward(block, op)
                    # rename the backward op inputs
                    if op.type in grad_op_types:
                        _transpile_backward(block, op)
def _create_global_step(self):
if self.weight_quantize_type == 'range_abs_max' or \
self.activation_quantize_type == 'range_abs_max':
self.global_step = autoincreased_step_counter()
def freeze_program(self, program, place, fuse_bn=False, scope=None):
    """Freeze input training program for inference.

    Rewrites `program` in place: removes the fake quantize/dequantize
    training ops, quantizes persistable weights in the scope, and inserts
    a single fake_dequantize op after each quantizable op.

    Args:
        program (Program): the input program to be transpile.
        place: device place used when writing quantized tensors back.
        fuse_bn (bool): if True, first fold batch_norm into conv/fc.
        scope (Scope): variable scope; defaults to the global scope.
    """
    self.is_test = True
    scope = global_scope() if scope is None else scope
    program = default_main_program() if program is None else program
    if fuse_bn:
        bn_fuse_transpiler = BNFuseTranspiler()
        bn_fuse_transpiler.transpile(program, place)
    # Persistable variables are the trained parameters (weights/biases).
    persistable_vars = [
        v.name
        for v in filter(lambda var: var.persistable, program.list_vars())
    ]
    # Per-block maps: renamed op inputs, renamed op outputs, and the
    # quantization scale recorded for each original variable name.
    op_in_rename_map = [
        collections.OrderedDict() for _ in range(len(program.blocks))
    ]
    op_out_rename_map = [
        collections.OrderedDict() for _ in range(len(program.blocks))
    ]
    var_scale_map = [
        collections.OrderedDict() for _ in range(len(program.blocks))
    ]

    def _remove_fake_quant_and_dequant_op(block, op):
        # Drop a fake (de)quant op, remembering that its output name now
        # maps back to its input name (following chains already recorded).
        idx = block.ops.index(op)
        block_id = block.idx
        k = op.output('Out')[0]
        v = op.input('X')[0]
        if v not in op_in_rename_map[block_id]:
            op_in_rename_map[block_id][k] = v
        else:
            op_in_rename_map[block_id][k] = op_in_rename_map[block_id][v]
        block._remove_op(idx)

    def _insert_post_dequant_op(block, op):
        # After a quantizable op, insert one fake_dequantize op that undoes
        # both the weight scale and the activation scale.
        idx = block.ops.index(op)
        block_id = block.idx
        # NOTE(review): max_range stays None if no persistable input is
        # seen; float(max_range) below would then raise — confirm every
        # quantizable op has a weight input.
        max_range = None
        scale_var = None
        for name in op.input_arg_names:
            if name in op_in_rename_map[block_id]:
                op._rename_input(name, op_in_rename_map[block_id][name])
            scale_v = var_scale_map[block_id][_original_var_name(name)]
            if _original_var_name(name) in persistable_vars:
                # Weight scale is a plain float; fold it into max_range.
                param_range = (1 << (self.weight_bits - 1)) - 1
                act_range = (1 << (self.activation_bits - 1)) - 1
                assert _is_float(scale_v)
                max_range = param_range * act_range / scale_v
            else:
                # Activation scale lives in a Variable fed to the op.
                assert isinstance(scale_v, Variable)
                scale_var = var_scale_map[block_id][_original_var_name(
                    name)]
        if len(op.output_arg_names) != 1:
            raise ValueError("Only support one output, but op %s has"
                             " more than one output." % (op.type))
        out_var = block.var(op.output_arg_names[0])
        dequant_var = block.create_var(
            name=_dequantized_var_name(out_var.name),
            type=out_var.type,
            shape=out_var.shape,
            dtype=out_var.dtype)
        # insert fake_dequantize_op
        dequant_op = block._insert_op(
            idx + 1,
            type="fake_dequantize_max_abs",
            attrs={'max_range': float(max_range)},
            inputs={"X": out_var,
                    'Scale': scale_var},
            outputs={"Out": dequant_var})
        op_out_rename_map[block_id][out_var.name] = dequant_var.name
        return dequant_var

    def _load_var(name):
        # Read a tensor from the scope as a numpy array.
        return np.array(scope.find_var(name).get_tensor())

    def _restore_var(name, arr):
        # Write a numpy array back into the scope's tensor on `place`.
        t = scope.find_var(name).get_tensor()
        t.set(arr, place)

    for block in program.blocks:
        ops = list(block.ops)
        block_id = block.idx
        for op in ops:
            op_type = op.type
            # insert dequant_op after fc/conv, need to rename
            # input of the followed ops
            for name in op.input_arg_names:
                if name in op_out_rename_map[block_id]:
                    op._rename_input(name,
                                     op_out_rename_map[block_id][name])
            if op_type in self.fake_quant_op_types:
                in_arg_name = op.input('X')[0]
                if in_arg_name in persistable_vars:
                    # Weight: compute or load its scalar scale value.
                    if self.weight_quantize_type == 'abs_max':
                        param = _load_var(in_arg_name)
                        scale_v = np.max(np.abs(param))
                    else:
                        scale_v = _load_var(op.output('OutScale')[0])
                    var_scale_map[block_id][in_arg_name] = scale_v
                else:
                    # Activation: keep the scale as a graph Variable.
                    scale_v = block.var(op.output('OutScale')[0])
                    var_scale_map[block_id][in_arg_name] = scale_v
                if in_arg_name in persistable_vars:
                    _remove_fake_quant_and_dequant_op(block, op)
                    # quantize weight and restore
                    param_t = _load_var(in_arg_name)
                    param_q_t = quant(param_t, scale_v, self.weight_bits)
                    _restore_var(in_arg_name, param_q_t)
            if op_type in self.fake_dequant_op_types:
                _remove_fake_quant_and_dequant_op(block, op)
            if op_type in _QUANTIZABLE_OP_TYPES:
                dequant_var = _insert_post_dequant_op(block, op)
    # remove the unused var in ProgramDesc
    self._remove_unused_var(program)
    #program = program.clone()
def convert_to_int8(self, program, place, scope=None):
    """Convert the quantized weights of `program` to int8 parameters.

    Each persistable input of a quantizable op gets a parallel
    ``<name>.int8`` parameter holding the same tensor cast to int8, and
    the op is re-pointed at the int8 variable; unused variables are then
    pruned. The tensor values are assumed to already be quantized (see
    ``freeze_program``).

    Args:
        program (Program): program to convert; defaults to the default
            main program.
        place: device place the int8 tensors are written to.
        scope (Scope): variable scope; defaults to the global scope.
    """
    scope = global_scope() if scope is None else scope
    program = default_main_program() if program is None else program

    def _load_var(name):
        return np.array(scope.find_var(name).get_tensor())

    global_block = program.global_block()

    # Renamed from `convert_to_int8`: the inner helper used to shadow the
    # enclosing method's own name, which was confusing and made the method
    # impossible to reference from within its body.
    def _convert_param_to_int8(var):
        int8_var_name = var.name + ".int8"
        int8_var = global_block.create_parameter(
            name=int8_var_name.encode('ascii'),
            type=var.type,
            dtype=core.VarDesc.VarType.INT8,
            shape=var.shape)
        tensor = _load_var(var.name)
        scope.var(int8_var_name)
        int8_tensor = scope.find_var(int8_var_name).get_tensor()
        int8_tensor.set(tensor.astype(np.int8), place)
        return int8_var

    # original name -> int8 name, so a parameter shared by several ops is
    # converted exactly once.
    input_map = {}
    for block in program.blocks:
        for op in list(block.ops):
            if op.type in _QUANTIZABLE_OP_TYPES:
                for name in op.input_arg_names:
                    var = block.var(name)
                    if var.persistable:
                        if name not in input_map:
                            int8_var = _convert_param_to_int8(var)
                            input_map[name] = int8_var.name
                        op._rename_input(name, input_map[name])
    self._remove_unused_var(program)
def _remove_unused_var(self, program):
all_remove_vars = []
for block in program.blocks:
args = []
for op in block.ops:
args += op.input_arg_names
args += op.output_arg_names
args = list(set(args))
var_names = block.vars.keys()
sub_block_remove_vars = []
for var in var_names:
if var not in args:
sub_block_remove_vars.append(var)
all_remove_vars.append(sub_block_remove_vars)
remove_vars = [list(set(v)) for v in all_remove_vars]
for i, block in enumerate(program.blocks):
for v in remove_vars[i]:
block._remove_var(v)
def _insert_quant_abs_max_op(self, block, idx, var, quant_bits):
    """Insert a fake_quantize_abs_max op ahead of position `idx`.

    Returns:
        (quant_var, scale): the quantized output variable and the
        variable receiving the observed abs-max scale.
    """
    quantized = block.create_var(
        name=_quantized_var_name(var.name),
        type=var.type,
        shape=var.shape,
        dtype=var.dtype)
    # NOTE(review): the scale var is created with the full tensor shape
    # here, while the range_abs_max variant uses shape=[1] — confirm this
    # is what the op expects.
    out_scale = block.create_var(
        name=_quantized_scale_name(var.name),
        type=var.type,
        shape=var.shape,
        dtype=var.dtype)
    block._insert_op(
        idx,
        type='fake_quantize_abs_max',
        attrs={'bit_length': quant_bits},
        inputs={'X': var},
        outputs={'Out': quantized,
                 'OutScale': out_scale})
    return quantized, out_scale
def _insert_quant_range_abs_max_op(self, block, idx, var, quant_bits):
    """Insert fake_quantize_range_abs_max op ahead of position `idx`.

    Returns:
        (quant_var, scale): the quantized output variable and the
        persistable scale parameter the op updates in place.
    """
    quant_var = block.create_var(
        name=_quantized_var_name(var.name),
        type=var.type,
        shape=var.shape,
        dtype=var.dtype)
    # The scale is a non-trainable persistable parameter so it survives
    # across iterations; Constant(0.001) is just a small non-zero seed.
    scale = self.helper.create_parameter(
        attr=ParamAttr(
            name=_quantized_scale_name(var.name),
            initializer=Constant(0.001),
            trainable=False),
        shape=[1],
        dtype=var.dtype)
    scale.stop_gradient = True
    ins = {'X': var, 'InScale': scale}
    # 'OutScale' deliberately aliases the input scale variable, so the op
    # refreshes the running scale in place.
    outs = {'Out': quant_var, 'OutScale': scale}
    if not self.is_test:
        # Training only: a persistable buffer holding the last
        # `window_size` scales, indexed by the global step counter.
        scales = self.helper.create_global_variable(
            name=unique_name.generate('scales'),
            persistable=True,
            dtype=var.dtype,
            shape=[self.window_size])
        self.helper.set_variable_initializer(
            scales, initializer=Constant(value=0))
        ins['Iter'] = self.global_step
        outs['OutScales'] = scales
    attrs = {
        'window_size': self.window_size,
        'bit_length': quant_bits,
        'is_test': self.is_test
    }
    quant_op = block._insert_op(
        idx,
        type='fake_quantize_range_abs_max',
        attrs=attrs,
        inputs=ins,
        outputs=outs)
    return quant_var, scale
def _insert_quant_op(self, block, idx, var, quant_bits, quant_type):
"""
Insert fake_quantize_op
"""
if quant_type == 'abs_max':
return self._insert_quant_abs_max_op(block, idx, var, quant_bits)
elif quant_type == 'range_abs_max':
return self._insert_quant_range_abs_max_op(block, idx, var,
quant_bits)
def _insert_dequant_op(self, block, idx, var, scale, quant_bits):
    """Insert a fake_dequantize_max_abs op ahead of position `idx`.

    (The original docstring said "fake_quantize_op", which was wrong —
    this inserts the *dequantize* counterpart.)

    Args:
        block: the block to insert into.
        idx (int): insertion position within block.ops.
        var (Variable): the quantized variable to dequantize.
        scale (Variable): the scale produced by the matching quantize op.
        quant_bits (int): quantization bit width.

    Returns:
        Variable: the dequantized output variable.
    """
    dequant_var = block.create_var(
        name=_dequantized_var_name(var.name),
        type=var.type,
        shape=var.shape,
        dtype=var.dtype)
    # insert fake_dequantize_op; max_range is the largest representable
    # magnitude for the given bit width, e.g. 127 for 8 bits.
    max_range = (1 << (quant_bits - 1)) - 1
    dequant_op = block._insert_op(
        idx,
        type="fake_dequantize_max_abs",
        attrs={'max_range': float(max_range)},
        inputs={"X": var,
                'Scale': scale},
        outputs={"Out": dequant_var})
    return dequant_var
class BNFuseTranspiler(InferenceTranspiler):
    """Transpiler that folds batch_norm parameters into the preceding
    conv2d/depthwise_conv2d/mul op so the batch_norm op can be removed
    for inference.
    """

    def _fuse_param(self, current_op, bn_op, bias_op, with_bias):
        """Fold `bn_op`'s statistics into `current_op`'s weight and the
        elementwise-add bias.

        Args:
            current_op: the conv2d/depthwise_conv2d/mul op folded into.
            bn_op: the batch_norm op being fused away.
            bias_op: the elementwise_add op carrying the bias ('Y' input).
            with_bias (bool): whether `current_op` already had a bias add.
        """

        def _update_param(param_name, new_param):
            # Write the recomputed parameter back into the scope.
            # (Dropped the unused `op` parameter and dead `var` lookup of
            # the original.)
            tensor = self.scope.find_var(param_name).get_tensor()
            tensor.set(np.array(new_param), self.place)

        def _load_param(param_name):
            return np.array(self.scope.find_var(param_name).get_tensor())

        bias_bn = _load_param(bn_op.input("Bias")[0])  # Bias
        scale_bn = _load_param(bn_op.input("Scale")[0])  # Scale
        mean_bn = _load_param(bn_op.input("Mean")[0])  # Mean
        var_bn = _load_param(bn_op.input("Variance")[0])  # Variance

        if current_op.type in ['conv2d', 'depthwise_conv2d']:
            current_param = _load_param(
                _original_var_name(current_op.input("Filter")[0]))
        elif current_op.type == 'mul':
            current_param = _load_param(
                _original_var_name(current_op.input("Y")[0]))

        # BN folding factor: scale / sqrt(variance + eps).
        std_bn = np.float32(np.sqrt(np.add(var_bn, 1e-5)))
        tmp = np.float32(np.divide(scale_bn, std_bn))

        # add bias of batch_norm_op to conv2d
        if with_bias:
            # BUG FIX: op.input() returns a *list* of names; the original
            # passed the whole list to _load_param instead of the first
            # name (compare the _update_param call below, which indexes).
            bias = _load_param(bias_op.input("Y")[0])
        else:
            bias = np.zeros(bias_bn.shape)
        bias = np.float32(
            np.add(np.multiply(np.subtract(bias, mean_bn), tmp), bias_bn))

        # re-compute weight of conv2d/fc
        tmp = tmp.reshape(tmp.shape[0], -1)
        dst_param = current_param.reshape((tmp.shape[0], -1))
        dst_param = np.float32(np.multiply(dst_param, tmp))
        dst_param = dst_param.reshape(current_param.shape)

        # update parameters
        if current_op.type in ['conv2d', 'depthwise_conv2d']:
            _update_param(
                _original_var_name(current_op.input("Filter")[0]),
                dst_param)
        elif current_op.type == 'mul':
            _update_param(
                _original_var_name(current_op.input("Y")[0]), dst_param)
        _update_param(bias_op.input("Y")[0], bias)

        # collect the renamed input
        self.input_map[bn_op.output("Y")[0]] = bias_op.output("Out")[0]
# --------------------------------------------------------------------------
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using queue
"""
import mox
from cinder import context
from cinder import db
from cinder import exception
from cinder import flags
from cinder import manager
from cinder.openstack.common import cfg
from cinder import service
from cinder import test
from cinder import wsgi
# Options consumed only by these tests: which manager class the fake
# service loads, and where the WSGI test service binds (port 0 lets the
# OS pick an ephemeral port — see TestWSGIService.test_service_random_port).
test_service_opts = [
    cfg.StrOpt("fake_manager",
               default="cinder.tests.test_service.FakeManager",
               help="Manager for testing"),
    cfg.StrOpt("test_service_listen",
               default=None,
               help="Host to bind test service to"),
    cfg.IntOpt("test_service_listen_port",
               default=0,
               help="Port number to bind test service to"), ]

# Register the options so tests can read/override them through FLAGS.
flags.FLAGS.register_opts(test_service_opts)
class FakeManager(manager.Manager):
    """Minimal manager implementation used only by these tests."""

    def test_method(self):
        # A distinct marker so tests can tell which layer answered.
        return 'manager'
class ExtendedService(service.Service):
    """Service subclass that shadows the manager's test_method."""

    def test_method(self):
        return 'service'
class ServiceManagerTestCase(test.TestCase):
    """Checks how Service dispatches methods between service and manager."""

    def test_message_gets_to_manager(self):
        # A plain Service delegates unknown methods to its manager.
        serv = service.Service(
            'test', 'test', 'test',
            'cinder.tests.test_service.FakeManager')
        serv.start()
        self.assertEqual('manager', serv.test_method())

    def test_override_manager_method(self):
        # A Service subclass defining the method wins over the manager.
        serv = ExtendedService(
            'test', 'test', 'test',
            'cinder.tests.test_service.FakeManager')
        serv.start()
        self.assertEqual('service', serv.test_method())
class ServiceFlagsTestCase(test.TestCase):
    """Tests that enable_new_services controls the created service state."""

    def _create_service_and_get_ref(self):
        # Create/start/stop a service, then fetch and clean up its DB row
        # so each test can inspect the 'disabled' column. Extracted from
        # the two tests below, which duplicated this verbatim.
        app = service.Service.create(host='foo', binary='cinder-fake')
        app.start()
        app.stop()
        ctxt = context.get_admin_context()
        ref = db.service_get(ctxt, app.service_id)
        db.service_destroy(ctxt, app.service_id)
        return ref

    def test_service_enabled_on_create_based_on_flag(self):
        self.flags(enable_new_services=True)
        ref = self._create_service_and_get_ref()
        # assertFalse replaces the deprecated TestCase.assert_ alias.
        self.assertFalse(ref['disabled'])

    def test_service_disabled_on_create_based_on_flag(self):
        self.flags(enable_new_services=False)
        ref = self._create_service_and_get_ref()
        self.assertTrue(ref['disabled'])
class ServiceTestCase(test.TestCase):
    """Test cases for Services"""

    def setUp(self):
        super(ServiceTestCase, self).setUp()
        # Replace the db module seen by service with a mox mock so tests
        # can script the exact DB interactions.
        self.mox.StubOutWithMock(service, 'db')

    def test_create(self):
        host = 'foo'
        binary = 'cinder-fake'
        topic = 'fake'
        # NOTE(vish): Create was moved out of mox replay to make sure that
        #             the looping calls are created in StartService.
        app = service.Service.create(host=host, binary=binary, topic=topic)
        # assertTrue replaces the deprecated TestCase.assert_ alias.
        self.assertTrue(app)

    def test_report_state_newly_disconnected(self):
        host = 'foo'
        binary = 'bar'
        topic = 'test'
        service_create = {'host': host,
                          'binary': binary,
                          'topic': topic,
                          'report_count': 0,
                          'availability_zone': 'nova'}
        service_ref = {'host': host,
                       'binary': binary,
                       'topic': topic,
                       'report_count': 0,
                       'availability_zone': 'nova',
                       'id': 1}
        # Script: lookup misses (new service), the row is created, and the
        # follow-up service_get blows up to simulate a lost DB connection.
        service.db.service_get_by_args(mox.IgnoreArg(),
                                       host,
                                       binary).AndRaise(exception.NotFound())
        service.db.service_create(mox.IgnoreArg(),
                                  service_create).AndReturn(service_ref)
        service.db.service_get(mox.IgnoreArg(),
                               mox.IgnoreArg()).AndRaise(Exception())
        self.mox.ReplayAll()
        serv = service.Service(host,
                               binary,
                               topic,
                               'cinder.tests.test_service.FakeManager')
        serv.start()
        serv.report_state()
        self.assertTrue(serv.model_disconnected)

    def test_report_state_newly_connected(self):
        host = 'foo'
        binary = 'bar'
        topic = 'test'
        service_create = {'host': host,
                          'binary': binary,
                          'topic': topic,
                          'report_count': 0,
                          'availability_zone': 'nova'}
        service_ref = {'host': host,
                       'binary': binary,
                       'topic': topic,
                       'report_count': 0,
                       'availability_zone': 'nova',
                       'id': 1}
        # Script: service is created, then report_state succeeds and bumps
        # report_count — the service must clear model_disconnected.
        service.db.service_get_by_args(mox.IgnoreArg(),
                                       host,
                                       binary).AndRaise(exception.NotFound())
        service.db.service_create(mox.IgnoreArg(),
                                  service_create).AndReturn(service_ref)
        service.db.service_get(mox.IgnoreArg(),
                               service_ref['id']).AndReturn(service_ref)
        service.db.service_update(mox.IgnoreArg(), service_ref['id'],
                                  mox.ContainsKeyValue('report_count', 1))
        self.mox.ReplayAll()
        serv = service.Service(host,
                               binary,
                               topic,
                               'cinder.tests.test_service.FakeManager')
        serv.start()
        serv.model_disconnected = True
        serv.report_state()
        self.assertFalse(serv.model_disconnected)
class TestWSGIService(test.TestCase):
    """Tests for the WSGI service wrapper."""

    def setUp(self):
        super(TestWSGIService, self).setUp()
        # Loading a real WSGI app needs paste config; stub it out.
        self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())

    def test_service_random_port(self):
        test_service = service.WSGIService("test_service")
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(0, test_service.port)
        test_service.start()
        # Binding to port 0 must have resolved to a real ephemeral port.
        self.assertNotEqual(0, test_service.port)
        test_service.stop()
class TestLauncher(test.TestCase):
    """Tests for the service Launcher."""

    def setUp(self):
        super(TestLauncher, self).setUp()
        self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
        self.service = service.WSGIService("test_service")

    def test_launch_app(self):
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(0, self.service.port)
        launcher = service.Launcher()
        launcher.launch_server(self.service)
        # NOTE(review): the port is asserted to still be 0 right after
        # launch_server — presumably the server has not bound yet at this
        # point; confirm this ordering is intentional.
        self.assertEqual(0, self.service.port)
        launcher.stop()
# --------------------------------------------------------------------------
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
def _prepare_iris_data_for_logistic_regression():
    """Reduce the iris dataset to classes 0 and 1 for binary problems."""
    full = tf.contrib.learn.datasets.load_iris()
    keep = np.where((full.target == 0) | (full.target == 1))
    return tf.contrib.learn.datasets.base.Dataset(
        data=full.data[keep], target=full.target[keep])
class LinearClassifierTest(tf.test.TestCase):
def testEstimatorContract(self):
    """Checks LinearClassifier exposes the standard Estimator interface."""
    estimator_test_utils.assert_estimator_contract(
        self, tf.contrib.learn.LinearClassifier)
def testTrain(self):
    """Tests that loss goes down with training."""

    def _train_input_fn():
        features = {
            'age': tf.constant([1]),
            'language': tf.SparseTensor(values=['english'],
                                        indices=[[0, 0]],
                                        dense_shape=[1, 1]),
        }
        return features, tf.constant([[1]])

    feature_columns = [
        tf.contrib.layers.real_valued_column('age'),
        tf.contrib.layers.sparse_column_with_hash_bucket('language', 100),
    ]
    est = tf.contrib.learn.LinearClassifier(feature_columns=feature_columns)
    est.fit(input_fn=_train_input_fn, steps=100)
    loss_after_100 = est.evaluate(input_fn=_train_input_fn, steps=1)['loss']
    est.fit(input_fn=_train_input_fn, steps=200)
    loss_after_300 = est.evaluate(input_fn=_train_input_fn, steps=1)['loss']
    self.assertLess(loss_after_300, loss_after_100)
    self.assertLess(loss_after_300, 0.01)
def testJointTrain(self):
    """Tests that loss goes down with training with joint weights."""

    def _train_input_fn():
        features = {
            'age': tf.SparseTensor(
                values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
            'language': tf.SparseTensor(values=['english'],
                                        indices=[[0, 0]],
                                        dense_shape=[1, 1]),
        }
        return features, tf.constant([[1]])

    # Both columns are sparse so the _joint_weight optimization applies.
    feature_columns = [
        tf.contrib.layers.sparse_column_with_hash_bucket('age', 2),
        tf.contrib.layers.sparse_column_with_hash_bucket('language', 100),
    ]
    est = tf.contrib.learn.LinearClassifier(
        _joint_weight=True, feature_columns=feature_columns)
    est.fit(input_fn=_train_input_fn, steps=100)
    first_loss = est.evaluate(input_fn=_train_input_fn, steps=1)['loss']
    est.fit(input_fn=_train_input_fn, steps=200)
    second_loss = est.evaluate(input_fn=_train_input_fn, steps=1)['loss']
    self.assertLess(second_loss, first_loss)
    self.assertLess(second_loss, 0.01)
def testMultiClass_MatrixData(self):
    """Tests multi-class classification using matrix data as input."""
    feature_columns = [
        tf.contrib.layers.real_valued_column('feature', dimension=4)
    ]
    est = tf.contrib.learn.LinearClassifier(n_classes=3,
                                            feature_columns=feature_columns)
    est.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
    metrics = est.evaluate(input_fn=test_data.iris_input_multiclass_fn,
                           steps=100)
    self.assertGreater(metrics['accuracy'], 0.9)
def testMultiClass_MatrixData_Labels1D(self):
    """Same as the last test, but labels shape is [150] instead of [150, 1]."""

    def _iris_1d_labels_fn():
        iris = tf.contrib.learn.datasets.load_iris()
        features = {'feature': tf.constant(iris.data, dtype=tf.float32)}
        return features, tf.constant(iris.target, shape=[150],
                                     dtype=tf.int32)

    est = tf.contrib.learn.LinearClassifier(
        n_classes=3,
        feature_columns=[
            tf.contrib.layers.real_valued_column('feature', dimension=4)
        ])
    est.fit(input_fn=_iris_1d_labels_fn, steps=100)
    self.assertGreater(
        est.evaluate(input_fn=_iris_1d_labels_fn, steps=1)['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
    """Tests multi-class classification using numpy matrix data as input."""
    iris = tf.contrib.learn.datasets.load_iris()
    est = tf.contrib.learn.LinearClassifier(
        n_classes=3,
        feature_columns=[
            tf.contrib.layers.real_valued_column('', dimension=4)
        ])
    est.fit(x=iris.data, y=iris.target, steps=100)
    metrics = est.evaluate(x=iris.data, y=iris.target, steps=1)
    self.assertGreater(metrics['accuracy'], 0.9)
def testLogisticRegression_MatrixData(self):
    """Tests binary classification using matrix data as input."""

    def _binary_iris_fn():
        iris = _prepare_iris_data_for_logistic_regression()
        features = {'feature': tf.constant(iris.data, dtype=tf.float32)}
        return features, tf.constant(iris.target, shape=[100, 1],
                                     dtype=tf.int32)

    est = tf.contrib.learn.LinearClassifier(feature_columns=[
        tf.contrib.layers.real_valued_column('feature', dimension=4)
    ])
    est.fit(input_fn=_binary_iris_fn, steps=100)
    self.assertGreater(
        est.evaluate(input_fn=_binary_iris_fn, steps=1)['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Labels1D(self):
    """Same as the last test, but labels shape is [100] instead of [100, 1]."""

    def _binary_iris_1d_fn():
        iris = _prepare_iris_data_for_logistic_regression()
        features = {'feature': tf.constant(iris.data, dtype=tf.float32)}
        return features, tf.constant(iris.target, shape=[100],
                                     dtype=tf.int32)

    est = tf.contrib.learn.LinearClassifier(feature_columns=[
        tf.contrib.layers.real_valued_column('feature', dimension=4)
    ])
    est.fit(input_fn=_binary_iris_1d_fn, steps=100)
    self.assertGreater(
        est.evaluate(input_fn=_binary_iris_1d_fn, steps=1)['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
    """Tests binary classification using numpy matrix data as input."""
    iris = _prepare_iris_data_for_logistic_regression()
    est = tf.contrib.learn.LinearClassifier(
        feature_columns=[
            tf.contrib.layers.real_valued_column('', dimension=4)
        ])
    est.fit(x=iris.data, y=iris.target, steps=100)
    self.assertGreater(
        est.evaluate(x=iris.data, y=iris.target, steps=1)['accuracy'], 0.9)
def testWeightAndBiasNames(self):
    """Tests that weight and bias names haven't changed."""
    est = tf.contrib.learn.LinearClassifier(
        n_classes=3,
        feature_columns=[
            tf.contrib.layers.real_valued_column('feature', dimension=4)
        ])
    est.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
    # 4 input features and 3 classes -> 4 weight rows and 3 biases under
    # the historical variable names exposed by weights_/bias_.
    self.assertEqual(4, len(est.weights_))
    self.assertEqual(3, len(est.bias_))
def testCustomOptimizerByObject(self):
    """Tests passing a custom optimizer instance to LinearClassifier.

    (The previous docstring was copy-pasted from the matrix-data test and
    did not describe this test.)
    """
    feature_column = tf.contrib.layers.real_valued_column('feature',
                                                          dimension=4)
    # The optimizer is given as a constructed Optimizer object.
    classifier = tf.contrib.learn.LinearClassifier(
        n_classes=3,
        optimizer=tf.train.FtrlOptimizer(learning_rate=0.1),
        feature_columns=[feature_column])
    classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_multiclass_fn, steps=100)
    self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByString(self):
    """Tests setting the optimizer by registered name (string)."""
    feature_column = tf.contrib.layers.real_valued_column('feature',
                                                          dimension=4)
    # BUG FIX: this test previously passed a zero-arg callable, which
    # exercised the by-function path; the string spelling belongs here
    # (its twin, testCustomOptimizerByFunction, covers the callable).
    classifier = tf.contrib.learn.LinearClassifier(
        n_classes=3,
        optimizer='Ftrl',
        feature_columns=[feature_column])
    classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_multiclass_fn, steps=100)
    self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByFunction(self):
    """Tests setting the optimizer via a zero-arg callable."""
    feature_column = tf.contrib.layers.real_valued_column('feature',
                                                          dimension=4)

    def _optimizer_fn():
        return tf.train.FtrlOptimizer(learning_rate=0.1)

    # BUG FIX: this test previously passed the string 'Ftrl', which
    # exercised the by-string path; the callable belongs here (its twin,
    # testCustomOptimizerByString, covers the string spelling).
    classifier = tf.contrib.learn.LinearClassifier(
        n_classes=3,
        optimizer=_optimizer_fn,
        feature_columns=[feature_column])
    classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_multiclass_fn, steps=100)
    self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
    """Tests custom evaluation metrics."""

    def _input_fn(num_epochs=None):
        # Create 4 rows, one of them (y = x), three of them (y=Not(x))
        labels = tf.constant([[1], [0], [0], [0]], dtype=tf.float32)
        features = {'x': tf.train.limit_epochs(
            tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
        return features, labels

    def _my_metric_op(predictions, labels):
        # For the case of binary classification, the 2nd column of "predictions"
        # denotes the model predictions.
        predictions = tf.slice(predictions, [0, 1], [-1, 1])
        return tf.reduce_sum(tf.mul(predictions, labels))

    classifier = tf.contrib.learn.LinearClassifier(
        feature_columns=[tf.contrib.layers.real_valued_column('x')])
    classifier.fit(input_fn=_input_fn, steps=100)
    # Each MetricSpec selects which prediction tensor ('classes' vs
    # 'probabilities') is fed to its metric_fn.
    scores = classifier.evaluate(
        input_fn=_input_fn,
        steps=100,
        metrics={
            'my_accuracy': MetricSpec(
                metric_fn=tf.contrib.metrics.streaming_accuracy,
                prediction_key='classes'),
            'my_precision': MetricSpec(
                metric_fn=tf.contrib.metrics.streaming_precision,
                prediction_key='classes'),
            'my_metric': MetricSpec(metric_fn=_my_metric_op,
                                    prediction_key='probabilities')
        })
    self.assertTrue(
        set(['loss', 'my_accuracy', 'my_precision', 'my_metric'
            ]).issubset(set(scores.keys())))
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = np.array(list(classifier.predict(input_fn=predict_input_fn)))
    # The streamed accuracy must agree with sklearn's accuracy computed on
    # the same predictions.
    self.assertEqual(_sklearn.accuracy_score([1, 0, 0, 0], predictions),
                     scores['my_accuracy'])

    # Tests the case where the prediction_key is neither "classes" nor
    # "probabilities".
    with self.assertRaisesRegexp(KeyError, 'bad_type'):
        classifier.evaluate(
            input_fn=_input_fn,
            steps=100,
            metrics={
                'bad_name': MetricSpec(
                    metric_fn=tf.contrib.metrics.streaming_auc,
                    prediction_key='bad_type')})

    # Tests the case where the 2nd element of the key is neither "classes" nor
    # "probabilities".
    with self.assertRaises(KeyError):
        classifier.evaluate(
            input_fn=_input_fn,
            steps=100,
            metrics={('bad_name', 'bad_type'): tf.contrib.metrics.streaming_auc})

    # Tests the case where the tuple of the key doesn't have 2 elements.
    with self.assertRaises(ValueError):
        classifier.evaluate(
            input_fn=_input_fn,
            steps=100,
            metrics={
                ('bad_length_name', 'classes', 'bad_length'):
                    tf.contrib.metrics.streaming_accuracy
            })
def testLogisticFractionalLabels(self):
    """Tests logistic training with fractional labels."""

    def _fractional_input_fn(num_epochs=None):
        features = {
            'age': tf.train.limit_epochs(
                tf.constant([[1], [2]]), num_epochs=num_epochs),
        }
        return features, tf.constant([[.7], [0]], dtype=tf.float32)

    est = tf.contrib.learn.LinearClassifier(
        feature_columns=[tf.contrib.layers.real_valued_column('age')],
        config=tf.contrib.learn.RunConfig(tf_random_seed=1))
    est.fit(input_fn=_fractional_input_fn, steps=500)

    proba = list(est.predict_proba(
        input_fn=functools.partial(_fractional_input_fn, num_epochs=1)))
    # Prediction probabilities mirror the labels column, which proves that
    # the classifier learns from float input.
    self.assertAllClose([[.3, .7], [1., 0.]], proba, atol=.1)
def testTrainWithPartitionedVariables(self):
    """Tests training with partitioned variables."""

    def _input_fn():
        features = {
            'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                        indices=[[0, 0], [0, 1], [2, 0]],
                                        dense_shape=[3, 2])
        }
        labels = tf.constant([[1], [0], [0]])
        return features, labels

    sparse_features = [
        # The given hash_bucket_size results in variables larger than the
        # default min_slice_size attribute, so the variables are partitioned.
        tf.contrib.layers.sparse_column_with_hash_bucket('language',
                                                         hash_bucket_size=2e7)
    ]

    # Fake a two-PS cluster through TF_CONFIG so RunConfig builds a
    # partitioning device setter.
    tf_config = {
        'cluster': {
            tf.contrib.learn.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
        }
    }
    with tf.test.mock.patch.dict('os.environ',
                                 {'TF_CONFIG': json.dumps(tf_config)}):
        config = tf.contrib.learn.RunConfig()
        # Because we did not start a distributed cluster, we need to pass an
        # empty ClusterSpec, otherwise the device_setter will look for
        # distributed jobs, such as "/job:ps" which are not present.
        config._cluster_spec = tf.train.ClusterSpec({})

    classifier = tf.contrib.learn.LinearClassifier(
        feature_columns=sparse_features,
        config=config)
    classifier.fit(input_fn=_input_fn, steps=200)
    loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
    self.assertLess(loss, 0.07)
def testTrainSaveLoad(self):
    """Tests that insures you can save and reload a trained model."""

    def _input_fn(num_epochs=None):
        features = {
            'age': tf.train.limit_epochs(tf.constant([1]),
                                         num_epochs=num_epochs),
            'language': tf.SparseTensor(
                values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),
        }
        return features, tf.constant([[1]])

    feature_columns = [
        tf.contrib.layers.real_valued_column('age'),
        tf.contrib.layers.sparse_column_with_hash_bucket('language', 100),
    ]
    model_dir = tempfile.mkdtemp()
    est = tf.contrib.learn.LinearClassifier(model_dir=model_dir,
                                            feature_columns=feature_columns)
    est.fit(input_fn=_input_fn, steps=30)
    predict_fn = functools.partial(_input_fn, num_epochs=1)
    classes_before = list(est.predict(input_fn=predict_fn,
                                      as_iterable=True))
    proba_before = list(est.predict_proba(input_fn=predict_fn,
                                          as_iterable=True))
    del est

    # A fresh estimator pointed at the same model_dir must restore the
    # trained weights and reproduce the predictions exactly.
    restored = tf.contrib.learn.LinearClassifier(
        model_dir=model_dir, feature_columns=feature_columns)
    classes_after = list(restored.predict(input_fn=predict_fn,
                                          as_iterable=True))
    proba_after = list(restored.predict_proba(input_fn=predict_fn,
                                              as_iterable=True))
    self.assertTrue(np.array_equal(classes_before, classes_after))
    self.assertTrue(np.array_equal(proba_before, proba_after))
def testWeightColumn(self):
    """Tests training with given weight column."""

    def _train_fn():
        # Four rows: one (y = x) carrying a large weight, three (y=Not(x)).
        # The heavy first row should make the model fit y = x.
        features = {
            'x': tf.ones(shape=[4, 1], dtype=tf.float32),
            'w': tf.constant([[100.], [3.], [2.], [2.]]),
        }
        return features, tf.constant([[1], [0], [0], [0]])

    def _eval_fn():
        # Four rows, all (y = x), with unit weights.
        features = {
            'x': tf.ones(shape=[4, 1], dtype=tf.float32),
            'w': tf.constant([[1.], [1.], [1.], [1.]]),
        }
        return features, tf.constant([[1], [1], [1], [1]])

    est = tf.contrib.learn.LinearClassifier(
        weight_column_name='w',
        feature_columns=[tf.contrib.layers.real_valued_column('x')],
        config=tf.contrib.learn.RunConfig(tf_random_seed=3))
    est.fit(input_fn=_train_fn, steps=100)

    eval_scores = est.evaluate(input_fn=_eval_fn, steps=1)
    # Every eval row is y = x, so the label mean is ~1.
    self.assertGreater(eval_scores['labels/actual_label_mean'], 0.9)
    # Without the weight column the model would learn y = Not(x); the
    # weights make it learn y = x, so predictions are near 1 too.
    self.assertGreater(eval_scores['labels/prediction_mean'], 0.9)
    # Accuracy would be ~0 on this eval set if weights were ignored.
    self.assertGreater(eval_scores['accuracy'], 0.9)

    train_scores = est.evaluate(input_fn=_train_fn, steps=1)
    # Weighted label mean is close to 1.0 (it would be 0.25 unweighted).
    self.assertGreater(train_scores['labels/actual_label_mean'], 0.9)
    # Weighted accuracy beats the 0.25 an unweighted evaluation would get.
    self.assertGreater(train_scores['accuracy'], 0.6)
def testWeightColumnLoss(self):
    """Test ensures that you can specify per-example weights for loss."""

    def _input_fn():
        features = {
            'age': tf.constant([[20], [20], [20]]),
            'weights': tf.constant([[100], [1], [1]]),
        }
        return features, tf.constant([[1], [0], [0]])

    age = tf.contrib.layers.real_valued_column('age')

    # Baseline classifier without a weight column.
    unweighted = tf.contrib.learn.LinearClassifier(feature_columns=[age])
    unweighted.fit(input_fn=_input_fn, steps=100)
    loss_unweighted = unweighted.evaluate(input_fn=_input_fn,
                                          steps=1)['loss']

    # Same data, but the positive example dominates through its weight,
    # so the reported (weighted) loss should be smaller.
    weighted = tf.contrib.learn.LinearClassifier(
        feature_columns=[age], weight_column_name='weights')
    weighted.fit(input_fn=_input_fn, steps=100)
    loss_weighted = weighted.evaluate(input_fn=_input_fn, steps=1)['loss']

    self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
    """Tests that export model for servo works."""

    def _export_input_fn():
        features = {
            'age': tf.constant([1]),
            'language': tf.SparseTensor(values=['english'],
                                        indices=[[0, 0]],
                                        dense_shape=[1, 1]),
        }
        return features, tf.constant([[1]])

    feature_columns = [
        tf.contrib.layers.real_valued_column('age'),
        tf.contrib.layers.sparse_column_with_hash_bucket('language', 100),
    ]
    est = tf.contrib.learn.LinearClassifier(feature_columns=feature_columns)
    est.fit(input_fn=_export_input_fn, steps=100)
    # Export must complete without raising; output goes to a temp dir.
    est.export(tempfile.mkdtemp())
def testDisableCenteredBias(self):
  """Setting enable_centered_bias=False must omit the bias variable."""
  def input_fn():
    features = {
        'age': tf.constant([1]),
        'language': tf.SparseTensor(
            values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),
    }
    return features, tf.constant([[1]])

  classifier = tf.contrib.learn.LinearClassifier(
      feature_columns=[
          tf.contrib.layers.real_valued_column('age'),
          tf.contrib.layers.sparse_column_with_hash_bucket('language', 100),
      ],
      enable_centered_bias=False)
  classifier.fit(input_fn=input_fn, steps=100)
  # The centered-bias variable must not have been created.
  self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testEnableCenteredBias(self):
  """Tests that we can enable centered bias."""
  # Docstring fixed: it was copy-pasted from testDisableCenteredBias and
  # incorrectly said "disable".
  def input_fn():
    return {
        'age': tf.constant([1]),
        'language': tf.SparseTensor(values=['english'],
                                    indices=[[0, 0]],
                                    dense_shape=[1, 1])
    }, tf.constant([[1]])

  language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
  age = tf.contrib.layers.real_valued_column('age')
  classifier = tf.contrib.learn.LinearClassifier(
      feature_columns=[age, language], enable_centered_bias=True)
  classifier.fit(input_fn=input_fn, steps=100)
  # With centered bias enabled, the bias variable must exist.
  self.assertIn('centered_bias_weight', classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
  """An L1-regularized model should end up with a higher training loss."""
  def input_fn():
    features = {
        'language': tf.SparseTensor(
            values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1]),
    }
    return features, tf.constant([[1]])

  language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
  classifier_no_reg = tf.contrib.learn.LinearClassifier(
      feature_columns=[language])
  classifier_with_reg = tf.contrib.learn.LinearClassifier(
      feature_columns=[language],
      optimizer=tf.train.FtrlOptimizer(
          learning_rate=1.0, l1_regularization_strength=100.))

  # Train both models identically, then compare evaluation losses.
  loss_no_reg = classifier_no_reg.fit(
      input_fn=input_fn, steps=100).evaluate(
          input_fn=input_fn, steps=1)['loss']
  loss_with_reg = classifier_with_reg.fit(
      input_fn=input_fn, steps=100).evaluate(
          input_fn=input_fn, steps=1)['loss']

  # Strong L1 keeps the weights near zero, so the regularized fit is worse.
  self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
  """Tests that training works with missing features."""
  def input_fn():
    # The middle row (index 1) has no 'language' entry at all.
    features = {
        'language': tf.SparseTensor(
            values=['Swahili', 'turkish'],
            indices=[[0, 0], [2, 0]],
            dense_shape=[3, 1]),
    }
    return features, tf.constant([[1], [1], [1]])

  language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
  classifier = tf.contrib.learn.LinearClassifier(feature_columns=[language])
  classifier.fit(input_fn=input_fn, steps=100)
  self.assertLess(
      classifier.evaluate(input_fn=input_fn, steps=1)['loss'], 0.07)
def testSdcaOptimizerRealValuedFeatures(self):
  """LinearClassifier with SDCAOptimizer over real-valued features."""
  def input_fn():
    features = {
        'example_id': tf.constant(['1', '2']),
        'maintenance_cost': tf.constant([[500.0], [200.0]]),
        'sq_footage': tf.constant([[800.0], [600.0]]),
        'weights': tf.constant([[1.0], [1.0]]),
    }
    return features, tf.constant([[0], [1]])

  classifier = tf.contrib.learn.LinearClassifier(
      feature_columns=[
          tf.contrib.layers.real_valued_column('maintenance_cost'),
          tf.contrib.layers.real_valued_column('sq_footage'),
      ],
      weight_column_name='weights',
      optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
          example_id_column='example_id'))
  classifier.fit(input_fn=input_fn, steps=100)
  self.assertLess(
      classifier.evaluate(input_fn=input_fn, steps=1)['loss'], 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
  """SDCAOptimizer with a single multi-dimensional real-valued feature."""
  def input_fn():
    # Same data as testSdcaOptimizerRealValuedFeatures, but the two
    # 1-dimensional dense columns are fused into one 2-dimensional column.
    features = {
        'example_id': tf.constant(['1', '2']),
        'dense_feature': tf.constant([[500.0, 800.0], [200.0, 600.0]]),
    }
    return features, tf.constant([[0], [1]])

  dense_feature = tf.contrib.layers.real_valued_column(
      'dense_feature', dimension=2)
  classifier = tf.contrib.learn.LinearClassifier(
      feature_columns=[dense_feature],
      optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
          example_id_column='example_id'))
  classifier.fit(input_fn=input_fn, steps=100)
  self.assertLess(
      classifier.evaluate(input_fn=input_fn, steps=1)['loss'], 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
  """LinearClassifier with SDCAOptimizer and bucketized features."""
  def input_fn():
    features = {
        'example_id': tf.constant(['1', '2', '3']),
        'price': tf.constant([[600.0], [1000.0], [400.0]]),
        'sq_footage': tf.constant([[1000.0], [600.0], [700.0]]),
        'weights': tf.constant([[1.0], [1.0], [1.0]]),
    }
    return features, tf.constant([[1], [0], [1]])

  price_bucket = tf.contrib.layers.bucketized_column(
      tf.contrib.layers.real_valued_column('price'),
      boundaries=[500.0, 700.0])
  sq_footage_bucket = tf.contrib.layers.bucketized_column(
      tf.contrib.layers.real_valued_column('sq_footage'),
      boundaries=[650.0])
  classifier = tf.contrib.learn.LinearClassifier(
      feature_columns=[price_bucket, sq_footage_bucket],
      weight_column_name='weights',
      optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
          example_id_column='example_id',
          symmetric_l2_regularization=1.0))
  classifier.fit(input_fn=input_fn, steps=50)
  scores = classifier.evaluate(input_fn=input_fn, steps=1)
  self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
  """LinearClassifier with SDCAOptimizer and sparse features."""
  def input_fn():
    features = {
        'example_id': tf.constant(['1', '2', '3']),
        'price': tf.constant([[0.4], [0.6], [0.3]]),
        'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
                                   indices=[[0, 0], [1, 3], [2, 1]],
                                   dense_shape=[3, 5]),
        'weights': tf.constant([[1.0], [1.0], [1.0]]),
    }
    return features, tf.constant([[1], [0], [1]])

  price = tf.contrib.layers.real_valued_column('price')
  country = tf.contrib.layers.sparse_column_with_hash_bucket(
      'country', hash_bucket_size=5)
  classifier = tf.contrib.learn.LinearClassifier(
      feature_columns=[price, country],
      weight_column_name='weights',
      optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
          example_id_column='example_id'))
  classifier.fit(input_fn=input_fn, steps=50)
  self.assertGreater(
      classifier.evaluate(input_fn=input_fn, steps=1)['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
  """LinearClassifier with SDCAOptimizer and weighted sparse features."""
  def input_fn():
    # 'price' supplies per-entry weights for the 'country' sparse column.
    features = {
        'example_id': tf.constant(['1', '2', '3']),
        'price': tf.SparseTensor(values=[2., 3., 1.],
                                 indices=[[0, 0], [1, 0], [2, 0]],
                                 dense_shape=[3, 5]),
        'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
                                   indices=[[0, 0], [1, 0], [2, 0]],
                                   dense_shape=[3, 5]),
    }
    return features, tf.constant([[1], [0], [1]])

  country = tf.contrib.layers.sparse_column_with_hash_bucket(
      'country', hash_bucket_size=5)
  country_weighted_by_price = tf.contrib.layers.weighted_sparse_column(
      country, 'price')
  classifier = tf.contrib.learn.LinearClassifier(
      feature_columns=[country_weighted_by_price],
      optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
          example_id_column='example_id'))
  classifier.fit(input_fn=input_fn, steps=50)
  self.assertGreater(
      classifier.evaluate(input_fn=input_fn, steps=1)['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
  """LinearClassifier with SDCAOptimizer and crossed features."""
  def input_fn():
    features = {
        'example_id': tf.constant(['1', '2', '3']),
        'language': tf.SparseTensor(values=['english', 'italian', 'spanish'],
                                    indices=[[0, 0], [1, 0], [2, 0]],
                                    dense_shape=[3, 1]),
        'country': tf.SparseTensor(values=['US', 'IT', 'MX'],
                                   indices=[[0, 0], [1, 0], [2, 0]],
                                   dense_shape=[3, 1]),
    }
    return features, tf.constant([[0], [0], [1]])

  language = tf.contrib.layers.sparse_column_with_hash_bucket(
      'language', hash_bucket_size=5)
  country = tf.contrib.layers.sparse_column_with_hash_bucket(
      'country', hash_bucket_size=5)
  # The model sees only the language x country cross, not the raw columns.
  country_language = tf.contrib.layers.crossed_column(
      [language, country], hash_bucket_size=10)
  classifier = tf.contrib.learn.LinearClassifier(
      feature_columns=[country_language],
      optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
          example_id_column='example_id'))
  classifier.fit(input_fn=input_fn, steps=10)
  self.assertGreater(
      classifier.evaluate(input_fn=input_fn, steps=1)['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
  """LinearClassifier with SDCAOptimizer and a mix of feature types."""
  def input_fn():
    features = {
        'example_id': tf.constant(['1', '2', '3']),
        'price': tf.constant([[0.6], [0.8], [0.3]]),
        'sq_footage': tf.constant([[900.0], [700.0], [600.0]]),
        'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
                                   indices=[[0, 0], [1, 3], [2, 1]],
                                   dense_shape=[3, 5]),
        'weights': tf.constant([[3.0], [1.0], [1.0]]),
    }
    return features, tf.constant([[1], [0], [1]])

  price = tf.contrib.layers.real_valued_column('price')
  sq_footage_bucket = tf.contrib.layers.bucketized_column(
      tf.contrib.layers.real_valued_column('sq_footage'),
      boundaries=[650.0, 800.0])
  country = tf.contrib.layers.sparse_column_with_hash_bucket(
      'country', hash_bucket_size=5)
  sq_footage_country = tf.contrib.layers.crossed_column(
      [sq_footage_bucket, country], hash_bucket_size=10)
  classifier = tf.contrib.learn.LinearClassifier(
      feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
      weight_column_name='weights',
      optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
          example_id_column='example_id'))
  classifier.fit(input_fn=input_fn, steps=50)
  self.assertGreater(
      classifier.evaluate(input_fn=input_fn, steps=1)['accuracy'], 0.9)
def testEval(self):
  """Evaluation runs after fit without raising."""
  def input_fn():
    features = {
        'age': tf.constant([[1], [2]]),
        'language': tf.SparseTensor(values=['greek', 'chinese'],
                                    indices=[[0, 0], [1, 0]],
                                    dense_shape=[2, 1]),
    }
    return features, tf.constant([[1], [0]])

  classifier = tf.contrib.learn.LinearClassifier(
      feature_columns=[
          tf.contrib.layers.real_valued_column('age'),
          tf.contrib.layers.sparse_column_with_hash_bucket('language', 100),
      ])
  # Evaluate on the trained model; only absence of errors is checked.
  classifier.fit(input_fn=input_fn, steps=100)
  classifier.evaluate(input_fn=input_fn, steps=1)
  # TODO(ispir): Enable accuracy check after resolving the randomness issue.
  # self.assertLess(evaluated_values['loss/mean'], 0.3)
  # self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(tf.test.TestCase):
  """Tests for tf.contrib.learn.LinearRegressor."""

  def testEstimatorContract(self):
    """LinearRegressor satisfies the generic Estimator contract."""
    estimator_test_utils.assert_estimator_contract(
        self, tf.contrib.learn.LinearRegressor)
def testRegression(self):
  """Tests that loss goes down with training."""
  def input_fn():
    return {
        'age': tf.constant([1]),
        'language': tf.SparseTensor(values=['english'],
                                    indices=[[0, 0]],
                                    dense_shape=[1, 1])
    }, tf.constant([[10.]])

  language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
  age = tf.contrib.layers.real_valued_column('age')
  # Renamed from 'classifier': this estimator is a regressor, matching the
  # naming used throughout the rest of this test class.
  regressor = tf.contrib.learn.LinearRegressor(
      feature_columns=[age, language])
  regressor.fit(input_fn=input_fn, steps=100)
  loss1 = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
  regressor.fit(input_fn=input_fn, steps=200)
  loss2 = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
  # More training must reduce the loss, and the final fit should be decent.
  self.assertLess(loss2, loss1)
  self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
  """Tests regression using matrix data as input."""
  iris_fn = test_data.iris_input_multiclass_fn
  feature_columns = [
      tf.contrib.layers.real_valued_column('feature', dimension=4)]
  regressor = tf.contrib.learn.LinearRegressor(
      feature_columns=feature_columns,
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  # NOTE(review): labels here are numeric class ids from the iris set; the
  # regressor fits them as continuous targets — confirm this is intended.
  regressor.fit(input_fn=iris_fn, steps=100)
  self.assertLess(
      regressor.evaluate(input_fn=iris_fn, steps=1)['loss'], 0.2)
def testRegression_TensorData(self):
  """Tests regression using tensor data as input."""
  def _input_fn(num_epochs=None):
    features = {
        'age': tf.train.limit_epochs(
            tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
        'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                    indices=[[0, 0], [0, 1], [2, 0]],
                                    dense_shape=[3, 2]),
    }
    return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)

  feature_columns = [
      tf.contrib.layers.sparse_column_with_hash_bucket(
          'language', hash_bucket_size=20),
      tf.contrib.layers.real_valued_column('age'),
  ]
  regressor = tf.contrib.learn.LinearRegressor(
      feature_columns=feature_columns,
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn, steps=100)
  self.assertLess(
      regressor.evaluate(input_fn=_input_fn, steps=1)['loss'], 0.2)
def testLoss(self):
  """Tests loss calculation."""
  def _train_input_fn():
    # Four rows of x=1: one label is 1, three are 0, so the optimal
    # constant prediction is y = 0.25.
    features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32)}
    return features, tf.constant([[1.], [0.], [0.], [0.]])

  regressor = tf.contrib.learn.LinearRegressor(
      feature_columns=[tf.contrib.layers.real_valued_column('x')],
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_train_input_fn, steps=100)
  scores = regressor.evaluate(input_fn=_train_input_fn, steps=1)
  # Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
  self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
  """Tests loss calculation with weights."""
  def _input_fn_train():
    # Four equally weighted rows: one (y=x), three (y=Not(x)); the model
    # should converge to predicting y = 0.25.
    features = {
        'x': tf.ones(shape=[4, 1], dtype=tf.float32),
        'w': tf.constant([[1.], [1.], [1.], [1.]]),
    }
    return features, tf.constant([[1.], [0.], [0.], [0.]])

  def _input_fn_eval():
    # Same rows, but the first one carries weight 7 at evaluation time.
    features = {
        'x': tf.ones(shape=[4, 1], dtype=tf.float32),
        'w': tf.constant([[7.], [1.], [1.], [1.]]),
    }
    return features, tf.constant([[1.], [0.], [0.], [0.]])

  regressor = tf.contrib.learn.LinearRegressor(
      weight_column_name='w',
      feature_columns=[tf.contrib.layers.real_valued_column('x')],
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn_train, steps=100)
  scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
  # Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
  self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
  """Tests training with given weight column."""
  def _input_fn_train():
    # Four rows: one (y=x), three (y=Not(x)). The first row's much larger
    # weight should make the model fit (y=x).
    features = {
        'x': tf.ones(shape=[4, 1], dtype=tf.float32),
        'w': tf.constant([[100.], [3.], [2.], [2.]]),
    }
    return features, tf.constant([[1.], [0.], [0.], [0.]])

  def _input_fn_eval():
    # Four rows, all (y=x), equally weighted.
    features = {
        'x': tf.ones(shape=[4, 1], dtype=tf.float32),
        'w': tf.constant([[1.], [1.], [1.], [1.]]),
    }
    return features, tf.constant([[1.], [1.], [1.], [1.]])

  regressor = tf.contrib.learn.LinearRegressor(
      weight_column_name='w',
      feature_columns=[tf.contrib.layers.real_valued_column('x')],
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn_train, steps=100)
  scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
  # Having learned (y = x), loss on the all-ones labels is near zero.
  self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
  """Tests predict method with as_iterable=False."""
  labels = [1.0, 0., 0.2]

  def _input_fn(num_epochs=None):
    features = {
        'age': tf.train.limit_epochs(
            tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
        'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                    indices=[[0, 0], [0, 1], [2, 0]],
                                    dense_shape=[3, 2]),
    }
    return features, tf.constant(labels, dtype=tf.float32)

  feature_columns = [
      tf.contrib.layers.sparse_column_with_hash_bucket(
          'language', hash_bucket_size=20),
      tf.contrib.layers.real_valued_column('age'),
  ]
  regressor = tf.contrib.learn.LinearRegressor(
      feature_columns=feature_columns,
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn, steps=100)
  self.assertLess(
      regressor.evaluate(input_fn=_input_fn, steps=1)['loss'], 0.1)
  # as_iterable=False returns the whole prediction array at once.
  predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
  self.assertAllClose(labels, predictions, atol=0.1)
def testPredict_AsIterable(self):
  """Tests predict method with as_iterable=True."""
  labels = [1.0, 0., 0.2]

  def _input_fn(num_epochs=None):
    features = {
        'age': tf.train.limit_epochs(
            tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
        'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                    indices=[[0, 0], [0, 1], [2, 0]],
                                    dense_shape=[3, 2]),
    }
    return features, tf.constant(labels, dtype=tf.float32)

  feature_columns = [
      tf.contrib.layers.sparse_column_with_hash_bucket(
          'language', hash_bucket_size=20),
      tf.contrib.layers.real_valued_column('age'),
  ]
  regressor = tf.contrib.learn.LinearRegressor(
      feature_columns=feature_columns,
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn, steps=100)
  self.assertLess(
      regressor.evaluate(input_fn=_input_fn, steps=1)['loss'], 0.1)
  # Bound num_epochs so the prediction iterator terminates.
  predict_input_fn = functools.partial(_input_fn, num_epochs=1)
  predictions = list(
      regressor.predict(input_fn=predict_input_fn, as_iterable=True))
  self.assertAllClose(labels, predictions, atol=0.1)
def testCustomMetrics(self):
  """Tests custom evaluation metrics."""
  def _input_fn(num_epochs=None):
    # Four rows: one (y=x), three (y=Not(x)).
    features = {'x': tf.train.limit_epochs(
        tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
    return features, tf.constant([[1.], [0.], [0.], [0.]])

  def _my_metric_op(predictions, labels):
    return tf.reduce_sum(tf.mul(predictions, labels))

  regressor = tf.contrib.learn.LinearRegressor(
      feature_columns=[tf.contrib.layers.real_valued_column('x')],
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn, steps=100)
  scores = regressor.evaluate(
      input_fn=_input_fn,
      steps=1,
      metrics={
          'my_error': MetricSpec(
              metric_fn=tf.contrib.metrics.streaming_mean_squared_error,
              prediction_key='scores'),
          'my_metric': MetricSpec(metric_fn=_my_metric_op,
                                  prediction_key='scores')
      })
  self.assertIn('loss', scores)
  self.assertIn('my_error', scores)
  self.assertIn('my_metric', scores)
  predict_input_fn = functools.partial(_input_fn, num_epochs=1)
  predictions = np.array(list(regressor.predict(input_fn=predict_input_fn)))
  # The custom streaming MSE must agree with sklearn's MSE.
  self.assertAlmostEqual(
      _sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
      scores['my_error'])

  # A prediction_key the model does not produce raises KeyError.
  with self.assertRaisesRegexp(KeyError, 'bad_type'):
    regressor.evaluate(
        input_fn=_input_fn,
        steps=1,
        metrics={
            'bad_name': MetricSpec(
                metric_fn=tf.contrib.metrics.streaming_auc,
                prediction_key='bad_type')})

  # A tuple key whose 2nd element is not "scores" raises KeyError.
  with self.assertRaises(KeyError):
    regressor.evaluate(
        input_fn=_input_fn,
        steps=1,
        metrics={('my_error', 'predictions'
                 ): tf.contrib.metrics.streaming_mean_squared_error})

  # A tuple key that does not have exactly 2 elements raises ValueError.
  with self.assertRaises(ValueError):
    regressor.evaluate(
        input_fn=_input_fn,
        steps=1,
        metrics={
            ('bad_length_name', 'scores', 'bad_length'):
                tf.contrib.metrics.streaming_mean_squared_error
        })
def testTrainSaveLoad(self):
  """A trained model can be saved and reloaded with identical predictions."""
  def _input_fn(num_epochs=None):
    features = {
        'age': tf.train.limit_epochs(
            tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
        'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                    indices=[[0, 0], [0, 1], [2, 0]],
                                    dense_shape=[3, 2]),
    }
    return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)

  feature_columns = [
      tf.contrib.layers.sparse_column_with_hash_bucket(
          'language', hash_bucket_size=20),
      tf.contrib.layers.real_valued_column('age'),
  ]
  model_dir = tempfile.mkdtemp()
  regressor = tf.contrib.learn.LinearRegressor(
      model_dir=model_dir,
      feature_columns=feature_columns,
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn, steps=100)
  predict_input_fn = functools.partial(_input_fn, num_epochs=1)
  predictions = list(regressor.predict(input_fn=predict_input_fn))
  del regressor

  # A fresh estimator pointed at the same model_dir restores the weights.
  regressor2 = tf.contrib.learn.LinearRegressor(
      model_dir=model_dir,
      feature_columns=feature_columns)
  predictions2 = list(regressor2.predict(input_fn=predict_input_fn))
  self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
  """Tests training with partitioned variables."""
  def _input_fn(num_epochs=None):
    features = {
        'age': tf.train.limit_epochs(
            tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
        'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                    indices=[[0, 0], [0, 1], [2, 0]],
                                    dense_shape=[3, 2]),
    }
    return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)

  feature_columns = [
      # The given hash_bucket_size results in variables larger than the
      # default min_slice_size attribute, so the variables are partitioned.
      tf.contrib.layers.sparse_column_with_hash_bucket(
          'language', hash_bucket_size=2e7),
      tf.contrib.layers.real_valued_column('age'),
  ]

  # Fake a two-PS cluster via TF_CONFIG so the partitioner kicks in.
  tf_config = {
      'cluster': {
          tf.contrib.learn.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
      }
  }
  with tf.test.mock.patch.dict('os.environ',
                               {'TF_CONFIG': json.dumps(tf_config)}):
    config = tf.contrib.learn.RunConfig(tf_random_seed=1)
    # Because we did not start a distributed cluster, we need to pass an
    # empty ClusterSpec, otherwise the device_setter will look for
    # distributed jobs, such as "/job:ps" which are not present.
    config._cluster_spec = tf.train.ClusterSpec({})
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=feature_columns,
        config=config)
    regressor.fit(input_fn=_input_fn, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
  """Tests that we can disable centered bias."""
  def _input_fn(num_epochs=None):
    features = {
        'age': tf.train.limit_epochs(
            tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
        'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
                                    indices=[[0, 0], [0, 1], [2, 0]],
                                    dense_shape=[3, 2]),
    }
    return features, tf.constant([1.0, 0., 0.2], dtype=tf.float32)

  feature_columns = [
      tf.contrib.layers.sparse_column_with_hash_bucket(
          'language', hash_bucket_size=20),
      tf.contrib.layers.real_valued_column('age'),
  ]
  regressor = tf.contrib.learn.LinearRegressor(
      feature_columns=feature_columns,
      enable_centered_bias=False,
      config=tf.contrib.learn.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_input_fn, steps=100)
  # Training still converges without the centered-bias variable.
  self.assertLess(
      regressor.evaluate(input_fn=_input_fn, steps=1)['loss'], 0.1)
def testRecoverWeights(self):
  """The regressor recovers the generating weights of synthetic data."""
  rng = np.random.RandomState(67)
  n = 1000
  n_weights = 10
  bias = 2
  # y = x . w + noise + bias
  x = rng.uniform(-1, 1, (n, n_weights))
  weights = 10 * rng.randn(n_weights)
  y = np.dot(x, weights)
  y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)

  feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(x)
  regressor = tf.contrib.learn.LinearRegressor(
      feature_columns=feature_columns,
      optimizer=tf.train.FtrlOptimizer(learning_rate=0.8))
  regressor.fit(x, y, batch_size=64, steps=2000)
  # Have to flatten weights since they come in (x, 1) shape.
  self.assertAllClose(weights, regressor.weights_.flatten(), rtol=1)
  # TODO(ispir): Disable centered_bias.
  # assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
  """LinearRegressor with SDCAOptimizer and real valued features."""
  x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
  weights = [[3.0], [-1.2], [0.5]]
  y = np.dot(x, weights)

  def input_fn():
    return {
        'example_id': tf.constant(['1', '2', '3']),
        'x': tf.constant(x),
        'weights': tf.constant([[10.0], [10.0], [10.0]])
    }, tf.constant(y)

  regressor = tf.contrib.learn.LinearRegressor(
      feature_columns=[
          tf.contrib.layers.real_valued_column('x', dimension=3)],
      weight_column_name='weights',
      optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
          example_id_column='example_id'))
  regressor.fit(input_fn=input_fn, steps=20)
  self.assertLess(
      regressor.evaluate(input_fn=input_fn, steps=1)['loss'], 0.01)
  # The learned weights should match the generating ones.
  self.assertAllClose([w[0] for w in weights],
                      regressor.weights_.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
  """LinearRegressor with SDCAOptimizer and a mix of feature types."""
  def input_fn():
    features = {
        'example_id': tf.constant(['1', '2', '3']),
        'price': tf.constant([[0.6], [0.8], [0.3]]),
        'sq_footage': tf.constant([[900.0], [700.0], [600.0]]),
        'country': tf.SparseTensor(
            values=['IT', 'US', 'GB'],
            indices=[[0, 0], [1, 3], [2, 1]],
            dense_shape=[3, 5]),
        'weights': tf.constant([[3.0], [5.0], [7.0]]),
    }
    return features, tf.constant([[1.55], [-1.25], [-3.0]])

  price = tf.contrib.layers.real_valued_column('price')
  sq_footage_bucket = tf.contrib.layers.bucketized_column(
      tf.contrib.layers.real_valued_column('sq_footage'),
      boundaries=[650.0, 800.0])
  country = tf.contrib.layers.sparse_column_with_hash_bucket(
      'country', hash_bucket_size=5)
  sq_footage_country = tf.contrib.layers.crossed_column(
      [sq_footage_bucket, country], hash_bucket_size=10)
  regressor = tf.contrib.learn.LinearRegressor(
      feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
      weight_column_name='weights',
      optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
          example_id_column='example_id', symmetric_l2_regularization=1.0))
  regressor.fit(input_fn=input_fn, steps=20)
  self.assertLess(
      regressor.evaluate(input_fn=input_fn, steps=1)['loss'], 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
  """LinearRegressor with SDCAOptimizer, sparse features and L1 reg.

  Docstring fixed: it previously said "LinearClasssifier" although this
  test exercises a regressor.
  """
  def input_fn():
    return {
        'example_id': tf.constant(['1', '2', '3']),
        'price': tf.constant([[0.4], [0.6], [0.3]]),
        'country': tf.SparseTensor(
            values=['IT', 'US', 'GB'],
            indices=[[0, 0], [1, 3], [2, 1]],
            dense_shape=[3, 5]),
        'weights': tf.constant([[10.0], [10.0], [10.0]])
    }, tf.constant([[1.4], [-0.8], [2.6]])

  price = tf.contrib.layers.real_valued_column('price')
  country = tf.contrib.layers.sparse_column_with_hash_bucket(
      'country', hash_bucket_size=5)

  def _train_regressor(optimizer):
    # Helper: train a fresh regressor with the given SDCA optimizer and
    # return its evaluation loss plus the learned weights dict.
    regressor = tf.contrib.learn.LinearRegressor(
        feature_columns=[price, country],
        weight_column_name='weights',
        optimizer=optimizer)
    regressor.fit(input_fn=input_fn, steps=20)
    loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
    return loss, regressor.weights_

  # Regressor with no L1 regularization.
  no_l1_reg_loss, no_l1_reg_weights = _train_regressor(
      tf.contrib.linear_optimizer.SDCAOptimizer(
          example_id_column='example_id'))
  # Regressor with L1 regularization.
  l1_reg_loss, l1_reg_weights = _train_regressor(
      tf.contrib.linear_optimizer.SDCAOptimizer(
          example_id_column='example_id', symmetric_l1_regularization=1.0))

  # Unregularized loss is lower when there is no L1 regularization.
  self.assertLess(no_l1_reg_loss, l1_reg_loss)
  self.assertLess(no_l1_reg_loss, 0.05)

  # But weights returned by the regressor with L1 regularization have a
  # smaller L1 norm. (A leftover debug print of each variable was removed;
  # it only cluttered test logs.)
  l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
  for var_name in sorted(l1_reg_weights):
    l1_reg_weights_norm += sum(
        np.absolute(l1_reg_weights[var_name].flatten()))
    no_l1_reg_weights_norm += sum(
        np.absolute(no_l1_reg_weights[var_name].flatten()))
  self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
  """LinearRegressor with SDCAOptimizer; validates the bias weight."""
  def input_fn():
    """Testing the bias weight when it's the only feature present.

    All of the instances in this input only have the bias feature, and a
    1/4 of the labels are positive. This means that the expected weight for
    the bias should be close to the average prediction, i.e 0.25.

    Returns:
      Training data for the test.
    """
    num_examples = 40
    # BUGFIX: the label comprehension used `i % 4 is 0` — an identity
    # comparison on ints that only works via CPython small-int caching and
    # is a SyntaxWarning on modern Python. Use `==` for value equality.
    return {
        'example_id': tf.constant([str(x + 1) for x in range(num_examples)]),
        # place_holder is an empty column which is always 0 (absent), because
        # LinearClassifier requires at least one column.
        'place_holder': tf.constant([[0.0]] * num_examples),
    }, tf.constant([[1 if i % 4 == 0 else 0] for i in range(num_examples)])

  place_holder = tf.contrib.layers.real_valued_column('place_holder')
  sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
      example_id_column='example_id')
  regressor = tf.contrib.learn.LinearRegressor(
      feature_columns=[place_holder],
      optimizer=sdca_optimizer)
  regressor.fit(input_fn=input_fn, steps=100)
  self.assertNear(regressor.get_variable_value('linear/bias_weight')[0],
                  0.25, err=0.1)
def testSdcaOptimizerBiasAndOtherColumns(self):
  """LinearRegressor with SDCAOptimizer; validates the bias weight."""
  def input_fn():
    """Testing the bias weight when there are other features present.

    1/2 of the instances in this input have feature 'a', the rest have
    feature 'b', and we expect the bias to be added to each instance as well.
    0.4 of all instances that have feature 'a' are positive, and 0.2 of all
    instances that have feature 'b' are positive. The labels in the dataset
    are ordered to appear shuffled since SDCA expects shuffled data, and
    converges faster with this pseudo-random ordering.
    If the bias was centered we would expect the weights to be:
    bias: 0.3
    a: 0.1
    b: -0.1
    Until b/29339026 is resolved, the bias gets regularized with the same
    global value for the other columns, and so the expected weights get
    shifted and are:
    bias: 0.2
    a: 0.2
    b: 0.0

    Returns:
      The test dataset.
    """
    num_examples = 200
    # Cleanup: was `half = int(num_examples/2)` with redundant int(half)
    # casts below; floor division keeps it an int directly.
    half = num_examples // 2
    return {
        'example_id': tf.constant([str(x + 1) for x in range(num_examples)]),
        'a': tf.constant([[1]] * half + [[0]] * half),
        'b': tf.constant([[0]] * half + [[1]] * half),
    }, tf.constant([[x] for x in
                    [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * (half // 10) +
                    [0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * (half // 10)])

  sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
      example_id_column='example_id')
  regressor = tf.contrib.learn.LinearRegressor(
      feature_columns=[tf.contrib.layers.real_valued_column('a'),
                       tf.contrib.layers.real_valued_column('b')],
      optimizer=sdca_optimizer)
  regressor.fit(input_fn=input_fn, steps=200)
  # TODO(b/29339026): Change the expected results to expect a centered bias.
  self.assertNear(
      regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
  self.assertNear(regressor.weights_['linear/a/weight'][0], 0.2, err=0.05)
  self.assertNear(regressor.weights_['linear/b/weight'][0], 0.0, err=0.05)
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
  """LinearRegressor with SDCAOptimizer; validates a centered bias."""
  def input_fn():
    """Testing the bias weight when there are other features present.

    1/2 of the instances in this input have feature 'a', the rest have
    feature 'b', and we expect the bias to be added to each instance as well.
    0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
    all instances that have feature 'b' have a label of -1.
    We can expect the weights to be:
    bias: 0.0
    a: 0.1
    b: -0.1

    Returns:
      The test dataset.
    """
    num_examples = 200
    # Cleanup: was `half = int(num_examples/2)` with redundant int(half)
    # casts below; floor division keeps it an int directly.
    half = num_examples // 2
    return {
        'example_id': tf.constant([str(x + 1) for x in range(num_examples)]),
        'a': tf.constant([[1]] * half + [[0]] * half),
        'b': tf.constant([[0]] * half + [[1]] * half),
    }, tf.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
                   [[-1 if x % 10 == 0 else 0] for x in range(half)])

  sdca_optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
      example_id_column='example_id')
  regressor = tf.contrib.learn.LinearRegressor(
      feature_columns=[tf.contrib.layers.real_valued_column('a'),
                       tf.contrib.layers.real_valued_column('b')],
      optimizer=sdca_optimizer)
  regressor.fit(input_fn=input_fn, steps=100)
  self.assertNear(
      regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
  self.assertNear(regressor.weights_['linear/a/weight'][0], 0.1, err=0.05)
  self.assertNear(regressor.weights_['linear/b/weight'][0], -0.1, err=0.05)
def boston_input_fn():
  """Input function yielding the Boston housing features and labels.

  Returns:
    A (features, labels) pair: a float32 [N, 13] feature tensor and a
    float32 [N, 1] target tensor.
  """
  dataset = tf.contrib.learn.datasets.load_boston()
  raw_features = tf.constant(dataset.data)
  raw_labels = tf.constant(dataset.target)
  features = tf.cast(tf.reshape(raw_features, [-1, 13]), tf.float32)
  labels = tf.cast(tf.reshape(raw_labels, [-1, 1]), tf.float32)
  return features, labels
class FeatureColumnTest(tf.test.TestCase):
  """Smoke test for inferring real-valued columns from an input_fn."""

  def testTrain(self):
    """Infers feature columns, then fits and evaluates for one step each."""
    inferred_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
        boston_input_fn)
    estimator = tf.contrib.learn.LinearRegressor(
        feature_columns=inferred_columns)
    estimator.fit(input_fn=boston_input_fn, steps=1)
    _ = estimator.evaluate(input_fn=boston_input_fn, steps=1)
# Standard TensorFlow test entry point: run all test cases in this module.
if __name__ == '__main__':
  tf.test.main()
| |
try:
import unittest.mock as mock
except ImportError:
import mock
from pytest import fixture, raises
from staticjinja import cli, make_site, Reloader
@fixture
def filename():
    """Name of a dummy template file used by the dependency tests."""
    return "test.txt"
@fixture
def template_path(tmpdir):
    """Temporary directory acting as the template search path."""
    return tmpdir.mkdir("templates")
@fixture
def build_path(tmpdir):
    """Temporary directory acting as the rendered-output path."""
    return tmpdir.mkdir("build")
@fixture
def site(template_path, build_path):
    """A Site over a populated template tree with matching contexts/rules."""
    # Flat files: one ignored (dot), one partial (underscore), two regular.
    for name, content in [
        ('.ignored1.html', 'Ignored 1'),
        ('_partial1.html', 'Partial 1'),
        ('template1.html', 'Test 1'),
        ('template2.html', 'Test 2'),
    ]:
        template_path.join(name).write(content)
    # A nested template plus static assets, each in its own subdirectory.
    template_path.mkdir('sub').join('template3.html').write('Test {{b}}')
    template_path.mkdir('static_css').join('hello.css').write(
        'a { color: blue; }'
    )
    template_path.mkdir('static_js').join('hello.js').write(
        'var a = function () {return true};'
    )
    template_path.join('favicon.ico').write('Fake favicon')
    # template2 gets a context by exact name; template3 via a regex match.
    contexts = [('template2.html', lambda t: {'a': 1}),
                ('.*template3.html', lambda: {'b': 3}), ]
    # template2 also gets a no-op custom render rule.
    rules = [('template2.html', lambda env, t, a: None), ]
    return make_site(searchpath=str(template_path),
                     outpath=str(build_path),
                     contexts=contexts,
                     rules=rules)
@fixture
def reloader(site):
    """A Reloader watching the fixture site."""
    return Reloader(site)
def test_template_names(site):
    """Static paths must not appear among the renderable template names."""
    site.staticpaths = ["static_css", "static_js", "favicon.ico"]
    expected = {'template1.html', 'template2.html', 'sub/template3.html'}
    assert set(site.template_names) == expected
def test_templates(site):
    """site.templates yields templates in template_names order."""
    expected_names = list(site.template_names)
    actual_names = [template.name for template in site.templates]
    assert actual_names == expected_names
def test_get_context(site):
    """get_context returns {} by default and the matched context otherwise."""
    cases = [
        ("template1.html", {}),
        ("template2.html", {'a': 1}),
        ("sub/template3.html", {'b': 3}),
    ]
    for name, expected in cases:
        assert site.get_context(site.get_template(name)) == expected
def test_get_rule(site):
    """get_rule raises for templates without a rule and matches otherwise."""
    with raises(ValueError):
        site.get_rule('template1.html')
    assert site.get_rule('template2.html')
def test_get_dependencies(site, filename):
    """Ignored files: no deps; partials: every template; regular: itself."""
    site.get_template = lambda x: filename
    assert site.get_dependencies("." + filename) == []
    partial_deps = list(site.get_dependencies("_" + filename))
    assert partial_deps == list(site.templates)
    assert list(site.get_dependencies(filename)) == [filename]
def test_render_template(site, build_path):
    """Rendering one template writes its output file with expected content."""
    site.render_template(site.get_template('template1.html'))
    rendered = build_path.join("template1.html")
    assert rendered.check()
    assert rendered.read() == "Test 1"
def test_render_nested_template(site, build_path):
    """Templates in subdirectories render into matching output subdirs."""
    site.render_template(site.get_template('sub/template3.html'))
    rendered = build_path.join('sub').join("template3.html")
    assert rendered.check()
    assert rendered.read() == "Test 3"
def test_render_templates(site, build_path):
    """Rendering all templates writes both flat and nested outputs."""
    site.render_templates(site.templates)
    expectations = [
        (build_path.join("template1.html"), "Test 1"),
        (build_path.join('sub').join("template3.html"), "Test 3"),
    ]
    for rendered, expected in expectations:
        assert rendered.check()
        assert rendered.read() == expected
def test_build(site):
    """site.render() renders every template exactly once, in order."""
    seen = []

    def record_render(template, context=None, filepath=None):
        seen.append(template)

    site.render_template = record_render
    site.render()
    assert seen == list(site.templates)
def test_with_reloader(reloader, site):
    """render(use_reloader=True) hands control to Reloader.watch."""
    reloader.watch_called = False

    def fake_watch(self):
        reloader.watch_called = True

    Reloader.watch = fake_watch
    site.render(use_reloader=True)
    assert reloader.watch_called
def test_should_handle(reloader, template_path):
    """Only 'modified' events on existing files should be handled."""
    existing = template_path.join("template1.html")
    fresh = template_path.join("test4.html")
    fresh.write('')
    assert reloader.should_handle("modified", str(existing))
    assert reloader.should_handle("modified", str(fresh))
    assert not reloader.should_handle("created", str(existing))
def test_event_handler(reloader, template_path):
    """A 'modified' event triggers a re-render of that template."""
    rendered = []

    def record_render(template, context=None, filepath=None):
        rendered.append(template)

    reloader.site.render_template = record_render
    modified_path = str(template_path.join("template1.html"))
    reloader.event_handler("modified", modified_path)
    assert rendered == [reloader.site.get_template("template1.html")]
def test_event_handler_static(reloader, template_path):
    """Events on static files re-copy all known static assets."""
    copied = []

    def record_copy_static(files):
        copied.extend(files)

    reloader.site.staticpaths = ["static_css"]
    reloader.site.copy_static = record_copy_static
    modified_path = str(template_path.join("static_css").join("hello.css"))
    reloader.event_handler("modified", modified_path)
    assert copied == list(reloader.site.static_names)
def test_ignored_file_is_ignored(site):
    """A leading-dot filename is ignored."""
    assert site.is_ignored('.index.html')
def test_regular_file_is_not_ignored(site):
    """A plain filename is not ignored."""
    assert not site.is_ignored('index.html')
def test_ignored_file_in_directory_is_ignored(site):
    """A file inside a dot-directory is ignored."""
    assert site.is_ignored('.bar/index.html')
def test_ignored_file_in_nested_directory_is_ignored(site):
    """A dot-directory anywhere in the path makes the file ignored."""
    assert site.is_ignored('foo/.bar/index.html')
def test_partial_file_is_partial(site):
    """A leading-underscore filename is a partial."""
    assert site.is_partial('_index.html')
def test_regular_file_is_not_partial(site):
    """A plain filename is not a partial."""
    assert not site.is_partial('index.html')
def test_partial_file_in_directory_is_partial(site):
    """A file inside an underscore-directory is a partial."""
    assert site.is_partial('_bar/index.html')
def test_partial_file_in_nested_directory_is_partial(site):
    """An underscore-directory anywhere in the path makes the file partial."""
    assert site.is_partial('foo/_bar/index.html')
@mock.patch('os.path.isdir')
@mock.patch('os.getcwd')
@mock.patch('staticjinja.cli.staticjinja.make_site')
def test_cli_srcpath(mock_make_site, mock_getcwd, mock_isdir):
    """A relative --srcpath is resolved against the current directory."""
    mock_isdir.return_value = True
    mock_getcwd.return_value = '/'
    arguments = {
        '--srcpath': 'templates',
        '--outpath': None,
        '--static': None,
        'watch': False,
    }
    cli.render(arguments)
    mock_make_site.assert_called_once_with(
        searchpath='/templates',
        outpath='/',
        staticpaths=None
    )
@mock.patch('os.path.isdir')
@mock.patch('os.getcwd')
@mock.patch('staticjinja.cli.staticjinja.make_site')
def test_cli_srcpath_default(mock_make_site, mock_getcwd, mock_isdir):
    """Omitting --srcpath defaults to ./templates under the cwd."""
    mock_isdir.return_value = True
    mock_getcwd.return_value = '/'
    arguments = {
        '--srcpath': None,
        '--outpath': None,
        '--static': None,
        'watch': False,
    }
    cli.render(arguments)
    mock_make_site.assert_called_once_with(
        searchpath='/templates',
        outpath='/',
        staticpaths=None
    )
@mock.patch('os.path.isdir')
@mock.patch('os.getcwd')
@mock.patch('staticjinja.cli.staticjinja.make_site')
def test_cli_srcpath_absolute(mock_make_site, mock_getcwd, mock_isdir):
    """An absolute --srcpath is used verbatim, not joined with the cwd."""
    mock_isdir.return_value = True
    mock_getcwd.return_value = '/'
    arguments = {
        '--srcpath': '/foo/templates',
        '--outpath': None,
        '--static': None,
        'watch': False,
    }
    cli.render(arguments)
    mock_make_site.assert_called_once_with(
        searchpath='/foo/templates',
        outpath='/',
        staticpaths=None
    )
| |
#!/usr/bin/python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for patch based image processing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import struct
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import preprocess
import utils
from models.utils import get_net
from trainer import make_estimator
FLAGS = tf.flags.FLAGS
PATCH_H_COUNT = 3
PATCH_W_COUNT = 3
PATCH_COUNT = PATCH_H_COUNT * PATCH_W_COUNT
# It's supposed to be in the root folder, which is also pwd when running, if the
# instructions in the README are followed. Hence not a flag.
PERMUTATION_PATH = 'permutations_100_max.bin'
def apply_model(image_fn,
                is_training,
                num_outputs,
                perms,
                make_signature=False):
  """Creates the patch based model output from patches representations.
  Args:
    image_fn: function returns image tensor.
    is_training: is training flag used for batch norm and drop out.
    num_outputs: number of output classes.
    perms: numpy array with shape [m, k], element range [0, PATCH_COUNT). k
      stands for the patch numbers used in a permutation. m stands for the
      number of permutations. Each permutation is used to concat the patch
      inputs [n*PATCH_COUNT, h, w, c] into tensor with shape [n*m, h, w, c*k].
    make_signature: whether to create signature for hub module.
  Returns:
    out: output tensor with shape [n*m, 1, 1, num_outputs].
  Raises:
    ValueError: An error occurred when the architecture is unknown.
  """
  # image_fn is called lazily so the hub-export path can supply a placeholder.
  images = image_fn()
  net = get_net(num_classes=FLAGS.get_flag_value('embed_dim', 1000))
  out, end_points = net(images, is_training,
                        weight_decay=FLAGS.get_flag_value('weight_decay', 1e-4))
  print(end_points)
  if not make_signature:
    # Training/eval path: concat per-patch embeddings for every permutation,
    # then classify which permutation was applied.
    out = permutate_and_concat_batch_patches(out, perms)
    out = fully_connected(out, num_outputs, is_training=is_training)
    out = tf.squeeze(out, [1, 2])
  if make_signature:
    # Export path: expose the raw backbone outputs as TF-Hub signatures.
    hub.add_signature(inputs={'image': images}, outputs=out)
    hub.add_signature(
        name='representation',
        inputs={'image': images},
        outputs=end_points)
  return out
def image_grid(images, ny, nx, padding=0):
  """Create a batch of image grids from a batch of images.
  Args:
    images: A batch of patches (B,N,H,W,C)
    ny: vertical number of images
    nx: horizontal number of images
    padding: number of zeros between images, if any.
  Returns:
    A tensor batch of image grids shaped (B,H*ny,W*nx,C), although that is a
    simplifying lie: if padding is used h/w will be different.
  """
  with tf.name_scope('grid_image'):
    if padding:
      # Zero-pad only the spatial (H, W) axes of each patch.
      padding = [padding, padding]
      images = tf.pad(images, [[0, 0], [0, 0], padding, padding, [0, 0]])
    # Row-major assembly: concat nx patches horizontally (axis -2) into each
    # row, then stack the ny rows vertically (axis -3).
    return tf.concat([
        tf.concat([images[:, y * nx + x] for x in range(nx)], axis=-2)
        for y in range(ny)], axis=-3)
def creates_estimator_model(images, labels, perms, num_classes, mode):
  """Creates EstimatorSpec for the patch based self supervised models.
  Args:
    images: images
    labels: self supervised labels (class indices)
    perms: patch permutations
    num_classes: number of different permutations
    mode: model's mode: training, eval or prediction
  Returns:
    EstimatorSpec
  """
  print(' +++ Mode: %s, images: %s, labels: %s' % (mode, images, labels))
  # Fold the patch dimension into the batch: [..., h, w, c] -> [n*p, h, w, c].
  images = tf.reshape(images, shape=[-1] + images.get_shape().as_list()[-3:])
  if mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:
    with tf.variable_scope('module'):
      image_fn = lambda: images
      logits = apply_model(
          image_fn=image_fn,
          is_training=(mode == tf.estimator.ModeKeys.TRAIN),
          num_outputs=num_classes,
          perms=perms,
          make_signature=False)
  else:
    # PREDICT: wrap the model in an exportable TF-Hub module (fed via a
    # placeholder) and return early with its logits as predictions.
    input_shape = utils.str2intlist(
        FLAGS.get_flag_value('serving_input_shape', 'None,None,None,3'))
    image_fn = lambda: tf.placeholder(  # pylint: disable=g-long-lambda
        shape=input_shape,
        dtype=tf.float32)
    apply_model_function = functools.partial(
        apply_model,
        image_fn=image_fn,
        num_outputs=num_classes,
        perms=perms,
        make_signature=True)
    # Two graph variants are exported: one with is_training=True, one False.
    tf_hub_module_spec = hub.create_module_spec(
        apply_model_function, [(utils.TAGS_IS_TRAINING, {
            'is_training': True
        }), (set(), {
            'is_training': False
        })],
        drop_collections=['summaries'])
    tf_hub_module = hub.Module(tf_hub_module_spec, trainable=False, tags=set())
    hub.register_module_for_export(tf_hub_module, export_name='module')
    logits = tf_hub_module(images)
    return make_estimator(mode, predictions=logits)
  # build loss and accuracy
  loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits)
  loss = tf.reduce_mean(loss)
  eval_metrics = (
      lambda labels, logits: {  # pylint: disable=g-long-lambda
          'accuracy': tf.metrics.accuracy(
              labels=labels, predictions=tf.argmax(logits, axis=-1))},
      [labels, logits])
  return make_estimator(mode, loss, eval_metrics, logits)
def fully_connected(inputs,
                    num_classes=100,
                    weight_decay=5e-4,
                    keep_prob=0.5,
                    is_training=True):
  """Two layers fully connected network copied from Alexnet fc7-fc8.

  Implemented as 1x1-style convolutions: the first conv uses a kernel as
  wide as the input feature map, which makes it equivalent to a dense
  layer over the whole spatial extent.
  """
  net = inputs
  # Kernel size = spatial width of the input, so the conv spans the map.
  _, _, w, _ = net.get_shape().as_list()
  kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=weight_decay)
  # fc7 equivalent: 4096-unit dense layer.
  net = tf.layers.conv2d(
      net,
      filters=4096,
      kernel_size=w,
      padding='same',
      kernel_initializer=tf.truncated_normal_initializer(0.0, 0.005),
      bias_initializer=tf.constant_initializer(0.1),
      kernel_regularizer=kernel_regularizer)
  net = tf.layers.batch_normalization(
      net, momentum=0.997, epsilon=1e-5, fused=None, training=is_training)
  net = tf.nn.relu(net)
  # Dropout only at training time (keep_prob semantics, TF1 API).
  if is_training:
    net = tf.nn.dropout(net, keep_prob=keep_prob)
  # fc8 equivalent: class logits.
  net = tf.layers.conv2d(
      net,
      filters=num_classes,
      kernel_size=1,
      padding='same',
      kernel_initializer=tf.truncated_normal_initializer(0.0, 0.005),
      bias_initializer=tf.zeros_initializer(),
      kernel_regularizer=kernel_regularizer)
  return net
def generate_patch_locations():
  """Generates relative patch locations.

  Returns:
    A tuple (perms, n): perms is an [8, 2] int array pairing every grid
    position 0..8 except the center (4) with the center, and n == 8.
  """
  center = 4
  pairs = [(position, center) for position in range(9) if position != center]
  perms = np.array(pairs)
  return perms, len(perms)
def load_permutations():
  """Loads a set of pre-defined permutations.

  The binary file starts with two little-endian int32 values (num_perms, c),
  followed by num_perms*c little-endian int32 permutation entries.
  """
  int32_size = 4
  with tf.gfile.Open(PERMUTATION_PATH, 'rb') as f:
    header = f.read(int32_size * 2)
    num_perms, c = struct.unpack('<ll', header)
    flat = [struct.unpack('<l', f.read(int32_size))[0]
            for _ in range(num_perms * c)]
  perms = np.reshape(flat, [num_perms, c])
  # The bin file used index [1,9] for permutation, updated to [0, 8] for index.
  perms = perms - 1
  assert np.min(perms) == 0 and np.max(perms) == PATCH_COUNT - 1
  return perms, num_perms
def permutate_and_concat_image_patches(patch_embeddings, perms):
  """Permutates patches from an image according to permutations.
  Args:
    patch_embeddings: input tensor with shape [PATCH_COUNT, h, w, c], where
      PATCH_COUNT is the patch number per image.
    perms: numpy array with shape [m, k], with element in range
      [0, PATCH_COUNT). Permutation is used to concat the patches.
  Returns:
    out: output tensor with shape [m, h, w, c*k].
  """
  _, h, w, c = patch_embeddings.get_shape().as_list()
  # perms may be a numpy array or a tensor; read its shape accordingly.
  if isinstance(perms, np.ndarray):
    num_perms, perm_len = perms.shape
  else:
    num_perms, perm_len = perms.get_shape().as_list()

  def permutate_patch(perm):
    # Select the k patches in permutation order, then fold the patch axis
    # into the channel axis: [k, h, w, c] -> [h, w, c, k] -> [-1, h, w, k*c].
    permed = tf.gather(patch_embeddings, perm, axis=0)
    concat_tensor = tf.transpose(permed, perm=[1, 2, 3, 0])
    concat_tensor = tf.reshape(
        concat_tensor, shape=[-1, h, w, perm_len * c])
    return concat_tensor

  # One concatenated tensor per permutation, stacked along a new axis.
  permed_patches = tf.stack([
      permutate_patch(perms[i]) for i in range(num_perms)
  ])
  return permed_patches
def permutate_and_concat_batch_patches(batch_patch_embeddings, perms):
  """Permutates patches from a mini batch according to permutations.
  Args:
    batch_patch_embeddings: input tensor with shape [n*PATCH_COUNT, h, w, c] or
      [n*PATCH_COUNT, c], where PATCH_COUNT is the patch number per image
      and n is the number of images in this mini batch.
    perms: numpy array with shape [m, k], with element in range
      [0, PATCH_COUNT). Permutation is used to concat the patches.
  Returns:
    out: output tensor with shape [n*m, h, w, c*k].
  Raises:
    ValueError: if the input is neither rank 4 nor rank 2.
  """
  print(' +++ permutate patches input: %s' % batch_patch_embeddings)
  # Hoist the repeated static-shape lookup; rank-2 inputs are treated as
  # 1x1 spatial feature maps.
  shape = batch_patch_embeddings.get_shape().as_list()
  if len(shape) == 4:
    _, h, w, c = shape
  elif len(shape) == 2:
    _, c = shape
    h, w = (1, 1)
  else:
    raise ValueError('Unexpected batch_patch_embeddings shape: %s' % shape)
  # Group the flat batch back into per-image patch sets.
  patches = tf.reshape(batch_patch_embeddings, shape=[-1, PATCH_COUNT, h, w, c])
  patches = tf.stack([
      permutate_and_concat_image_patches(patches[i], perms)
      for i in range(patches.get_shape().as_list()[0])
  ])
  patches = tf.reshape(patches, shape=[-1, h, w, perms.shape[1] * c])
  # BUG FIX: previously logged the *input* tensor while claiming "output".
  print(' +++ permutate patches output: %s' % patches)
  return patches
def get_patch_representation(
    images,
    hub_module,
    patch_preprocess='crop_patches,standardization',
    is_training=False,
    target_features=9000,
    pooling_fn=None,
    combine_patches='concat',
    signature='representation'):
  """Extracts per-patch representations from a hub module and combines them.

  Args:
    images: input images, can be full image (NHWC) or image patchs (NPHWC).
    hub_module: hub module.
    patch_preprocess: preprocess applied to the image. Note that preprocess may
      require setting parameters in the FLAGS.config file.
    is_training: is training mode.
    target_features: target feature dimension. Note that the features might
      exceed this number if there're too many channels.
    pooling_fn: pooling method applied to the features.
    combine_patches: one of {'concat', 'max_pool', 'avg_pool'}.
    signature: signature for the hub module.
  Returns:
    out: output representation tensors.
  Raises:
    ValueError: unsupported combine_patches.
  """
  if patch_preprocess:
    preprocess_fn = preprocess.get_preprocess_fn(patch_preprocess, is_training)
    images = preprocess_fn({'image': images})['image']
  assert len(images.get_shape().as_list()) == 5, 'Shape must match NPHWC.'
  _, num_of_patches, h, w, c = images.get_shape().as_list()
  # Fold the patch dimension into the batch before running the hub module.
  images = tf.reshape(images, shape=[-1, h, w, c])
  out_tensors = hub_module(
      images,
      signature=signature,
      as_dict=True)
  if combine_patches == 'concat':
    # Concatenation multiplies channels by the patch count, so budget fewer
    # features per patch to stay near target_features overall.
    target_features = target_features // num_of_patches
  if pooling_fn is not None:
    out_tensors = pooling_fn(out_tensors)
  # BUG FIX: dict.iteritems() is Python 2 only; items() works on both 2 and 3.
  # Reassigning existing keys while iterating does not resize the dict.
  for k, t in out_tensors.items():
    if len(t.get_shape().as_list()) == 2:
      # Promote rank-2 features to NHWC with 1x1 spatial dims.
      t = t[:, None, None, :]
    assert len(t.get_shape().as_list()) == 4, 'Unsupported rank %d' % len(
        t.get_shape().as_list())
    # Take patch-dimension out of batch-dimension: [NP]HWC -> NPHWC
    t = tf.reshape(t, [-1, num_of_patches] + t.get_shape().as_list()[-3:])
    if combine_patches == 'concat':
      # [N, P, H, W, C] -> [N, H, W, P*C]
      _, p, h, w, c = t.get_shape().as_list()
      out_tensors[k] = tf.reshape(
          tf.transpose(t, perm=[0, 2, 3, 4, 1]), tf.stack([-1, h, w, p * c]))
    elif combine_patches == 'max_pool':
      # Reduce max on P channel of NPHWC.
      out_tensors[k] = tf.reduce_max(t, axis=1)
    elif combine_patches == 'avg_pool':
      # Reduce mean on P channel of NPHWC.
      out_tensors[k] = tf.reduce_mean(t, axis=1)
    else:
      raise ValueError(
          'Unsupported combine patches method %s.' % combine_patches)
  return out_tensors
| |
from __future__ import unicode_literals
import logging
import sys
import types
from django import http
from django.conf import settings
from django.core import signals, urlresolvers
from django.core.exceptions import (
MiddlewareNotUsed, PermissionDenied, SuspiciousOperation,
)
from django.db import connections, transaction
from django.http.multipartparser import MultiPartParserError
from django.utils import six
from django.utils.encoding import force_text
from django.utils.module_loading import import_string
from django.views import debug
logger = logging.getLogger('django.request')
class BaseHandler(object):
    """Core request/response machinery shared by Django's concrete handlers.

    Subclasses call load_middleware() once at startup and then
    get_response(request) per request; get_response resolves the URL, runs
    the view inside the configured middleware chain, and converts
    exceptions into appropriate HTTP error responses.
    """
    # Changes that are always applied to a response (in this order).
    response_fixes = [
        http.fix_location_header,
        http.conditional_content_removal,
    ]
    def __init__(self):
        # Middleware lists are populated by load_middleware();
        # _request_middleware also serves as the "fully initialized" flag.
        self._request_middleware = None
        self._view_middleware = None
        self._template_response_middleware = None
        self._response_middleware = None
        self._exception_middleware = None
    def load_middleware(self):
        """
        Populate middleware lists from settings.MIDDLEWARE_CLASSES.
        Must be called after the environment is fixed (see __call__ in subclasses).
        """
        self._view_middleware = []
        self._template_response_middleware = []
        self._response_middleware = []
        self._exception_middleware = []
        request_middleware = []
        for middleware_path in settings.MIDDLEWARE_CLASSES:
            mw_class = import_string(middleware_path)
            try:
                mw_instance = mw_class()
            except MiddlewareNotUsed as exc:
                # A middleware opts out of the stack by raising
                # MiddlewareNotUsed from its constructor.
                if settings.DEBUG:
                    if six.text_type(exc):
                        logger.debug('MiddlewareNotUsed(%r): %s', middleware_path, exc)
                    else:
                        logger.debug('MiddlewareNotUsed: %r', middleware_path)
                continue
            # Request/view hooks run in declared order; response-phase hooks
            # are inserted at the front so they run in reverse order.
            if hasattr(mw_instance, 'process_request'):
                request_middleware.append(mw_instance.process_request)
            if hasattr(mw_instance, 'process_view'):
                self._view_middleware.append(mw_instance.process_view)
            if hasattr(mw_instance, 'process_template_response'):
                self._template_response_middleware.insert(0, mw_instance.process_template_response)
            if hasattr(mw_instance, 'process_response'):
                self._response_middleware.insert(0, mw_instance.process_response)
            if hasattr(mw_instance, 'process_exception'):
                self._exception_middleware.insert(0, mw_instance.process_exception)
        # We only assign to this when initialization is complete as it is used
        # as a flag for initialization being complete.
        self._request_middleware = request_middleware
    def make_view_atomic(self, view):
        """Wrap `view` in transaction.atomic() for each database with
        ATOMIC_REQUESTS enabled, unless the view opted out for that alias."""
        non_atomic_requests = getattr(view, '_non_atomic_requests', set())
        for db in connections.all():
            if (db.settings_dict['ATOMIC_REQUESTS']
                    and db.alias not in non_atomic_requests):
                view = transaction.atomic(using=db.alias)(view)
        return view
    def get_exception_response(self, request, resolver, status_code):
        """Call the URLconf's error handler for `status_code` (e.g.
        handler404), falling back to a 500 if that handler itself fails."""
        try:
            callback, param_dict = resolver.resolve_error_handler(status_code)
            response = callback(request, **param_dict)
        except:
            signals.got_request_exception.send(sender=self.__class__, request=request)
            response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
        return response
    def get_response(self, request):
        "Returns an HttpResponse object for the given HttpRequest"
        # Setup default url resolver for this thread, this code is outside
        # the try/except so we don't get a spurious "unbound local
        # variable" exception in the event an exception is raised before
        # resolver is set
        urlconf = settings.ROOT_URLCONF
        urlresolvers.set_urlconf(urlconf)
        resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
        try:
            response = None
            # Apply request middleware
            for middleware_method in self._request_middleware:
                response = middleware_method(request)
                if response:
                    break
            if response is None:
                if hasattr(request, 'urlconf'):
                    # Reset url resolver with a custom urlconf.
                    urlconf = request.urlconf
                    urlresolvers.set_urlconf(urlconf)
                    resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
                resolver_match = resolver.resolve(request.path_info)
                callback, callback_args, callback_kwargs = resolver_match
                request.resolver_match = resolver_match
                # Apply view middleware
                for middleware_method in self._view_middleware:
                    response = middleware_method(request, callback, callback_args, callback_kwargs)
                    if response:
                        break
            if response is None:
                # Run the view itself, wrapped for ATOMIC_REQUESTS.
                wrapped_callback = self.make_view_atomic(callback)
                try:
                    response = wrapped_callback(request, *callback_args, **callback_kwargs)
                except Exception as e:
                    # If the view raised an exception, run it through exception
                    # middleware, and if the exception middleware returns a
                    # response, use that. Otherwise, reraise the exception.
                    for middleware_method in self._exception_middleware:
                        response = middleware_method(request, e)
                        if response:
                            break
                    if response is None:
                        raise
            # Complain if the view returned None (a common error).
            if response is None:
                if isinstance(callback, types.FunctionType):  # FBV
                    view_name = callback.__name__
                else:  # CBV
                    view_name = callback.__class__.__name__ + '.__call__'
                raise ValueError("The view %s.%s didn't return an HttpResponse object. It returned None instead."
                                 % (callback.__module__, view_name))
            # If the response supports deferred rendering, apply template
            # response middleware and then render the response
            if hasattr(response, 'render') and callable(response.render):
                for middleware_method in self._template_response_middleware:
                    response = middleware_method(request, response)
                    # Complain if the template response middleware returned None (a common error).
                    if response is None:
                        raise ValueError(
                            "%s.process_template_response didn't return an "
                            "HttpResponse object. It returned None instead."
                            % (middleware_method.__self__.__class__.__name__))
                response = response.render()
        except http.Http404 as e:
            logger.warning('Not Found: %s', request.path,
                           extra={
                               'status_code': 404,
                               'request': request
                           })
            if settings.DEBUG:
                response = debug.technical_404_response(request, e)
            else:
                response = self.get_exception_response(request, resolver, 404)
        except PermissionDenied:
            logger.warning(
                'Forbidden (Permission denied): %s', request.path,
                extra={
                    'status_code': 403,
                    'request': request
                })
            response = self.get_exception_response(request, resolver, 403)
        except MultiPartParserError:
            logger.warning(
                'Bad request (Unable to parse request body): %s', request.path,
                extra={
                    'status_code': 400,
                    'request': request
                })
            response = self.get_exception_response(request, resolver, 400)
        except SuspiciousOperation as e:
            # The request logger receives events for any problematic request
            # The security logger receives events for all SuspiciousOperations
            security_logger = logging.getLogger('django.security.%s' %
                                                e.__class__.__name__)
            security_logger.error(
                force_text(e),
                extra={
                    'status_code': 400,
                    'request': request
                })
            if settings.DEBUG:
                return debug.technical_500_response(request, *sys.exc_info(), status_code=400)
            response = self.get_exception_response(request, resolver, 400)
        except SystemExit:
            # Allow sys.exit() to actually exit. See tickets #1023 and #4701
            raise
        except:  # Handle everything else.
            # Get the exception info now, in case another exception is thrown later.
            signals.got_request_exception.send(sender=self.__class__, request=request)
            response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
        try:
            # Apply response middleware, regardless of the response
            for middleware_method in self._response_middleware:
                response = middleware_method(request, response)
                # Complain if the response middleware returned None (a common error).
                if response is None:
                    raise ValueError(
                        "%s.process_response didn't return an "
                        "HttpResponse object. It returned None instead."
                        % (middleware_method.__self__.__class__.__name__))
            response = self.apply_response_fixes(request, response)
        except:  # Any exception should be gathered and handled
            signals.got_request_exception.send(sender=self.__class__, request=request)
            response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
        # Keep the request alive until the response is closed so its
        # cleanup hooks fire at the right time.
        response._closable_objects.append(request)
        return response
    def handle_uncaught_exception(self, request, resolver, exc_info):
        """
        Processing for any otherwise uncaught exceptions (those that will
        generate HTTP 500 responses). Can be overridden by subclasses who want
        customised 500 handling.
        Be *very* careful when overriding this because the error could be
        caused by anything, so assuming something like the database is always
        available would be an error.
        """
        if settings.DEBUG_PROPAGATE_EXCEPTIONS:
            raise
        logger.error('Internal Server Error: %s', request.path,
            exc_info=exc_info,
            extra={
                'status_code': 500,
                'request': request
            }
        )
        if settings.DEBUG:
            return debug.technical_500_response(request, *exc_info)
        # If Http500 handler is not installed, re-raise last exception
        if resolver.urlconf_module is None:
            six.reraise(*exc_info)
        # Return an HttpResponse that displays a friendly error message.
        callback, param_dict = resolver.resolve_error_handler(500)
        return callback(request, **param_dict)
    def apply_response_fixes(self, request, response):
        """
        Applies each of the functions in self.response_fixes to the request and
        response, modifying the response in the process. Returns the new
        response.
        """
        for func in self.response_fixes:
            response = func(request, response)
        return response
| |
# Package: events
# Date: 11th April 2010
# Author: James Mills, prologic at shortcircuit dot net dot au
"""Events
This module define the basic Event object and commmon events.
"""
class Event(object):
    """Base Event object.

    An Event carries the positional arguments (``args``) and keyword
    arguments (``kwargs``) it was created with, plus routing metadata.

    :ivar name: The name of the Event
    :ivar channel: The channel this Event is bound for
    :ivar target: The target Component this Event is bound for
    :ivar success: An optional channel to use for Event Handler success
    :ivar failure: An optional channel to use for Event Handler failure
    :ivar filter: An optional channel to use if an Event is filtered
    :ivar start: An optional channel to use before an Event starts
    :ivar end: An optional channel to use when an Event ends
    :ivar value: The future Value object used to store the result of an event

    :param args: list of arguments
    :type args: tuple
    :param kwargs: dict of keyword arguments
    :type kwargs: dict
    """

    channel = None
    target = None
    handler = None
    success = None
    failure = None
    filter = None
    start = None
    end = None
    value = None

    def __init__(self, *args, **kwargs):
        "x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
        self.args = list(args)
        self.kwargs = kwargs

    def __getstate__(self):
        # Snapshot every picklable attribute; unset ones default to None.
        keys = ("args", "kwargs", "channel", "target", "success", "failure",
                "filter", "start", "end", "value", "source")
        return dict((key, getattr(self, key, None)) for key in keys)

    @property
    def name(self):
        # The event's name is simply its class name.
        return self.__class__.__name__

    def __eq__(self, other):
        """x.__eq__(other) <==> x==other

        Two Events are equal iff they are instances of the same class and
        their channel, args and kwargs all match.
        """
        if self.__class__ is not other.__class__:
            return False
        return (self.channel == other.channel
                and self.args == other.args
                and self.kwargs == other.kwargs)

    def __repr__(self):
        "x.__repr__() <==> repr(x)"
        # A tuple channel renders as "target:channel"; None renders empty.
        if type(self.channel) is tuple:
            channel = "%s:%s" % self.channel
        else:
            channel = self.channel or ""
        return "<%s[%s] %s %s>" % (self.name, channel, self.args, self.kwargs)

    def __getitem__(self, x):
        """x.__getitem__(y) <==> x[y]

        An int index reads from ``self.args``; a str key reads from
        ``self.kwargs``; anything else raises TypeError.
        """
        if type(x) is int:
            return self.args[x]
        if type(x) is str:
            return self.kwargs[x]
        raise TypeError("Expected int or str, got %r" % type(x))

    def __setitem__(self, i, y):
        """x.__setitem__(i, y) <==> x[i] = y

        An int index writes to ``self.args``; a str key writes to
        ``self.kwargs``; anything else raises TypeError.
        """
        if type(i) is int:
            self.args[i] = y
        elif type(i) is str:
            self.kwargs[i] = y
        else:
            raise TypeError("Expected int or str, got %r" % type(i))
class Error(Event):
    """Error Event

    This Event is sent for any exceptions that occur during the execution
    of an Event Handler that is not SystemExit or KeyboardInterrupt.

    :param type: type of exception
    :type type: type
    :param value: exception object
    :type value: Exception
    :param traceback: traceback of exception
    :type traceback: traceback
    :param handler: (Optional) handler that raised the exception
    :type handler: @handler
    """
    # Error events are always dispatched on the "exception" channel.
    channel = "exception"
    def __init__(self, type, value, traceback, handler=None):
        "x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
        super(Error, self).__init__(type, value, traceback, handler)
class Success(Event):
    """Success Event

    This Event is sent when an Event Handler's execution has completed
    successfully.

    :param event: The event that succeeded
    :type event: Event
    :param handler: The handler that executed this event
    :type handler: @handler
    :param retval: The returned value of the handler
    :type retval: object
    """
    def __init__(self, event, handler, retval):
        "x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
        super(Success, self).__init__(event, handler, retval)
class Failure(Event):
    """Failure Event

    This Event is sent when an error has occurred during the execution of an
    Event Handler.

    :param event: The event that failed
    :type event: Event
    :param handler: The handler that failed
    :type handler: @handler
    :param error: A tuple containing the exception that occurred
    :type error: (etype, evalue, traceback)
    """
    def __init__(self, event, handler, error):
        "x.__init__(...) initializes x; see x.__class__.__doc__ for signature"
        super(Failure, self).__init__(event, handler, error)
class Filter(Event):
    """Filter Event

    Fired when an Event is filtered by some Event Handler.

    :param evt: The event that was filtered
    :type evt: Event

    :param handler: The handler that filtered this event
    :type handler: @handler

    :param retval: The returned value of the handler
    :type retval: object
    """

    def __init__(self, event, handler, retval):
        """Initialize with the filtered event, its handler and return value."""
        details = (event, handler, retval)
        super(Filter, self).__init__(*details)
class Start(Event):
    """Start Event

    Fired just before an Event is started.

    :param evt: The event about to start
    :type evt: Event
    """

    def __init__(self, event):
        """Initialize with the event that is about to start."""
        super(Start, self).__init__(event)
class End(Event):
    """End Event

    Fired just after an Event has ended.

    :param evt: The event that has finished
    :type evt: Event

    :param handler: The last handler that executed this event
    :type handler: @handler

    :param retval: The returned value of the last handler
    :type retval: object
    """

    def __init__(self, event, handler, retval):
        """Initialize with the finished event, its last handler and return value."""
        details = (event, handler, retval)
        super(End, self).__init__(*details)
class Started(Event):
    """Started Event

    Fired when a Component has started running.

    :param component: The component that was started
    :type component: Component or Manager

    :param mode: The mode in which the Component was started,
                 P (Process), T (Thread) or None (Main Thread / Main Process).
    :type mode: str or None
    """

    def __init__(self, component, mode):
        """Initialize with the started component and its start mode."""
        super(Started, self).__init__(component, mode)
class Stopped(Event):
    """Stopped Event

    Fired when a Component has stopped running.

    :param component: The component that has stopped
    :type component: Component or Manager
    """

    def __init__(self, component):
        """Initialize with the component that stopped."""
        super(Stopped, self).__init__(component)
class Signal(Event):
    """Signal Event

    Fired when a Component receives a signal.

    :param signal: The signal number received.
    :type signal: int

    :param stack: The interrupted stack frame.
    :type stack: object
    """

    def __init__(self, signal, stack):
        """Initialize with the signal number and interrupted stack frame."""
        super(Signal, self).__init__(signal, stack)
class Registered(Event):
    """Registered Event

    Fired when a Component has registered with another Component or
    Manager.  Only fired if the Component or Manager being registered
    with is not itself.

    :param component: The Component being registered
    :type component: Component

    :param manager: The Component or Manager being registered with
    :type manager: Component or Manager
    """

    def __init__(self, component, manager):
        """Initialize with the registered component and its new manager."""
        super(Registered, self).__init__(component, manager)
class Unregistered(Event):
    """Unregistered Event

    Fired when a Component has been unregistered from its Component
    or Manager.

    :param component: The Component being unregistered
    :type component: Component

    :param manager: The Component or Manager it was unregistered from
    :type manager: Component or Manager
    """

    def __init__(self, component, manager):
        """Initialize with the unregistered component and its former manager."""
        super(Unregistered, self).__init__(component, manager)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DdosProtectionPlansOperations(object):
    """DdosProtectionPlansOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # NOTE: this class is auto-generated (see the AutoRest header at the top
    # of this file); hand edits may be lost on regeneration.
    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client and (de)serializers are injected by the owning
        # service client; this class never constructs them itself.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def _delete_initial(
        self,
        resource_group_name, # type: str
        ddos_protection_plan_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        # Issues the raw DELETE request that starts the long-running delete;
        # polling is handled by begin_delete.
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Map well-known HTTP failures to azure-core exception types; callers
        # may extend/override this via an 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202/204 are all acceptable for an async delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore

    def begin_delete(
        self,
        resource_group_name, # type: str
        ddos_protection_plan_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified DDoS protection plan.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_protection_plan_name: The name of the DDoS protection plan.
        :type ddos_protection_plan_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                ddos_protection_plan_name=ddos_protection_plan_name,
                # Pass the raw PipelineResponse through so the poller can
                # inspect the initial status and headers.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Already consumed by the initial call; must not reach the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Delete resolves its final state via the 'location' header.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore

    def get(
        self,
        resource_group_name, # type: str
        ddos_protection_plan_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.DdosProtectionPlan"
        """Gets information about the specified DDoS protection plan.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_protection_plan_name: The name of the DDoS protection plan.
        :type ddos_protection_plan_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DdosProtectionPlan, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_06_01.models.DdosProtectionPlan
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore

    def _create_or_update_initial(
        self,
        resource_group_name, # type: str
        ddos_protection_plan_name, # type: str
        parameters, # type: "_models.DdosProtectionPlan"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.DdosProtectionPlan"
        # Issues the raw PUT request that starts the long-running
        # create-or-update; polling is handled by begin_create_or_update.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'DdosProtectionPlan')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = updated existing plan, 201 = created new plan.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore

    def begin_create_or_update(
        self,
        resource_group_name, # type: str
        ddos_protection_plan_name, # type: str
        parameters, # type: "_models.DdosProtectionPlan"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.DdosProtectionPlan"]
        """Creates or updates a DDoS protection plan.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_protection_plan_name: The name of the DDoS protection plan.
        :type ddos_protection_plan_name: str
        :param parameters: Parameters supplied to the create or update operation.
        :type parameters: ~azure.mgmt.network.v2019_06_01.models.DdosProtectionPlan
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either DdosProtectionPlan or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_06_01.models.DdosProtectionPlan]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                ddos_protection_plan_name=ddos_protection_plan_name,
                parameters=parameters,
                # Pass the raw PipelineResponse through so the poller can
                # inspect the initial status and headers.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Already consumed by the initial call; must not reach the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Create/update resolves its final state via the Azure-AsyncOperation header.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore

    def _update_tags_initial(
        self,
        resource_group_name, # type: str
        ddos_protection_plan_name, # type: str
        parameters, # type: "_models.TagsObject"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.DdosProtectionPlan"
        # Issues the raw PATCH request that starts the long-running tags
        # update; polling is handled by begin_update_tags.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_tags_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore

    def begin_update_tags(
        self,
        resource_group_name, # type: str
        ddos_protection_plan_name, # type: str
        parameters, # type: "_models.TagsObject"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.DdosProtectionPlan"]
        """Update a DDoS protection plan tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_protection_plan_name: The name of the DDoS protection plan.
        :type ddos_protection_plan_name: str
        :param parameters: Parameters supplied to the update DDoS protection plan resource tags.
        :type parameters: ~azure.mgmt.network.v2019_06_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either DdosProtectionPlan or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_06_01.models.DdosProtectionPlan]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            raw_result = self._update_tags_initial(
                resource_group_name=resource_group_name,
                ddos_protection_plan_name=ddos_protection_plan_name,
                parameters=parameters,
                # Pass the raw PipelineResponse through so the poller can
                # inspect the initial status and headers.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Already consumed by the initial call; must not reach the poller.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore

    def list(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.DdosProtectionPlanListResult"]
        """Gets all DDoS protection plans in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_06_01.models.DdosProtectionPlanListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build either the first-page request or a follow-up request to
            # the server-provided next_link.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries the full query string.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore

    def list_by_resource_group(
        self,
        resource_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.DdosProtectionPlanListResult"]
        """Gets all the DDoS protection plans in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_06_01.models.DdosProtectionPlanListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build either the first-page request or a follow-up request to
            # the server-provided next_link.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries the full query string.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
| |
"""
This module is for inspecting OGR data sources and generating either
models for GeoDjango and/or mapping dictionaries for use with the
`LayerMapping` utility.
"""
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTInteger64, OFTReal, OFTString,
OFTTime,
)
from django.utils import six
from django.utils.six.moves import zip
def mapping(data_source, geom_name='geom', layer_key=0, multi_geom=False):
    """
    Given a DataSource, build and return the dictionary needed to invoke
    the LayerMapping utility.

    Keyword Arguments:
     `geom_name` => name to use for the model's geometry field.

     `layer_key` => which layer of the DataSource to inspect; an integer
        index (default 0, the first layer) or a string layer identifier.

     `multi_geom` => Boolean (default: False) - treat the geometry as a
        multi-geometry type.
    """
    if isinstance(data_source, six.string_types):
        # A path string was given -- open it as a DataSource.
        data_source = DataSource(data_source)
    elif not isinstance(data_source, DataSource):
        raise TypeError('Data source parameter must be a string or a DataSource object.')

    layer = data_source[layer_key]

    # Derive a model field name from each OGR field in the layer.
    result = {}
    for field in layer.fields:
        model_field = field.lower()
        if model_field.endswith('_'):
            # A trailing underscore would collide with Django's lookup
            # syntax, so pad the name out.
            model_field += 'field'
        result[model_field] = field

    geom_type = layer.geom_type
    if multi_geom:
        geom_type.to_multi()
    result[geom_name] = str(geom_type).upper()
    return result
def ogrinspect(*args, **kwargs):
"""
Given a data source (either a string or a DataSource object) and a string
model name this function will generate a GeoDjango model.
Usage:
>>> from django.contrib.gis.utils import ogrinspect
>>> ogrinspect('/path/to/shapefile.shp','NewModel')
...will print the model definition to stdout
or put this in a python script and use to redirect the output to a new
model like:
$ python generate_model.py > myapp/models.py
# generate_model.py
from django.contrib.gis.utils import ogrinspect
shp_file = 'data/mapping_hacks/world_borders.shp'
model_name = 'WorldBorders'
print(ogrinspect(shp_file, model_name, multi_geom=True, srid=4326,
geom_name='shapes', blank=True))
Required Arguments
`datasource` => string or DataSource object to file pointer
`model name` => string of name of new model class to create
Optional Keyword Arguments
`geom_name` => For specifying the model name for the Geometry Field.
Otherwise will default to `geom`
`layer_key` => The key for specifying which layer in the DataSource to use;
defaults to 0 (the first layer). May be an integer index or a string
identifier for the layer.
`srid` => The SRID to use for the Geometry Field. If it can be determined,
the SRID of the datasource is used.
`multi_geom` => Boolean (default: False) - specify as multigeometry.
`name_field` => String - specifies a field name to return for the
`__unicode__`/`__str__` function (which will be generated if specified).
`imports` => Boolean (default: True) - set to False to omit the
`from django.contrib.gis.db import models` code from the
autogenerated models thus avoiding duplicated imports when building
more than one model by batching ogrinspect()
`decimal` => Boolean or sequence (default: False). When set to True
all generated model fields corresponding to the `OFTReal` type will
be `DecimalField` instead of `FloatField`. A sequence of specific
field names to generate as `DecimalField` may also be used.
`blank` => Boolean or sequence (default: False). When set to True all
generated model fields will have `blank=True`. If the user wants to
give specific fields to have blank, then a list/tuple of OGR field
names may be used.
`null` => Boolean (default: False) - When set to True all generated
model fields will have `null=True`. If the user wants to specify
give specific fields to have null, then a list/tuple of OGR field
names may be used.
Note: This routine calls the _ogrinspect() helper to do the heavy lifting.
"""
return '\n'.join(s for s in _ogrinspect(*args, **kwargs))
def _ogrinspect(data_source, model_name, geom_name='geom', layer_key=0, srid=None,
                multi_geom=False, name_field=None, imports=True,
                decimal=False, blank=False, null=False):
    """
    Helper routine for `ogrinspect` that generates GeoDjango models corresponding
    to the given data source. See the `ogrinspect` docstring for more details.

    This is a generator: the model definition is yielded one line at a time.
    """
    # Getting the DataSource
    if isinstance(data_source, six.string_types):
        data_source = DataSource(data_source)
    elif isinstance(data_source, DataSource):
        pass
    else:
        raise TypeError('Data source parameter must be a string or a DataSource object.')

    # Getting the layer corresponding to the layer key and getting
    # a string listing of all OGR fields in the Layer.
    layer = data_source[layer_key]
    ogr_fields = layer.fields

    # Creating lists from the `null`, `blank`, and `decimal`
    # keyword arguments.
    def process_kwarg(kwarg):
        # A sequence selects specific fields, a truthy scalar selects all
        # fields, and anything falsy selects none.
        if isinstance(kwarg, (list, tuple)):
            return [s.lower() for s in kwarg]
        elif kwarg:
            return [s.lower() for s in ogr_fields]
        else:
            return []
    null_fields = process_kwarg(null)
    blank_fields = process_kwarg(blank)
    decimal_fields = process_kwarg(decimal)

    # Gets the `null` and `blank` keywords for the given field name.
    def get_kwargs_str(field_name):
        kwlist = []
        if field_name.lower() in null_fields:
            kwlist.append('null=True')
        if field_name.lower() in blank_fields:
            kwlist.append('blank=True')
        if kwlist:
            # The leading ', ' lets the result be appended directly after a
            # preceding keyword argument; callers slice it off ([2:]) when
            # it would be the only argument.
            return ', ' + ', '.join(kwlist)
        else:
            return ''

    # For those wishing to disable the imports.
    if imports:
        yield '# This is an auto-generated Django model module created by ogrinspect.'
        yield 'from django.contrib.gis.db import models'
        yield ''

    yield 'class %s(models.Model):' % model_name

    for field_name, width, precision, field_type in zip(
            ogr_fields, layer.field_widths, layer.field_precisions, layer.field_types):
        # The model field name.
        mfield = field_name.lower()
        if mfield[-1:] == '_':
            mfield += 'field'

        # Getting the keyword args string.
        kwargs_str = get_kwargs_str(field_name)

        if field_type is OFTReal:
            # By default OFTReals are mapped to `FloatField`, however, they
            # may also be mapped to `DecimalField` if specified in the
            # `decimal` keyword.
            if field_name.lower() in decimal_fields:
                yield '    %s = models.DecimalField(max_digits=%d, decimal_places=%d%s)' % (
                    mfield, width, precision, kwargs_str
                )
            else:
                yield '    %s = models.FloatField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTInteger:
            yield '    %s = models.IntegerField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTInteger64:
            yield '    %s = models.BigIntegerField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTString:
            yield '    %s = models.CharField(max_length=%s%s)' % (mfield, width, kwargs_str)
        elif field_type is OFTDate:
            yield '    %s = models.DateField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTDateTime:
            yield '    %s = models.DateTimeField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTTime:
            yield '    %s = models.TimeField(%s)' % (mfield, kwargs_str[2:])
        else:
            raise TypeError('Unknown field type %s in %s' % (field_type, mfield))

    # TODO: Autodetection of multigeometry types (see #7218).
    gtype = layer.geom_type
    if multi_geom:
        gtype.to_multi()
    geom_field = gtype.django

    # Setting up the SRID keyword string.
    if srid is None:
        if layer.srs is None:
            # No spatial reference on the layer; use the "undefined" SRID.
            srid_str = 'srid=-1'
        else:
            srid = layer.srs.srid
            if srid is None:
                srid_str = 'srid=-1'
            elif srid == 4326:
                # WGS84 is already the default.
                srid_str = ''
            else:
                srid_str = 'srid=%s' % srid
    else:
        srid_str = 'srid=%s' % srid

    yield '    %s = models.%s(%s)' % (geom_name, geom_field, srid_str)
    yield '    objects = models.GeoManager()'

    if name_field:
        yield ''
        yield '    def __%s__(self): return self.%s' % (
            'str' if six.PY3 else 'unicode', name_field)
| |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from ambari_commons import OSConst
from resource_management.core.resources.system import Directory, Execute, File
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.resources.template_config import TemplateConfig
from resource_management.libraries.functions.format import format
from resource_management.core.source import Template, InlineTemplate
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def hbase(name=None, action=None):
  """
  Lay down the AMS (Ambari Metrics Service) HBase configuration on a
  Windows host: conf/tmp/jar directories, hbase-site.xml, optional
  hbase-policy.xml, metrics properties, and JAAS/regionserver templates.

  name   => component being configured ('master', 'regionserver' or
            'client'); the log directory is only created for non-clients.
  action => lifecycle action; unused in this Windows implementation.
  """
  import params
  # Configuration, temp and local-jar directories, owned by the hadoop user.
  Directory(params.hbase_conf_dir,
            owner=params.hadoop_user,
            create_parents=True
  )
  Directory(params.hbase_tmp_dir,
            create_parents=True,
            owner=params.hadoop_user
  )
  Directory(os.path.join(params.local_dir, "jars"),
            owner=params.hadoop_user,
            create_parents=True
  )
  XmlConfig("hbase-site.xml",
            conf_dir=params.hbase_conf_dir,
            configurations=params.config['configurations']['ams-hbase-site'],
            configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
            owner=params.hadoop_user
  )
  if 'ams-hbase-policy' in params.config['configurations']:
    XmlConfig("hbase-policy.xml",
              conf_dir=params.hbase_conf_dir,
              configurations=params.config['configurations']['ams-hbase-policy'],
              configuration_attributes=params.config['configuration_attributes']['ams-hbase-policy'],
              owner=params.hadoop_user
    )
  # Manually overriding ownership of file installed by hadoop package
  else:
    File(os.path.join(params.hbase_conf_dir, "hbase-policy.xml"),
         owner=params.hadoop_user
    )
  # Metrics properties
  # NOTE(review): owner here is hbase_user while every other resource in
  # this Windows variant uses hadoop_user - confirm the asymmetry is
  # intentional.
  File(os.path.join(params.hbase_conf_dir, "hadoop-metrics2-hbase.properties"),
       owner=params.hbase_user,
       content=Template("hadoop-metrics2-hbase.properties.j2")
  )
  hbase_TemplateConfig('regionservers', user=params.hadoop_user)
  if params.security_enabled:
    # Per-component JAAS config for Kerberos.
    hbase_TemplateConfig(format("hbase_{name}_jaas.conf"), user=params.hadoop_user)
  if name != "client":
    Directory(params.hbase_log_dir,
              owner=params.hadoop_user,
              create_parents=True
    )
  # Prefer the log4j properties supplied in the cluster config; otherwise
  # just fix ownership of any pre-existing file.
  if (params.hbase_log4j_props != None):
    File(os.path.join(params.hbase_conf_dir, "log4j.properties"),
         owner=params.hadoop_user,
         content=params.hbase_log4j_props
    )
  elif (os.path.exists(os.path.join(params.hbase_conf_dir, "log4j.properties"))):
    File(os.path.join(params.hbase_conf_dir, "log4j.properties"),
         owner=params.hadoop_user
    )
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def hbase(name=None  # 'master' or 'regionserver' or 'client'
          , action=None):
  """
  Lay down the AMS HBase configuration for the default (Linux) OS family:
  directories, merged hbase-site.xml (plus the security overlay when
  Kerberos is enabled), policy/env/metrics files, JAAS templates, and -
  for the master - the HBase root/staging directories.

  name   => 'master', 'regionserver' or 'client'.
  action => lifecycle action; 'stop' skips HDFS directory creation since
            HDFS is probably down during Stop All.
  """
  import params
  # Conf, tmp and local-jar directories, owned (recursively) by hbase_user.
  Directory(params.hbase_conf_dir,
            owner=params.hbase_user,
            group=params.user_group,
            create_parents=True,
            recursive_ownership=True,
  )
  Directory(params.hbase_tmp_dir,
            owner=params.hbase_user,
            cd_access="a",
            create_parents=True,
            recursive_ownership=True,
  )
  Directory(os.path.join(params.local_dir, "jars"),
            owner=params.hbase_user,
            group=params.user_group,
            cd_access="a",
            mode=0775,
            create_parents=True
  )
  # Dedicated WAL directory, when one is configured.
  if params.hbase_wal_dir:
    Directory(params.hbase_wal_dir,
              owner=params.hbase_user,
              group=params.user_group,
              cd_access="a",
              create_parents=True,
              recursive_ownership=True,
    )
  # hbase-site is the base configuration, overlaid with the security site
  # when Kerberos is enabled.
  merged_ams_hbase_site = {}
  merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-site'])
  if params.security_enabled:
    merged_ams_hbase_site.update(params.config['configurations']['ams-hbase-security-site'])
  # In embedded (non-distributed) mode, remove stale HDFS client configs so
  # they cannot shadow the local-filesystem settings.
  if not params.is_hbase_distributed:
    File(format("{hbase_conf_dir}/core-site.xml"),
         action='delete',
         owner=params.hbase_user)
    File(format("{hbase_conf_dir}/hdfs-site.xml"),
         action='delete',
         owner=params.hbase_user)
  XmlConfig("hbase-site.xml",
            conf_dir=params.hbase_conf_dir,
            configurations=merged_ams_hbase_site,
            configuration_attributes=params.config['configuration_attributes']['ams-hbase-site'],
            owner=params.hbase_user,
            group=params.user_group
  )
  # Phoenix spool file dir if not /tmp
  if not os.path.exists(params.phoenix_server_spool_dir):
    Directory(params.phoenix_server_spool_dir,
              owner=params.ams_user,
              mode=0755,
              group=params.user_group,
              cd_access="a",
              create_parents=True
    )
  pass
  if 'ams-hbase-policy' in params.config['configurations']:
    XmlConfig("hbase-policy.xml",
              conf_dir=params.hbase_conf_dir,
              configurations=params.config['configurations']['ams-hbase-policy'],
              configuration_attributes=params.config['configuration_attributes']['ams-hbase-policy'],
              owner=params.hbase_user,
              group=params.user_group
    )
  # Manually overriding ownership of file installed by hadoop package
  else:
    File(format("{params.hbase_conf_dir}/hbase-policy.xml"),
         owner=params.hbase_user,
         group=params.user_group
    )
  File(format("{hbase_conf_dir}/hbase-env.sh"),
       owner=params.hbase_user,
       content=InlineTemplate(params.hbase_env_sh_template)
  )
  # Metrics properties
  File(os.path.join(params.hbase_conf_dir, "hadoop-metrics2-hbase.properties"),
       owner=params.hbase_user,
       group=params.user_group,
       content=Template("hadoop-metrics2-hbase.properties.j2")
  )
  # hbase_TemplateConfig( params.metric_prop_file_name,
  #                       tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'
  # )
  hbase_TemplateConfig('regionservers', user=params.hbase_user)
  if params.security_enabled:
    # JAAS configurations for the component, clients and ZooKeeper.
    hbase_TemplateConfig(format("hbase_{name}_jaas.conf"), user=params.hbase_user)
    hbase_TemplateConfig(format("hbase_client_jaas.conf"), user=params.hbase_user)
    hbase_TemplateConfig(format("ams_zookeeper_jaas.conf"), user=params.hbase_user)
  if name != "client":
    # Daemons need pid and log directories.
    Directory(params.hbase_pid_dir,
              owner=params.hbase_user,
              create_parents=True,
              cd_access="a",
              mode=0755,
    )
    Directory(params.hbase_log_dir,
              owner=params.hbase_user,
              create_parents=True,
              cd_access="a",
              mode=0755,
    )
  if name == "master":
    if not params.is_local_fs_rootdir:
      # If executing Stop All, HDFS is probably down
      if action != 'stop' and not params.skip_create_hbase_root_dir:
        params.HdfsResource(params.hbase_root_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hbase_user,
                            mode=0775,
                            dfs_type=params.dfs_type
        )
        params.HdfsResource(params.hbase_staging_dir,
                            type="directory",
                            action="create_on_execute",
                            owner=params.hbase_user,
                            mode=0711,
                            dfs_type=params.dfs_type
        )
        params.HdfsResource(None, action="execute")
      if params.is_hbase_distributed:
        # Workaround for status commands not aware of operating mode
        File(format("{params.hbase_pid_dir}/distributed_mode"), action="create", mode=0644, owner=params.hbase_user)
      pass
    else:
      local_root_dir = params.hbase_root_dir
      # cut protocol name
      if local_root_dir.startswith("file://"):
        local_root_dir = local_root_dir[7:]
        # otherwise assume dir name is provided as is
      Directory(local_root_dir,
                owner=params.hbase_user,
                cd_access="a",
                create_parents=True,
                recursive_ownership=True
      )
      # Embedded mode: make sure the distributed-mode marker is gone.
      File(format("{params.hbase_pid_dir}/distributed_mode"), action="delete", owner=params.hbase_user)
  # Prefer the log4j properties supplied in the cluster config; otherwise
  # just fix ownership/permissions of any pre-existing file.
  if params.hbase_log4j_props is not None:
    File(format("{params.hbase_conf_dir}/log4j.properties"),
         mode=0644,
         group=params.user_group,
         owner=params.hbase_user,
         content=InlineTemplate(params.hbase_log4j_props)
    )
  elif os.path.exists(format("{params.hbase_conf_dir}/log4j.properties")):
    File(format("{params.hbase_conf_dir}/log4j.properties"),
         mode=0644,
         group=params.user_group,
         owner=params.hbase_user
    )
def hbase_TemplateConfig(name, tag=None, user=None):
  """Render the template for `name` into the HBase configuration directory.

  name => file name inside params.hbase_conf_dir (also selects the template).
  tag  => optional template tag passed through to TemplateConfig.
  user => owner of the rendered file.
  """
  import params
  target_path = os.path.join(params.hbase_conf_dir, name)
  TemplateConfig(target_path,
                 owner=user,
                 template_tag=tag
  )
| |
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python import brew, core, workspace
import caffe2.python.hypothesis_test_util as hu
from caffe2.python.model_helper import ModelHelper
import unittest
class TestSpatialBN(hu.HypothesisTestCase):
    """Hypothesis-driven tests for the SpatialBN (spatial batch norm) op,
    covering inference and training modes over 1-D, 2-D and 3-D inputs."""

    @given(size=st.integers(7, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(1, 3),
           seed=st.integers(0, 65535),
           order=st.sampled_from(["NCHW", "NHWC"]),
           epsilon=st.floats(min_value=1e-5, max_value=1e-2),
           inplace=st.sampled_from([True, False]),
           **hu.gcs)
    def test_spatialbn_test_mode_3d(
            self, size, input_channels, batch_size, seed, order, epsilon,
            inplace, gc, dc):
        # Inference-mode SpatialBN over 5-D (volumetric) input via CUDNN,
        # checked against a NumPy reference.
        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "mean", "var"],
            ["X" if inplace else "Y"],
            order=order,
            is_test=True,
            epsilon=epsilon,
            engine="CUDNN",
        )

        def reference_spatialbn_test(X, scale, bias, mean, var):
            # In NCHW the per-channel parameters must broadcast over the
            # batch and the three spatial dims; NHWC broadcasts on the
            # trailing channel axis without reshaping.
            if order == "NCHW":
                scale = scale[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
                bias = bias[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
                mean = mean[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
                var = var[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
            return ((X - mean) / np.sqrt(var + epsilon) * scale + bias,)

        # NOTE(review): a fixed seed (1701) is used here rather than the
        # drawn `seed` parameter - presumably deliberate pinning; confirm.
        np.random.seed(1701)
        scale = np.random.rand(input_channels).astype(np.float32) + 0.5
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.rand(batch_size, input_channels, size, size, size)\
            .astype(np.float32) - 0.5

        if order == "NHWC":
            # NCDHW -> NDHWC
            X = X.transpose(0, 2, 3, 4, 1)

        self.assertReferenceChecks(gc, op, [X, scale, bias, mean, var],
                                   reference_spatialbn_test)
        self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var], [0])

    @unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
    @given(size=st.integers(7, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(1, 3),
           seed=st.integers(0, 65535),
           order=st.sampled_from(["NCHW", "NHWC"]),
           epsilon=st.floats(min_value=1e-5, max_value=1e-2),
           inplace=st.sampled_from([True, False]),
           **hu.gcs)
    def test_spatialbn_test_mode_1d(
            self, size, input_channels, batch_size, seed, order, epsilon,
            inplace, gc, dc):
        # Inference-mode SpatialBN over 3-D (1 spatial dim) input via CUDNN.
        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "mean", "var"],
            ["X" if inplace else "Y"],
            order=order,
            is_test=True,
            epsilon=epsilon,
            engine="CUDNN",
        )

        def reference_spatialbn_test(X, scale, bias, mean, var):
            if order == "NCHW":
                scale = scale[np.newaxis, :, np.newaxis]
                bias = bias[np.newaxis, :, np.newaxis]
                mean = mean[np.newaxis, :, np.newaxis]
                var = var[np.newaxis, :, np.newaxis]
            return ((X - mean) / np.sqrt(var + epsilon) * scale + bias,)

        # NOTE(review): fixed seed 1701 used instead of the drawn `seed`.
        np.random.seed(1701)
        scale = np.random.rand(input_channels).astype(np.float32) + 0.5
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.rand(
            batch_size, input_channels, size).astype(np.float32) - 0.5

        if order == "NHWC":
            # NCW -> NWC
            X = X.swapaxes(1, 2)

        self.assertReferenceChecks(gc, op, [X, scale, bias, mean, var],
                                   reference_spatialbn_test)

    @given(size=st.integers(7, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(1, 3),
           seed=st.integers(0, 65535),
           order=st.sampled_from(["NCHW", "NHWC"]),
           epsilon=st.floats(min_value=1e-5, max_value=1e-2),
           engine=st.sampled_from(["", "CUDNN"]),
           inplace=st.sampled_from([True, False]),
           **hu.gcs)
    def test_spatialbn_test_mode(
            self, size, input_channels, batch_size, seed, order, epsilon,
            inplace, engine, gc, dc):
        # Inference-mode SpatialBN over the usual 4-D input, default and
        # CUDNN engines.
        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "mean", "var"],
            ["X" if inplace else "Y"],
            order=order,
            is_test=True,
            epsilon=epsilon,
            engine=engine
        )

        def reference_spatialbn_test(X, scale, bias, mean, var):
            if order == "NCHW":
                scale = scale[np.newaxis, :, np.newaxis, np.newaxis]
                bias = bias[np.newaxis, :, np.newaxis, np.newaxis]
                mean = mean[np.newaxis, :, np.newaxis, np.newaxis]
                var = var[np.newaxis, :, np.newaxis, np.newaxis]
            return ((X - mean) / np.sqrt(var + epsilon) * scale + bias,)

        # NOTE(review): fixed seed 1701 used instead of the drawn `seed`.
        np.random.seed(1701)
        scale = np.random.rand(input_channels).astype(np.float32) + 0.5
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.rand(
            batch_size, input_channels, size, size).astype(np.float32) - 0.5

        if order == "NHWC":
            # NCHW -> NHWC
            X = X.swapaxes(1, 2).swapaxes(2, 3)

        self.assertReferenceChecks(gc, op, [X, scale, bias, mean, var],
                                   reference_spatialbn_test)
        self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var], [0])

    @given(size=st.integers(7, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(1, 3),
           seed=st.integers(0, 65535),
           order=st.sampled_from(["NCHW", "NHWC"]),
           epsilon=st.floats(1e-5, 1e-2),
           engine=st.sampled_from(["", "CUDNN"]),
           inplace=st.sampled_from([True, False]),
           **hu.gcs)
    def test_spatialbn_train_mode(
            self, size, input_channels, batch_size, seed, order, epsilon,
            inplace, engine, gc, dc):
        # Training mode: also checks the running/saved statistics outputs
        # for cross-device consistency (no NumPy reference here).
        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "running_mean", "running_var"],
            ["X" if inplace else "Y",
             "running_mean", "running_var", "saved_mean", "saved_var"],
            order=order,
            is_test=False,
            epsilon=epsilon,
            engine=engine,
        )
        # NOTE(review): fixed seed 1701 used instead of the drawn `seed`.
        np.random.seed(1701)
        scale = np.random.rand(input_channels).astype(np.float32) + 0.5
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.rand(
            batch_size, input_channels, size, size).astype(np.float32) - 0.5

        if order == "NHWC":
            X = X.swapaxes(1, 2).swapaxes(2, 3)

        # Check all five outputs across devices.
        self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var],
                                [0, 1, 2, 3, 4])

    @given(size=st.integers(7, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(1, 3),
           seed=st.integers(0, 65535),
           order=st.sampled_from(["NCHW", "NHWC"]),
           epsilon=st.floats(min_value=1e-5, max_value=1e-2),
           engine=st.sampled_from(["", "CUDNN"]),
           **hu.gcs)
    def test_spatialbn_train_mode_gradient_check(
            self, size, input_channels, batch_size, seed, order, epsilon,
            engine, gc, dc):
        # Numerical gradient check for dX, dScale and dBias in train mode.
        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "mean", "var"],
            ["Y", "mean", "var", "saved_mean", "saved_var"],
            order=order,
            is_test=False,
            epsilon=epsilon,
            engine=engine
        )
        np.random.seed(seed)
        scale = np.random.rand(input_channels).astype(np.float32) + 0.5
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.rand(
            batch_size, input_channels, size, size).astype(np.float32) - 0.5

        if order == "NHWC":
            X = X.swapaxes(1, 2).swapaxes(2, 3)

        for input_to_check in [0, 1, 2]:  # dX, dScale, dBias
            self.assertGradientChecks(gc, op, [X, scale, bias, mean, var],
                                      input_to_check, [0])

    @given(size=st.integers(7, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(1, 3),
           seed=st.integers(0, 65535),
           order=st.sampled_from(["NCHW", "NHWC"]),
           epsilon=st.floats(min_value=1e-5, max_value=1e-2),
           **hu.gcs)
    def test_spatialbn_train_mode_gradient_check_1d(
            self, size, input_channels, batch_size, seed, order, epsilon,
            gc, dc):
        # Same gradient check for 1-D spatial input via CUDNN.
        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "mean", "var"],
            ["Y", "mean", "var", "saved_mean", "saved_var"],
            order=order,
            is_test=False,
            epsilon=epsilon,
            engine="CUDNN",
        )
        np.random.seed(seed)
        scale = np.random.rand(input_channels).astype(np.float32) + 0.5
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.rand(
            batch_size, input_channels, size).astype(np.float32) - 0.5

        if order == "NHWC":
            X = X.swapaxes(1, 2)

        for input_to_check in [0, 1, 2]:  # dX, dScale, dBias
            self.assertGradientChecks(gc, op, [X, scale, bias, mean, var],
                                      input_to_check, [0], stepsize=0.01)

    @given(size=st.integers(7, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(1, 3),
           seed=st.integers(0, 65535),
           epsilon=st.floats(1e-5, 1e-2),
           engine=st.sampled_from(["", "CUDNN"]),
           **hu.gcs)
    def test_spatialbn_brew_wrapper(
            self, size, input_channels, batch_size, seed, epsilon,
            engine, gc, dc):
        # Smoke test: the brew.spatial_bn helper builds and runs a net
        # (no numerical assertion, just successful execution).
        np.random.seed(seed)
        X = np.random.rand(
            batch_size, input_channels, size, size).astype(np.float32)

        workspace.FeedBlob('X', X)

        model = ModelHelper(name='test_spatialbn_brew_wrapper')
        brew.spatial_bn(
            model,
            'X',
            'Y',
            input_channels,
            epsilon=epsilon,
            is_test=False,
        )

        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)
# Allow running this test module directly with `python`.
if __name__ == "__main__":
    unittest.main()
| |
""" simpleTALES Implementation
Copyright (c) 2009 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
The classes in this module implement the TALES specification, used
by the simpleTAL module.
Module Dependencies: logging
"""
import types, sys
import logging
import simpletal
__version__ = simpletal.__version__
# Sentinel string - presumably compared against to detect the TALES
# `default` value elsewhere in the engine; confirm against simpleTAL usage.
DEFAULTVALUE = "This represents a Default value."
class PathNotFoundException (Exception):
    """ Raised when a TALES path expression cannot be resolved to a value.
        A shared instance (PATHNOTFOUNDEXCEPTION) is used to avoid
        allocating a new exception for every failed lookup.
    """
    pass
class ContextContentException (Exception):
    """ This is raised when invalid content has been placed into the Context
        object.  For example, using non-ASCII byte strings instead of
        Unicode strings.
    """
    pass
# Shared singleton raised whenever a path lookup fails; avoids creating a
# new PathNotFoundException instance on every miss.
PATHNOTFOUNDEXCEPTION = PathNotFoundException()
class ContextVariable (BaseException):
    """Wrapper for a value stored in a TALES Context.

    Subclasses BaseException because some variables (see
    PathFunctionVariable) deliver their computed value by *raising* the
    ContextVariable as a fast-path return mechanism.
    """
    def __init__ (self, value=None):
        self.ourValue = value

    def value (self, currentPath=None):
        """Return the wrapped value, invoking it first when it is callable."""
        wrapped = self.ourValue
        if hasattr (wrapped, "__call__"):
            return wrapped()
        return wrapped

    def rawValue (self):
        """Return the wrapped value as-is, never calling it."""
        return self.ourValue

    def __str__ (self):
        return repr (self.ourValue)
class RepeatVariable (ContextVariable):
    """Implements the TALES `repeat` variable for a materialised sequence.

    Exposes the standard repeat attributes (index, number, even, odd,
    start, end, length, letter, Letter, roman, Roman) through a lazily
    created map; most entries are bound methods evaluated on access.
    """
    def __init__ (self, sequence):
        # The wrapped scalar (1) is irrelevant; callers consume the map.
        ContextVariable.__init__ (self, 1)
        self.sequence = sequence
        self.position = 0
        self.map = None

    def value (self, currentPath=None):
        # Build the attribute map on first use only.
        if (self.map is None):
            self.createMap()
        return self.map

    def rawValue (self):
        return self.value()

    def getCurrentValue (self):
        """Return the sequence element for the current iteration."""
        return self.sequence [self.position]

    def increment (self):
        """Advance to the next element; IndexError signals exhaustion."""
        self.position += 1
        if (self.position == len (self.sequence)):
            raise IndexError ("Repeat Finished")

    def createMap (self):
        self.map = {}
        self.map ['index'] = self.getIndex
        self.map ['number'] = self.getNumber
        self.map ['even'] = self.getEven
        self.map ['odd'] = self.getOdd
        self.map ['start'] = self.getStart
        self.map ['end'] = self.getEnd
        # TODO: first and last need to be implemented.
        # `length` is a plain value: fixed for a materialised sequence.
        self.map ['length'] = len (self.sequence)
        self.map ['letter'] = self.getLowerLetter
        self.map ['Letter'] = self.getUpperLetter
        self.map ['roman'] = self.getLowerRoman
        self.map ['Roman'] = self.getUpperRoman

    def getIndex (self):
        """Zero-based position within the sequence."""
        return self.position

    def getNumber (self):
        """One-based position within the sequence."""
        return self.position + 1

    def getEven (self):
        """1 when the zero-based index is even, else 0."""
        if ((self.position % 2) != 0):
            return 0
        return 1

    def getOdd (self):
        """1 when the zero-based index is odd, else 0."""
        if ((self.position % 2) == 0):
            return 0
        return 1

    def getStart (self):
        """1 on the first iteration, else 0."""
        if (self.position == 0):
            return 1
        return 0

    def getEnd (self):
        """1 on the last iteration, else 0."""
        if (self.position == len (self.sequence) - 1):
            return 1
        return 0

    def getLowerLetter (self):
        """Spreadsheet-style letter label for the index: a..z, aa..az, ba..

        Bug fix: the previous divmod loop lacked the bijective base-26
        adjustment (divmod of `n - 1`), so index 26 produced 'ba' and the
        entire 'aa'..'az' block was skipped.  Results for indices 0-25 are
        unchanged.
        """
        result = ""
        num = self.position + 1
        while (num > 0):
            num, digit = divmod (num - 1, 26)
            result = chr (ord ('a') + digit) + result
        return result

    def getUpperLetter (self):
        return self.getLowerLetter().upper()

    def getLowerRoman (self):
        """Lower-case Roman numeral for the one-based position."""
        romanNumeralList = (('m', 1000)
                           ,('cm', 900)
                           ,('d', 500)
                           ,('cd', 400)
                           ,('c', 100)
                           ,('xc', 90)
                           ,('l', 50)
                           ,('xl', 40)
                           ,('x', 10)
                           ,('ix', 9)
                           ,('v', 5)
                           ,('iv', 4)
                           ,('i', 1)
                           )
        if (self.position > 3999):
            # Roman numbers only supported up to 4000
            return ' '
        num = self.position + 1
        result = ""
        for roman, integer in romanNumeralList:
            while (num >= integer):
                result += roman
                num -= integer
        return result

    def getUpperRoman (self):
        return self.getLowerRoman().upper()
class IteratorRepeatVariable (RepeatVariable):
    """Repeat variable backed by an iterator rather than a sized sequence."""
    def __init__ (self, sequence):
        RepeatVariable.__init__ (self, sequence)
        self.curValue = None
        # iterStatus: 0 = not started, 1 = iterating, 2 = exhausted.
        self.iterStatus = 0

    def getCurrentValue (self):
        # Pull the first element lazily on initial access; afterwards
        # increment() keeps curValue up to date.
        if (self.iterStatus == 0):
            self.iterStatus = 1
            try:
                self.curValue = next(self.sequence)
            except StopIteration as e:
                self.iterStatus = 2
                raise IndexError ("Repeat Finished")
        return self.curValue

    def increment (self):
        # Need this for the repeat variable functions.
        self.position += 1
        try:
            self.curValue = next(self.sequence)
        except StopIteration as e:
            self.iterStatus = 2
            raise IndexError ("Repeat Finished")

    def createMap (self):
        # Same attribute map as the sequence-based variant, except `length`:
        # an iterator's length is unknown, so sys.maxsize stands in.
        self.map = {}
        self.map ['index'] = self.getIndex
        self.map ['number'] = self.getNumber
        self.map ['even'] = self.getEven
        self.map ['odd'] = self.getOdd
        self.map ['start'] = self.getStart
        self.map ['end'] = self.getEnd
        # TODO: first and last need to be implemented.
        self.map ['length'] = sys.maxsize
        self.map ['letter'] = self.getLowerLetter
        self.map ['Letter'] = self.getUpperLetter
        self.map ['roman'] = self.getLowerRoman
        self.map ['Roman'] = self.getUpperRoman

    def getEnd (self):
        # The end is only known once the underlying iterator has raised
        # StopIteration (iterStatus == 2).
        if (self.iterStatus == 2):
            return 1
        return 0
class PathFunctionVariable (ContextVariable):
    """Context variable wrapping a function that consumes the remainder of
    the path being traversed (joined with '/')."""
    def __init__ (self, func):
        ContextVariable.__init__ (self, value = func)
        self.func = func

    def value (self, currentPath=None):
        # When invoked mid-traversal, apply the function to the unconsumed
        # path segments.  Returns None when no path info is supplied.
        if (currentPath is not None):
            index, paths = currentPath
            result = ContextVariable (self.func ('/'.join (paths[index:])))
            # Fast track the result
            # (ContextVariable subclasses BaseException precisely so the
            # computed value can be *raised* here and caught by the path
            # traversal code, short-circuiting further traversal.)
            raise result
class CachedFuncResult (ContextVariable):
    """Context variable that evaluates its wrapped callable at most once.

    The first call to value() computes and memoises the result; subsequent
    calls return the cached value until clearCache() is invoked.
    """
    def value (self, currentPath=None):
        try:
            return self.cachedValue
        except AttributeError:
            # First access: compute and cache the underlying value.
            # (Was a bare `except:`, which could also swallow unrelated
            # exceptions such as KeyboardInterrupt; only the attribute
            # lookup above can fail here, and only with AttributeError.)
            self.cachedValue = ContextVariable.value (self)
            return self.cachedValue

    def clearCache (self):
        """Discard any memoised value so the next value() recomputes it."""
        try:
            del self.cachedValue
        except AttributeError:
            # No cached value yet - nothing to clear.
            pass
class PythonPathFunctions:
    """Helper functions exposed to `python:` TALES expressions."""

    def __init__ (self, context):
        self.context = context

    def path (self, expr):
        # Evaluate `expr` as a TALES path expression.
        return self.context.evaluatePath (expr)

    def string (self, expr):
        # Evaluate `expr` as a TALES string expression.
        return self.context.evaluateString (expr)

    def exists (self, expr):
        # True when the path named by `expr` can be resolved.
        return self.context.evaluateExists (expr)

    def nocall (self, expr):
        # Resolve `expr` without calling the resulting object.
        return self.context.evaluateNoCall (expr)

    def test (self, *arguments):
        """Return the value paired with the first true condition.

        Arguments are (condition, value) pairs; a trailing odd argument is
        the default returned when no condition holds (otherwise None).
        """
        if (len (arguments) % 2):
            # Odd count: the final argument is the default value.
            pairs = arguments[:-1]
            defaultValue = arguments[-1]
        else:
            # Even count: no default supplied, fall back to None.
            pairs = arguments
            defaultValue = None
        for condition, outcome in zip (pairs[::2], pairs[1::2]):
            if (condition):
                return outcome
        return defaultValue
class Context:
    def __init__ (self, options=None, allowPythonPath=0):
        """Create a TALES evaluation context.

        options => optional object whose contents become default template
            variables (handled by populateDefaultVariables).
        allowPythonPath => when true, `python:` expressions are evaluated;
            otherwise they are refused (the safer default).
        """
        self.allowPythonPath = allowPythonPath
        self.globals = {}
        self.locals = {}
        self.localStack = []
        self.repeatStack = []
        # NOTE(review): populateDefaultVariables is expected to initialise
        # self.repeatMap, which addRepeat relies on - confirm.
        self.populateDefaultVariables (options)
        self.log = logging.getLogger ("simpleTALES.Context")
        # Canonical truth values used by the expression evaluators.
        self.true = 1
        self.false = 0
        self.pythonPathFuncs = PythonPathFunctions (self)
    def addRepeat (self, name, var, initialValue):
        """Enter a repeat loop: publish `var` as repeat/<name> and bind the
        loop variable `name` locally to its first value."""
        # Push the current repeat map onto the stack and work on a copy so
        # the outer loop's map is untouched.
        self.repeatStack.append (self.repeatMap)
        self.repeatMap = self.repeatMap.copy()
        self.repeatMap [name] = var
        # Map this repeatMap into the global space
        self.addGlobal ('repeat', self.repeatMap)
        # Add in the locals
        self.pushLocals()
        self.setLocal (name, initialValue)
    def removeRepeat (self, name):
        """Leave the innermost repeat loop.

        `name` is accepted for symmetry with addRepeat but is unused:
        restoring the saved repeat map drops the entry.  The matching
        popLocals() is presumably issued by the caller - confirm against
        the template handler code.
        """
        # Bring the old repeat map back
        self.repeatMap = self.repeatStack.pop()
        # Map this repeatMap into the global space
        self.addGlobal ('repeat', self.repeatMap)
    def addGlobal (self, name, value):
        """Bind `name` in the global (template-wide) variable scope."""
        self.globals[name] = value
def pushLocals (self):
# Push the current locals onto a stack so that we can safely over-ride them.
self.localStack.append (self.locals)
self.locals = self.locals.copy()
    def setLocal (self, name, value):
        """Bind `name` in the current local scope (shadows any previous
        binding until the matching popLocals call)."""
        # Override the current local if present with the new one
        self.locals [name] = value
    def popLocals (self):
        """Discard the current local scope, restoring the one saved by the
        matching pushLocals call."""
        self.locals = self.localStack.pop()
    def evaluate (self, expr, originalAtts = None):
        """Evaluate a TALES expression and return the result.

        expr => expression string, optionally prefixed with one of
            path:, exists:, nocall:, not:, string:, python:
            (an unprefixed expression is treated as a path).
        originalAtts => original tag attributes; when supplied, this call
            is treated as coming from outside the engine: the attributes
            are published as the global `attrs` and PathNotFoundException
            is suppressed (None is returned instead of raising).
        """
        # Returns a ContextVariable
        #self.log.debug ("Evaluating %s" % expr)
        if (originalAtts is not None):
            # Call from outside
            self.globals['attrs'] = originalAtts
            suppressException = 1
        else:
            suppressException = 0
        # Supports path, exists, nocall, not, and string
        expr = expr.strip ()
        try:
            # Dispatch on the expression-type prefix; the slice lengths
            # match each prefix and lstrip tolerates spaces after it.
            if expr.startswith ('path:'):
                return self.evaluatePath (expr[5:].lstrip ())
            elif expr.startswith ('exists:'):
                return self.evaluateExists (expr[7:].lstrip())
            elif expr.startswith ('nocall:'):
                return self.evaluateNoCall (expr[7:].lstrip())
            elif expr.startswith ('not:'):
                return self.evaluateNot (expr[4:].lstrip())
            elif expr.startswith ('string:'):
                return self.evaluateString (expr[7:].lstrip())
            elif expr.startswith ('python:'):
                return self.evaluatePython (expr[7:].lstrip())
            else:
                # Not specified - so it's a path
                return self.evaluatePath (expr)
        except PathNotFoundException as e:
            if (suppressException):
                return None
            raise e
def evaluatePython (self, expr):
if (not self.allowPythonPath):
self.log.warn ("Parameter allowPythonPath is false. NOT Evaluating python expression %s" % expr)
return self.false
#self.log.debug ("Evaluating python expression %s" % expr)
globals={}
for name, value in list(self.globals.items()):
if (isinstance (value, ContextVariable)): value = value.rawValue()
globals [name] = value
globals ['path'] = self.pythonPathFuncs.path
globals ['string'] = self.pythonPathFuncs.string
globals ['exists'] = self.pythonPathFuncs.exists
globals ['nocall'] = self.pythonPathFuncs.nocall
globals ['test'] = self.pythonPathFuncs.test
locals={}
for name, value in list(self.locals.items()):
if (isinstance (value, ContextVariable)): value = value.rawValue()
locals [name] = value
try:
result = eval(expr, globals, locals)
if (isinstance (result, ContextVariable)):
return result.value()
return result
except Exception as e:
# An exception occured evaluating the template, return the exception as text
self.log.warn ("Exception occurred evaluating python path, exception: " + str (e))
return "Exception: %s" % str (e)
def evaluatePath (self, expr):
#self.log.debug ("Evaluating path expression %s" % expr)
allPaths = expr.split ('|')
if (len (allPaths) > 1):
for path in allPaths:
# Evaluate this path
try:
return self.evaluate (path.strip ())
except PathNotFoundException as e:
# Path didn't exist, try the next one
pass
# No paths evaluated - raise exception.
raise PATHNOTFOUNDEXCEPTION
else:
# A single path - so let's evaluate it.
# This *can* raise PathNotFoundException
return self.traversePath (allPaths[0])
def evaluateExists (self, expr):
#self.log.debug ("Evaluating %s to see if it exists" % expr)
allPaths = expr.split ('|')
# The first path is for us
# Return true if this first bit evaluates, otherwise test the rest
try:
result = self.traversePath (allPaths[0], canCall = 0)
return self.true
except PathNotFoundException as e:
# Look at the rest of the paths.
pass
for path in allPaths[1:]:
# Evaluate this path
try:
pathResult = self.evaluate (path.strip ())
# If this is part of a "exists: path1 | exists: path2" path then we need to look at the actual result.
if (pathResult):
return self.true
except PathNotFoundException as e:
pass
# If we get this far then there are *no* paths that exist.
return self.false
def evaluateNoCall (self, expr):
#self.log.debug ("Evaluating %s using nocall" % expr)
allPaths = expr.split ('|')
# The first path is for us
try:
return self.traversePath (allPaths[0], canCall = 0)
except PathNotFoundException as e:
# Try the rest of the paths.
pass
for path in allPaths[1:]:
# Evaluate this path
try:
return self.evaluate (path.strip ())
except PathNotFoundException as e:
pass
# No path evaluated - raise error
raise PATHNOTFOUNDEXCEPTION
def evaluateNot (self, expr):
#self.log.debug ("Evaluating NOT value of %s" % expr)
# Evaluate what I was passed
try:
pathResult = self.evaluate (expr)
except PathNotFoundException as e:
# In SimpleTAL the result of "not: no/such/path" should be TRUE not FALSE.
return self.true
if (pathResult is None):
# Value was Nothing
return self.true
if (pathResult == DEFAULTVALUE):
return self.false
try:
resultLen = len (pathResult)
if (resultLen > 0):
return self.false
else:
return self.true
except:
# Not a sequence object.
pass
if (not pathResult):
return self.true
# Everything else is true, so we return false!
return self.false
def evaluateString (self, expr):
#self.log.debug ("Evaluating String %s" % expr)
result = ""
skipCount = 0
for position in range (0,len (expr)):
if (skipCount > 0):
skipCount -= 1
else:
if (expr[position] == '$'):
try:
if (expr[position + 1] == '$'):
# Escaped $ sign
result += '$'
skipCount = 1
elif (expr[position + 1] == '{'):
# Looking for a path!
endPos = expr.find ('}', position + 1)
if (endPos > 0):
path = expr[position + 2:endPos]
# Evaluate the path - missing paths raise exceptions as normal.
try:
pathResult = self.evaluate (path)
except PathNotFoundException as e:
# This part of the path didn't evaluate to anything - leave blank
pathResult = ''
if (pathResult is not None):
if (isinstance (pathResult, str)):
result += pathResult
else:
# THIS IS NOT A BUG!
# Use Unicode in Context if you aren't using Ascii!
result += str (pathResult)
skipCount = endPos - position
else:
# It's a variable
endPos = expr.find (' ', position + 1)
if (endPos == -1):
endPos = len (expr)
path = expr [position + 1:endPos]
# Evaluate the variable - missing paths raise exceptions as normal.
try:
pathResult = self.traversePath (path)
except PathNotFoundException as e:
# This part of the path didn't evaluate to anything - leave blank
pathResult = ''
if (pathResult is not None):
if (isinstance (pathResult, str)):
result += pathResult
else:
# THIS IS NOT A BUG!
# Use Unicode in Context if you aren't using Ascii!
result += str (pathResult)
skipCount = endPos - position - 1
except IndexError as e:
# Trailing $ sign - just suppress it
self.log.warn ("Trailing $ detected")
pass
else:
result += expr[position]
return result
def traversePath (self, expr, canCall=1):
# canCall only applies to the *final* path destination, not points down the path.
# Check for and correct for trailing/leading quotes
if (expr.startswith ('"') or expr.startswith ("'")):
if (expr.endswith ('"') or expr.endswith ("'")):
expr = expr [1:-1]
else:
expr = expr [1:]
elif (expr.endswith ('"') or expr.endswith ("'")):
expr = expr [0:-1]
pathList = expr.split ('/')
path = pathList[0]
if path.startswith ('?'):
path = path[1:]
if path in self.locals:
path = self.locals[path]
if (isinstance (path, ContextVariable)): path = path.value()
elif (hasattr (path, "__call__")):path = path()
elif path in self.globals:
path = self.globals[path]
if (isinstance (path, ContextVariable)): path = path.value()
elif (hasattr (path, "__call__")):path = path()
#self.log.debug ("Dereferenced to %s" % path)
if path in self.locals:
val = self.locals[path]
elif path in self.globals:
val = self.globals[path]
else:
# If we can't find it then raise an exception
raise PATHNOTFOUNDEXCEPTION
index = 1
for path in pathList[1:]:
#self.log.debug ("Looking for path element %s" % path)
if path.startswith ('?'):
path = path[1:]
if path in self.locals:
path = self.locals[path]
if (isinstance (path, ContextVariable)): path = path.value()
elif (hasattr (path, "__call__")):path = path()
elif path in self.globals:
path = self.globals[path]
if (isinstance (path, ContextVariable)): path = path.value()
elif (hasattr (path, "__call__")):path = path()
#self.log.debug ("Dereferenced to %s" % path)
try:
if (isinstance (val, ContextVariable)): temp = val.value((index,pathList))
elif (hasattr (val, "__call__")):temp = val()
else: temp = val
except ContextVariable as e:
# Fast path for those functions that return values
return e.value()
if (hasattr (temp, path)):
val = getattr (temp, path)
else:
try:
try:
val = temp[path]
except TypeError:
val = temp[int(path)]
except:
#self.log.debug ("Not found.")
raise PATHNOTFOUNDEXCEPTION
index = index + 1
#self.log.debug ("Found value %s" % str (val))
if (canCall):
try:
if (isinstance (val, ContextVariable)): result = val.value((index,pathList))
elif (hasattr (val, "__call__")):result = val()
else: result = val
except ContextVariable as e:
# Fast path for those functions that return values
return e.value()
else:
if (isinstance (val, ContextVariable)): result = val.realValue
else: result = val
return result
def __str__ (self):
return "Globals: " + str (self.globals) + "Locals: " + str (self.locals)
def populateDefaultVariables (self, options):
vars = {}
self.repeatMap = {}
vars['nothing'] = None
vars['default'] = DEFAULTVALUE
vars['options'] = options
# To start with there are no repeats
vars['repeat'] = self.repeatMap
vars['attrs'] = None
# Add all of these to the global context
for name in list(vars.keys()):
self.addGlobal (name,vars[name])
# Add also under CONTEXTS
self.addGlobal ('CONTEXTS', vars)
| |
from __future__ import unicode_literals
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from django.core.exceptions import FieldError
from django.contrib.contenttypes.models import ContentType
from django.db.models import Count, Max, Avg, Sum, StdDev, Variance, F, Q
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from .models import (Author, Book, Publisher, Clues, Entries, HardbackBook,
ItemTag, WithManualPK, Alfa, Bravo, Charlie)
class AggregationTests(TestCase):
fixtures = ["aggregation_regress.json"]
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in six.iteritems(kwargs):
self.assertEqual(getattr(obj, attr), value)
    def test_aggregates_in_where_clause(self):
        """
        Regression test for #12822: DatabaseError: aggregates not allowed in
        WHERE clause
        Tests that the subselect works and returns results equivalent to a
        query with the IDs listed.
        Before the corresponding fix for this bug, this test passed in 1.1 and
        failed in 1.2-beta (trunk).
        """
        qs = Book.objects.values('contact').annotate(Max('id'))
        qs = qs.order_by('contact').values_list('id__max', flat=True)
        # don't do anything with the queryset (qs) before including it as a
        # subquery
        books = Book.objects.order_by('id')
        # qs1 pushes the aggregate query down as a SQL subselect; qs2
        # evaluates it in Python first.  Both must return the same rows.
        qs1 = books.filter(id__in=qs)
        qs2 = books.filter(id__in=list(qs))
        self.assertEqual(list(qs1), list(qs2))
    def test_aggregates_in_where_clause_pre_eval(self):
        """
        Regression test for #12822: DatabaseError: aggregates not allowed in
        WHERE clause
        Same as the above test, but evaluates the queryset for the subquery
        before it's used as a subquery.
        Before the corresponding fix for this bug, this test failed in both
        1.1 and 1.2-beta (trunk).
        """
        qs = Book.objects.values('contact').annotate(Max('id'))
        qs = qs.order_by('contact').values_list('id__max', flat=True)
        # force the queryset (qs) for the subquery to be evaluated in its
        # current state
        list(qs)
        books = Book.objects.order_by('id')
        # Even after evaluation, using qs as a subquery must match the
        # explicitly listed IDs.
        qs1 = books.filter(id__in=qs)
        qs2 = books.filter(id__in=list(qs))
        self.assertEqual(list(qs1), list(qs2))
    @skipUnlessDBFeature('supports_subqueries_in_group_by')
    def test_annotate_with_extra(self):
        """
        Regression test for #11916: Extra params + aggregation creates
        incorrect SQL.
        """
        # Oracle doesn't support subqueries in group by clause
        shortest_book_sql = """
        SELECT name
        FROM aggregation_regress_book b
        WHERE b.publisher_id = aggregation_regress_publisher.id
        ORDER BY b.pages
        LIMIT 1
        """
        # tests that this query does not raise a DatabaseError due to the full
        # subselect being (erroneously) added to the GROUP BY parameters
        qs = Publisher.objects.extra(select={
            'name_of_shortest_book': shortest_book_sql,
        }).annotate(total_books=Count('book'))
        # force execution of the query
        list(qs)
    def test_aggregate(self):
        """Basic aggregate() behavior: ordering is ignored, and empty
        values()/extra() clauses don't change the aggregated results."""
        # Ordering requests are ignored
        self.assertEqual(
            Author.objects.order_by("name").aggregate(Avg("age")),
            {"age__avg": Approximate(37.444, places=1)}
        )
        # Implicit ordering is also ignored
        self.assertEqual(
            Book.objects.aggregate(Sum("pages")),
            {"pages__sum": 3703},
        )
        # Baseline results
        self.assertEqual(
            Book.objects.aggregate(Sum('pages'), Avg('pages')),
            {'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
        )
        # Empty values query doesn't affect grouping or results
        self.assertEqual(
            Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
            {'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
        )
        # Aggregate overrides extra selected column
        self.assertEqual(
            Book.objects.extra(select={'price_per_page': 'price / pages'}).aggregate(Sum('pages')),
            {'pages__sum': 3703}
        )
    def test_annotation(self):
        """annotate() composed with extra() and values(): clause order,
        inclusion in values() output, and filtering on annotations."""
        # Annotations get combined with extra select clauses
        obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(select={"manufacture_cost": "price * .5"}).get(pk=2)
        self.assertObjectAttrs(obj,
            contact_id=3,
            id=2,
            isbn='067232959',
            mean_auth_age=45.0,
            name='Sams Teach Yourself Django in 24 Hours',
            pages=528,
            price=Decimal("23.09"),
            pubdate=datetime.date(2008, 3, 3),
            publisher_id=2,
            rating=3.0
        )
        # Different DB backends return different types for the extra select computation
        self.assertTrue(obj.manufacture_cost == 11.545 or obj.manufacture_cost == Decimal('11.545'))
        # Order of the annotate/extra in the query doesn't matter
        obj = Book.objects.extra(select={'manufacture_cost': 'price * .5'}).annotate(mean_auth_age=Avg('authors__age')).get(pk=2)
        self.assertObjectAttrs(obj,
            contact_id=3,
            id=2,
            isbn='067232959',
            mean_auth_age=45.0,
            name='Sams Teach Yourself Django in 24 Hours',
            pages=528,
            price=Decimal("23.09"),
            pubdate=datetime.date(2008, 3, 3),
            publisher_id=2,
            rating=3.0
        )
        # Different DB backends return different types for the extra select computation
        self.assertTrue(obj.manufacture_cost == 11.545 or obj.manufacture_cost == Decimal('11.545'))
        # Values queries can be combined with annotate and extra
        obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost': 'price * .5'}).values().get(pk=2)
        manufacture_cost = obj['manufacture_cost']
        self.assertTrue(manufacture_cost == 11.545 or manufacture_cost == Decimal('11.545'))
        # Remove the backend-dependent value before comparing the rest.
        del obj['manufacture_cost']
        self.assertEqual(obj, {
            "contact_id": 3,
            "id": 2,
            "isbn": "067232959",
            "mean_auth_age": 45.0,
            "name": "Sams Teach Yourself Django in 24 Hours",
            "pages": 528,
            "price": Decimal("23.09"),
            "pubdate": datetime.date(2008, 3, 3),
            "publisher_id": 2,
            "rating": 3.0,
        })
        # The order of the (empty) values, annotate and extra clauses doesn't
        # matter
        obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost': 'price * .5'}).get(pk=2)
        manufacture_cost = obj['manufacture_cost']
        self.assertTrue(manufacture_cost == 11.545 or manufacture_cost == Decimal('11.545'))
        del obj['manufacture_cost']
        self.assertEqual(obj, {
            'contact_id': 3,
            'id': 2,
            'isbn': '067232959',
            'mean_auth_age': 45.0,
            'name': 'Sams Teach Yourself Django in 24 Hours',
            'pages': 528,
            'price': Decimal("23.09"),
            'pubdate': datetime.date(2008, 3, 3),
            'publisher_id': 2,
            'rating': 3.0
        })
        # If the annotation precedes the values clause, it won't be included
        # unless it is explicitly named
        obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page': 'price / pages'}).values('name').get(pk=1)
        self.assertEqual(obj, {
            "name": 'The Definitive Guide to Django: Web Development Done Right',
        })
        obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page': 'price / pages'}).values('name', 'mean_auth_age').get(pk=1)
        self.assertEqual(obj, {
            'mean_auth_age': 34.5,
            'name': 'The Definitive Guide to Django: Web Development Done Right',
        })
        # If an annotation isn't included in the values, it can still be used
        # in a filter
        qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
        self.assertQuerysetEqual(
            qs, [
                {"name": 'Python Web Development with Django'}
            ],
            lambda b: b,
        )
        # The annotations are added to values output if values() precedes
        # annotate()
        obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page': 'price / pages'}).get(pk=1)
        self.assertEqual(obj, {
            'mean_auth_age': 34.5,
            'name': 'The Definitive Guide to Django: Web Development Done Right',
        })
        # Check that all of the objects are getting counted (allow_nulls) and
        # that values respects the amount of objects
        self.assertEqual(
            len(Author.objects.annotate(Avg('friends__age')).values()),
            9
        )
        # Check that consecutive calls to annotate accumulate in the query
        qs = Book.objects.values('price').annotate(oldest=Max('authors__age')).order_by('oldest', 'price').annotate(Max('publisher__num_awards'))
        self.assertQuerysetEqual(
            qs, [
                {'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
                {'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
                {'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
                {'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
                {'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
            ],
            lambda b: b,
        )
    def test_aggrate_annotation(self):
        # NOTE(review): the method name has a typo ("aggrate" for
        # "aggregate"); kept as-is so the test keeps its historical name.
        # Aggregates can be composed over annotations.
        # The return type is derived from the composed aggregate
        vals = Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
        self.assertEqual(vals, {
            'num_authors__sum': 10,
            'num_authors__avg': Approximate(1.666, places=2),
            'pages__max': 1132,
            'price__max': Decimal("82.80")
        })
        # Regression for #15624 - Missing SELECT columns when using values, annotate
        # and aggregate in a single query
        self.assertEqual(
            Book.objects.annotate(c=Count('authors')).values('c').aggregate(Max('c')),
            {'c__max': 3}
        )
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
self.assertRaises(
FieldError,
lambda: Book.objects.all().aggregate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
)
    def test_more(self):
        """count(), type inheritance, reserved-name aliases, select_related()
        and exclude() all interoperate with annotations."""
        # Old-style count aggregations can be mixed with new-style
        self.assertEqual(
            Book.objects.annotate(num_authors=Count('authors')).count(),
            6
        )
        # Non-ordinal, non-computed Aggregates over annotations correctly
        # inherit the annotation's internal type if the annotation is ordinal
        # or computed
        vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
        self.assertEqual(
            vals,
            {'num_authors__max': 3}
        )
        vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
        self.assertEqual(
            vals,
            {'avg_price__max': 75.0}
        )
        # Aliases are quoted to protected aliases that might be reserved names
        vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
        self.assertEqual(
            vals,
            {'number': 1132, 'select': 1132}
        )
        # Regression for #10064: select_related() plays nice with aggregates
        obj = Book.objects.select_related('publisher').annotate(num_authors=Count('authors')).values()[0]
        self.assertEqual(obj, {
            'contact_id': 8,
            'id': 5,
            'isbn': '013790395',
            'name': 'Artificial Intelligence: A Modern Approach',
            'num_authors': 2,
            'pages': 1132,
            'price': Decimal("82.8"),
            'pubdate': datetime.date(1995, 1, 15),
            'publisher_id': 3,
            'rating': 4.0,
        })
        # Regression for #10010: exclude on an aggregate field is correctly
        # negated
        self.assertEqual(
            len(Book.objects.annotate(num_authors=Count('authors'))),
            6
        )
        self.assertEqual(
            len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
            1
        )
        self.assertEqual(
            len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
            5
        )
        # filter() and exclude() on the same annotation compose in either order.
        self.assertEqual(
            len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__lt=3).exclude(num_authors__lt=2)),
            2
        )
        self.assertEqual(
            len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__lt=2).filter(num_authors__lt=3)),
            2
        )
    def test_aggregate_fexpr(self):
        """F() expressions compare against annotations (HAVING clause) and
        annotations can appear on either side of the comparison."""
        # Aggregates can be used with F() expressions
        # ... where the F() is pushed into the HAVING clause
        qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
        self.assertQuerysetEqual(
            qs, [
                {'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
                {'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
            ],
            lambda p: p,
        )
        qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
        self.assertQuerysetEqual(
            qs, [
                {'num_books': 2, 'name': 'Apress', 'num_awards': 3},
                {'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
                {'num_books': 1, 'name': 'Sams', 'num_awards': 1}
            ],
            lambda p: p,
        )
        # ... and where the F() references an aggregate
        qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_awards__gt=2 * F('num_books')).order_by('name').values('name', 'num_books', 'num_awards')
        self.assertQuerysetEqual(
            qs, [
                {'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
                {'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
            ],
            lambda p: p,
        )
        qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards') / 2).order_by('name').values('name', 'num_books', 'num_awards')
        self.assertQuerysetEqual(
            qs, [
                {'num_books': 2, 'name': 'Apress', 'num_awards': 3},
                {'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
                {'num_books': 1, 'name': 'Sams', 'num_awards': 1}
            ],
            lambda p: p,
        )
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = Clues.objects.values('EntryID__Entry').annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
self.assertQuerysetEqual(qs, [])
    def test_boolean_conversion(self):
        # Aggregates mixed up ordering of columns for backend's convert_values
        # method. Refs #21126.
        e = Entries.objects.create(Entry='foo')
        c = Clues.objects.create(EntryID=e, Clue='bar')
        qs = Clues.objects.select_related('EntryID').annotate(Count('ID'))
        self.assertQuerysetEqual(
            qs, [c], lambda x: x)
        self.assertEqual(qs[0].EntryID, e)
        # The boolean column must come back as an actual bool, not a raw
        # backend value that merely compares equal.
        self.assertIs(qs[0].EntryID.Exclude, False)
    def test_empty(self):
        # Regression for #10089: Check handling of empty result sets with
        # aggregates
        self.assertEqual(
            Book.objects.filter(id__in=[]).count(),
            0
        )
        # Count() of an empty set is 0; every other aggregate is None.
        vals = Book.objects.filter(id__in=[]).aggregate(num_authors=Count('authors'), avg_authors=Avg('authors'), max_authors=Max('authors'), max_price=Max('price'), max_rating=Max('rating'))
        self.assertEqual(
            vals,
            {'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
        )
        qs = Publisher.objects.filter(pk=5).annotate(num_authors=Count('book__authors'), avg_authors=Avg('book__authors'), max_authors=Max('book__authors'), max_price=Max('book__price'), max_rating=Max('book__rating')).values()
        self.assertQuerysetEqual(
            qs, [
                {'max_authors': None, 'name': "Jonno's House of Books", 'num_awards': 0, 'max_price': None, 'num_authors': 0, 'max_rating': None, 'id': 5, 'avg_authors': None}
            ],
            lambda p: p
        )
    def test_more_more(self):
        """GROUP BY interactions: order_by joins, select_related, extra
        select columns, subquery realiasing, and duplicate group-by fields."""
        # Regression for #10113 - Fields mentioned in order_by() must be
        # included in the GROUP BY. This only becomes a problem when the
        # order_by introduces a new join.
        self.assertQuerysetEqual(
            Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
                "Practical Django Projects",
                "The Definitive Guide to Django: Web Development Done Right",
                "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
                "Artificial Intelligence: A Modern Approach",
                "Python Web Development with Django",
                "Sams Teach Yourself Django in 24 Hours",
            ],
            lambda b: b.name
        )
        # Regression for #10127 - Empty select_related() works with annotate
        qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
        self.assertQuerysetEqual(
            qs, [
                ('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'),
                ('Practical Django Projects', 29.0, 'Apress', 'James Bennett'),
                ('Python Web Development with Django', Approximate(30.333, places=2), 'Prentice Hall', 'Jeffrey Forcier'),
                ('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley')
            ],
            lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
        )
        # Regression for #10132 - If the values() clause only mentioned extra
        # (select=) columns, those columns are used for grouping
        qs = Book.objects.extra(select={'pub': 'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
        self.assertQuerysetEqual(
            qs, [
                {'pub': 1, 'id__count': 2},
                {'pub': 2, 'id__count': 1},
                {'pub': 3, 'id__count': 2},
                {'pub': 4, 'id__count': 1}
            ],
            lambda b: b
        )
        # Extra select columns not named in values() must not affect grouping.
        qs = Book.objects.extra(select={'pub': 'publisher_id', 'foo': 'pages'}).values('pub').annotate(Count('id')).order_by('pub')
        self.assertQuerysetEqual(
            qs, [
                {'pub': 1, 'id__count': 2},
                {'pub': 2, 'id__count': 1},
                {'pub': 3, 'id__count': 2},
                {'pub': 4, 'id__count': 1}
            ],
            lambda b: b
        )
        # Regression for #10182 - Queries with aggregate calls are correctly
        # realiased when used in a subquery
        ids = Book.objects.filter(pages__gt=100).annotate(n_authors=Count('authors')).filter(n_authors__gt=2).order_by('n_authors')
        self.assertQuerysetEqual(
            Book.objects.filter(id__in=ids), [
                "Python Web Development with Django",
            ],
            lambda b: b.name
        )
        # Regression for #15709 - Ensure each group_by field only exists once
        # per query
        qs = Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by()
        grouping, gb_params = qs.query.get_compiler(qs.db).get_grouping([], [])
        self.assertEqual(len(grouping), 1)
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises ValueError.
self.assertRaises(ValueError, Book.objects.all().annotate, Avg('authors__age'), authors__age__avg=Avg('authors__age'))
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a field name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, age=Avg('friends__age'))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with an m2m name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, friends=Count('friends'))
    def test_values_queryset_non_conflict(self):
        # Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
        # age is a field on Author, so it shouldn't be allowed as an aggregate.
        # But age isn't included in the ValuesQuerySet, so it is.
        results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
        self.assertEqual(len(results), 9)
        self.assertEqual(results[0]['name'], 'Adrian Holovaty')
        self.assertEqual(results[0]['age'], 1)
        # Same problem, but aggregating over m2m fields
        results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
        self.assertEqual(len(results), 9)
        self.assertEqual(results[0]['name'], 'Adrian Holovaty')
        self.assertEqual(results[0]['age'], 32.0)
        # Same problem, but colliding with an m2m field
        results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
        self.assertEqual(len(results), 9)
        self.assertEqual(results[0]['name'], 'Adrian Holovaty')
        self.assertEqual(results[0]['friends'], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a reverse-related name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, book_contact_set=Avg('friends__age'))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count('authors'))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
    def test_more_more_more(self):
        """Aggregate cloning, dates(), parameterized extra selects, count()
        with annotations, queryset reuse, and inherited-model aggregation."""
        # Regression for #10199 - Aggregate calls clone the original query so
        # the original query can still be used
        books = Book.objects.all()
        books.aggregate(Avg("authors__age"))
        self.assertQuerysetEqual(
            books.all(), [
                'Artificial Intelligence: A Modern Approach',
                'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
                'Practical Django Projects',
                'Python Web Development with Django',
                'Sams Teach Yourself Django in 24 Hours',
                'The Definitive Guide to Django: Web Development Done Right'
            ],
            lambda b: b.name
        )
        # Regression for #10248 - Annotations work with DateQuerySets
        qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
        self.assertQuerysetEqual(
            qs, [
                datetime.date(1995, 1, 15),
                datetime.date(2007, 12, 6),
            ],
            lambda b: b
        )
        # Regression for #10290 - extra selects with parameters can be used for
        # grouping.
        qs = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'sheets': '(pages + %s) / %s'}, select_params=[1, 2]).order_by('sheets').values('sheets')
        self.assertQuerysetEqual(
            qs, [
                150,
                175,
                224,
                264,
                473,
                566
            ],
            lambda b: int(b["sheets"])
        )
        # Regression for 10425 - annotations don't get in the way of a count()
        # clause
        self.assertEqual(
            Book.objects.values('publisher').annotate(Count('publisher')).count(),
            4
        )
        self.assertEqual(
            Book.objects.annotate(Count('publisher')).values('publisher').count(),
            6
        )
        # Note: intentionally no order_by(), that case needs tests, too.
        publishers = Publisher.objects.filter(id__in=[1, 2])
        self.assertEqual(
            sorted(p.name for p in publishers),
            [
                "Apress",
                "Sams"
            ]
        )
        # Annotating must not change which rows the queryset matches.
        publishers = publishers.annotate(n_books=Count("book"))
        sorted_publishers = sorted(publishers, key=lambda x: x.name)
        self.assertEqual(
            sorted_publishers[0].n_books,
            2
        )
        self.assertEqual(
            sorted_publishers[1].n_books,
            1
        )
        self.assertEqual(
            sorted(p.name for p in publishers),
            [
                "Apress",
                "Sams"
            ]
        )
        books = Book.objects.filter(publisher__in=publishers)
        self.assertQuerysetEqual(
            books, [
                "Practical Django Projects",
                "Sams Teach Yourself Django in 24 Hours",
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            lambda b: b.name
        )
        # Re-evaluating the annotated queryset still yields the same rows.
        self.assertEqual(
            sorted(p.name for p in publishers),
            [
                "Apress",
                "Sams"
            ]
        )
        # Regression for 10666 - inherited fields work with annotations and
        # aggregations
        self.assertEqual(
            HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
            {'n_pages': 2078}
        )
        self.assertEqual(
            HardbackBook.objects.aggregate(n_pages=Sum('pages')),
            {'n_pages': 2078},
        )
        qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
        self.assertQuerysetEqual(
            qs, [
                {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
                {'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
            ],
            lambda h: h
        )
        qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
        self.assertQuerysetEqual(
            qs, [
                {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
                {'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
            ],
            lambda h: h,
        )
        # Regression for #10766 - Shouldn't be able to reference an aggregate
        # fields in an aggregate() call.
        self.assertRaises(
            FieldError,
            lambda: Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
        )
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(),
0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
{"pk__count": None}
)
def test_none_call_before_aggregate(self):
# Regression for #11789
self.assertEqual(
Author.objects.none().aggregate(Avg('age')),
{'age__avg': None}
)
def test_annotate_and_join(self):
self.assertEqual(
Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
Author.objects.count()
)
def test_f_expression_annotation(self):
    # Books with less than 200 pages per author.
    thin_books = (
        Book.objects.values("name")
        .annotate(n_authors=Count("authors"))
        .filter(pages__lt=F("n_authors") * 200)
        .values_list("pk")
    )
    self.assertQuerysetEqual(
        Book.objects.filter(pk__in=thin_books),
        ["Python Web Development with Django"],
        attrgetter("name"),
    )
def test_values_annotate_values(self):
    # A values()/annotate()/values_list() chain must still produce one
    # row per book, matching the unannotated pk listing.
    annotated_pks = (
        Book.objects.values("name")
        .annotate(n_authors=Count("authors"))
        .values_list("pk", flat=True)
    )
    plain_pks = Book.objects.values_list("pk", flat=True)
    self.assertEqual(list(annotated_pks), list(plain_pks))
def test_having_group_by(self):
    """
    A field on the LHS of a HAVING clause must also appear correctly in
    the GROUP BY clause.
    """
    filtered_names = (
        Book.objects.values_list("name")
        .annotate(n_authors=Count("authors"))
        .filter(pages__gt=F("n_authors"))
        .values_list("name", flat=True)
    )
    # Every book has more pages than authors, so nothing is filtered out.
    self.assertEqual(
        list(filtered_names),
        list(Book.objects.values_list("name", flat=True)),
    )
def test_values_list_annotation_args_ordering(self):
    """
    Annotate *args ordering should be preserved in values_list results.
    **kwargs comes after *args.
    Regression test for #23659.
    """
    books = Book.objects.values_list("publisher__name").annotate(
        Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
    ).order_by("-publisher__name")
    # Tuple layout: grouping value first, then the three positional
    # aggregates in declaration order, then the keyword aggregate last.
    self.assertEqual(books[0], ('Sams', 1, 23.09, 45.0, 528.0))
def test_annotation_disjunction(self):
    # An annotation condition OR-ed with a plain field condition must be
    # split correctly between the HAVING and WHERE clauses.
    qs = Book.objects.annotate(n_authors=Count("authors")).filter(
        Q(n_authors=2) | Q(name="Python Web Development with Django")
    )
    self.assertQuerysetEqual(
        qs, [
            "Artificial Intelligence: A Modern Approach",
            "Python Web Development with Django",
            "The Definitive Guide to Django: Web Development Done Right",
        ],
        attrgetter("name")
    )
    # Nested AND inside the OR: the n_authors=3 branch matches nothing,
    # so only the first name comes back.
    qs = Book.objects.annotate(n_authors=Count("authors")).filter(
        Q(name="The Definitive Guide to Django: Web Development Done Right") | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
    )
    self.assertQuerysetEqual(
        qs, [
            "The Definitive Guide to Django: Web Development Done Right",
        ],
        attrgetter("name")
    )
    # Disjunction mixing an aggregate comparison with an isnull test on
    # the same aggregate (publishers with no books give NULL sums).
    qs = Publisher.objects.annotate(
        rating_sum=Sum("book__rating"),
        book_count=Count("book")
    ).filter(
        Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
    ).order_by('pk')
    self.assertQuerysetEqual(
        qs, [
            "Apress",
            "Prentice Hall",
            "Jonno's House of Books",
        ],
        attrgetter("name")
    )
    # F() compared against an annotation, OR-ed with aggregate-is-NULL.
    qs = Publisher.objects.annotate(
        rating_sum=Sum("book__rating"),
        book_count=Count("book")
    ).filter(
        Q(pk__lt=F("book_count")) | Q(rating_sum=None)
    ).order_by("pk")
    self.assertQuerysetEqual(
        qs, [
            "Apress",
            "Jonno's House of Books",
        ],
        attrgetter("name")
    )
def test_quoting_aggregate_order_by(self):
    # "authorCount" is mixed-case, so ordering by it only works if the
    # generated alias is properly quoted in the ORDER BY clause.
    queryset = (
        Book.objects.filter(name="Python Web Development with Django")
        .annotate(authorCount=Count("authors"))
        .order_by("authorCount")
    )
    self.assertQuerysetEqual(
        queryset,
        [("Python Web Development with Django", 3)],
        lambda book: (book.name, book.authorCount),
    )
@skipUnlessDBFeature('supports_stddev')
def test_stddev(self):
    # Each case: aggregate class, field, sample flag, result key, expected.
    cases = [
        (StdDev, 'pages', False, 'pages__stddev', Approximate(311.46, 1)),
        (StdDev, 'rating', False, 'rating__stddev', Approximate(0.60, 1)),
        (StdDev, 'price', False, 'price__stddev', Approximate(24.16, 2)),
        (StdDev, 'pages', True, 'pages__stddev', Approximate(341.19, 2)),
        (StdDev, 'rating', True, 'rating__stddev', Approximate(0.66, 2)),
        (StdDev, 'price', True, 'price__stddev', Approximate(26.46, 1)),
        (Variance, 'pages', False, 'pages__variance', Approximate(97010.80, 1)),
        (Variance, 'rating', False, 'rating__variance', Approximate(0.36, 1)),
        (Variance, 'price', False, 'price__variance', Approximate(583.77, 1)),
        (Variance, 'pages', True, 'pages__variance', Approximate(116412.96, 1)),
        (Variance, 'rating', True, 'rating__variance', Approximate(0.44, 2)),
        (Variance, 'price', True, 'price__variance', Approximate(700.53, 2)),
    ]
    for aggregate, field, sample, key, expected in cases:
        if sample:
            result = Book.objects.aggregate(aggregate(field, sample=True))
        else:
            result = Book.objects.aggregate(aggregate(field))
        self.assertEqual(result, {key: expected})
def test_filtering_by_annotation_name(self):
    """Regression test for #14476."""
    expected = ['Peter Norvig']
    # Explicit annotation names (whether or not they collide with the
    # default `<field>__count` pattern) and the automatically generated
    # `book__count` name must all be resolvable in filter().
    querysets = [
        # Explicitly provided annotation name poses no problem.
        Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2),
        # Neither does a name matching the default pattern.
        Author.objects.annotate(book_count=Count('book')).filter(book_count=2),
        # This used to fail because the ORM couldn't resolve the
        # automatically generated annotation name `book__count`.
        Author.objects.annotate(Count('book')).filter(book__count=2),
    ]
    for queryset in querysets:
        self.assertQuerysetEqual(
            queryset.order_by('name'),
            expected,
            lambda a: a.name
        )
def test_annotate_joins(self):
    """
    Test that the base table's join isn't promoted to LOUTER. This could
    cause the query generation to fail if there is an exclude() for fk-field
    in the query, too. Refs #19087.
    """
    qs = Book.objects.annotate(n=Count('pk'))
    # join_type None marks the base table entry, i.e. not promoted.
    self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None)
    # Check that the query executes without problems.
    self.assertEqual(len(qs.exclude(publisher=-1)), 6)
@skipUnlessDBFeature("allows_group_by_pk")
def test_aggregate_duplicate_columns(self):
    # Regression test for #17144
    results = Author.objects.annotate(num_contacts=Count('book_contact_set'))
    # There should only be one GROUP BY clause, for the `id` column.
    # `name` and `age` should not be grouped on.
    grouping, gb_params = results.query.get_compiler(using='default').get_grouping([], [])
    self.assertEqual(len(grouping), 1)
    # assertIn/assertNotIn instead of bare `assert`: bare asserts are
    # stripped under `python -O` and give no failure context.
    self.assertIn('id', grouping[0])
    self.assertNotIn('name', grouping[0])
    self.assertNotIn('age', grouping[0])
    # The query group_by property should also only show the `id`.
    self.assertEqual(results.query.group_by, [('aggregation_regress_author', 'id')])
    # Ensure that we get correct results.
    self.assertEqual(
        [(a.name, a.num_contacts) for a in results.order_by('name')],
        [
            ('Adrian Holovaty', 1),
            ('Brad Dayley', 1),
            ('Jacob Kaplan-Moss', 0),
            ('James Bennett', 1),
            ('Jeffrey Forcier', 1),
            ('Paul Bissex', 0),
            ('Peter Norvig', 2),
            ('Stuart Russell', 0),
            ('Wesley J. Chun', 0),
        ]
    )
@skipUnlessDBFeature("allows_group_by_pk")
def test_aggregate_duplicate_columns_only(self):
    # Works with only() too.
    results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set'))
    grouping, gb_params = results.query.get_compiler(using='default').get_grouping([], [])
    self.assertEqual(len(grouping), 1)
    # assertIn/assertNotIn instead of bare `assert`: bare asserts are
    # stripped under `python -O` and give no failure context.
    self.assertIn('id', grouping[0])
    self.assertNotIn('name', grouping[0])
    self.assertNotIn('age', grouping[0])
    # The query group_by property should also only show the `id`.
    self.assertEqual(results.query.group_by, [('aggregation_regress_author', 'id')])
    # Ensure that we get correct results.
    self.assertEqual(
        [(a.name, a.num_contacts) for a in results.order_by('name')],
        [
            ('Adrian Holovaty', 1),
            ('Brad Dayley', 1),
            ('Jacob Kaplan-Moss', 0),
            ('James Bennett', 1),
            ('Jeffrey Forcier', 1),
            ('Paul Bissex', 0),
            ('Peter Norvig', 2),
            ('Stuart Russell', 0),
            ('Wesley J. Chun', 0),
        ]
    )
@skipUnlessDBFeature("allows_group_by_pk")
def test_aggregate_duplicate_columns_select_related(self):
    # And select_related()
    results = Book.objects.select_related('contact').annotate(
        num_authors=Count('authors'))
    grouping, gb_params = results.query.get_compiler(using='default').get_grouping([], [])
    self.assertEqual(len(grouping), 1)
    # assertIn/assertNotIn instead of bare `assert`: bare asserts are
    # stripped under `python -O` and give no failure context.
    self.assertIn('id', grouping[0])
    self.assertNotIn('name', grouping[0])
    self.assertNotIn('contact', grouping[0])
    # The query group_by property should also only show the `id`.
    self.assertEqual(results.query.group_by, [('aggregation_regress_book', 'id')])
    # Ensure that we get correct results.
    self.assertEqual(
        [(b.name, b.num_authors) for b in results.order_by('name')],
        [
            ('Artificial Intelligence: A Modern Approach', 2),
            ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
            ('Practical Django Projects', 1),
            ('Python Web Development with Django', 3),
            ('Sams Teach Yourself Django in 24 Hours', 1),
            ('The Definitive Guide to Django: Web Development Done Right', 2)
        ]
    )
def test_reverse_join_trimming(self):
    # Annotating across a reverse FK and then its forward FK must keep a
    # JOIN in the generated SQL (the join cannot be trimmed away).
    qs = Author.objects.annotate(Count('book_contact_set__contact'))
    self.assertIn(' JOIN ', str(qs.query))
def test_aggregation_with_generic_reverse_relation(self):
    """
    Regression test for #10870: Aggregates with joins ignore extra
    filters provided by setup_joins

    tests aggregations with generic reverse relations
    """
    django_book = Book.objects.get(name='Practical Django Projects')
    ItemTag.objects.create(object_id=django_book.id, tag='intermediate',
        content_type=ContentType.objects.get_for_model(django_book))
    ItemTag.objects.create(object_id=django_book.id, tag='django',
        content_type=ContentType.objects.get_for_model(django_book))
    # Assign a tag to model with same PK as the book above. If the JOIN
    # used in aggregation doesn't have content type as part of the
    # condition the annotation will also count the 'hi mom' tag for b.
    wmpk = WithManualPK.objects.create(id=django_book.pk)
    ItemTag.objects.create(object_id=wmpk.id, tag='hi mom',
        content_type=ContentType.objects.get_for_model(wmpk))
    ai_book = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence')
    ItemTag.objects.create(object_id=ai_book.id, tag='intermediate',
        content_type=ContentType.objects.get_for_model(ai_book))
    # Only the three Book-typed tags may be counted; the WithManualPK tag
    # must be excluded by the content-type condition on the join.
    self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3})
    results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name')
    self.assertEqual(
        [(b.name, b.tags__count) for b in results],
        [
            ('Practical Django Projects', 2),
            ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
            ('Artificial Intelligence: A Modern Approach', 0),
            ('Python Web Development with Django', 0),
            ('Sams Teach Yourself Django in 24 Hours', 0),
            ('The Definitive Guide to Django: Web Development Done Right', 0)
        ]
    )
def test_negated_aggregation(self):
    # Authors who do NOT have exactly two books, computed via a subquery
    # exclusion. (The original computed this identical expected list
    # twice; once is enough.)
    expected_results = [
        a.name
        for a in Author.objects.exclude(
            pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
        ).order_by('name')
    ]
    # exclude() with two AND-ed copies of the annotation condition...
    qs = Author.objects.annotate(book_cnt=Count('book')).exclude(
        Q(book_cnt=2), Q(book_cnt=2)).order_by('name')
    self.assertQuerysetEqual(
        qs,
        expected_results,
        lambda b: b.name
    )
    # ...and with OR-ed copies; both reduce to "NOT (book_cnt = 2)".
    qs = Author.objects.annotate(book_cnt=Count('book')).exclude(
        Q(book_cnt=2) | Q(book_cnt=2)).order_by('name')
    self.assertQuerysetEqual(
        qs,
        expected_results,
        lambda b: b.name
    )
def test_name_filters(self):
    # A filter may mix the auto-generated annotation name with a plain
    # field lookup inside a single OR.
    authors = (
        Author.objects.annotate(Count('book'))
        .filter(Q(book__count__exact=2) | Q(name='Adrian Holovaty'))
        .order_by('name')
    )
    self.assertQuerysetEqual(
        authors,
        ['Adrian Holovaty', 'Peter Norvig'],
        attrgetter('name')
    )
def test_name_expressions(self):
    # Aggregates are spotted correctly inside F() expressions. Adrian's
    # age is 34 in the fixtures and he has one book, so each branch of
    # the OR matches exactly one author.
    authors = (
        Author.objects.annotate(Count('book'))
        .filter(Q(name='Peter Norvig') | Q(age=F('book__count') + 33))
        .order_by('name')
    )
    self.assertQuerysetEqual(
        authors,
        ['Adrian Holovaty', 'Peter Norvig'],
        attrgetter('name')
    )
def test_ticket_11293(self):
    # A plain-field condition OR-ed with an aggregate condition must be
    # split correctly between the WHERE and HAVING clauses.
    price_condition = Q(price__gt=50)
    author_count_condition = Q(authors__count__gt=1)
    matches = (
        Book.objects.annotate(Count('authors'))
        .filter(price_condition | author_count_condition)
        .order_by('pk')
    )
    self.assertQuerysetEqual(matches, [1, 4, 5, 6], attrgetter('pk'))
def test_ticket_11293_q_immutable(self):
    """
    Check that splitting a q object to parts for where/having doesn't alter
    the original q-object.
    """
    q1 = Q(isbn='')
    q2 = Q(authors__count__gt=1)
    query = Book.objects.annotate(Count('authors'))
    query.filter(q1 | q2)
    # q2 must still hold exactly its one original child: filter() has to
    # work on internal copies, never mutate caller-owned Q objects.
    self.assertEqual(len(q2.children), 1)
def test_fobj_group_by(self):
    """
    An F() object referring to a related column works correctly when
    compared against an annotation (i.e. inside GROUP BY / HAVING).
    """
    matching = Book.objects.annotate(acount=Count('authors')).filter(
        acount=F('publisher__num_awards')
    )
    self.assertQuerysetEqual(
        matching,
        ['Sams Teach Yourself Django in 24 Hours'],
        attrgetter('name')
    )
def test_annotate_reserved_word(self):
    """
    Regression #18333 - Ensure annotated column name is properly quoted.
    """
    # "select" is an SQL keyword, so the alias has to be quoted wherever
    # it appears in the generated query.
    totals = Book.objects.annotate(select=Count('authors__id')).aggregate(
        Sum('select'), Avg('select')
    )
    self.assertEqual(
        totals,
        {
            'select__sum': 10,
            'select__avg': Approximate(1.666, places=2),
        },
    )
class JoinPromotionTests(TestCase):
    """Tests for join promotion/demotion when annotating over relations."""

    def test_ticket_21150(self):
        # Annotating over bravo__charlie must not break rows whose
        # select_related('alfa') value is NULL.
        b = Bravo.objects.create()
        c = Charlie.objects.create(bravo=b)
        qs = Charlie.objects.select_related('alfa').annotate(Count('bravo__charlie'))
        self.assertQuerysetEqual(
            qs, [c], lambda x: x)
        self.assertIs(qs[0].alfa, None)
        a = Alfa.objects.create()
        c.alfa = a
        c.save()
        # Force re-evaluation
        qs = qs.all()
        self.assertQuerysetEqual(
            qs, [c], lambda x: x)
        self.assertEqual(qs[0].alfa, a)

    def test_existing_join_not_promoted(self):
        # No promotion for existing joins. assertIn replaces the original
        # assertTrue('...' in ...) for clearer failure messages.
        qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(Count('alfa__name'))
        self.assertIn(' INNER JOIN ', str(qs.query))
        # Also, the existing join is unpromoted when doing filtering for an
        # already promoted join.
        qs = Charlie.objects.annotate(Count('alfa__name')).filter(alfa__name__isnull=False)
        self.assertIn(' INNER JOIN ', str(qs.query))
        # But, as the join is nullable, first use by annotate will be LOUTER.
        qs = Charlie.objects.annotate(Count('alfa__name'))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))

    def test_non_nullable_fk_not_promoted(self):
        # A non-nullable FK join is never promoted to LEFT OUTER.
        qs = Book.objects.annotate(Count('contact__name'))
        self.assertIn(' INNER JOIN ', str(qs.query))
| |
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
version = '0.1.0'
import os, sys, time, datetime, string, math, zipfile, codecs, re, shutil, subprocess, base64
from datetime import date
from xml.dom.minidom import parseString
import simplejson
try:
import markdown2 as markdown
except ImportError:
import markdown
try:
os.environ["TITANIUM_SDK"]
except KeyError:
print "Please set the TITANIUM_SDK environment variable"
sys.exit(1)
sdk_path = os.environ["TITANIUM_SDK"]
sys.path.append(os.path.join(sdk_path, "common"))
ignoreFiles = ['.DS_Store','.cvsignore','.gitignore']
ignoreDirs = ['.svn','_svn','.git','CVS','CVSROOT']
required_manifest_keys = ['name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
manifest_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def getText(nodelist):
    """Concatenate the text children of nodelist, normalizing booleans.

    Boolean-ish strings are canonicalized case-insensitively to lowercase
    'true'/'false'. (The original only lowercased the truthy comparison,
    so values such as 'False' or 'NO' leaked through unnormalized.)
    """
    rc = ''
    for node in nodelist:
        if node.nodeType == node.TEXT_NODE:
            rc = rc + node.data
    rc = rc.strip()
    if rc.lower() in ['true', 'yes', '1']:
        rc = 'true'
    elif rc.lower() in ['false', 'no', '0']:
        rc = 'false'
    return rc
class Compiler(object):
def __init__(self, deploytype):
    """Drive the whole build: load metadata, trace the AMD dependency
    graph from the main module, copy sources into build/, bundle and
    minify the JS, and produce the distributable zip."""
    start_time = time.time()
    if not os.path.exists(sdk_path):
        print '[ERROR] Unable to find SDK path "%s"' % sdk_path
        sys.exit(1)
    print '[INFO] Titanium Mobile Web Module Compiler v%s' % version
    self.deploytype = deploytype
    # Resolve the module directory from this file's own location.
    self.module_path = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
    self.src_path = os.path.join(self.module_path, 'src')
    self.build_path = os.path.join(self.module_path, 'build')
    self.load_manifest()
    self.check_license()
    self.load_timodule_xml()
    self.check_main()
    self.modules_map = {}
    self.require_cache = {}
    # Walk the dependency graph starting from the main module.
    self.parse_module(self.main, None)
    self.modules_to_cache = []
    # NOTE(review): this filters against build_path BEFORE the build dir
    # is recreated below, so it sees a previous run's output (or nothing
    # on a clean checkout) — confirm this ordering is intentional.
    for module in self.require_cache:
        if module != self.main and os.path.exists(os.path.join(self.build_path, module + '.js')):
            self.modules_to_cache.append(module)
    if 'precache' in self.timodule and 'requires' in self.timodule['precache'] and len(self.timodule['precache']['requires']):
        for req in self.timodule['precache']['requires']:
            self.modules_to_cache.append('commonjs:' + req)
    self.precache_images = []
    if 'precache' in self.timodule and 'images' in self.timodule['precache'] and len(self.timodule['precache']['images']):
        for img in self.timodule['precache']['images']:
            self.precache_images.append(img)
    # Start from a clean build directory.
    if os.path.exists(self.build_path):
        shutil.rmtree(self.build_path, True)
    try:
        os.makedirs(self.build_path)
    except:
        # Best effort: the directory may still exist despite the rmtree.
        pass
    self.copy(self.src_path, self.build_path)
    self.build_js()
    self.minify_js()
    self.package()
    total_time = round(time.time() - start_time)
    total_minutes = math.floor(total_time / 60)
    total_seconds = total_time % 60
    if total_minutes > 0:
        print '[INFO] Finished in %s minutes %s seconds' % (int(total_minutes), int(total_seconds))
    else:
        print '[INFO] Finished in %s seconds' % int(total_time)
def load_manifest(self):
self.manifest = {}
manifest_file = os.path.join(self.module_path, 'manifest')
if not os.path.exists(manifest_file):
print '[ERROR] Unable to find manifest file'
sys.exit(1)
for line in open(manifest_file).readlines():
line = line.strip()
if line[0:1] == '#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
self.manifest[key.strip()] = value.strip()
for key in required_manifest_keys:
if not self.manifest.has_key(key):
print '[ERROR] Missing required manifest key "%s"' % key
sys.exit(1)
if manifest_defaults.has_key(key):
defvalue = manifest_defaults[key]
curvalue = self.manifest[key]
if curvalue == defvalue:
print '[WARN] Please update the manifest key: "%s" to a non-default value' % key
def check_license(self):
c = open(os.path.join(self.module_path, 'LICENSE')).read()
if c.find(module_license_default) != -1:
print '[WARN] Please update the LICENSE file with your license text before distributing'
def load_timodule_xml(self):
global_settings = {}
mobileweb_settings = {}
timodule_file = os.path.join(self.module_path, 'timodule.xml')
if not os.path.exists(timodule_file):
print '[ERROR] Unable to find timodule.xml file'
sys.exit(1)
dom = parseString(codecs.open(timodule_file,'r','utf-8','replace').read().encode('utf-8'))
root = dom.documentElement
for node in root.childNodes:
if node.nodeType == 1 and node.nodeName not in ['android', 'iphone']:
if node.nodeName == 'mobileweb':
for subnode in node.childNodes:
if subnode.nodeType == 1:
self.get_xml_children(mobileweb_settings[subnode.nodeName], subnode.childNodes)
else:
self.get_xml_children(global_settings[node.nodeName], node.childNodes)
self.timodule = dict(global_settings.items() + mobileweb_settings.items())
def check_main(self):
self.main = self.timodule['main'] if 'main' in self.timodule else self.manifest['name']
if not os.path.exists(os.path.join(self.src_path, self.main + '.js')):
print '[ERROR] Unable to find main module "%s"' % self.main
sys.exit(1)
def get_xml_children(self, dest, nodes):
if len(nodes) > 1:
dest = {}
for child in nodes.childNodes:
if child.nodeType == 1:
self.get_xml_children(dest[child.nodeName], child.childNodes)
else:
dest = getText(child.childNodes)
def compact_path(self, path):
    """Normalize a path: force forward slashes and resolve '.' and '..'
    segments.

    Fixes a latent crash in the original: after '..' popped the last
    element, `result[-1]` raised IndexError when the list became empty.
    """
    result = []
    last_segment = None
    for segment in path.replace('\\', '/').split('/'):
        if segment == '..' and len(result) and last_segment != '..':
            result.pop()
            # Guard the empty case instead of blindly indexing [-1].
            last_segment = result[-1] if result else None
        elif segment != '.':
            last_segment = segment
            result.append(segment)
    return '/'.join(result)
def resolve(self, it, ref):
    """Map a dependency id to [base_path, relative_js_path].

    Returns [] for dependencies that cannot map to a file in the build
    tree (e.g. absolute URLs). *ref* is the requiring module's directory
    prefix, used to resolve './'-relative ids.
    """
    # Strip any AMD plugin prefix ("plugin!resource" -> "resource").
    parts = it.split('!')
    it = parts[-1]
    if it.startswith('url:'):
        it = it[4:]
        if it.startswith('/'):
            it = '.' + it
        parts = it.split('/')
        return [self.build_path, it]
    if it.find(':') != -1:
        # Looks like an absolute URL (http://...): nothing local to resolve.
        return []
    if it.startswith('/') or (len(parts) == 1 and it.endswith('.js')):
        return [self.build_path, it]
    if it.startswith('.') and ref is not None:
        it = self.compact_path(ref + it)
    parts = it.split('/')
    return [self.build_path, it]
def parse_module(self, module, ref):
    """Recursively walk the AMD define() dependency graph from *module*,
    recording every reachable module in require_cache and each module's
    direct dependencies in modules_map.

    *ref* is the requiring module's directory prefix (or None), used to
    resolve './'-relative module ids.
    """
    if module in self.require_cache or module == 'require':
        return
    # A '!' marks a plugin request such as "Ti/_/text!template.html".
    parts = module.split('!')
    if len(parts) == 1:
        if module.startswith('.') and ref is not None:
            module = self.compact_path(ref + module)
        self.require_cache[module] = 1
    dep = self.resolve(module, ref)
    if not len(dep):
        return
    if len(parts) > 1:
        self.require_cache['url:' + parts[1]] = 1
    filename = dep[1]
    if not filename.endswith('.js'):
        filename += '.js'
    source = os.path.join(dep[0], filename)
    if not os.path.exists(source):
        return
    source = codecs.open(source, 'r', 'utf-8').read()
    # Match "define(['dep', ...], function|{" to pull out the raw
    # dependency array text (group 2).
    pattern = re.compile('define\(\s*([\'\"][^\'\"]*[\'\"]\s*)?,?\s*(\[[^\]]+\])\s*?,?\s*(function|\{)')
    results = pattern.search(source)
    if results is None:
        # No define() call: a leaf module with no dependencies.
        self.modules_map[module] = []
    else:
        groups = results.groups()
        if groups is not None and len(groups):
            if groups[1] is None:
                self.modules_map[module] = []
            else:
                deps = self.parse_deps(groups[1])
                for i in range(0, len(deps)):
                    dep = deps[i]
                    parts = dep.split('!')
                    # Rebase relative ids against this module's directory.
                    ref = module.split('/')
                    ref.pop()
                    ref = '/'.join(ref) + '/'
                    if dep.startswith('.'):
                        deps[i] = self.compact_path(ref + dep)
                    if len(parts) == 1:
                        if dep.startswith('./'):
                            parts = module.split('/')
                            parts.pop()
                            parts.append(dep)
                            self.parse_module(self.compact_path('/'.join(parts)), ref)
                        else:
                            self.parse_module(dep, ref)
                    else:
                        # Plugin dependency: remember which plugin serves
                        # it, then recurse into the plugin module itself.
                        self.modules_map[dep] = parts[0]
                        self.parse_module(parts[0], module)
                        # Text-plugin resources are tracked as modules too.
                        if parts[0] == 'Ti/_/text':
                            if dep.startswith('./'):
                                parts = module.split('/')
                                parts.pop()
                                parts.append(dep)
                                self.parse_module(self.compact_path('/'.join(parts)), ref)
                            else:
                                self.parse_module(dep, ref)
                self.modules_map[module] = deps
def parse_deps(self, deps):
    """Parse the textual AMD dependency array (e.g. "['a', './b']") into
    a list of dependency id strings."""
    found = []
    if len(deps) > 2:
        # Strip the surrounding brackets, then split the elements.
        deps = deps[1:-1]
        deps = deps.split(',')
        for dep in deps:
            dep = dep.strip().split(' ')[0].strip()
            if dep.startswith('\'') or dep.startswith('"'):
                # simplejson.loads safely unquotes the string literal.
                found.append(simplejson.loads(dep))
    return found
def copy(self, src_path, dest_path):
print '[INFO] Copying %s...' % src_path
for root, dirs, files in os.walk(src_path):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name)
for file in files:
if file in ignoreFiles or file.startswith('._'):
continue
source = os.path.join(root, file)
dest = os.path.expanduser(source.replace(src_path, dest_path, 1))
dest_dir = os.path.expanduser(os.path.split(dest)[0])
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copy(source, dest)
def build_js(self):
    """Prepend a require.cache() bundle (precached modules, CommonJS
    wrappers and base64-inlined images) to the main module's JS file."""
    main_file = os.path.join(self.build_path, self.main + '.js')
    tmp = main_file + '.tmp'
    js = codecs.open(tmp, 'w', encoding='utf-8')
    if len(self.modules_to_cache) > 0 or len(self.precache_images) > 0:
        js.write('require.cache({\n')
        first = True
        for x in self.modules_to_cache:
            if x == self.main:
                continue
            is_cjs = False
            if x.startswith('commonjs:'):
                is_cjs = True
                x = x[9:]
            dep = self.resolve(x, None)
            if not len(dep):
                continue
            if not first:
                js.write(',\n')
            first = False
            filename = dep[1]
            if not filename.endswith('.js'):
                filename += '.js'
            file_path = os.path.join(dep[0], filename)
            if x.startswith('url:'):
                source = file_path + '.uncompressed.js'
                # NOTE(review): self.minify is never assigned anywhere in
                # this file, so this branch would raise AttributeError for
                # url: includes — confirm where minify should be set.
                if self.minify:
                    os.rename(file_path, source)
                    print '[INFO] Minifying include %s' % file_path
                    p = subprocess.Popen('java -Xms256m -Xmx256m -jar "%s" --compilation_level SIMPLE_OPTIMIZATIONS --js "%s" --js_output_file "%s"' % (os.path.join(sdk_path, 'mobileweb', 'closureCompiler', 'compiler.jar'), source, file_path), shell=True, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
                    stdout, stderr = p.communicate()
                    if p.returncode != 0:
                        print '[ERROR] Failed to minify "%s"' % file_path
                        for line in stderr.split('\n'):
                            if len(line):
                                print '[ERROR] %s' % line
                        print '[WARN] Leaving %s un-minified' % file_path
                        # Restore the unminified original on failure.
                        os.remove(file_path)
                        shutil.copy(source, file_path)
                # Inline the file as an escaped JS string literal.
                js.write('"%s":"%s"' % (x, codecs.open(file_path, 'r', 'utf-8').read().strip().replace('\\', '\\\\').replace('\n', '\\n\\\n').replace('\"', '\\\"')))
            elif is_cjs:
                # Wrap CommonJS modules in a define() shim.
                js.write('"%s":function(){\n/* %s */\ndefine(function(require, exports, module){\n%s\n});\n}' % (x, file_path.replace(self.build_path, ''), codecs.open(file_path, 'r', 'utf-8').read()))
            else:
                js.write('"%s":function(){\n/* %s */\n\n%s\n}' % (x, file_path.replace(self.build_path, ''), codecs.open(file_path, 'r', 'utf-8').read()))
        image_mime_types = {
            '.png': 'image/png',
            '.gif': 'image/gif',
            '.jpg': 'image/jpg',
            '.jpeg': 'image/jpg'
        }
        for x in self.precache_images:
            x = x.replace('\\', '/')
            y = x
            if y.startswith(os.sep):
                y = '.' + y
            img = os.path.join(self.module_path, os.sep.join(y.split('/')))
            if os.path.exists(img):
                fname, ext = os.path.splitext(img.lower())
                if ext in image_mime_types:
                    if not first:
                        js.write(',\n')
                    first = False
                    # Embed the image as a base64 data URI.
                    js.write('"url:%s":"data:%s;base64,%s"' % (x, image_mime_types[ext], base64.b64encode(open(img,'rb').read())))
        js.write('});\n')
    # Append the original main module source after the cache preamble,
    # then atomically swap the temp file into place.
    js.write(codecs.open(main_file, 'r', 'utf-8').read())
    js.close()
    os.remove(main_file)
    os.rename(tmp, main_file)
def minify_js(self):
    """Run the SDK's Closure-compiler-based minifier over the whole
    build directory."""
    subprocess.call('java -Xms256m -Xmx256m -cp "%s%s%s" -Djava.awt.headless=true minify "%s"' % (
        os.path.join(sdk_path, 'mobileweb', 'minify'),
        os.pathsep,
        os.path.join(sdk_path, 'mobileweb', 'closureCompiler', 'compiler.jar'),
        self.build_path
    ), shell=True)
def generate_doc(self):
docdir = os.path.join(self.module_path, 'documentation')
if not os.path.exists(docdir):
print '[WARN] Couldn\'t find documentation file at: %s' % docdir
return None
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir, file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def zip_dir(self, zf, dir, basepath):
    """Add every file under *dir* to the open ZipFile *zf*, rebasing
    paths onto *basepath* and skipping ignored or derived files."""
    for root, dirs, files in os.walk(dir):
        for name in ignoreDirs:
            if name in dirs:
                dirs.remove(name)
        for file in files:
            if file in ignoreFiles or file.endswith('.uncompressed.js'):
                continue
            # splitext always returns a 2-tuple, so test the extension
            # directly (the original's len(e) == 2 check was always true).
            if os.path.splitext(file)[1] == '.pyc':
                continue
            from_ = os.path.join(root, file)
            to_ = from_.replace(dir, basepath, 1)
            zf.write(from_, to_)
def package(self):
    """Assemble the distributable <moduleid>-commonjs-<version>.zip with
    manifest, LICENSE, package.json, build output, examples and docs."""
    # NOTE(review): `name` is computed but never used below — confirm
    # whether it was meant to appear in the zip filename.
    name = self.manifest['name'].lower()
    moduleid = self.manifest['moduleid'].lower()
    version = self.manifest['version']
    # Directory layout expected by the Titanium module installer.
    install_path = 'modules/commonjs/%s/%s' % (moduleid, version)
    zip_file = os.path.join(self.module_path, '%s-commonjs-%s.zip' % (moduleid,version))
    if os.path.exists(zip_file):
        os.remove(zip_file)
    zf = zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED)
    zf.write(os.path.join(self.module_path, 'manifest'), '%s/manifest' % install_path)
    zf.write(os.path.join(self.module_path, 'LICENSE'), '%s/LICENSE' % install_path)
    zf.writestr('%s/package.json' % install_path, simplejson.dumps({
        'name': self.manifest['name'],
        'description': self.manifest['description'],
        'version': self.manifest['version'],
        'directories': {
            'lib': '.'
        },
        'main': self.main
    }, indent=4, sort_keys=True))
    self.zip_dir(zf, 'build', '%s' % install_path)
    self.zip_dir(zf, 'example', '%s/example' % install_path)
    docs = self.generate_doc()
    if docs != None:
        for doc in docs:
            for file, html in doc.iteritems():
                # Rendered docs keep their names, with .md -> .html.
                filename = string.replace(file, '.md', '.html')
                zf.writestr('%s/documentation/%s' % (install_path, filename), html)
    zf.close()
# Command-line entry point: an optional single argument selects the
# deploy type (defaults to 'production').
if __name__ == '__main__':
    if len(sys.argv) > 1 and sys.argv[1].lower() in ['help', '--help', '-h']:
        print 'Usage: %s [<deploytype>]' % os.path.basename(sys.argv[0])
        sys.exit(1)
    Compiler('production' if len(sys.argv) <= 1 else sys.argv[1].lower())
    sys.exit(0)
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides a base class for tensor-like objects and methods for
basic tensor manipulation. It also provides a class, SquareTensor,
that provides basic methods for creating and manipulating rank 2 tensors
"""
import collections
import itertools
import os
import string
import warnings
import numpy as np
from monty.json import MSONable
from monty.serialization import loadfn
from scipy.linalg import polar
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.lattice import Lattice
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
__author__ = "Joseph Montoya"
__credits__ = "Maarten de Jong, Shyam Dwaraknath, Wei Chen, " "Mark Asta, Anubhav Jain, Terence Lew"
# Standard Voigt mapping: voigt index -> (i, j) pair of the full tensor,
# and its inverse for going from full indices back to voigt notation.
voigt_map = [(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)]
reverse_voigt_map = np.array([[0, 5, 4], [5, 1, 3], [4, 3, 2]])
# Default spherical quadrature (points/weights) bundled with the package;
# used by Tensor.average_over_unit_sphere.
DEFAULT_QUAD = loadfn(os.path.join(os.path.dirname(__file__), "quad_data.json"))
class Tensor(np.ndarray, MSONable):
"""
Base class for doing useful general operations on Nth order tensors,
without restrictions on the type (stress, elastic, strain, piezo, etc.)
"""
symbol = "T"
def __new__(cls, input_array, vscale=None, check_rank=None):
    """
    Create a Tensor object. Note that the constructor uses __new__
    rather than __init__ according to the standard method of
    subclassing numpy ndarrays.

    Args:
        input_array: (array-like with shape 3^N): array-like representing
            a tensor quantity in standard (i. e. non-voigt) notation
        vscale: (N x M array-like): a matrix corresponding
            to the coefficients of the voigt-notation tensor
        check_rank: (int): if given, raise ValueError unless the input
            array has exactly this rank
    """
    obj = np.asarray(input_array).view(cls)
    # Rank is simply the number of axes; each axis must have dimension 3.
    obj.rank = len(obj.shape)
    if check_rank and check_rank != obj.rank:
        raise ValueError("{} input must be rank {}".format(obj.__class__.__name__, check_rank))
    # Voigt shape: a leading 3-axis when the rank is odd, then one 6-axis
    # per contracted index pair.
    vshape = tuple([3] * (obj.rank % 2) + [6] * (obj.rank // 2))
    obj._vscale = np.ones(vshape)
    if vscale is not None:
        obj._vscale = vscale
    if obj._vscale.shape != vshape:
        raise ValueError("Voigt scaling matrix must be the shape of the " "voigt notation matrix or vector.")
    if not all(i == 3 for i in obj.shape):
        raise ValueError(
            "Pymatgen only supports 3-dimensional tensors, "
            "and default tensor constructor uses standard "
            "notation. To construct from voigt notation, use"
            " {}.from_voigt".format(obj.__class__.__name__)
        )
    return obj
def __array_finalize__(self, obj):
    """Propagate tensor metadata when numpy creates views or copies."""
    if obj is None:
        return
    # Carry the tensor bookkeeping over from the source object, defaulting
    # to None when the source lacks it.
    for attr in ("rank", "_vscale", "_vdict"):
        setattr(self, attr, getattr(obj, attr, None))
def __array_wrap__(self, obj):
    """
    Overrides __array_wrap__ methods in ndarray superclass to avoid errors
    associated with functions that return scalar values
    """
    # 0-d results are unwrapped to plain Python scalars.
    if len(obj.shape) == 0:
        return obj[()]
    return np.ndarray.__array_wrap__(self, obj)
def __hash__(self):
    """
    define a hash function, since numpy arrays
    have their own __eq__ method
    """
    # tobytes() replaces ndarray.tostring(), a deprecated alias with
    # identical output that was removed in numpy 2.0.
    return hash(self.tobytes())
def __repr__(self):
    # e.g. "Tensor([[...]])" — delegate the array body to __str__.
    return "{}({})".format(self.__class__.__name__, self.__str__())
def zeroed(self, tol=1e-3):
    """
    Return a copy of the tensor in which every entry whose magnitude is
    below ``tol`` has been replaced with zero.
    """
    cleaned = self.copy()
    small = abs(cleaned) < tol
    cleaned[small] = 0
    return cleaned
def transform(self, symm_op):
    """
    Applies a transformation (via a symmetry operation) to a tensor.

    Args:
        symm_op (SymmOp): a symmetry operation to apply to the tensor

    Returns:
        a new tensor of the same class, transformed by symm_op
    """
    return self.__class__(symm_op.transform_tensor(self))
def rotate(self, matrix, tol=1e-3):
    """
    Applies a rotation directly, and tests input matrix to ensure a valid
    rotation.

    Args:
        matrix (3x3 array-like): rotation matrix to be applied to tensor
        tol (float): tolerance for testing rotation matrix validity

    Raises:
        ValueError: if the matrix fails SquareTensor.is_rotation(tol)
    """
    matrix = SquareTensor(matrix)
    if not matrix.is_rotation(tol):
        raise ValueError("Rotation matrix is not valid.")
    # Wrap the rotation in a SymmOp with zero translation and reuse
    # the generic transform() path.
    sop = SymmOp.from_rotation_and_translation(matrix, [0.0, 0.0, 0.0])
    return self.transform(sop)
def einsum_sequence(self, other_arrays, einsum_string=None):
    """
    Calculates the result of an einstein summation expression

    Args:
        other_arrays (list): arrays to contract with this tensor
        einsum_string (str): optional explicit subscripts; when omitted,
            the trailing indices of this tensor are contracted against
            the other arrays in order

    Raises:
        ValueError: if other_arrays is not a list
    """
    if not isinstance(other_arrays, list):
        raise ValueError("other tensors must be list of " "tensors or tensor input")
    other_arrays = [np.array(a) for a in other_arrays]
    if not einsum_string:
        lc = string.ascii_lowercase
        einsum_string = lc[: self.rank]
        other_ranks = [len(a.shape) for a in other_arrays]
        # Start the shared indices so that the other arrays line up with
        # the trailing axes of this tensor.
        idx = self.rank - sum(other_ranks)
        for length in other_ranks:
            einsum_string += "," + lc[idx : idx + length]
            idx += length
    einsum_args = [self] + list(other_arrays)
    return np.einsum(einsum_string, *einsum_args)
def project(self, n):
    """
    Convenience method projecting the tensor onto a direction: the
    tensor is contracted with the unit vector along ``n`` on every
    index.

    Args:
        n (3x1 array-like): direction to project onto

    Returns (float):
        scalar projection of the tensor along n
    """
    direction = get_uvec(n)
    return self.einsum_sequence([direction] * self.rank)
def average_over_unit_sphere(self, quad=None):
    """
    Average of the tensor projection over the unit sphere, computed
    with a quadrature rule.

    Args:
        quad (dict): quadrature for integration, a dictionary with
            "points" and "weights" keys; defaults to DEFAULT_QUAD
            (quadpy.sphere.Lebedev(19) as read from file)

    Returns:
        weighted sum of tensor projections over the quadrature points
    """
    quad = quad or DEFAULT_QUAD
    pairs = zip(quad["weights"], quad["points"])
    return sum(weight * self.project(point) for weight, point in pairs)
def get_grouped_indices(self, voigt=False, **kwargs):
    """
    Gets index sets for equivalent tensor values.

    Args:
        voigt (bool): whether to get grouped indices
            of voigt or full notation tensor, defaults
            to false
        **kwargs: keyword args for np.isclose. Can take atol
            and rtol for absolute and relative tolerance, e. g.

            >>> tensor.group_array_indices(atol=1e-8)

            or

            >>> tensor.group_array_indices(rtol=1e-5)

    Returns:
        list of index groups where tensor values are equivalent to
        within tolerances
    """
    if voigt:
        array = self.voigt
    else:
        array = self
    # all index tuples of the chosen representation
    indices = list(itertools.product(*[range(n) for n in array.shape]))
    remaining = indices.copy()
    # Start with everything near zero
    grouped = [list(zip(*np.where(np.isclose(array, 0, **kwargs))))]
    remaining = [i for i in remaining if i not in grouped[0]]
    # Iteratively run through remaining indices
    while remaining:
        # group everything np.isclose to the value at the first
        # still-ungrouped index, then drop those from `remaining`
        new = list(zip(*np.where(np.isclose(array, array[remaining[0]], **kwargs))))
        grouped.append(new)
        remaining = [i for i in remaining if i not in new]
    # Don't return any empty lists
    return [g for g in grouped if g]
def get_symbol_dict(self, voigt=True, zero_index=False, **kwargs):
    """
    Creates a summary dict mapping symbol strings (e.g. "c_11") to the
    representative tensor value of each equivalence group.

    Args:
        voigt (bool): whether to build the dict for the voigt-notation
            tensor rather than the full-notation one; defaults to True
        zero_index (bool): whether indices in symbols start at zero;
            defaults to False since tensor notations tend to use
            one-indexing rather than python-style zero indexing
        **kwargs: keyword args for np.isclose (atol/rtol), e. g.

            >>> tensor.get_symbol_dict(atol=1e-8)

            or

            >>> tensor.get_symbol_dict(rtol=1e-5)

    Returns:
        dict of symbol -> representative value, omitting groups whose
        representative value is (close to) zero
    """
    symbol_dict = {}
    array = self.voigt if voigt else self
    groups = self.get_grouped_indices(voigt=voigt, **kwargs)
    offset = 0 if zero_index else 1
    for group in groups:
        first = group[0]
        name = self.symbol + "_" + "".join(str(i + offset) for i in first)
        value = array[first]
        # skip the near-zero group
        if not np.isclose(value, 0):
            symbol_dict[name] = value
    return symbol_dict
def round(self, decimals=0):
    """
    Wrapper around numpy.round that preserves the tensor class.

    Args:
        decimals: number of decimal places to round to (default: 0).
            If negative, it specifies the number of positions to the
            left of the decimal point.

    Returns (Tensor):
        rounded tensor of the same type
    """
    rounded = np.round(self, decimals=decimals)
    return self.__class__(rounded)
@property
def symmetrized(self):
    """
    Generally symmetrized tensor: the average of the tensor's
    transposes over all possible permutations of its indices.
    """
    perms = list(itertools.permutations(range(self.rank)))
    total = sum(np.transpose(self, perm) for perm in perms)
    return total / len(perms)
@property
def voigt_symmetrized(self):
    """
    "Voigt"-symmetrized tensor: the voigt-notation form averaged over
    all permutations of its voigt indices, converted back to full
    notation.

    Raises:
        ValueError: if the rank is odd or less than 2
    """
    if self.rank % 2 != 0 or self.rank < 2:
        raise ValueError("V-symmetrization requires rank even and >= 2")
    v = self.voigt
    perms = list(itertools.permutations(range(v.ndim)))
    averaged = sum(np.transpose(v, perm) for perm in perms) / len(perms)
    return self.__class__.from_voigt(averaged)
def is_symmetric(self, tol=1e-5):
    """
    Tests whether the tensor is symmetric, based on the residual
    against its symmetric part (self.symmetrized).

    Args:
        tol (float): tolerance to test for symmetry
    """
    residual = self - self.symmetrized
    return (residual < tol).all()
def fit_to_structure(self, structure, symprec=0.1):
    """
    Returns the tensor averaged over the symmetry operations of a
    structure, i.e. a tensor invariant with respect to those
    operations.

    Args:
        structure (Structure): structure from which to generate
            symmetry operations
        symprec (float): symmetry tolerance for the Spacegroup
            Analyzer used to generate the symmetry operations
    """
    analyzer = SpacegroupAnalyzer(structure, symprec)
    ops = analyzer.get_symmetry_operations(cartesian=True)
    return sum(self.transform(op) for op in ops) / len(ops)
def is_fit_to_structure(self, structure, tol=1e-2):
    """
    Tests whether the tensor is invariant with respect to the symmetry
    operations of a structure, by testing that the residual against
    the structure-fitted tensor is below a tolerance.

    Args:
        structure (Structure): structure to be fit to
        tol (float): tolerance for symmetry testing
    """
    residual = self - self.fit_to_structure(structure)
    return (residual < tol).all()
@property
def voigt(self):
    """
    Returns the tensor in Voigt notation, scaled by ``_vscale``.
    Warns if the tensor lacks the symmetry required for a lossless
    voigt conversion.
    """
    voigt_map = self.get_voigt_dict(self.rank)
    v_matrix = np.zeros(self._vscale.shape, dtype=self.dtype)
    for full_ind, voigt_ind in voigt_map.items():
        v_matrix[voigt_ind] = self[full_ind]
    if not self.is_voigt_symmetric():
        warnings.warn("Tensor is not symmetric, information may be lost in voigt conversion.")
    return v_matrix * self._vscale
def is_voigt_symmetric(self, tol=1e-6):
    """
    Tests symmetry of tensor to that necessary for voigt-conversion
    by grouping indices into pairs and constructing a sequence of
    possible permutations to be used in a tensor transpose.

    Args:
        tol (float): tolerance on the residual of each transpose test
    """
    # For odd rank, the single leading index stays fixed ([0]);
    # for even rank this first piece is empty and contributes nothing.
    transpose_pieces = [[[0 for i in range(self.rank % 2)]]]
    # Remaining indices are grouped into consecutive pairs.
    transpose_pieces += [[range(j, j + 2)] for j in range(self.rank % 2, self.rank, 2)]
    # Each pair may either keep its order or be swapped (reversed).
    for n in range(self.rank % 2, len(transpose_pieces)):
        if len(transpose_pieces[n][0]) == 2:
            transpose_pieces[n] += [transpose_pieces[n][0][::-1]]
    # The tensor must be invariant under every combination of pair swaps.
    for trans_seq in itertools.product(*transpose_pieces):
        trans_seq = list(itertools.chain(*trans_seq))
        if (self - self.transpose(trans_seq) > tol).any():
            return False
    return True
@staticmethod
def get_voigt_dict(rank):
    """
    Returns a dictionary mapping full-notation tensor indices to the
    corresponding voigt-notation indices for the given rank.

    Args:
        rank (int): tensor rank to generate the voigt map for
    """
    vdict = {}
    n_free = rank % 2  # leading unpaired cartesian index (odd rank only)
    for full_ind in itertools.product(*[range(3)] * rank):
        voigt_ind = full_ind[:n_free]
        # each remaining pair of cartesian indices maps to one voigt index
        for pair in range(rank // 2):
            start = n_free + 2 * pair
            voigt_ind += (reverse_voigt_map[full_ind[start : start + 2]],)
        vdict[full_ind] = voigt_ind
    return vdict
@classmethod
def from_voigt(cls, voigt_input):
    """
    Constructor based on the voigt notation vector or matrix.

    Args:
        voigt_input (array-like): voigt input for a given tensor

    Raises:
        ValueError: if the input shape is inconsistent with the rank
            inferred from it
    """
    voigt = np.array(voigt_input)
    # each full-notation axis of length 3 contributes 3 (unpaired) or
    # 6 (paired) to the sum of the voigt shape
    rank = sum(voigt.shape) // 3
    t = cls(np.zeros([3] * rank))
    if voigt.shape != t._vscale.shape:
        raise ValueError("Invalid shape for voigt matrix")
    voigt = voigt / t._vscale
    for full_ind, voigt_ind in t.get_voigt_dict(rank).items():
        t[full_ind] = voigt[voigt_ind]
    return cls(t)
@staticmethod
def get_ieee_rotation(structure, refine_rotation=True):
    """
    Given a structure associated with a tensor, determines
    the rotation matrix for IEEE conversion according to
    the 1987 IEEE standards.

    Args:
        structure (Structure): a structure associated with the
            tensor to be converted to the IEEE standard
        refine_rotation (bool): whether to refine the rotation
            using SquareTensor.refine_rotation

    Returns:
        SquareTensor: rotation matrix taking the structure's lattice
        frame to the IEEE frame
    """
    # Check conventional setting:
    sga = SpacegroupAnalyzer(structure)
    dataset = sga.get_symmetry_dataset()
    trans_mat = dataset["transformation_matrix"]
    # conventional lattice obtained by undoing the transformation matrix
    conv_latt = Lattice(np.transpose(np.dot(np.transpose(structure.lattice.matrix), np.linalg.inv(trans_mat))))
    xtal_sys = sga.get_crystal_system()
    vecs = conv_latt.matrix
    lengths = np.array(conv_latt.abc)
    angles = np.array(conv_latt.angles)
    rotation = np.zeros((3, 3))
    # IEEE rules: a,b,c || x1,x2,x3
    if xtal_sys == "cubic":
        rotation = [vecs[i] / lengths[i] for i in range(3)]
    # IEEE rules: a=b in length; c,a || x3, x1
    elif xtal_sys == "tetragonal":
        # rows sorted by increasing lattice-vector length
        rotation = np.array([vec / mag for (mag, vec) in sorted(zip(lengths, vecs), key=lambda x: x[0])])
        if abs(lengths[2] - lengths[1]) < abs(lengths[1] - lengths[0]):
            # .copy() on the RHS protects row 0 while row swap happens in place
            rotation[0], rotation[2] = rotation[2], rotation[0].copy()
        rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))
    # IEEE rules: c<a<b; c,a || x3,x1
    elif xtal_sys == "orthorhombic":
        rotation = [vec / mag for (mag, vec) in sorted(zip(lengths, vecs))]
        # roll so the shortest (c) lands on x3
        rotation = np.roll(rotation, 2, axis=0)
    # IEEE rules: c,a || x3,x1, c is threefold axis
    # Note this also includes rhombohedral crystal systems
    elif xtal_sys in ("trigonal", "hexagonal"):
        # find threefold axis:
        tf_index = np.argmin(abs(angles - 120.0))
        non_tf_mask = np.logical_not(angles == angles[tf_index])
        rotation[2] = get_uvec(vecs[tf_index])
        rotation[0] = get_uvec(vecs[non_tf_mask][0])
        rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))
    # IEEE rules: b,c || x2,x3; alpha=beta=90, c<a
    elif xtal_sys == "monoclinic":
        # Find unique axis
        u_index = np.argmax(abs(angles - 90.0))
        n_umask = np.logical_not(angles == angles[u_index])
        rotation[1] = get_uvec(vecs[u_index])
        # Shorter of remaining lattice vectors for c axis
        c = [vec / mag for (mag, vec) in sorted(zip(lengths[n_umask], vecs[n_umask]))][0]
        rotation[2] = np.array(c)
        rotation[0] = np.cross(rotation[1], rotation[2])
    # IEEE rules: c || x3, x2 normal to ac plane
    elif xtal_sys == "triclinic":
        rotation = [vec / mag for (mag, vec) in sorted(zip(lengths, vecs))]
        rotation[1] = get_uvec(np.cross(rotation[2], rotation[0]))
        rotation[0] = np.cross(rotation[1], rotation[2])
    rotation = SquareTensor(rotation)
    if refine_rotation:
        rotation = rotation.refine_rotation()
    return rotation
def convert_to_ieee(self, structure, initial_fit=True, refine_rotation=True):
    """
    Given a structure associated with a tensor, attempts a
    calculation of the tensor in IEEE format according to
    the 1987 IEEE standards.

    Args:
        structure (Structure): a structure associated with the
            tensor to be converted to the IEEE standard
        initial_fit (bool): flag to indicate whether the initial
            tensor is fit to the symmetry of the structure.
            Defaults to True. Note that if False, inconsistent
            results may be obtained due to symmetrically
            equivalent, but distinct transformations
            being used in different versions of spglib.
        refine_rotation (bool): whether to refine the rotation
            produced by the ieee transform generator, default True
    """
    ieee_rot = self.get_ieee_rotation(structure, refine_rotation)
    if initial_fit:
        # pylint: disable=E1101
        fitted = self.copy().fit_to_structure(structure)
    else:
        fitted = self.copy()
    return fitted.rotate(ieee_rot, tol=1e-2)
def structure_transform(self, original_structure, new_structure, refine_rotation=True):
    """
    Transforms a tensor from the basis of one structure into the basis
    defined by another structure.

    Args:
        original_structure (Structure): structure corresponding
            to the basis of the current tensor
        new_structure (Structure): structure corresponding to the
            desired basis
        refine_rotation (bool): whether to refine the rotations
            generated in get_ieee_rotation

    Returns:
        Tensor expressed in the basis of new_structure
    """
    if not StructureMatcher().fit(original_structure, new_structure):
        warnings.warn("original and new structures do not match!")
    rot_original = self.get_ieee_rotation(original_structure, refine_rotation)
    rot_new = self.get_ieee_rotation(new_structure, refine_rotation)
    # Express the tensor in the IEEE frame of the original structure,
    # then undo the IEEE rotation of the new structure.
    ieee_form = self.rotate(rot_original)
    return ieee_form.rotate(np.transpose(rot_new))
@classmethod
def from_values_indices(
    cls,
    values,
    indices,
    populate=False,
    structure=None,
    voigt_rank=None,
    vsym=True,
    verbose=False,
):
    """
    Creates a tensor from values and indices, with options
    for populating the remainder of the tensor.

    Args:
        values (floats): numbers to place at indices
        indices (array-likes): indices to place values at
        populate (bool): whether to populate the tensor
        structure (Structure): structure to base population
            or fit_to_structure on
        voigt_rank (int): full tensor rank to indicate the
            shape of the resulting tensor. This is necessary
            if one provides a set of indices more minimal than
            the shape of the tensor they want, e.g.
            Tensor.from_values_indices((0, 0), 100)
        vsym (bool): whether to voigt symmetrize during the
            optimization procedure
        verbose (bool): whether to populate verbosely
    """
    # auto-detect voigt notation
    # TODO: refactor rank inheritance to make this easier
    indices = np.array(indices)
    if voigt_rank:
        # BUGFIX: wrap in np.array -- a plain list has no .astype(),
        # so the base construction below crashed whenever voigt_rank
        # was supplied
        shape = np.array([3] * (voigt_rank % 2) + [6] * (voigt_rank // 2))
    else:
        shape = np.ceil(np.max(indices + 1, axis=0) / 3.0) * 3
    base = np.zeros(shape.astype(int))
    for v, idx in zip(values, indices):
        base[tuple(idx)] = v
    # an axis of length 6 indicates voigt notation
    if 6 in shape:
        obj = cls.from_voigt(base)
    else:
        obj = cls(base)
    if populate:
        assert structure, "Populate option must include structure input"
        obj = obj.populate(structure, vsym=vsym, verbose=verbose)
    elif structure:
        obj = obj.fit_to_structure(structure)
    return obj
def populate(self, structure, prec=1e-5, maxiter=200, verbose=False, precond=True, vsym=True):
    """
    Takes a partially populated tensor, and populates the non-zero
    entries according to the following procedure, iterated until
    the desired convergence (specified via prec) is achieved.

    1. Find non-zero entries
    2. Symmetrize the tensor with respect to crystal symmetry and
       (optionally) voigt symmetry
    3. Reset the non-zero entries of the original tensor

    Args:
        structure (structure object): structure supplying the symmetry
        prec (float): precision for determining a non-zero value
        maxiter (int): maximum iterations for populating the tensor
        verbose (bool): whether to populate verbosely
        precond (bool): whether to precondition by cycling through
            all symmops and storing new nonzero values, default True
        vsym (bool): whether to enforce voigt symmetry, defaults
            to True

    Returns:
        populated tensor of the same class
    """
    # Entries already provided by the caller; these are re-imposed after
    # every iteration.  BUGFIX: previously only defined inside the
    # precond branch, raising NameError when precond=False.
    mask = abs(self) > prec
    if precond:
        # Generate the guess from populated
        sops = SpacegroupAnalyzer(structure).get_symmetry_operations()
        guess = Tensor(np.zeros(self.shape))
        guess[mask] = self[mask]

        def merge(old, new):
            # merge non-zero entries of `new` into `old` in place,
            # averaging where both are populated
            gmask = np.abs(old) > prec
            nmask = np.abs(new) > prec
            new_mask = np.logical_not(gmask) * nmask
            avg_mask = gmask * nmask
            old[avg_mask] = (old[avg_mask] + new[avg_mask]) / 2.0
            old[new_mask] = new[new_mask]

        if verbose:
            print("Preconditioning for {} symmops".format(len(sops)))
        for sop in sops:
            rot = guess.transform(sop)
            # Store non-zero entries of new that weren't previously
            # in the guess in the guess
            merge(guess, rot)
        if vsym:
            # BUGFIX: verbose message moved inside the vsym check so it
            # is only printed when voigt preconditioning actually runs
            if verbose:
                print("Preconditioning for voigt symmetry")
            v = guess.voigt
            perms = list(itertools.permutations(range(len(v.shape))))
            for perm in perms:
                vtrans = np.transpose(v, perm)
                merge(v, vtrans)
            guess = Tensor.from_voigt(v)
    else:
        # BUGFIX: use a Tensor, not a bare ndarray -- the iteration below
        # calls fit_to_structure/voigt_symmetrized on the guess
        guess = Tensor(np.zeros(self.shape))
    assert guess.shape == self.shape, "Guess must have same shape"
    converged = False
    test_new, test_old = [guess.copy()] * 2
    for i in range(maxiter):
        test_new = test_old.fit_to_structure(structure)
        if vsym:
            test_new = test_new.voigt_symmetrized
        diff = np.abs(test_old - test_new)
        converged = (diff < prec).all()
        if converged:
            break
        # re-impose the caller-provided entries before the next pass
        test_new[mask] = self[mask]
        test_old = test_new
        if verbose:
            print("Iteration {}: {}".format(i, np.max(diff)))
    if not converged:
        max_diff = np.max(np.abs(self - test_new))
        warnings.warn("Warning, populated tensor is not converged " "with max diff of {}".format(max_diff))
    return self.__class__(test_new)
def as_dict(self, voigt: bool = False) -> dict:
    """
    Serializes the tensor object.

    Args:
        voigt (bool): flag for whether to store entries in
            voigt-notation. Defaults to False, as information
            may be lost in conversion.

    Returns (dict):
        serialized format tensor object
    """
    array = self.voigt if voigt else self
    serialized = {
        "@module": self.__class__.__module__,
        "@class": self.__class__.__name__,
        "input_array": array.tolist(),
    }
    if voigt:
        serialized["voigt"] = voigt
    return serialized
@classmethod
def from_dict(cls, d):
    """MSONAble from_dict implementation."""
    # voigt-serialized dicts carry a truthy "voigt" key
    if d.get("voigt"):
        return cls.from_voigt(d["input_array"])
    return cls(d["input_array"])
class TensorCollection(collections.abc.Sequence, MSONable):
    """
    A sequence of tensors, useful for fitting data or for representing
    a tensor expansion.
    """

    def __init__(self, tensor_list, base_class=Tensor):
        """
        :param tensor_list: List of tensors.
        :param base_class: Class to be used.
        """
        self.tensors = [tensor if isinstance(tensor, base_class) else base_class(tensor) for tensor in tensor_list]

    def __len__(self):
        return len(self.tensors)

    def __getitem__(self, ind):
        return self.tensors[ind]

    def __iter__(self):
        return iter(self.tensors)

    def zeroed(self, tol=1e-3):
        """
        :param tol: Tolerance
        :return: TensorCollection where small values are set to 0.
        """
        return self.__class__([tensor.zeroed(tol) for tensor in self.tensors])

    def transform(self, symm_op):
        """
        Transforms TensorCollection with a symmetry operation.

        :param symm_op: SymmetryOperation.
        :return: TensorCollection.
        """
        return self.__class__([tensor.transform(symm_op) for tensor in self.tensors])

    def rotate(self, matrix, tol=1e-3):
        """
        Rotates TensorCollection.

        :param matrix: Rotation matrix.
        :param tol: tolerance.
        :return: TensorCollection.
        """
        return self.__class__([tensor.rotate(matrix, tol) for tensor in self.tensors])

    @property
    def symmetrized(self):
        """
        :return: TensorCollection where all tensors are symmetrized.
        """
        return self.__class__([tensor.symmetrized for tensor in self.tensors])

    def is_symmetric(self, tol=1e-5):
        """
        :param tol: tolerance
        :return: Whether all tensors are symmetric.
        """
        return all(tensor.is_symmetric(tol) for tensor in self.tensors)

    def fit_to_structure(self, structure, symprec=0.1):
        """
        Fits all tensors to a Structure.

        :param structure: Structure
        :param symprec: symmetry precision.
        :return: TensorCollection.
        """
        return self.__class__([tensor.fit_to_structure(structure, symprec) for tensor in self.tensors])

    def is_fit_to_structure(self, structure, tol=1e-2):
        """
        :param structure: Structure
        :param tol: tolerance
        :return: Whether all tensors are fitted to Structure.
        """
        return all(tensor.is_fit_to_structure(structure, tol) for tensor in self.tensors)

    @property
    def voigt(self):
        """
        :return: list of the voigt form of every tensor.
        """
        return [tensor.voigt for tensor in self.tensors]

    @property
    def ranks(self):
        """
        :return: Ranks for all tensors.
        """
        return [tensor.rank for tensor in self.tensors]

    def is_voigt_symmetric(self, tol=1e-6):
        """
        :param tol: tolerance
        :return: Whether all tensors are voigt symmetric.
        """
        return all(tensor.is_voigt_symmetric(tol) for tensor in self.tensors)

    @classmethod
    def from_voigt(cls, voigt_input_list, base_class=Tensor):
        """
        Creates TensorCollection from voigt form.

        :param voigt_input_list: List of voigt tensors
        :param base_class: Class for tensor.
        :return: TensorCollection.
        """
        return cls([base_class.from_voigt(v) for v in voigt_input_list])

    def convert_to_ieee(self, structure, initial_fit=True, refine_rotation=True):
        """
        Convert all tensors to IEEE.

        :param structure: Structure
        :param initial_fit: Whether to perform an initial fit.
        :param refine_rotation: Whether to refine the rotation.
        :return: TensorCollection.
        """
        converted = [tensor.convert_to_ieee(structure, initial_fit, refine_rotation) for tensor in self.tensors]
        return self.__class__(converted)

    def round(self, *args, **kwargs):
        """
        Round all tensors.

        :param args: Passthrough to Tensor.round
        :param kwargs: Passthrough to Tensor.round
        :return: TensorCollection.
        """
        return self.__class__([tensor.round(*args, **kwargs) for tensor in self.tensors])

    @property
    def voigt_symmetrized(self):
        """
        :return: TensorCollection where all tensors are voigt symmetrized.
        """
        return self.__class__([tensor.voigt_symmetrized for tensor in self.tensors])

    def as_dict(self, voigt=False):
        """
        :param voigt: Whether to use voigt form.
        :return: Dict representation of TensorCollection.
        """
        source = self.voigt if voigt else self
        serialized = {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "tensor_list": [tensor.tolist() for tensor in source],
        }
        if voigt:
            serialized["voigt"] = voigt
        return serialized

    @classmethod
    def from_dict(cls, d):
        """
        Creates TensorCollection from dict.

        :param d: dict
        :return: TensorCollection
        """
        if d.get("voigt"):
            return cls.from_voigt(d["tensor_list"])
        return cls(d["tensor_list"])
class SquareTensor(Tensor):
    """
    Base class for doing useful general operations on second rank tensors
    (stress, strain etc.).
    """

    def __new__(cls, input_array, vscale=None):
        """
        Create a SquareTensor object. Note that the constructor uses __new__
        rather than __init__ according to the standard method of
        subclassing numpy ndarrays. Error is thrown when the class is
        initialized with non-square matrix.

        Args:
            input_array (3x3 array-like): the 3x3 array-like
                representing the content of the tensor
            vscale (6x1 array-like): 6x1 array-like scaling the
                voigt-notation vector with the tensor entries
        """
        obj = super().__new__(cls, input_array, vscale, check_rank=2)
        return obj.view(cls)

    @property
    def trans(self):
        """
        Shorthand for transpose on SquareTensor.
        """
        return SquareTensor(np.transpose(self))

    @property
    def inv(self):
        """
        Shorthand for matrix inverse on SquareTensor.

        Raises:
            ValueError: if the tensor is singular (zero determinant)
        """
        if self.det == 0:
            raise ValueError("SquareTensor is non-invertible")
        return SquareTensor(np.linalg.inv(self))

    @property
    def det(self):
        """
        Shorthand for the determinant of the SquareTensor.
        """
        return np.linalg.det(self)

    def is_rotation(self, tol=1e-3, include_improper=True):
        """
        Test to see if tensor is a valid rotation matrix: checks that the
        inverse equals the transpose and that the determinant is 1 (or
        +/- 1 when improper rotations are allowed) within tolerance.

        Args:
            tol (float): tolerance applied to both the determinant test
                and the inverse-equals-transpose test
            include_improper (bool): whether improper rotations
                (determinant -1) are considered valid
        """
        det = np.linalg.det(self)
        # BUGFIX: previously np.abs was applied unconditionally before
        # this check, so include_improper=False never rejected improper
        # rotations; now the sign is only discarded when they're allowed.
        if include_improper:
            det = np.abs(det)
        return (np.abs(self.inv - self.trans) < tol).all() and (np.abs(det - 1.0) < tol)

    def refine_rotation(self):
        """
        Helper method for refining rotation matrix by ensuring
        that second and third rows are perpendicular to the first.
        Gets new y vector from an orthogonal projection of x onto y
        and the new z vector from a cross product of the new x and y.

        Returns:
            new rotation matrix
        """
        new_x, y = get_uvec(self[0]), get_uvec(self[1])
        # Get a projection on y
        new_y = y - np.dot(new_x, y) * new_x
        new_z = np.cross(new_x, new_y)
        return SquareTensor([new_x, new_y, new_z])

    def get_scaled(self, scale_factor):
        """
        Scales the tensor by a certain multiplicative scale factor.

        Args:
            scale_factor (float): scalar multiplier to be applied to the
                SquareTensor object
        """
        return SquareTensor(self * scale_factor)

    @property
    def principal_invariants(self):
        """
        Returns a list of principal invariants for the tensor,
        which are the values of the coefficients of the characteristic
        polynomial for the matrix.
        """
        return np.poly(self)[1:] * np.array([-1, 1, -1])

    def polar_decomposition(self, side="right"):
        """
        Calculates matrices for polar decomposition.
        """
        return polar(self, side=side)
def get_uvec(vec):
    """
    Gets a unit vector parallel to the input vector; vectors with
    near-zero norm (< 1e-8) are returned unchanged.
    """
    magnitude = np.linalg.norm(vec)
    return vec if magnitude < 1e-8 else vec / magnitude
def symmetry_reduce(tensors, structure, tol=1e-8, **kwargs):
    """
    Converts a list of tensors corresponding to a structure into a
    TensorMapping of unique tensors, where each value is the list of
    symmetry operations that reconstruct the duplicates removed from
    the input list.

    Args:
        tensors (list of tensors): list of Tensor objects to test for
            symmetrically-equivalent duplicates
        structure (Structure): structure from which to get symmetry
        tol (float): tolerance for tensor equivalence
        kwargs: keyword arguments for the SpacegroupAnalyzer

    Returns:
        TensorMapping of unique tensors to the symmetry operations
        which will reconstruct the remaining tensors as values
    """
    sga = SpacegroupAnalyzer(structure, **kwargs)
    symmops = sga.get_symmetry_operations(cartesian=True)
    unique_mapping = TensorMapping([tensors[0]], [[]], tol=tol)
    for tensor in tensors[1:]:
        # for/else: the else branch runs only when no symmop maps an
        # existing unique tensor onto this one
        for unique_tensor, symmop in itertools.product(unique_mapping, symmops):
            if np.allclose(unique_tensor.transform(symmop), tensor, atol=tol):
                unique_mapping[unique_tensor].append(symmop)
                break
        else:
            unique_mapping[tensor] = []
    return unique_mapping
class TensorMapping(collections.abc.MutableMapping):
    """
    Base class for tensor mappings, which function much like
    a dictionary, but use numpy routines to determine approximate
    equality to keys for getting and setting items.

    This is intended primarily for convenience with things like
    stress-strain pairs and fitting data manipulation. In general,
    it is significantly less robust than a typical hashing
    and should be used with care.
    """

    def __init__(self, tensors=None, values=None, tol=1e-5):
        """
        Initialize a TensorMapping

        Args:
            tensors ([Tensor]): list of tensors used as keys
            values ([]): list of values to be associated with tensors
            tol (float): an absolute tolerance for getting and setting
                items in the mapping

        Raises:
            ValueError: if tensors and values have different lengths
        """
        self._tensor_list = tensors or []
        self._value_list = values or []
        if not len(self._tensor_list) == len(self._value_list):
            # BUGFIX: the implicit string concatenation was missing a
            # space, producing "...with tensorsand values..."
            raise ValueError("TensorMapping must be initialized with tensors and values of equivalent length")
        self.tol = tol

    def __getitem__(self, item):
        index = self._get_item_index(item)
        if index is None:
            raise KeyError("{} not found in mapping.".format(item))
        return self._value_list[index]

    def __setitem__(self, key, value):
        index = self._get_item_index(key)
        if index is None:
            # no approximately-equal key exists: append a new entry
            self._tensor_list.append(key)
            self._value_list.append(value)
        else:
            self._value_list[index] = value

    def __delitem__(self, key):
        index = self._get_item_index(key)
        self._tensor_list.pop(index)
        self._value_list.pop(index)

    def __len__(self):
        return len(self._tensor_list)

    def __iter__(self):
        for item in self._tensor_list:
            yield item

    def values(self):
        """
        :return: Values in mapping.
        """
        return self._value_list

    def items(self):
        """
        :return: Items in mapping.
        """
        return zip(self._tensor_list, self._value_list)

    def __contains__(self, item):
        return self._get_item_index(item) is not None

    def _get_item_index(self, item):
        # Find the index of the stored key approximately equal to `item`:
        # every entry must differ by less than self.tol.
        if len(self._tensor_list) == 0:
            return None
        item = np.array(item)
        axis = tuple(range(1, len(item.shape) + 1))
        mask = np.all(np.abs(np.array(self._tensor_list) - item) < self.tol, axis=axis)
        indices = np.where(mask)[0]
        if len(indices) > 1:
            raise ValueError("Tensor key collision.")
        if len(indices) == 0:
            return None
        return indices[0]
| |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: rpc.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='rpc.proto',
package='pyRpc2.pyRpc',
serialized_pb=_b('\n\trpc.proto\x12\x0cpyRpc2.pyRpc\"K\n\x07Request\x12\x14\n\x0cservice_name\x18\x01 \x02(\t\x12\x13\n\x0bmethod_name\x18\x02 \x02(\t\x12\x15\n\rrequest_proto\x18\x03 \x02(\x0c\"q\n\x05Reply\x12\x13\n\x0breply_proto\x18\x01 \x01(\x0c\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12\x17\n\x08\x63\x61llback\x18\x03 \x01(\x08:\x05\x66\x61lse\x12+\n\nerr_reason\x18\x04 \x01(\x0e\x32\x17.pyRpc2.pyRpc.ErrorType*\xf2\x01\n\tErrorType\x12\x17\n\x13\x45T_BAD_REQUEST_DATA\x10\x00\x12\x18\n\x14\x45T_BAD_REQUEST_PROTO\x10\x01\x12\x18\n\x14\x45T_SERVICE_NOT_FOUND\x10\x02\x12\x17\n\x13\x45T_METHOD_NOT_FOUND\x10\x03\x12\x10\n\x0c\x45T_RPC_ERROR\x10\x04\x12\x11\n\rET_RPC_FAILED\x10\x05\x12\x1c\n\x18\x45T_INVALID_REQUEST_PROTO\x10\x06\x12\x16\n\x12\x45T_BAD_REPLY_PROTO\x10\x07\x12\x13\n\x0f\x45T_UNKNOWN_HOST\x10\x08\x12\x0f\n\x0b\x45T_IO_ERROR\x10\t')
)
# NOTE: protoc-generated module (rpc.proto) -- regenerate rather than edit.
# Register the file descriptor with the default symbol database so the
# generated message classes below can be resolved by full name.
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# EnumDescriptor for pyRpc2.pyRpc.ErrorType: the failure reasons carried
# by Reply.err_reason (see _REPLY below).
_ERRORTYPE = _descriptor.EnumDescriptor(
  name='ErrorType',
  full_name='pyRpc2.pyRpc.ErrorType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='ET_BAD_REQUEST_DATA', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ET_BAD_REQUEST_PROTO', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ET_SERVICE_NOT_FOUND', index=2, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ET_METHOD_NOT_FOUND', index=3, number=3,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ET_RPC_ERROR', index=4, number=4,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ET_RPC_FAILED', index=5, number=5,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ET_INVALID_REQUEST_PROTO', index=6, number=6,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ET_BAD_REPLY_PROTO', index=7, number=7,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ET_UNKNOWN_HOST', index=8, number=8,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ET_IO_ERROR', index=9, number=9,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=220,
  serialized_end=462,
)
_sym_db.RegisterEnumDescriptor(_ERRORTYPE)
# Module-level wrapper and integer aliases mirroring the .proto enum.
ErrorType = enum_type_wrapper.EnumTypeWrapper(_ERRORTYPE)
ET_BAD_REQUEST_DATA = 0
ET_BAD_REQUEST_PROTO = 1
ET_SERVICE_NOT_FOUND = 2
ET_METHOD_NOT_FOUND = 3
ET_RPC_ERROR = 4
ET_RPC_FAILED = 5
ET_INVALID_REQUEST_PROTO = 6
ET_BAD_REPLY_PROTO = 7
ET_UNKNOWN_HOST = 8
ET_IO_ERROR = 9
# Descriptor for the Request message: service_name, method_name and the
# serialized request_proto payload (all required, label=2).
_REQUEST = _descriptor.Descriptor(
  name='Request',
  full_name='pyRpc2.pyRpc.Request',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='service_name', full_name='pyRpc2.pyRpc.Request.service_name', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='method_name', full_name='pyRpc2.pyRpc.Request.method_name', index=1,
      number=2, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='request_proto', full_name='pyRpc2.pyRpc.Request.request_proto', index=2,
      number=3, type=12, cpp_type=9, label=2,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=27,
  serialized_end=102,
)
# Descriptor for the Reply message: optional serialized reply payload,
# error text, callback flag (default False) and an err_reason enum whose
# type is wired to _ERRORTYPE after this definition.
_REPLY = _descriptor.Descriptor(
  name='Reply',
  full_name='pyRpc2.pyRpc.Reply',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='reply_proto', full_name='pyRpc2.pyRpc.Reply.reply_proto', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='error', full_name='pyRpc2.pyRpc.Reply.error', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='callback', full_name='pyRpc2.pyRpc.Reply.callback', index=2,
      number=3, type=8, cpp_type=7, label=1,
      has_default_value=True, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='err_reason', full_name='pyRpc2.pyRpc.Reply.err_reason', index=3,
      number=4, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=104,
  serialized_end=217,
)
# Resolve the enum field reference now that _ERRORTYPE is defined, and
# attach the message/enum descriptors to the file descriptor.
_REPLY.fields_by_name['err_reason'].enum_type = _ERRORTYPE
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Reply'] = _REPLY
DESCRIPTOR.enum_types_by_name['ErrorType'] = _ERRORTYPE
# Concrete Message classes generated from the descriptors above.
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
  DESCRIPTOR = _REQUEST,
  __module__ = 'rpc_pb2'
  # @@protoc_insertion_point(class_scope:pyRpc2.pyRpc.Request)
  ))
_sym_db.RegisterMessage(Request)
Reply = _reflection.GeneratedProtocolMessageType('Reply', (_message.Message,), dict(
  DESCRIPTOR = _REPLY,
  __module__ = 'rpc_pb2'
  # @@protoc_insertion_point(class_scope:pyRpc2.pyRpc.Reply)
  ))
_sym_db.RegisterMessage(Reply)
# @@protoc_insertion_point(module_scope)
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import flask
from flask import json
from sqlalchemy import exc as sa_exc
from sqlalchemy import sql
from dci.api.v1 import api
from dci.api.v1 import base
from dci.api.v1 import components
from dci.api.v1 import utils as v1_utils
from dci.api.v1 import jobs_events
from dci.api.v1 import tags
from dci import decorators
from dci.common import audits
from dci.common import exceptions as dci_exc
from dci.common import schemas
from dci.common import utils
from dci.db import embeds
from dci.db import models
from dci.api.v1 import files
from dci.api.v1 import issues
from dci.api.v1 import jobstates
from dci.api.v1 import remotecis
from dci import dci_config
# Filesystem folder where uploaded job files are stored.
_FILES_FOLDER = dci_config.generate_conf()['FILES_UPLOAD_FOLDER']
# Primary SQLAlchemy table this module operates on.
_TABLE = models.JOBS
# Embed specifications accepted by the "embed" query argument for jobs.
_VALID_EMBED = embeds.jobs()
# associate column names with the corresponding SA Column object
_JOBS_COLUMNS = v1_utils.get_columns_name_with_objects(_TABLE)
# For each embeddable resource: True when the embedded value is a list,
# False when it is a single object.
# NOTE(review): inferred from plural/singular naming — confirm against
# v1_utils.format_result's handling of this mapping.
_EMBED_MANY = {
    'files': True,
    'topic': False,
    'issues': True,
    'jobstates': True,
    'remoteci': False,
    'components': True,
    'team': False,
    'results': True,
    'rconfiguration': False,
    'analytics': True,
    'tags': True
}
@api.route('/jobs', methods=['POST'])
@decorators.login_required
@decorators.check_roles
def create_jobs(user):
    """Create a job and attach its components (POST /jobs).

    Returns a 201 response carrying the created job values and its ETag.
    Raises Unauthorized when the caller targets a team it does not belong to.
    """
    values = v1_utils.common_values_dict()
    values.update(schemas.job.post(flask.request.json))
    components_ids = values.pop('components')
    # Default to the caller's own team when no team_id was supplied.
    values['team_id'] = values.get('team_id', user['team_id'])
    # Only super admin can create job for other teams
    if user.is_not_super_admin() and not user.is_in_team(values['team_id']):
        raise dci_exc.Unauthorized()
    topic_id = values.get('topic_id')
    if topic_id:
        v1_utils.verify_team_in_topic(user, topic_id)
    previous_job_id = values.get('previous_job_id')
    if previous_job_id:
        # 404 early if the referenced previous job does not exist.
        v1_utils.verify_existence_and_get(previous_job_id, _TABLE)
    values.update({
        'status': 'new',
        # NOTE(review): user.id is stored as the remoteci id, so this endpoint
        # appears to expect a remoteci identity as the caller — confirm.
        'remoteci_id': user.id,
        'topic_id': topic_id,
        # No-op reassignment; raises KeyError if the schema did not provide
        # the key (presumably guaranteed by schemas.job — verify).
        'rconfiguration_id': values['rconfiguration_id'],
        'user_agent': flask.request.environ.get('HTTP_USER_AGENT'),
        'client_version': flask.request.environ.get(
            'HTTP_CLIENT_VERSION'
        ),
        'previous_job_id': previous_job_id,
    })
    # create the job and feed the jobs_components table
    with flask.g.db_conn.begin():
        query = _TABLE.insert().values(**values)
        flask.g.db_conn.execute(query)
        jobs_components_to_insert = []
        for cmpt_id in components_ids:
            # 404 (and transaction rollback) if a component id is unknown.
            v1_utils.verify_existence_and_get(cmpt_id, models.COMPONENTS)
            jobs_components_to_insert.append({'job_id': values['id'],
                                              'component_id': cmpt_id})
        if jobs_components_to_insert:
            flask.g.db_conn.execute(models.JOIN_JOBS_COMPONENTS.insert(),
                                    jobs_components_to_insert)
    return flask.Response(json.dumps({'job': values}), 201,
                          headers={'ETag': values['etag']},
                          content_type='application/json')
def _build_job(topic_id, remoteci, components_ids, values,
               previous_job_id=None, update_previous_job_id=None):
    """Insert a job row for a topic/remoteci and link its components.

    :param topic_id: topic the job is scheduled in
    :param remoteci: remoteci row (dict-like with 'id' and 'team_id')
    :param components_ids: explicitly requested component ids (may be empty)
    :param values: partially filled job values; completed and inserted here
    :param previous_job_id: id of the job this one upgrades from, if any
    :param update_previous_job_id: id of the job this one updates, if any
    :return: the completed job values dict
    """
    # Resolve which component types apply and which rconfiguration to use.
    component_types, rconfiguration = components.get_component_types(
        topic_id, remoteci['id'])
    # Compute the final component set (latest components unless pinned).
    schedule_components_ids = components.get_schedule_components_ids(
        topic_id, component_types, components_ids)
    values.update({
        'topic_id': topic_id,
        'rconfiguration_id': rconfiguration['id'] if rconfiguration else None, # noqa
        'team_id': remoteci['team_id'],
        'previous_job_id': previous_job_id,
        'update_previous_job_id': update_previous_job_id
    })
    # Job insert and component association happen in a single transaction.
    with flask.g.db_conn.begin():
        # create the job
        flask.g.db_conn.execute(_TABLE.insert().values(**values))
        if len(schedule_components_ids) > 0:
            # Adds the components to the jobs using join_jobs_components
            job_components = [
                {'job_id': values['id'], 'component_id': sci}
                for sci in schedule_components_ids
            ]
            flask.g.db_conn.execute(
                models.JOIN_JOBS_COMPONENTS.insert(), job_components
            )
    return values
def _get_job(user, job_id, embed=None):
    """Fetch one non-archived job by id, honoring team visibility.

    :param user: the authenticated user (controls team filtering)
    :param job_id: id of the job to fetch
    :param embed: optional list of resources to embed in the result
    :return: (job dict, number of matching rows)
    :raises dci_exc.DCINotFound: when no visible job matches
    """
    # build the query thanks to the QueryBuilder class
    args = {'embed': embed}
    query = v1_utils.QueryBuilder(_TABLE, args, _JOBS_COLUMNS)
    # Non-admin, non-read-only users only see their own teams' jobs.
    if user.is_not_super_admin() and not user.is_read_only_user():
        query.add_extra_condition(_TABLE.c.team_id.in_(user.teams_ids))
    query.add_extra_condition(_TABLE.c.id == job_id)
    query.add_extra_condition(_TABLE.c.state != 'archived')
    nb_rows = query.get_number_of_rows()
    rows = query.execute(fetchall=True)
    # Fold joined rows into nested structures per the embed spec.
    rows = v1_utils.format_result(rows, _TABLE.name, args['embed'],
                                  _EMBED_MANY)
    if len(rows) != 1:
        raise dci_exc.DCINotFound('Job', job_id)
    job = rows[0]
    return job, nb_rows
@api.route('/jobs/schedule', methods=['POST'])
@decorators.login_required
@decorators.check_roles
def schedule_jobs(user):
    """Dispatch jobs to remotecis.

    The remoteci can use this method to request a new job.

    Before a job is dispatched, the server will flag as 'killed' all the
    running jobs that were associated with the remoteci. This is because they
    will never be finished.
    """
    values = schemas.job_schedule.post(flask.request.json)
    values.update({
        'id': utils.gen_uuid(),
        'created_at': datetime.datetime.utcnow().isoformat(),
        'updated_at': datetime.datetime.utcnow().isoformat(),
        'etag': utils.gen_etag(),
        'status': 'new',
        # The authenticated identity is the remoteci requesting the job.
        'remoteci_id': user.id,
        'user_agent': flask.request.environ.get('HTTP_USER_AGENT'),
        'client_version': flask.request.environ.get(
            'HTTP_CLIENT_VERSION'
        ),
    })
    topic_id = values.pop('topic_id')
    components_ids = values.pop('components_ids')
    # check remoteci and topic
    remoteci = v1_utils.verify_existence_and_get(user.id, models.REMOTECIS)
    topic = v1_utils.verify_existence_and_get(topic_id, models.TOPICS)
    # 412 Precondition Failed when either side is not active.
    if topic['state'] != 'active':
        msg = 'Topic %s:%s not active.' % (topic['id'], topic['name'])
        raise dci_exc.DCIException(msg, status_code=412)
    if remoteci['state'] != 'active':
        message = 'RemoteCI "%s" is disabled.' % remoteci['id']
        raise dci_exc.DCIException(message, status_code=412)
    v1_utils.verify_team_in_topic(user, topic_id)
    # Any still-running jobs of this remoteci will never finish; kill them.
    remotecis.kill_existing_jobs(remoteci['id'])
    values = _build_job(topic_id, remoteci, components_ids, values)
    return flask.Response(json.dumps({'job': values}), 201,
                          headers={'ETag': values['etag']},
                          content_type='application/json')
@api.route('/jobs/<uuid:job_id>/update', methods=['POST'])
@decorators.login_required
@decorators.check_roles
def create_new_update_job_from_an_existing_job(user, job_id):
    """Create a new job in the same topic as the job_id provided and
    associate the latest components of this topic."""
    values = {
        'id': utils.gen_uuid(),
        'created_at': datetime.datetime.utcnow().isoformat(),
        'updated_at': datetime.datetime.utcnow().isoformat(),
        'etag': utils.gen_etag(),
        'status': 'new'
    }
    original_job_id = job_id
    original_job = v1_utils.verify_existence_and_get(original_job_id,
                                                     models.JOBS)
    # Only members of the original job's team may spawn an update job.
    if not user.is_in_team(original_job['team_id']):
        raise dci_exc.Unauthorized()
    # get the remoteci
    remoteci_id = str(original_job['remoteci_id'])
    remoteci = v1_utils.verify_existence_and_get(remoteci_id,
                                                 models.REMOTECIS)
    values.update({'remoteci_id': remoteci_id})
    # get the associated topic
    topic_id = str(original_job['topic_id'])
    v1_utils.verify_existence_and_get(topic_id, models.TOPICS)
    values.update({
        'user_agent': flask.request.environ.get('HTTP_USER_AGENT'),
        'client_version': flask.request.environ.get(
            'HTTP_CLIENT_VERSION'
        ),
    })
    # Empty components list means "pick the topic's latest components".
    values = _build_job(topic_id, remoteci, [], values,
                        update_previous_job_id=original_job_id)
    return flask.Response(json.dumps({'job': values}), 201,
                          headers={'ETag': values['etag']},
                          content_type='application/json')
@api.route('/jobs/upgrade', methods=['POST'])
@decorators.login_required
@decorators.check_roles
def create_new_upgrade_job_from_an_existing_job(user):
    """Create a new job in the 'next topic' of the topic of
    the provided job_id."""
    values = schemas.job_upgrade.post(flask.request.json)
    values.update({
        'id': utils.gen_uuid(),
        'created_at': datetime.datetime.utcnow().isoformat(),
        'updated_at': datetime.datetime.utcnow().isoformat(),
        'etag': utils.gen_etag(),
        'status': 'new'
    })
    original_job_id = values.pop('job_id')
    original_job = v1_utils.verify_existence_and_get(original_job_id,
                                                     models.JOBS)
    # Only members of the original job's team may spawn an upgrade job.
    if not user.is_in_team(original_job['team_id']):
        raise dci_exc.Unauthorized()
    # get the remoteci
    remoteci_id = str(original_job['remoteci_id'])
    remoteci = v1_utils.verify_existence_and_get(remoteci_id,
                                                 models.REMOTECIS)
    values.update({'remoteci_id': remoteci_id})
    # get the associated topic
    topic_id = str(original_job['topic_id'])
    topic = v1_utils.verify_existence_and_get(topic_id, models.TOPICS)
    values.update({
        'user_agent': flask.request.environ.get('HTTP_USER_AGENT'),
        'client_version': flask.request.environ.get(
            'HTTP_CLIENT_VERSION'
        ),
    })
    # An upgrade is only possible if the topic chains to a next topic.
    next_topic_id = topic['next_topic_id']
    if not next_topic_id:
        raise dci_exc.DCIException(
            "topic %s does not contains a next topic" % topic_id)
    # instantiate a new job in the next_topic_id
    # todo(yassine): make possible the upgrade to choose specific components
    values = _build_job(next_topic_id, remoteci, [], values,
                        previous_job_id=original_job_id)
    return flask.Response(json.dumps({'job': values}), 201,
                          headers={'ETag': values['etag']},
                          content_type='application/json')
@api.route('/jobs', methods=['GET'])
@decorators.login_required
@decorators.check_roles
def get_all_jobs(user, topic_id=None):
    """Get all jobs.

    If topic_id is not None, then return all the jobs with a topic
    pointed by topic_id.
    """
    # get the diverse parameters
    args = schemas.args(flask.request.args.to_dict())
    # build the query thanks to the QueryBuilder class
    query = v1_utils.QueryBuilder(_TABLE, args, _JOBS_COLUMNS)
    # add extra conditions for filtering
    # # If not admin nor rh employee then restrict the view to the team
    if user.is_not_super_admin() and not user.is_read_only_user():
        query.add_extra_condition(_TABLE.c.team_id.in_(user.teams_ids))
    # # If topic_id not None, then filter by topic_id
    if topic_id is not None:
        query.add_extra_condition(_TABLE.c.topic_id == topic_id)
    # # Get only the non archived jobs
    query.add_extra_condition(_TABLE.c.state != 'archived')
    nb_rows = query.get_number_of_rows()
    rows = query.execute(fetchall=True)
    # Fold joined rows into nested structures per the embed spec.
    rows = v1_utils.format_result(rows, _TABLE.name, args['embed'],
                                  _EMBED_MANY)
    return flask.jsonify({'jobs': rows, '_meta': {'count': nb_rows}})
@api.route('/jobs/<uuid:job_id>/components', methods=['GET'])
@decorators.login_required
@decorators.check_roles
def get_components_from_job(user, job_id):
    """List the components embedded in a job (GET /jobs/<id>/components)."""
    job, nb_rows = _get_job(user, job_id, ['components'])
    # NOTE(review): nb_rows counts matching job rows (i.e. 1), not the number
    # of components — confirm whether clients rely on this _meta value.
    return flask.jsonify({'components': job['components'],
                          '_meta': {'count': nb_rows}})
@api.route('/jobs/<uuid:j_id>/jobstates', methods=['GET'])
@decorators.login_required
@decorators.check_roles
def get_jobstates_by_job(user, j_id):
    """List all jobstates of a job (GET /jobs/<id>/jobstates)."""
    # 404 early if the job does not exist; listing is delegated to jobstates.
    v1_utils.verify_existence_and_get(j_id, _TABLE)
    return jobstates.get_all_jobstates(j_id=j_id)
@api.route('/jobs/<uuid:job_id>', methods=['GET'])
@decorators.login_required
@decorators.check_roles
def get_job_by_id(user, job_id):
    """Return a single job with its issues inlined (GET /jobs/<id>)."""
    job = v1_utils.verify_existence_and_get(job_id, _TABLE)
    job_dict = dict(job)
    # issues.get_issues_by_resource returns a flask Response; re-parse its
    # JSON body to inline the issues list into the job payload.
    job_dict['issues'] = json.loads(
        issues.get_issues_by_resource(job_id, _TABLE).response[0]
    )['issues']
    return base.get_resource_by_id(user, job_dict, _TABLE, _EMBED_MANY)
@api.route('/jobs/<uuid:job_id>', methods=['PUT'])
@decorators.login_required
@decorators.check_roles
@audits.log
def update_job_by_id(user, job_id):
    """Update a job (PUT /jobs/<id>).

    Uses the If-Match ETag for optimistic concurrency control; raises
    DCIConflict when the ETag no longer matches.
    """
    # get If-Match header
    if_match_etag = utils.check_and_get_etag(flask.request.headers)
    # get the diverse parameters
    values = schemas.job.put(flask.request.json)
    job = v1_utils.verify_existence_and_get(job_id, _TABLE)
    job = dict(job)
    if not user.is_in_team(job['team_id']):
        raise dci_exc.Unauthorized()
    # Update jobstate if needed
    status = values.get('status')
    if status and job.get('status') != status:
        # Record the status transition as a new jobstate row.
        jobstates.insert_jobstate(user, {
            'status': status,
            'job_id': job_id
        })
        # Only final statuses trigger a job event.
        if status in models.FINAL_STATUSES:
            jobs_events.create_event(job_id, status, job['topic_id'])
    # Optimistic lock: the update only applies if the ETag still matches.
    where_clause = sql.and_(_TABLE.c.etag == if_match_etag,
                            _TABLE.c.id == job_id)
    values['etag'] = utils.gen_etag()
    query = _TABLE.update().returning(*_TABLE.columns).\
        where(where_clause).values(**values)
    result = flask.g.db_conn.execute(query)
    if not result.rowcount:
        raise dci_exc.DCIConflict('Job', job_id)
    return flask.Response(
        json.dumps({'job': result.fetchone()}), 200,
        headers={'ETag': values['etag']},
        content_type='application/json'
    )
@api.route('/jobs/<uuid:j_id>/files', methods=['POST'])
@decorators.login_required
@decorators.check_roles
def add_file_to_jobs(user, j_id):
    """Attach a new file to a job (POST /jobs/<id>/files)."""
    # NOTE(review): this validates the payload with the *job* schema even
    # though it creates a file — looks like it should be a file schema;
    # confirm against schemas and files.create_files expectations.
    values = schemas.job.post(flask.request.json)
    values.update({'job_id': j_id})
    return files.create_files(user, values)
@api.route('/jobs/<uuid:j_id>/issues', methods=['GET'])
@decorators.login_required
@decorators.check_roles
def retrieve_issues_from_job(user, j_id):
    """Retrieve all issues attached to a job."""
    # Thin delegation: issue listing lives in the issues module.
    return issues.get_issues_by_resource(j_id, _TABLE)
@api.route('/jobs/<uuid:j_id>/issues', methods=['POST'])
@decorators.login_required
@decorators.check_roles
def attach_issue_to_jobs(user, j_id):
    """Attach an issue to a job, recording the attaching user's id."""
    return issues.attach_issue(j_id, _TABLE, user['id'])
@api.route('/jobs/<uuid:j_id>/issues/<uuid:i_id>', methods=['DELETE'])
@decorators.login_required
@decorators.check_roles
def unattach_issue_from_job(user, j_id, i_id):
    """Unattach an issue from a job."""
    return issues.unattach_issue(j_id, i_id, _TABLE)
@api.route('/jobs/<uuid:j_id>/files', methods=['GET'])
@decorators.login_required
@decorators.check_roles
def get_all_files_from_jobs(user, j_id):
    """List all files attached to a job (GET /jobs/<id>/files)."""
    # Thin delegation: file listing and filtering live in the files module.
    return files.get_all_files(j_id)
@api.route('/jobs/<uuid:j_id>/results', methods=['GET'])
@decorators.login_required
@decorators.check_roles
def get_all_results_from_jobs(user, j_id):
    """Return every test result recorded for the given job.

    Raises Unauthorized unless the caller is in the job's team or is a
    read-only user.
    """
    job = v1_utils.verify_existence_and_get(j_id, _TABLE)
    if not user.is_in_team(job['team_id']) and not user.is_read_only_user():
        raise dci_exc.Unauthorized()

    # Fetch the testcases rows from the tests_results table.
    query = sql.select([models.TESTS_RESULTS]). \
        where(models.TESTS_RESULTS.c.job_id == job['id'])
    rows = flask.g.db_conn.execute(query).fetchall()

    # Columns copied verbatim from each row into the API payload.
    copied_fields = ('total', 'failures', 'errors', 'skips', 'time',
                     'regressions', 'successfixes', 'success', 'file_id')
    results = []
    for row in rows:
        row = dict(row)
        entry = {field: row[field] for field in copied_fields}
        # Both payload keys mirror the stored test name.
        entry['filename'] = row['name']
        entry['name'] = row['name']
        results.append(entry)

    return flask.jsonify({'results': results,
                          '_meta': {'count': len(results)}})
@api.route('/jobs/<uuid:j_id>', methods=['DELETE'])
@decorators.login_required
@decorators.check_roles
def delete_job_by_id(user, j_id):
    """Soft-delete a job: mark it and its files 'archived' (DELETE /jobs/<id>).

    Uses the If-Match ETag for optimistic concurrency; raises
    DCIDeleteConflict when the ETag no longer matches.
    """
    # get If-Match header
    if_match_etag = utils.check_and_get_etag(flask.request.headers)
    job = v1_utils.verify_existence_and_get(j_id, _TABLE)
    if not user.is_in_team(job['team_id']):
        raise dci_exc.Unauthorized()
    with flask.g.db_conn.begin():
        values = {'state': 'archived'}
        where_clause = sql.and_(_TABLE.c.id == j_id,
                                _TABLE.c.etag == if_match_etag)
        query = _TABLE.update().where(where_clause).values(**values)
        result = flask.g.db_conn.execute(query)
        if not result.rowcount:
            raise dci_exc.DCIDeleteConflict('Job', j_id)
        # Cascade the archival to dependent resources (currently only files).
        for model in [models.FILES]:
            query = model.update().where(model.c.job_id == j_id).values(
                **values
            )
            flask.g.db_conn.execute(query)
    return flask.Response(None, 204, content_type='application/json')
@api.route('/jobs/<uuid:job_id>/tags', methods=['GET'])
@decorators.login_required
@decorators.check_roles
def get_tags_from_job(user, job_id):
    """Retrieve all tags attached to a job (GET /jobs/<id>/tags).

    Raises Unauthorized unless the caller is in the job's team or is a
    read-only user.
    """
    job = v1_utils.verify_existence_and_get(job_id, _TABLE)
    if not user.is_in_team(job['team_id']) and not user.is_read_only_user():
        raise dci_exc.Unauthorized()
    JTT = models.JOIN_JOBS_TAGS
    query = (sql.select([models.TAGS])
             .select_from(JTT.join(models.TAGS))
             .where(JTT.c.job_id == job_id))
    # Materialize the result: a raw ResultProxy is a one-shot cursor and is
    # not reliably JSON-serializable, so convert each row to a plain dict
    # before handing the payload to flask.jsonify.
    rows = [dict(row) for row in flask.g.db_conn.execute(query)]
    return flask.jsonify({'tags': rows, '_meta': {'count': len(rows)}})
@api.route('/jobs/<uuid:job_id>/tags', methods=['POST'])
@decorators.login_required
@decorators.check_roles
def add_tag_to_job(user, job_id):
    """Add a tag to a job (POST /jobs/<id>/tags).

    Only members of the job's team may tag it. The tag payload itself is
    read and validated by tags.add_tag_to_resource.
    """
    job = v1_utils.verify_existence_and_get(job_id, _TABLE)
    if not user.is_in_team(job['team_id']):
        raise dci_exc.Unauthorized()
    values = {
        'job_id': job_id
    }
    job_tagged = tags.add_tag_to_resource(values, models.JOIN_JOBS_TAGS)
    return flask.Response(json.dumps(job_tagged), 201,
                          content_type='application/json')
@api.route('/jobs/<uuid:job_id>/tags/<uuid:tag_id>', methods=['DELETE'])
@decorators.login_required
@decorators.check_roles
def delete_tag_from_job(user, job_id, tag_id):
    """Delete a tag from a job (DELETE /jobs/<id>/tags/<tag_id>)."""
    _JJT = models.JOIN_JOBS_TAGS
    job = v1_utils.verify_existence_and_get(job_id, _TABLE)
    if not user.is_in_team(job['team_id']):
        raise dci_exc.Unauthorized()
    # 404 early when the tag itself does not exist.
    v1_utils.verify_existence_and_get(tag_id, models.TAGS)
    query = _JJT.delete().where(sql.and_(_JJT.c.tag_id == tag_id,
                                         _JJT.c.job_id == job_id))
    try:
        flask.g.db_conn.execute(query)
    except sa_exc.IntegrityError:
        # NOTE(review): an IntegrityError on a DELETE is surfaced as a
        # creation conflict — confirm this is the intended error mapping.
        raise dci_exc.DCICreationConflict('tag', 'tag_id')
    return flask.Response(None, 204, content_type='application/json')
@api.route('/jobs/purge', methods=['GET'])
@decorators.login_required
@decorators.check_roles
def get_to_purge_archived_jobs(user):
    """List archived jobs that are candidates for purging."""
    return base.get_to_purge_archived_resources(user, _TABLE)
@api.route('/jobs/purge', methods=['POST'])
@decorators.login_required
@decorators.check_roles
def purge_archived_jobs(user):
    """Permanently remove archived jobs (and their files) from storage."""
    # Files must be purged first so no orphan file rows/blobs remain.
    files.purge_archived_files()
    return base.purge_archived_resources(user, _TABLE)
| |
# Standard imports
import numpy as np
import json
import logging
from dateutil import parser
import math
import pandas as pd
import attrdict as ad
import datetime as pydt
import time as time
import pytz
# Our imports
import emission.analysis.point_features as pf
import emission.analysis.intake.cleaning.cleaning_methods.speed_outlier_detection as eaico
import emission.analysis.intake.cleaning.cleaning_methods.jump_smoothing as eaicj
import emission.storage.pipeline_queries as epq
import emission.storage.decorations.analysis_timeseries_queries as esda
import emission.storage.timeseries.abstract_timeseries as esta
import emission.core.wrapper.entry as ecwe
import emission.core.wrapper.metadata as ecwm
import emission.core.wrapper.smoothresults as ecws
import emission.storage.decorations.useful_queries as taug
import emission.storage.decorations.location_queries as lq
import emission.core.get_database as edb
import emission.core.common as ec
np.set_printoptions(suppress=True)
def recalc_speed(points_df):
    """
    The input dataframe already has "speed" and "distance" columns.
    Drop them and recalculate speeds from the first point onwards.
    The speed column has the speed between each point and its previous point.
    The first row has a speed of zero.
    """
    stripped_df = points_df.drop("speed", axis=1).drop("distance", axis=1)
    point_list = [ad.AttrDict(row) for row in points_df.to_dict('records')]
    # BUGFIX: zip() is a one-shot iterator on Python 3; materialize it so the
    # speeds comprehension below also sees the pairs (previously it iterated
    # an exhausted iterator and produced no speeds).
    zipped_points_list = list(zip(point_list, point_list[1:]))
    distances = [pf.calDistance(p1, p2) for (p1, p2) in zipped_points_list]
    # The first point has no predecessor, so its distance/speed is zero.
    distances.insert(0, 0)
    with_speeds_df = pd.concat([stripped_df, pd.Series(distances, index=points_df.index, name="distance")], axis=1)
    speeds = [pf.calSpeed(p1, p2) for (p1, p2) in zipped_points_list]
    speeds.insert(0, 0)
    with_speeds_df = pd.concat([with_speeds_df, pd.Series(speeds, index=points_df.index, name="speed")], axis=1)
    return with_speeds_df
def add_dist_heading_speed(points_df):
    # type: (pandas.DataFrame) -> pandas.DataFrame
    """
    Returns a new dataframe with added "distance", "speed" and "heading"
    columns, each relating a point to its previous point.
    The first row has a distance/speed/heading of zero.
    """
    point_list = [ad.AttrDict(row) for row in points_df.to_dict('records')]
    # BUGFIX: zip() is a one-shot iterator on Python 3 and the pair list is
    # consumed three times below, so it must be materialized once.
    zipped_points_list = list(zip(point_list, point_list[1:]))
    distances = [pf.calDistance(p1, p2) for (p1, p2) in zipped_points_list]
    distances.insert(0, 0)
    speeds = [pf.calSpeed(p1, p2) for (p1, p2) in zipped_points_list]
    speeds.insert(0, 0)
    headings = [pf.calHeading(p1, p2) for (p1, p2) in zipped_points_list]
    headings.insert(0, 0)
    # BUGFIX: align the new columns on points_df's index (as recalc_speed
    # does); without this, a non-default index would misalign under concat.
    with_distances_df = pd.concat([points_df, pd.Series(distances, index=points_df.index, name="distance")], axis=1)
    with_speeds_df = pd.concat([with_distances_df, pd.Series(speeds, index=points_df.index, name="speed")], axis=1)
    with_headings_df = pd.concat([with_speeds_df, pd.Series(headings, index=points_df.index, name="heading")], axis=1)
    return with_headings_df
def add_heading_change(points_df):
    """
    Returns a new dataframe with an added "heading_change" column.
    The heading change column has the heading change between this point and the
    two points preceding it. The first two rows have a heading change of zero.
    """
    point_list = [ad.AttrDict(row) for row in points_df.to_dict('records')]
    # zip() is consumed exactly once here, so no materialization is needed.
    zipped_points_list = zip(point_list, point_list[1:], point_list[2:])
    hcs = [pf.calHC(p1, p2, p3) for (p1, p2, p3) in zipped_points_list]
    # The first two points lack enough predecessors; pad with zeros.
    hcs.insert(0, 0)
    hcs.insert(1, 0)
    # BUGFIX: align the new column on points_df's index (as recalc_speed
    # does); without this, a non-default index would misalign under concat.
    with_hcs_df = pd.concat([points_df, pd.Series(hcs, index=points_df.index, name="heading_change")], axis=1)
    return with_hcs_df
def filter_current_sections(user_id):
    """Smooth all raw sections in the user's pending pipeline time range.

    Marks the pipeline stage done on success (recording the last processed
    section, or None when nothing new was processed) and failed on error.
    """
    time_query = epq.get_time_range_for_smoothing(user_id)
    try:
        sections_to_process = esda.get_objects(esda.RAW_SECTION_KEY, user_id,
                                               time_query)
        for section in sections_to_process:
            logging.info("^" * 20 + ("Smoothing section %s for user %s" % (section.get_id(), user_id)) + "^"
                         * 20)
            filter_jumps(user_id, section.get_id())
        if len(sections_to_process) == 0:
            # Didn't process anything new so start at the same point next time
            last_section_processed = None
        else:
            last_section_processed = sections_to_process[-1]
        epq.mark_smoothing_done(user_id, last_section_processed)
    except Exception:
        # BUGFIX: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should fail the stage.
        logging.exception("Marking smoothing as failed")
        epq.mark_smoothing_failed(user_id)
def filter_jumps(user_id, section_id):
    """
    filters out any jumps in the points related to this section and stores a entry that lists the deleted points for
    this trip and this section.
    :param user_id: the user id to filter the trips for
    :param section_id: the section_id to filter the trips for
    :return: none. saves an entry with the filtered points into the database.
    """
    logging.debug("filter_jumps(%s, %s) called" % (user_id, section_id))
    # Fixed algorithm pair: boxplot-based outlier detection feeding the
    # zigzag smoother (names are recorded in the result entry below).
    outlier_algo = eaico.BoxplotOutlier()
    filtering_algo = eaicj.SmoothZigzag()
    tq = esda.get_time_query_for_trip_like(esda.RAW_SECTION_KEY, section_id)
    ts = esta.TimeSeries.get_time_series(user_id)
    section_points_df = ts.get_data_df("background/filtered_location", tq)
    logging.debug("len(section_points_df) = %s" % len(section_points_df))
    points_to_ignore_df = get_points_to_filter(section_points_df, outlier_algo, filtering_algo)
    if points_to_ignore_df is None:
        # There were no points to delete
        return
    deleted_point_id_list = list(points_to_ignore_df._id)
    logging.debug("deleted %s points" % len(deleted_point_id_list))
    # Persist only the ids of the removed points plus the algorithm names,
    # not the point data itself.
    filter_result = ecws.Smoothresults()
    filter_result.section = section_id
    filter_result.deleted_points = deleted_point_id_list
    filter_result.outlier_algo = "BoxplotOutlier"
    filter_result.filtering_algo = "SmoothZigzag"
    result_entry = ecwe.Entry.create_entry(user_id, "analysis/smoothing", filter_result)
    ts.insert(result_entry)
def get_points_to_filter(section_points_df, outlier_algo, filtering_algo):
    """
    From the incoming dataframe, filter out large jumps using the specified outlier detection algorithm and
    the specified filtering algorithm.
    :param section_points_df: a dataframe of points for the current section
    :param outlier_algo: the algorithm used to detect outliers
    :param filtering_algo: the algorithm used to determine which of those outliers need to be filtered
    :return: a dataframe of points that need to be stripped, if any.
             None if none of them need to be stripped.
    """
    with_speeds_df = add_dist_heading_speed(section_points_df)
    logging.debug("section_points_df.shape = %s, with_speeds_df.shape = %s" %
                  (section_points_df.shape, with_speeds_df.shape))

    # Guard clause: without a filtering algorithm nothing can be stripped,
    # and nothing can use the outlier max speed either.
    if filtering_algo is None:
        logging.debug("no filtering algo specified, returning None")
        return None

    if outlier_algo is not None:
        # TODO: Is this the best way to do this? Or should I pass this in as an argument to filter?
        # Or create an explicit set_speed() method?
        # Or pass the outlier_algo as the parameter to the filtering_algo?
        filtering_algo.maxSpeed = outlier_algo.get_threshold(with_speeds_df)
        logging.debug("maxSpeed = %s" % filtering_algo.maxSpeed)

    try:
        filtering_algo.filter(with_speeds_df)
        outlier_mask = np.logical_not(filtering_algo.inlier_mask_)
        return with_speeds_df[outlier_mask]
    except Exception as e:
        # Best effort: a failing filter means no points are stripped.
        logging.debug("Caught error %s while processing section, skipping..." % e)
        return None
def get_filtered_points(section_df, outlier_algo, filtering_algo):
    """
    Filter the points that correspond to the section object that is passed in.
    The section object is an AttrDict with the startTs and endTs fields.
    Returns a filtered df with the index after the initial filter for accuracy
    TODO: Switch this to the section wrapper object going forward
    TODO: Note that here, we assume that the data has already been chunked into sections.
    But really, we need to filter (at least for accuracy) before segmenting in
    order to avoid issues like https://github.com/e-mission/e-mission-data-collection/issues/45
    """
    with_speeds_df = add_dist_heading_speed(section_df)

    # Guard clause: with no filtering algorithm nothing is filtered, and
    # the outlier max speed would be unused anyway.
    if filtering_algo is None:
        return with_speeds_df

    if outlier_algo is not None:
        # TODO: Is this the best way to do this? Or should I pass this in as an argument to filter?
        # Or create an explicit set_speed() method?
        # Or pass the outlier_algo as the parameter to the filtering_algo?
        filtering_algo.maxSpeed = outlier_algo.get_threshold(with_speeds_df)

    try:
        filtering_algo.filter(with_speeds_df)
        return with_speeds_df[filtering_algo.inlier_mask_]
    except Exception as e:
        # Best effort: on failure, return the unfiltered (speed-annotated) df.
        print ("Caught error %s while processing section, skipping..." % e)
        return with_speeds_df
| |
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import json
import time
from pysdn.controller.controller import Controller
from pysdn.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Match,
Instruction,
GroupAction,
GroupEntry,
GroupBucket,
OutputAction)
from pysdn.common.utils import load_dict_from_file
from pysdn.common.status import STATUS
from pysdn.common.constants import (OFPGT_FF, ETH_TYPE_IPv4)
def delete_flows(ofswitch, table_id, flow_ids):
    """Remove each flow in 'flow_ids' from table 'table_id' on the switch,
    reporting success or failure for every id."""
    for fid in flow_ids:
        outcome = ofswitch.delete_flow(table_id, fid).get_status()
        if outcome.eq(STATUS.OK):
            print ("<<< Flow with id of '%s' successfully removed "
                   "from the Controller" % fid)
        else:
            print ("!!!Flow '%s' removal error, reason: %s" %
                   (fid, outcome.brief()))
def delete_groups(ofswitch, group_ids):
    """Remove each group in 'group_ids' from the switch, reporting success
    or failure for every id."""
    for gid in group_ids:
        outcome = ofswitch.delete_group(gid).get_status()
        if outcome.eq(STATUS.OK):
            print ("<<< Group '%s' successfully removed from the Controller" %
                   gid)
        else:
            print ("!!!Group '%s' removal error, reason: %s" %
                   (gid, outcome.brief()))
def print_groups(lcfg, loper):
    """Pretty-print the configured and operational group ID lists.

    :param lcfg: group IDs from the configuration datastore
    :param loper: group IDs from the operational datastore
    Long lists are wrapped into chunks of 'q' IDs per output line, with
    continuation lines indented to align under the first chunk.
    NOTE: Python 2 only (print statements, xrange).
    """
    q = 10  # number of list items to be in a single chunk (output string)
    print "\n".strip()  # '\n'.strip() is '' -> prints a blank separator line
    s = 'Configured Groups IDs'
    if lcfg:
        # Split the id list into chunks of q items each.
        chunks = [lcfg[x:x + q] for x in xrange(0, len(lcfg), q)]
        print " %s :" % s,
        for i in range(0, len(chunks)):
            # First chunk continues the label line; later chunks are
            # indented by label width + padding to line up underneath.
            n = 0 if i == 0 else len(s) + 18
            print "%s%s" % (" " * n, ", ".join(map(str, chunks[i])))
    else:
        print " %s : %s" % (s, "none")
    s = 'Operational Groups IDs'
    if loper:
        chunks = [loper[x:x + q] for x in xrange(0, len(loper), q)]
        print " %s :" % s,
        for i in range(0, len(chunks)):
            n = 0 if i == 0 else len(s) + 18
            print "%s%s" % (" " * n, ", ".join(map(str, chunks[i])))
    else:
        print " %s : %s" % (s, "none")
def of_demo_36():
    """OpenFlow demo 36: 'fast failover' group with a group-action flow.

    Reads the Controller connection settings from cfg.yml, creates an
    OFPGT_FF group with three watch-port buckets on the switch, installs a
    flow entry whose action applies that group, inspects the group's
    configuration/operational/statistics state, and finally removes the
    flow and all groups again. Python 2 script (print statements).
    """
    f = "cfg.yml"
    d = {}
    if(load_dict_from_file(f, d) is False):
        print("Config file '%s' read error: " % f)
        exit()
    try:
        ctrlIpAddr = d['ctrlIpAddr']
        ctrlPortNum = d['ctrlPortNum']
        ctrlUname = d['ctrlUname']
        ctrlPswd = d['ctrlPswd']
        nodeName = d['nodeName']
        rundelay = d['rundelay']
    except:
        print ("Failed to get Controller device attributes")
        exit(0)
    print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
    print ("<<< Demo 36 Start")
    print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
    ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
    ofswitch = OFSwitch(ctrl, nodeName)
    print "\n".strip()
    print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'" %
           (ctrlIpAddr, nodeName))
    grp_ids_cfg = []
    grp_ids_oper = []
    print "\n".strip()
    print ("<<< Get OpenFlow Groups Information")
    time.sleep(rundelay)
    result = ofswitch.get_configured_group_ids()
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        grp_ids_cfg = result.get_data()
    elif(status.eq(STATUS.DATA_NOT_FOUND)):
        grp_ids_cfg = []
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.detailed())
        exit(0)
    result = ofswitch.get_operational_group_ids()
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        grp_ids_oper = result.get_data()
    elif(status.eq(STATUS.DATA_NOT_FOUND)):
        grp_ids_oper = []
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.detailed())
        exit(0)
    # Show current state of the Group Table in the Controller's
    # configuration and operational data stores
    print_groups(grp_ids_cfg, grp_ids_oper)
    # Create new group
    group_id = 15
    group_type = OFPGT_FF
    group_name = "Example of 'link fast failover' group"
    watch_port1 = 110
    watch_port2 = 111
    watch_port3 = 112
    out_port1 = 110
    out_port2 = 111
    out_port3 = 112
    print "\n".strip()
    print ("<<< Create Group")
    print "\n".strip()
    print ("        Group Type : %s\n"
           "        Group ID   : %s\n"
           "        Group Name : \"%s\"" %
           (group_type.strip('group-').upper(),
            group_id, group_name))
    print ("        Buckets    :")
    print ("                   [0] watch-port: %s" %
           watch_port1)
    print ("                       actions: Output (%s)" %
           out_port1)
    print ("                   [1] watch-port: %s" %
           watch_port2)
    print ("                       actions: Output (%s)" %
           out_port2)
    print ("                   [2] watch-port: %s" %
           watch_port3)
    print ("                       actions: Output (%s)" %
           out_port3)
    time.sleep(rundelay)
    # Allocate a placeholder for the group entry
    group_entry = GroupEntry(group_id, group_type)
    group_entry.set_group_name(group_name)
    # Fill in group entry with action buckets
    # ---------
    bucket_id = 0
    bucket1 = GroupBucket(bucket_id)
    bucket1.set_watch_port(watch_port1)
    action = OutputAction(order=0, port=out_port1)
    bucket1.add_action(action)
    group_entry.add_bucket(bucket1)
    # ---------
    bucket_id += 1
    bucket2 = GroupBucket(bucket_id)
    bucket2.set_watch_port(watch_port2)
    action = OutputAction(order=0, port=out_port2)
    bucket2.add_action(action)
    group_entry.add_bucket(bucket2)
    # ---------
    bucket_id += 1
    bucket3 = GroupBucket(bucket_id)
    bucket3.set_watch_port(watch_port3)
    action = OutputAction(order=0, port=out_port3)
    bucket3.add_action(action)
    group_entry.add_bucket(bucket3)
    # Request Controller to create the group
    print "\n".strip()
    print ("<<< Group to create:")
    print group_entry.get_payload()
    time.sleep(rundelay)
    result = ofswitch.add_modify_group(group_entry)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("<<< Group successfully added")
        grp_ids_oper = result.get_data()
    else:
        print ("\n").strip()
        print ("!!!Demo terminated, reason: %s" % status.detailed())
        exit(0)
    print ("\n").strip()
    print ("<<< Get group '%s' configuration status") % group_id
    time.sleep(rundelay)
    result = ofswitch.get_configured_group(group_id)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("Group configuration info:")
        group = result.get_data()
        print json.dumps(group, indent=4)
    else:
        print ("\n").strip()
        print ("!!!Demo terminated, reason: %s" % status.detailed())
        exit(0)
    print ("\n").strip()
    print ("<<< Get group '%s' operational status") % group_id
    time.sleep(rundelay)
    result = ofswitch.get_group_description(group_id)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("Group operational info:")
        group = result.get_data()
        print json.dumps(group, indent=4)
    else:
        print ("\n").strip()
        print ("!!!Error, reason: %s" % status.detailed())
    print ("\n").strip()
    print ("<<< Get group '%s' statistics information") % group_id
    time.sleep(rundelay)
    result = ofswitch.get_group_statistics(group_id)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("Group statistics info:")
        group = result.get_data()
        print json.dumps(group, indent=4)
    else:
        print ("\n").strip()
        print ("!!!Error, reason: %s" % status.detailed())
    print ("\n").strip()
    print ("<<< Get OpenFlow Groups Information")
    time.sleep(rundelay)
    result = ofswitch.get_configured_group_ids()
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        grp_ids_cfg = result.get_data()
    elif(status.eq(STATUS.DATA_NOT_FOUND)):
        grp_ids_cfg = []
    else:
        print ("\n").strip()
        print ("!!!Error, reason: %s" % status.detailed())
    result = ofswitch.get_operational_group_ids()
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        grp_ids_oper = result.get_data()
    elif(status.eq(STATUS.DATA_NOT_FOUND)):
        grp_ids_oper = []
    else:
        print ("\n").strip()
        print ("!!!Error, reason: %s" % status.detailed())
    # Show current state of the Group Table in the Controller's
    # configuration and operational data stores
    print_groups(grp_ids_cfg, grp_ids_oper)
    first_flow_id = 110
    # ---------------------------------------------------
    # First flow entry
    # ---------------------------------------------------
    table_id = 0
    flow_id = first_flow_id
    flow_name = "Group action example"
    priority = 1000
    cookie = 1400
    match_in_port = 109
    match_eth_type = ETH_TYPE_IPv4
    print "\n".strip()
    print ("<<< Set OpenFlow flow on the Controller")
    print ("        Match:  Input Port (%s)\n"
           "                Ethernet Type (%s)" %
           (match_in_port, hex(match_eth_type)))
    print ("        Actions: Apply Group (%s)\n" % group_id)
    time.sleep(rundelay)
    # Allocate a placeholder for the Flow Entry
    flow_entry1 = FlowEntry()
    # Generic attributes of the Flow Entry
    flow_entry1.set_flow_table_id(table_id)
    flow_entry1.set_flow_name(flow_name)
    flow_entry1.set_flow_id(flow_id)
    flow_entry1.set_flow_cookie(cookie)
    flow_entry1.set_flow_priority(priority)
    flow_entry1.set_flow_hard_timeout(0)
    flow_entry1.set_flow_idle_timeout(0)
    # Instructions/Actions for the Flow Entry
    instruction = Instruction(instruction_order=0)
    action_order = 0
    action = GroupAction(action_order)
    action.set_group_id(group_id)
    instruction.add_apply_action(action)
    flow_entry1.add_instruction(instruction)
    # Match Fields for the Flow Entry
    match = Match()
    match.set_in_port(match_in_port)
    match.set_eth_type(match_eth_type)
    flow_entry1.add_match(match)
    print ("<<< Flow to send:")
    print flow_entry1.get_payload()
    time.sleep(rundelay)
    result = ofswitch.add_modify_flow(flow_entry1)
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        print ("<<< Flow successfully added to the Controller")
    else:
        print ("\n")
        print ("!!!Demo terminated, reason: %s" % status.detailed())
        # Best-effort cleanup of what was created before bailing out.
        delete_groups(ofswitch, grp_ids_cfg)
        delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
        exit(0)
    print "\n".strip()
    print ("<<< Remove all flows from the Controller")
    time.sleep(rundelay)
    delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
    print "\n".strip()
    print ("<<< Remove all groups from the Controller")
    for group_id in grp_ids_cfg:
        result = ofswitch.delete_group(group_id)
        status = result.get_status()
        if(status.eq(STATUS.OK)):
            print ("<<< Group '%s' successfully removed from the Controller" %
                   group_id)
        else:
            print ("\n").strip()
            print ("!!!Error, failed to remove group '%s', reason: %s" %
                   (group_id, status.detailed()))
    print ("\n").strip()
    print ("<<< Get OpenFlow Groups Information")
    time.sleep(rundelay)
    result = ofswitch.get_configured_group_ids()
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        grp_ids_cfg = result.get_data()
    elif(status.eq(STATUS.DATA_NOT_FOUND)):
        grp_ids_cfg = []
    else:
        print ("\n").strip()
        print ("!!!Error, reason: %s" % status.detailed())
    result = ofswitch.get_operational_group_ids()
    status = result.get_status()
    if(status.eq(STATUS.OK)):
        grp_ids_oper = result.get_data()
    elif(status.eq(STATUS.DATA_NOT_FOUND)):
        grp_ids_oper = []
    else:
        print ("\n")
        print ("!!!Error, reason: %s" % status.detailed())
    # Show current state of the Group Table in the Controller's
    # configuration and operational data stores
    print_groups(grp_ids_cfg, grp_ids_oper)
    print ("\n").strip()
    print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
    print (">>> Demo End")
    print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
# Script entry point: run the demo only when executed directly.
if __name__ == "__main__":
    of_demo_36()
| |
# Copyright 2022 The Scenic Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for all semantic segmentation models."""
import functools
from typing import Any, Callable, List, Dict, Optional, Tuple
from flax.training import common_utils
from immutabledict import immutabledict
import jax.numpy as jnp
import numpy as np
from scenic.model_lib.base_models import base_model
from scenic.model_lib.base_models import model_utils
# Signature of a global (whole-dataset) metrics function: takes the list of
# per-batch confusion matrices and the dataset metadata, returns named floats.
GlobalMetricFn = Callable[[List[jnp.ndarray], Dict[str, Any]], Dict[str, float]]
def num_pixels(logits: jnp.ndarray,
               one_hot_targets: jnp.ndarray,
               weights: Optional[jnp.ndarray] = None) -> float:
  """Counts the pixels that a metric's normalizer should divide by.

  Mirrors the signature of the other metric functions so it can be used
  directly as a normalizer entry in the metrics table.

  Args:
    logits: Unused; present only for API compatibility.
    one_hot_targets: Targets, in form of one-hot vectors.
    weights: Optional pixel-level mask accounting for padding in the input.

  Returns:
    Number of (non-padded) pixels in the input.
  """
  del logits
  if weights is not None:
    # A pixel-level mask is expected; sum counts the unmasked pixels.
    assert weights.ndim == 3, (
        'For segmentation task, the weights should be a pixel level mask.')
    return weights.sum()
  # No mask given: every pixel of every example counts.
  return np.prod(one_hot_targets.shape[:3])
# Standard default metrics for the semantic segmentation models.
# Maps metric name -> (metric_fn, normalizer_fn); both callables are invoked
# as fn(logits, one_hot_targets, weights).
_SEMANTIC_SEGMENTATION_METRICS = immutabledict({
    'accuracy': (model_utils.weighted_correctly_classified, num_pixels),
    # The loss is already normalized, so we set num_pixels to 1.0:
    'loss': (model_utils.weighted_softmax_cross_entropy, lambda *a, **kw: 1.0)
})
def semantic_segmentation_metrics_function(
    logits: jnp.ndarray,
    batch: base_model.Batch,
    target_is_onehot: bool = False,
    metrics: base_model.MetricNormalizerFnDict = _SEMANTIC_SEGMENTATION_METRICS,
) -> Dict[str, Tuple[jnp.ndarray, jnp.ndarray]]:
  """Evaluates the given metrics on a semantic segmentation batch.

  Each metric_fn is assumed to have the API
  ```metric_fn(logits, targets, weights)```
  and to return an array of shape [batch_size]. The aggregate metric is
  obtained by summing over all batches and dividing by the total number of
  samples seen, so only metrics of the form 1/N sum f(inputs, targets) are
  supported. The caller is responsible for dividing by the normalizer when
  computing the mean of each metric.

  Args:
    logits: Output of model in shape [batch, length, num_classes].
    batch: Batch of data that has 'label' and optionally 'batch_mask'.
    target_is_onehot: If the target is a one-hot vector.
    metrics: Mapping from metric name to a (metric_fn, normalizer_fn) pair.

  Returns:
    A dict of metrics, in which keys are metrics name and values are tuples
    of (metric, normalizer).
  """
  if target_is_onehot:
    one_hot_targets = batch['label']
  else:
    one_hot_targets = common_utils.onehot(batch['label'], logits.shape[-1])
  # 'batch_mask' might not be defined; weights is then None.
  weights = batch.get('batch_mask')
  # psum_metric_normalizer performs the psum required for correct multihost
  # evaluation: only host 0 reports the metrics, so per-device values of
  # shape [n_global_devices, batch_size] are summed across the devices
  # dimension, and the outer sum then runs over the batch dimension.
  return {
      name: model_utils.psum_metric_normalizer(
          (metric_fn(logits, one_hot_targets, weights),
           norm_fn(logits, one_hot_targets, weights)))
      for name, (metric_fn, norm_fn) in metrics.items()
  }
class SegmentationModel(base_model.BaseModel):
  """Defines commonalities between all semantic segmentation models.

  A model is a class with three members: get_metrics_fn, loss_fn, and a
  flax_model.

  get_metrics_fn returns a callable function, metric_fn, that calculates the
  metrics and returns a dictionary. The metric function computes f(x_i, y_i)
  on a minibatch, it has API:
  ```metric_fn(logits, label, weights).```

  The trainer will then aggregate and compute the mean across all samples
  evaluated.

  loss_fn is a function of API
  ```loss = loss_fn(logits, batch, model_params=None).```

  This model class defines a softmax_cross_entropy_loss with weight decay,
  where the weight decay factor is determined by config.l2_decay_factor.

  flax_model is returned from the build_flax_model function. A typical
  usage pattern will be:
  ```
  model_cls = model_lib.models.get_model_cls('simple_cnn_segmentation')
  model = model_cls(config, dataset.meta_data)
  flax_model = model.build_flax_model
  dummy_input = jnp.zeros(input_shape, model_input_dtype)
  model_state, params = flax_model.init(
      rng, dummy_input, train=False).pop('params')
  ```
  And this is how to call the model:
  ```
  variables = {'params': params, **model_state}
  logits, new_model_state = flax_model.apply(variables, inputs, ...)
  ```
  """
  def get_metrics_fn(self, split: Optional[str] = None) -> base_model.MetricFn:
    """Returns a callable metric function for the model.

    Args:
      split: The split for which we calculate the metrics. It should be one
        of the ['train', 'validation', 'test'].
    Returns: A metric function with the following API: ```metrics_fn(logits,
      batch)```
    """
    del split  # For all splits, we return the same metric functions.
    return functools.partial(
        semantic_segmentation_metrics_function,
        target_is_onehot=self.dataset_meta_data.get('target_is_onehot', False),
        metrics=_SEMANTIC_SEGMENTATION_METRICS)
  def loss_function(self,
                    logits: jnp.ndarray,
                    batch: base_model.Batch,
                    model_params: Optional[jnp.ndarray] = None) -> float:
    """Returns softmax cross entropy loss with an L2 penalty on the weights.

    Args:
      logits: Output of model in shape [batch, length, num_classes].
      batch: Batch of data that has 'label' and optionally 'batch_mask'.
      model_params: Parameters of the model, for optionally applying
        regularization.

    Returns:
      Total loss.
    """
    weights = batch.get('batch_mask')
    if self.dataset_meta_data.get('target_is_onehot', False):
      one_hot_targets = batch['label']
    else:
      one_hot_targets = common_utils.onehot(batch['label'], logits.shape[-1])
    sof_ce_loss = model_utils.weighted_softmax_cross_entropy(
        logits,
        one_hot_targets,
        weights,
        label_smoothing=self.config.get('label_smoothing'),
        label_weights=self.get_label_weights())
    # Optionally add an L2 penalty scaled by 0.5 * l2_decay_factor.
    if self.config.get('l2_decay_factor') is None:
      total_loss = sof_ce_loss
    else:
      l2_loss = model_utils.l2_regularization(model_params)
      total_loss = sof_ce_loss + 0.5 * self.config.l2_decay_factor * l2_loss
    return total_loss
  def get_label_weights(self) -> Optional[jnp.ndarray]:
    """Returns labels' weights to be used for computing weighted loss.

    This can be used for weighting the loss terms based on the amount of
    available data for each class, when we have unbalanced data for
    different classes. Returns None when `class_rebalancing_factor` is
    unset or zero in the config.
    """
    if not self.config.get('class_rebalancing_factor'):
      return None
    if 'class_proportions' not in self.dataset_meta_data:
      raise ValueError(
          'When `class_rebalancing_factor` is nonzero, `class_proportions` must'
          ' be provided in `dataset_meta_data`.')
    w = self.config.get('class_rebalancing_factor')
    assert 0.0 <= w <= 1.0, '`class_rebalancing_factor` must be in [0.0, 1.0]'
    proportions = self.dataset_meta_data['class_proportions']
    proportions = np.maximum(proportions / np.sum(proportions), 1e-8)
    # Interpolate between no rebalancing (w==0.0) and full reweighting (w==1.0):
    proportions = w * proportions + (1.0 - w)
    weights = 1.0 / proportions
    weights /= np.sum(weights)  # Normalize so weights sum to 1.
    weights *= len(weights)  # Scale so weights sum to num_classes.
    return weights
  def get_global_metrics_fn(self) -> GlobalMetricFn:
    """Returns a callable metric function for global metrics.

    The returned function implements metrics that require the prediction for
    the entire test/validation dataset in one place and has the following API:
    ```global_metrics_fn(all_confusion_mats, dataset_metadata)```
    If it returns None, no global metrics will be computed.
    """
    return global_metrics_fn
  def build_flax_model(self):
    # Subclasses must provide the actual flax model.
    raise NotImplementedError('Subclasses must implement build_flax_model().')
  def default_flax_model_config(self):
    """Default config for the flax model that is built in `build_flax_model`.

    This function in particular serves the testing functions and is supposed
    to provide the config that is passed to the flax_model when it is built
    in the `build_flax_model` function, e.g., `model_dtype_str`.
    """
    raise NotImplementedError(
        'Subclasses must implement default_flax_model_config().')
def global_metrics_fn(all_confusion_mats: List[jnp.ndarray],
                      dataset_metadata: Dict[str, Any]) -> Dict[str, float]:
  """Computes whole-dataset metrics: mean IoU and per-class IoU.

  Args:
    all_confusion_mats: Per-eval-batch confusion matrices, each of shape
      [batch_size, num_classes, num_classes].
    dataset_metadata: Dataset metadata; when it contains 'class_names', the
      per-class tags include the class name.

  Returns:
    Dict with 'mean_iou' plus one 'iou_per_class/NN[_name]' entry per class.
  """
  assert isinstance(all_confusion_mats, list)  # List of eval batches.
  # Collapse the batches first, then the per-example batch dimension, to get
  # a single [num_classes, num_classes] confusion matrix.
  cm = np.sum(all_confusion_mats, axis=0)
  assert cm.ndim == 3, ('Expecting confusion matrix to have shape '
                        '[batch_size, num_classes, num_classes], got '
                        f'{cm.shape}.')
  cm = np.sum(cm, axis=0)
  mean_iou, iou_per_class = model_utils.mean_iou(cm)
  results = {'mean_iou': float(mean_iou)}
  for idx, class_iou in enumerate(iou_per_class):
    key = f'iou_per_class/{idx:02.0f}'
    if 'class_names' in dataset_metadata:
      key = f"{key}_{dataset_metadata['class_names'][idx]}"
    results[key] = float(class_iou)
  return results
| |
from rest_framework import status
from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import detail_route
from . import models, serializers, permissions
class StripeResourceViewset(ModelViewSet):
    """ a typical ModelViewSet that chooses its serializer based on the request type.

    this requires that the viewset has two additional attributes, namely
    `create_stripe_serializer` and `update_stripe_serializer`. these serializers will be
    used to handle create and update interactions with the stripe api resource.
    """
    # Subclasses must set these to the serializers that talk to the Stripe API.
    create_stripe_serializer = None
    update_stripe_serializer = None
    permission_classes = (permissions.OwnerOnlyPermission,)
    def options(self, request, *args, **kwargs):
        """ better formatting for API browsing: advertises the create/update
        serializers' fields under the POST / PUT / PATCH metadata actions.
        """
        if self.metadata_class is None:  # pragma: no cover
            return self.http_method_not_allowed(request, *args, **kwargs)
        mdc = self.metadata_class()
        data = mdc.determine_metadata(request, self)
        create = self.create_stripe_serializer
        update = self.update_stripe_serializer
        # Instantiate only the serializers that are actually configured.
        create = mdc.get_serializer_info(create()) if create else None
        update = mdc.get_serializer_info(update()) if update else None
        if not data.get("actions"):  # pragma: no cover
            data["actions"] = {}
        if create:
            data["actions"]["POST"] = create
        if update:  # pragma: no branch
            # The same serializer describes both full and partial updates.
            data["actions"]["PUT"] = update
            data["actions"]["PATCH"] = update
        return Response(data)
    def create(self, request, *args, **kwargs):
        """ since all stripe resource objects have a required `owner` foreign key, auto
        matically set the requesting users id to the `owner` field value
        """
        request.data["owner"] = request.user.id
        return super().create(request, *args, **kwargs)
    def get_serializer_class(self):
        """ gets a serializer based on the request action type
        """
        if self.action == "create":
            return self.create_stripe_serializer
        elif self.action in ("update", "partial_update"):
            return self.update_stripe_serializer
        # List/retrieve/etc. fall back to the regular serializer_class.
        return super().get_serializer_class()
    def filter_queryset(self, queryset):
        """ no one can view objects that they do not own!
        """
        return queryset.filter(owner=self.request.user)
    def perform_destroy(self, instance):
        # Delete the remote Stripe object first, then the local row.
        stripe_instance = instance.retrieve_stripe_api_instance()
        stripe_instance.delete()
        instance.delete()
    def perform_create(self, serializer):
        # Stamp the requesting user as the owner of the new resource.
        serializer.save(owner=self.request.user)
    @detail_route(methods=["get"])
    def refresh(self, request, *args, **kwargs):
        """ For whatever reason a model might need to be refreshed by a client a detail
        route /<resource>/<pk>/refresh/ is available.
        """
        # Re-sync the local model from the Stripe API, persist, and return it.
        instance = self.get_object()
        instance.refresh_from_stripe_api()
        instance.save()
        serializer = self.get_serializer(instance)
        return Response(serializer.data)
class SingleObjectUpdateOnly:
    """Mixin that disables object creation and deletion on a viewset.

    For resources that exist exactly once per owner (e.g. the Customer
    record): clients may read and update them, but POST and DELETE are
    rejected with 405 Method Not Allowed.
    """
    def create(self, request, *args, **kwargs):  # pragma: no cover
        # Fix: DRF's Response takes `status=`, not `code=` — the old keyword
        # raised a TypeError at runtime instead of returning a 405 response.
        return Response({"detail": "POST method not allowed."},
                        status=status.HTTP_405_METHOD_NOT_ALLOWED)
    def delete(self, request, *args, **kwargs):  # pragma: no cover
        return Response({"detail": "DELETE method not allowed."},
                        status=status.HTTP_405_METHOD_NOT_ALLOWED)
class CardViewset(StripeResourceViewset):
    """ Normal CRUD operations on the stripe Card resource.

    a POST request expects a json document like this::

        {
            "token": "tok_fdsionNKO532N32nL",
            "card_type": "customer" or "merchant"
        }
    """
    model = models.Card
    queryset = models.Card.objects.all()
    serializer_class = serializers.CardSerializer
    # Create/update go through the Stripe-API-aware serializers.
    create_stripe_serializer = serializers.CreateCardResourceSerializer
    update_stripe_serializer = serializers.UpdateCardResourceSerializer
    permission_classes = (permissions.PaymentTypePermission,)
class BankAccountViewset(StripeResourceViewset):
    """ Normal CRUD operations on the stripe BankAccount resource.

    a POST request expects a json document like this::

        {
            "token": "tok_fdsionNKO532N32nL",
            "card_type": "customer" or "merchant"
        }
    """
    # NOTE(review): docstring mirrors CardViewset — confirm the serializer
    # really expects a "card_type" key for bank accounts.
    model = models.BankAccount
    queryset = models.BankAccount.objects.all()
    serializer_class = serializers.BankAccountSerializer
    create_stripe_serializer = serializers.CreateBankAccountResourceSerializer
    update_stripe_serializer = serializers.UpdateBankAccountResourceSerializer
    permission_classes = (permissions.PaymentTypePermission,)
class ConnectedAccountViewset(StripeResourceViewset):
    """ Normal CRUD operations on the stripe Account resource.
    """
    model = models.ConnectedAccount
    queryset = models.ConnectedAccount.objects.all()
    serializer_class = serializers.ConnectedAccountSerializer
    create_stripe_serializer = serializers.CreateConnectedAccountResourceSerializer
    update_stripe_serializer = serializers.UpdateConnectedAccountResourceSerializer
class SubscriptionViewset(StripeResourceViewset):
    """ Normal CRUD operations on the stripe Subscription resource.

    a POST request expects a json document like this::

        {
            "plan": 2,      // primary key of local plan model
            "coupon": null  // primary key of local coupon model
        }
    """
    model = models.Subscription
    queryset = models.Subscription.objects.all()
    serializer_class = serializers.SubscriptionSerializer
    create_stripe_serializer = serializers.CreateSubscriptionResourceSerializer
    update_stripe_serializer = serializers.UpdateSubscriptionResourceSerializer
    permission_classes = (permissions.CustomerOnlyPermission, )
    def create(self, request, *args, **kwargs):
        # Subscriptions belong to the user's stripe Customer record, not to
        # the user directly, so stamp the customer id before delegating.
        request.data["customer"] = request.user.stripe_customer.id
        return super().create(request, *args, **kwargs)
class CustomerViewset(SingleObjectUpdateOnly, StripeResourceViewset):
    """ Read/update access to the stripe Customer resource; creation and
    deletion are disabled by the SingleObjectUpdateOnly mixin.
    """
    model = models.Customer
    queryset = models.Customer.objects.all()
    serializer_class = serializers.CustomerSerializer
    update_stripe_serializer = serializers.UpdateCustomerResourceSerializer
    permission_classes = (permissions.CustomerOnlyPermission, )
class ChargeViewset(ReadOnlyModelViewSet):
    """ Read-only access to Charge records owned by the requesting user.
    """
    model = models.Charge
    queryset = models.Charge.objects.all()
    serializer_class = serializers.ChargeSerializer
    permission_classes = (permissions.CustomerOnlyPermission, )
    def filter_queryset(self, queryset):
        # Users only ever see their own charges.
        return queryset.filter(owner=self.request.user)
class TransferViewset(ReadOnlyModelViewSet):
    """ Read-only access to Transfer records owned by the requesting user.
    """
    model = models.Transfer
    queryset = models.Transfer.objects.all()
    serializer_class = serializers.TransferSerializer
    permission_classes = (permissions.MerchantOnlyPermission, )
    def filter_queryset(self, queryset):
        # Users only ever see their own transfers.
        return queryset.filter(owner=self.request.user)
class RefundViewset(ReadOnlyModelViewSet):
    """ Read-only access to Refund records owned by the requesting user.
    """
    model = models.Refund
    queryset = models.Refund.objects.all()
    serializer_class = serializers.RefundSerializer
    permission_classes = (permissions.CustomerOnlyPermission, )
    def filter_queryset(self, queryset):
        # Users only ever see their own refunds.
        return queryset.filter(owner=self.request.user)
| |
# JSON-Patch (RFC 6902) style operations ("op" / "from" / "path" / "value")
# applied to the AWS::WAFv2 resource/property type definitions. They rename
# types for backward compatibility (e.g. Rule -> WebACLRule / RuleGroupRule)
# and unroll the recursive WebACL Statement type into a fixed three-level
# StatementOne / StatementTwo / StatementThree chain.
patches = [
    # LoggingConfiguration FieldToMatch is different from WebACL or RuleGroup FieldToMatch
    {
        "op": "move",
        "from": "/PropertyTypes/AWS::WAFv2::LoggingConfiguration.FieldToMatch",
        "path": "/PropertyTypes/AWS::WAFv2::LoggingConfiguration.LoggingConfigurationFieldToMatch",
    },
    {
        "op": "replace",
        "path": "/ResourceTypes/AWS::WAFv2::LoggingConfiguration/Properties/RedactedFields/ItemType",
        "value": "LoggingConfigurationFieldToMatch",
    },
    # backward compatibility
    {
        "op": "move",
        "from": "/PropertyTypes/AWS::WAFv2::WebACL.Rule",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.WebACLRule",
    },
    # backward compatibility
    {
        "op": "replace",
        "path": "/ResourceTypes/AWS::WAFv2::WebACL/Properties/Rules/ItemType",
        "value": "WebACLRule",
    },
    # backward compatibility
    {
        "op": "move",
        "from": "/PropertyTypes/AWS::WAFv2::RuleGroup.Rule",
        "path": "/PropertyTypes/AWS::WAFv2::RuleGroup.RuleGroupRule",
    },
    # backward compatibility
    {
        "op": "replace",
        "path": "/ResourceTypes/AWS::WAFv2::RuleGroup/Properties/Rules/ItemType",
        "value": "RuleGroupRule",
    },
    # backward compatibility - StatementOne
    {
        "op": "move",
        "from": "/PropertyTypes/AWS::WAFv2::WebACL.Statement",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.StatementOne",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.StatementOne/Properties/AndStatement/Type",
        "value": "AndStatementOne",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.StatementOne/Properties/NotStatement/Type",
        "value": "NotStatementOne",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.StatementOne/Properties/OrStatement/Type",
        "value": "OrStatementOne",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.StatementOne/Properties/RateBasedStatement/Type",
        "value": "RateBasedStatementOne",
    },
    {
        "op": "move",
        "from": "/PropertyTypes/AWS::WAFv2::WebACL.AndStatement",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.AndStatementOne",
    },
    {
        "op": "copy",
        "from": "/PropertyTypes/AWS::WAFv2::WebACL.AndStatementOne",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.AndStatementTwo",
    },
    {
        "op": "move",
        "from": "/PropertyTypes/AWS::WAFv2::WebACL.NotStatement",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.NotStatementOne",
    },
    {
        "op": "copy",
        "from": "/PropertyTypes/AWS::WAFv2::WebACL.NotStatementOne",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.NotStatementTwo",
    },
    {
        "op": "move",
        "from": "/PropertyTypes/AWS::WAFv2::WebACL.OrStatement",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.OrStatementOne",
    },
    {
        "op": "copy",
        "from": "/PropertyTypes/AWS::WAFv2::WebACL.OrStatementOne",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.OrStatementTwo",
    },
    {
        "op": "move",
        "from": "/PropertyTypes/AWS::WAFv2::WebACL.RateBasedStatement",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.RateBasedStatementOne",
    },
    {
        "op": "copy",
        "from": "/PropertyTypes/AWS::WAFv2::WebACL.RateBasedStatementOne",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.RateBasedStatementTwo",
    },
    # Level-one combinators nest level-two statements ...
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.AndStatementOne/Properties/Statements/ItemType",
        "value": "StatementTwo",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.NotStatementOne/Properties/Statement/Type",
        "value": "StatementTwo",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.OrStatementOne/Properties/Statements/ItemType",
        "value": "StatementTwo",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.RateBasedStatementOne/Properties/ScopeDownStatement/Type",
        "value": "StatementTwo",
    },
    # ... and level-two combinators nest level-three statements.
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.AndStatementTwo/Properties/Statements/ItemType",
        "value": "StatementThree",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.NotStatementTwo/Properties/Statement/Type",
        "value": "StatementThree",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.OrStatementTwo/Properties/Statements/ItemType",
        "value": "StatementThree",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.RateBasedStatementTwo/Properties/ScopeDownStatement/Type",
        "value": "StatementThree",
    },
    # backward compatibility - StatementTwo
    {
        "op": "copy",
        "from": "/PropertyTypes/AWS::WAFv2::WebACL.StatementOne",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.StatementTwo",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.StatementTwo/Properties/AndStatement/Type",
        "value": "AndStatementTwo",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.StatementTwo/Properties/NotStatement/Type",
        "value": "NotStatementTwo",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.StatementTwo/Properties/OrStatement/Type",
        "value": "OrStatementTwo",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.StatementTwo/Properties/RateBasedStatement/Type",
        "value": "RateBasedStatementTwo",
    },
    # backward compatibility - StatementThree: the recursion terminates here,
    # so the nesting combinator properties are removed entirely.
    {
        "op": "copy",
        "from": "/PropertyTypes/AWS::WAFv2::WebACL.StatementOne",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.StatementThree",
    },
    {
        "op": "remove",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.StatementThree/Properties/AndStatement",
    },
    {
        "op": "remove",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.StatementThree/Properties/NotStatement",
    },
    {
        "op": "remove",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.StatementThree/Properties/OrStatement",
    },
    {
        "op": "remove",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.StatementThree/Properties/RateBasedStatement",
    },
    # Insert StatementOne into RuleGroupRule and WebACLRule
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::RuleGroup.RuleGroupRule/Properties/Statement/Type",
        "value": "StatementOne",
    },
    {
        "op": "replace",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.WebACLRule/Properties/Statement/Type",
        "value": "StatementOne",
    },
    # backward compatibility - remove ManagedRuleGroupStatement ScopeDownStatement
    {
        "op": "remove",
        "path": "/PropertyTypes/AWS::WAFv2::WebACL.ManagedRuleGroupStatement/Properties/ScopeDownStatement",
    },
    # Remove redundant RuleGroup properties
    {
        "op": "remove",
        "path": "/PropertyTypes/AWS::WAFv2::RuleGroup.AndStatement",
    },
    {
        "op": "remove",
        "path": "/PropertyTypes/AWS::WAFv2::RuleGroup.LabelSummary",
    },
    {
        "op": "remove",
        "path": "/PropertyTypes/AWS::WAFv2::RuleGroup.NotStatement",
    },
    {
        "op": "remove",
        "path": "/PropertyTypes/AWS::WAFv2::RuleGroup.OrStatement",
    },
    {
        "op": "remove",
        "path": "/PropertyTypes/AWS::WAFv2::RuleGroup.RateBasedStatement",
    },
    {
        "op": "remove",
        "path": "/PropertyTypes/AWS::WAFv2::RuleGroup.Statement",
    },
]
| |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Build the JSON-RPC proxy: anonymous when no password is configured,
# otherwise embed user:pass credentials in the endpoint URL (port 10011).
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:10011")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:10011")
# First CLI argument selects the RPC command (case-insensitive).
cmd = sys.argv[1].lower()
# Interactive dispatch for the wallet's JSON-RPC commands (Python 2 script:
# print statement / raw_input).  Each branch prompts on stdin for the call's
# arguments and prints the result; calls with optional arguments are first
# attempted with every collected value and fall back to the shorter form if
# the server rejects the call.
if cmd == "backupwallet":
    try:
        path = raw_input("Enter destination path/filename: ")
        print access.backupwallet(path)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccount":
    try:
        addr = raw_input("Enter a KashmirCoin address: ")
        print access.getaccount(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaccountaddress(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaddressesbyaccount(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getbalance":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        # Try the parameterised call first, fall back to the global balance.
        try:
            print access.getbalance(acct, mc)
        except:
            print access.getbalance()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
    try:
        height = raw_input("Height: ")
        print access.getblockbycount(height)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockcount":
    try:
        print access.getblockcount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
    try:
        print access.getblocknumber()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
    try:
        print access.getconnectioncount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
    try:
        print access.getdifficulty()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getgenerate":
    try:
        print access.getgenerate()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
    try:
        print access.gethashespersec()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getinfo":
    try:
        print access.getinfo()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
    try:
        acct = raw_input("Enter an account name: ")
        try:
            print access.getnewaddress(acct)
        except:
            print access.getnewaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaccount(acct, mc)
        except:
            print access.getreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
    try:
        addr = raw_input("Enter a KashmirCoin address (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaddress(addr, mc)
        except:
            print access.getreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gettransaction":
    try:
        txid = raw_input("Enter a transaction ID: ")
        print access.gettransaction(txid)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
    try:
        cmd = raw_input("Command (optional): ")
        try:
            print access.help(cmd)
        except:
            print access.help()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listaccounts":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.listaccounts(mc)
        except:
            print access.listaccounts()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaccount(mc, incemp)
        except:
            print access.listreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaddress(mc, incemp)
        except:
            print access.listreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listtransactions":
    try:
        acct = raw_input("Account (optional): ")
        count = raw_input("Number of transactions (optional): ")
        frm = raw_input("Skip (optional):")
        try:
            print access.listtransactions(acct, count, frm)
        except:
            print access.listtransactions()
    except:
        print "\n---An error occurred---\n"
elif cmd == "move":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.move(frm, to, amt, mc, comment)
        except:
            print access.move(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendfrom":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendfrom(frm, to, amt, mc, comment, commentto)
        except:
            print access.sendfrom(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendmany":
    try:
        frm = raw_input("From: ")
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.sendmany(frm,to,mc,comment)
        except:
            print access.sendmany(frm,to)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
    try:
        # NOTE(review): prompt text looks copy-pasted from sendmany; the
        # call takes a single address, not an address:amount list — verify.
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        amt = raw_input("Amount:")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendtoaddress(to,amt,comment,commentto)
        except:
            print access.sendtoaddress(to,amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setaccount":
    try:
        addr = raw_input("Address: ")
        acct = raw_input("Account:")
        print access.setaccount(addr,acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setgenerate":
    try:
        gen= raw_input("Generate? (true/false): ")
        cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
        try:
            print access.setgenerate(gen, cpus)
        except:
            print access.setgenerate(gen)
    except:
        print "\n---An error occurred---\n"
elif cmd == "settxfee":
    try:
        amt = raw_input("Amount:")
        print access.settxfee(amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "stop":
    try:
        print access.stop()
    except:
        print "\n---An error occurred---\n"
elif cmd == "validateaddress":
    try:
        addr = raw_input("Address: ")
        print access.validateaddress(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
    try:
        pwd = raw_input("Enter wallet passphrase: ")
        # Unlock the wallet for a fixed 60 seconds.
        access.walletpassphrase(pwd, 60)
        print "\n---Wallet unlocked---\n"
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
    try:
        pwd = raw_input("Enter old wallet passphrase: ")
        pwd2 = raw_input("Enter new wallet passphrase: ")
        access.walletpassphrasechange(pwd, pwd2)
        print
        print "\n---Passphrase changed---\n"
    except:
        print
        print "\n---An error occurred---\n"
        print
else:
    print "Command not found or not supported"
| |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The gpsubprocess is a subclass of subprocess that allows for non-blocking
processing of status and servicing of stdout and stderr.
Also the aim was to reduce the overhead associated with this servicing.
Normal subprocess.communicate() results in up to 3 threads being created
(stdin,stdout,stderr) and then a blocking wait for the process and threads
to finish. This doesn't allow the user to cancel long-running
tasks.
"""
import errno
import fcntl
import os
import select
import subprocess
import time
from gppylib import gplog
logger=gplog.get_default_logger()
class Popen(subprocess.Popen):
    """subprocess.Popen variant whose communicate2() polls the child and
    services stdout/stderr with non-blocking reads, so a caller on another
    thread may cancel() a long-running command."""

    # Polled once per select() cycle in communicate2(); set by cancel().
    cancelRequested=False

    def communicate2(self, timeout=2,input=None):
        """ An extension to communicate() that allows for external cancels
        to abort processing.
        All internal I/O calls are non-blocking.
        The timeout is in seconds and is the max. amount of time to wait in
        select() for something to read from either stdout or stderr. This
        then affects how responsive it will be to cancel requests.
        Returns a (returncode, stdout_text, stderr_text) tuple.
        """
        terminated=False
        output = []   # accumulated stdout chunks
        error = []    # accumulated stderr chunks
        self._setupNonblocking()
        if self.stdin:
            if input:
                self.stdin.write(input)
            # ESCALATION-151 - always close stdin, even when no input from caller
            self.stdin.close()
        # Poll child status and drain the pipes until exit or cancel().
        while not (terminated or self.cancelRequested):
            terminated = self._check_status()
            if not terminated:
                self._read_files(timeout,output,error)
        # Consume rest of output
        self._finish_read_files(timeout,output,error)
        (resout,reserr)=self._postprocess_outputs(output,error)
        return (self.returncode,resout,reserr)

    def cancel(self):
        """Sets a flag that will cause execution to halt during the next
        select cycle.
        This amount of time is based on the timeout specified.
        """
        logger.debug("cancel is requested")
        self.cancelRequested=True

    def _setupNonblocking(self):
        """ sets stdout and stderr fd's to be non-blocking.
        The fcntl throws an IOError if these calls fail.
        """
        fcntl.fcntl(self.stdout, fcntl.F_SETFL, os.O_NDELAY | os.O_NONBLOCK)
        fcntl.fcntl(self.stderr, fcntl.F_SETFL, os.O_NDELAY | os.O_NONBLOCK)

    def _postprocess_outputs(self,output,error):
        """Join the buffered chunks into single strings and, when requested,
        normalise newlines."""
        # All data exchanged. Translate lists into strings.
        if output is not None:
            output = ''.join(output)
        if error is not None:
            error = ''.join(error)
        # Translate newlines, if requested. We cannot let the file
        # object do the translation: It is based on stdio, which is
        # impossible to combine with select (unless forcing no
        # buffering).
        if self.universal_newlines and hasattr(file, 'newlines'):
            if output:
                output = self._translate_newlines(output)
            if error:
                error = self._translate_newlines(error)
        return (output,error)

    def _read_files(self,timeout,output,error):
        """Wait up to timeout seconds for data on stdout/stderr and buffer
        one chunk (up to 8 KiB) from whichever pipe is readable."""
        readList=[]
        readList.append(self.stdout)
        readList.append(self.stderr)
        writeList = []
        errorList=[]
        (rset,wset,eset) = self.__select(readList,writeList, errorList, timeout)
        if self.stdout in rset:
            output.append(os.read(self.stdout.fileno(),8192))
        if self.stderr in rset:
            error.append(os.read(self.stderr.fileno(),8192))

    def _finish_read_files(self, timeout, output, error):
        """This function reads the rest of stderr and stdout and appends
        it to error and output. This ensures that all output is received"""
        # NOTE(review): bytesRead is never used below.
        bytesRead=0
        # consume rest of output
        try:
            (rset,wset,eset) = self.__select([self.stdout],[],[], timeout)
            while (self.stdout in rset):
                buffer = os.read(self.stdout.fileno(), 8192)
                if buffer == '':
                    # EOF: writer side closed.
                    break
                else:
                    output.append(buffer)
                (rset,wset,eset) = self.__select([self.stdout],[],[], timeout)
        except OSError:
            # Pipe closed when we tried to read.
            pass
        try:
            (rset,wset,eset) = self.__select([self.stderr],[],[], timeout)
            while (self.stderr in rset):
                buffer = os.read(self.stderr.fileno(), 8192)
                if buffer == '':
                    break
                else:
                    error.append(buffer)
                (rset, wset, eset) = self.__select([self.stderr], [], [], timeout)
        except OSError:
            # Pipe closed when we tried to read.
            pass
        """ Close stdout and stderr PIPEs """
        self.stdout.close()
        self.stderr.close()

    def _check_status(self):
        """Non-blocking waitpid() poll; returns True once the child has
        exited or was signalled, and a falsy value (None) while it is
        still running."""
        terminated=False
        """Another possibility for the below line would be to try and capture
        rusage information. Perhaps that type of call would be an external
        method for users to check on status:
        (pid, status, rusage)=os.wait4(self.pid, os.WNOHANG)
        """
        (pid,status)=os.waitpid(self.pid, os.WNOHANG)
        if pid == 0:
            #means we are still in progress
            return
        # NOTE(review): computed unconditionally (even for a signalled child,
        # where WEXITSTATUS is meaningless) and never used below.
        exitstatus = os.WEXITSTATUS(status)
        if os.WIFEXITED (status):
            self.exitstatus = os.WEXITSTATUS(status)
            self.signalstatus = None
            terminated = True
        elif os.WIFSIGNALED (status):
            self.exitstatus = None
            self.signalstatus = os.WTERMSIG(status)
            terminated = True
        elif os.WIFSTOPPED (status):
            raise Exception('Wait was called for a child process that is stopped.\
            This is not supported. Is some other process attempting\
            job control with our child pid?')
        # Let the base class record self.returncode from the raw status.
        self._handle_exitstatus(status)
        return terminated

    def __select (self, iwtd, owtd, ewtd, timeout=None):
        """This is a wrapper around select.select() that ignores signals.
        If select.select raises a select.error exception and errno is an EINTR
        error then it is ignored.
        """
        if timeout is not None:
            end_time = time.time() + timeout
        while True:
            try:
                return select.select(iwtd, owtd, ewtd, timeout)
            except select.error, e:
                if e[0] == errno.EINTR or e[0] == errno.EAGAIN:
                    # if we loop back we have to subtract the amount of time we already waited.
                    if timeout is not None:
                        timeout = end_time - time.time()
                        if timeout < 0:
                            return ([],[],[])
                else: # something bad caused the select.error
                    raise
            except IOError, e:
                if e[0] == errno.EINTR or e[0] == errno.EAGAIN:
                    if timeout is not None:
                        timeout = end_time - time.time()
                        if timeout < 0:
                            return ([],[],[])
                else:
                    raise
| |
#!/usr/bin/env python3
import argparse
import os
import re
import xml.etree.ElementTree as ET
from collections import OrderedDict
# Uncomment to do type checks. I have it commented out so it works below Python 3.5
# from typing import List, Dict, TextIO, Tuple, Iterable, Optional, DefaultDict, Any, Union
# Matches Godot docs URLs of the form
# http(s)://docs.godotengine.org/<langcode>/<tag>/path/to/page.html(#fragment-tag);
# group 1 captures the page path, group 2 the optional #fragment.
GODOT_DOCS_PATTERN = re.compile(
    r"^http(?:s)?://docs\.godotengine\.org/(?:[a-zA-Z0-9.\-_]*)/(?:[a-zA-Z0-9.\-_]*)/(.*)\.html(#.*)?$"
)
def print_error(error, state):  # type: (str, State) -> None
    """Report *error* on stdout and mark *state* as having errored."""
    state.errored = True
    print("ERROR: {}".format(error))
class TypeName:
    """A (type, optional enum) pair parsed from an XML ``type``/``enum`` attribute."""

    def __init__(self, type_name, enum=None):  # type: (str, Optional[str]) -> None
        self.type_name = type_name
        self.enum = enum

    def to_rst(self, state):  # type: ("State") -> str
        """Render this type as an rst cross-reference (or literal "void")."""
        if self.enum is None:
            return "void" if self.type_name == "void" else make_type(self.type_name, state)
        return make_enum(self.enum, state)

    @classmethod
    def from_element(cls, element):  # type: (ET.Element) -> "TypeName"
        """Build a TypeName from an element's ``type`` and optional ``enum`` attributes."""
        return cls(element.attrib["type"], element.get("enum"))
class PropertyDef:
    """Parsed ``<member>`` element: one property of a documented class."""

    def __init__(
        self, name, type_name, setter, getter, text, default_value, overridden
    ):  # type: (str, TypeName, Optional[str], Optional[str], Optional[str], Optional[str], Optional[bool]) -> None
        # Everything is stored verbatim; formatting happens at render time.
        (self.name, self.type_name) = (name, type_name)
        (self.setter, self.getter) = (setter, getter)
        (self.text, self.default_value, self.overridden) = (text, default_value, overridden)
class ParameterDef:
    """One ``<argument>`` of a method or signal."""

    def __init__(self, name, type_name, default_value):  # type: (str, TypeName, Optional[str]) -> None
        self.name, self.type_name, self.default_value = name, type_name, default_value
class SignalDef:
    """Parsed ``<signal>``: name, parameter list and optional description."""

    def __init__(self, name, parameters, description):  # type: (str, List[ParameterDef], Optional[str]) -> None
        self.name, self.parameters, self.description = name, parameters, description
class MethodDef:
    """Parsed ``<method>``: signature pieces plus optional description/qualifiers."""

    def __init__(
        self, name, return_type, parameters, description, qualifiers
    ):  # type: (str, TypeName, List[ParameterDef], Optional[str], Optional[str]) -> None
        (self.name, self.return_type, self.parameters) = (name, return_type, parameters)
        (self.description, self.qualifiers) = (description, qualifiers)
class ConstantDef:
    """Parsed ``<constant>``: name, literal value string and optional text."""

    def __init__(self, name, value, text):  # type: (str, str, Optional[str]) -> None
        self.name, self.value, self.text = name, value, text
class EnumDef:
    """A named enumeration; ``values`` maps constant name -> ConstantDef."""

    def __init__(self, name):  # type: (str) -> None
        self.name = name
        # Insertion order mirrors the XML declaration order.
        self.values = OrderedDict()  # type: OrderedDict[str, ConstantDef]
class ThemeItemDef:
    """Parsed ``<theme_item>``: a theme property and its data type group."""

    def __init__(
        self, name, type_name, data_name, text, default_value
    ):  # type: (str, TypeName, str, Optional[str], Optional[str]) -> None
        (self.name, self.type_name, self.data_name) = (name, type_name, data_name)
        (self.text, self.default_value) = (text, default_value)
class ClassDef:
    """Aggregated documentation for one class XML file."""

    def __init__(self, name):  # type: (str) -> None
        self.name = name
        # Ordered containers preserve the XML declaration order.  Types:
        # constants: OrderedDict[str, ConstantDef]; enums: OrderedDict[str, EnumDef];
        # properties: OrderedDict[str, PropertyDef]; methods: OrderedDict[str, List[MethodDef]];
        # signals: OrderedDict[str, SignalDef]; theme_items: OrderedDict[str, ThemeItemDef].
        for container in ("constants", "enums", "properties", "methods", "signals", "theme_items"):
            setattr(self, container, OrderedDict())
        self.inherits = None  # type: Optional[str]
        self.brief_description = None  # type: Optional[str]
        self.description = None  # type: Optional[str]
        self.tutorials = []  # type: List[Tuple[str, str]]
        # Used to match the class with XML source for output filtering purposes.
        self.filepath = ""  # type: str
class State:
    """Accumulates every parsed ClassDef plus a global error flag."""

    def __init__(self):  # type: () -> None
        # Has any error been reported?
        self.errored = False
        self.classes = OrderedDict()  # type: OrderedDict[str, ClassDef]
        self.current_class = ""  # type: str

    def parse_class(self, class_root, filepath):  # type: (ET.Element, str) -> None
        """Parse one <class> XML root into a ClassDef registered on self.classes."""
        class_name = class_root.attrib["name"]
        class_def = ClassDef(class_name)
        self.classes[class_name] = class_def
        class_def.filepath = filepath
        inherits = class_root.get("inherits")
        if inherits is not None:
            class_def.inherits = inherits
        brief_desc = class_root.find("brief_description")
        if brief_desc is not None and brief_desc.text:
            class_def.brief_description = brief_desc.text
        desc = class_root.find("description")
        if desc is not None and desc.text:
            class_def.description = desc.text
        # Properties (<members>): duplicates are reported and skipped.
        properties = class_root.find("members")
        if properties is not None:
            for property in properties:
                assert property.tag == "member"
                property_name = property.attrib["name"]
                if property_name in class_def.properties:
                    print_error("Duplicate property '{}', file: {}".format(property_name, class_name), self)
                    continue
                type_name = TypeName.from_element(property)
                setter = property.get("setter") or None  # Use or None so '' gets turned into None.
                getter = property.get("getter") or None
                default_value = property.get("default") or None
                if default_value is not None:
                    # Render defaults as inline code in the output tables.
                    default_value = "``{}``".format(default_value)
                overridden = property.get("override") or False
                property_def = PropertyDef(
                    property_name, type_name, setter, getter, property.text, default_value, overridden
                )
                class_def.properties[property_name] = property_def
        # Methods: overloads are allowed, grouped in a list per name.
        methods = class_root.find("methods")
        if methods is not None:
            for method in methods:
                assert method.tag == "method"
                method_name = method.attrib["name"]
                qualifiers = method.get("qualifiers")
                return_element = method.find("return")
                if return_element is not None:
                    return_type = TypeName.from_element(return_element)
                else:
                    # Missing <return> means the method returns nothing.
                    return_type = TypeName("void")
                params = parse_arguments(method)
                desc_element = method.find("description")
                method_desc = None
                if desc_element is not None:
                    method_desc = desc_element.text
                method_def = MethodDef(method_name, return_type, params, method_desc, qualifiers)
                if method_name not in class_def.methods:
                    class_def.methods[method_name] = []
                class_def.methods[method_name].append(method_def)
        # Constants: plain constants go to class_def.constants, enum members
        # are grouped into EnumDef entries keyed by the enum name.
        constants = class_root.find("constants")
        if constants is not None:
            for constant in constants:
                assert constant.tag == "constant"
                constant_name = constant.attrib["name"]
                value = constant.attrib["value"]
                enum = constant.get("enum")
                constant_def = ConstantDef(constant_name, value, constant.text)
                if enum is None:
                    if constant_name in class_def.constants:
                        print_error("Duplicate constant '{}', file: {}".format(constant_name, class_name), self)
                        continue
                    class_def.constants[constant_name] = constant_def
                else:
                    if enum in class_def.enums:
                        enum_def = class_def.enums[enum]
                    else:
                        enum_def = EnumDef(enum)
                        class_def.enums[enum] = enum_def
                    enum_def.values[constant_name] = constant_def
        # Signals: duplicates are reported and skipped.
        signals = class_root.find("signals")
        if signals is not None:
            for signal in signals:
                assert signal.tag == "signal"
                signal_name = signal.attrib["name"]
                if signal_name in class_def.signals:
                    print_error("Duplicate signal '{}', file: {}".format(signal_name, class_name), self)
                    continue
                params = parse_arguments(signal)
                desc_element = signal.find("description")
                signal_desc = None
                if desc_element is not None:
                    signal_desc = desc_element.text
                signal_def = SignalDef(signal_name, params, signal_desc)
                class_def.signals[signal_name] = signal_def
        # Theme items: keyed by "<data_type>_<name>" so items with the same
        # name but different data types can coexist.
        theme_items = class_root.find("theme_items")
        if theme_items is not None:
            for theme_item in theme_items:
                assert theme_item.tag == "theme_item"
                theme_item_name = theme_item.attrib["name"]
                theme_item_data_name = theme_item.attrib["data_type"]
                theme_item_id = "{}_{}".format(theme_item_data_name, theme_item_name)
                if theme_item_id in class_def.theme_items:
                    print_error(
                        "Duplicate theme property '{}' of type '{}', file: {}".format(
                            theme_item_name, theme_item_data_name, class_name
                        ),
                        self,
                    )
                    continue
                default_value = theme_item.get("default") or None
                if default_value is not None:
                    default_value = "``{}``".format(default_value)
                theme_item_def = ThemeItemDef(
                    theme_item_name,
                    TypeName.from_element(theme_item),
                    theme_item_data_name,
                    theme_item.text,
                    default_value,
                )
                class_def.theme_items[theme_item_id] = theme_item_def
        # Tutorials: collected as (url, title) pairs.
        tutorials = class_root.find("tutorials")
        if tutorials is not None:
            for link in tutorials:
                assert link.tag == "link"
                if link.text is not None:
                    class_def.tutorials.append((link.text.strip(), link.get("title", "")))

    def sort_classes(self):  # type: () -> None
        """Re-order self.classes alphabetically by class name."""
        self.classes = OrderedDict(sorted(self.classes.items(), key=lambda t: t[0]))
def parse_arguments(root):  # type: (ET.Element) -> List[ParameterDef]
    """Collect the <argument> children of *root*, ordered by their ``index`` attribute."""
    elements = root.findall("argument")
    # Pre-size so arguments can be placed by declared index, not document order.
    params = [None] * len(elements)  # type: Any
    for el in elements:
        slot = int(el.attrib["index"])
        params[slot] = ParameterDef(el.attrib["name"], TypeName.from_element(el), el.get("default"))
    cast = params  # type: List[ParameterDef]
    return cast
def main():  # type: () -> None
    """CLI entry point: collect XML class files, parse them, and emit .rst pages."""
    parser = argparse.ArgumentParser()
    parser.add_argument("path", nargs="+", help="A path to an XML file or a directory containing XML files to parse.")
    parser.add_argument("--filter", default="", help="The filepath pattern for XML files to filter.")
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--output", "-o", default=".", help="The directory to save output .rst files in.")
    group.add_argument(
        "--dry-run",
        action="store_true",
        help="If passed, no output will be generated and XML files are only checked for errors.",
    )
    args = parser.parse_args()
    print("Checking for errors in the XML class reference...")
    file_list = []  # type: List[str]
    for path in args.path:
        # Cut off trailing slashes so os.path.basename doesn't choke.
        if path.endswith(os.sep):
            path = path[:-1]
        if os.path.basename(path) == "modules":
            # A "modules" tree: gather every modules/*/doc_classes/*.xml.
            for subdir, dirs, _ in os.walk(path):
                if "doc_classes" in dirs:
                    doc_dir = os.path.join(subdir, "doc_classes")
                    class_file_names = (f for f in os.listdir(doc_dir) if f.endswith(".xml"))
                    file_list += (os.path.join(doc_dir, f) for f in class_file_names)
        elif os.path.isdir(path):
            file_list += (os.path.join(path, f) for f in os.listdir(path) if f.endswith(".xml"))
        elif os.path.isfile(path):
            if not path.endswith(".xml"):
                print("Got non-.xml file '{}' in input, skipping.".format(path))
                continue
            file_list.append(path)
    # Maps class name -> (root element, source path).
    classes = {}  # type: Dict[str, Tuple[ET.Element, str]]
    state = State()
    for cur_file in file_list:
        try:
            tree = ET.parse(cur_file)
        except ET.ParseError as e:
            print_error("Parse error reading file '{}': {}".format(cur_file, e), state)
            continue
        doc = tree.getroot()
        if "version" not in doc.attrib:
            print_error("Version missing from 'doc', file: {}".format(cur_file), state)
            continue
        name = doc.attrib["name"]
        if name in classes:
            print_error("Duplicate class '{}'".format(name), state)
            continue
        classes[name] = (doc, cur_file)
    for name, data in classes.items():
        try:
            state.parse_class(data[0], data[1])
        except Exception as e:
            print_error("Exception while parsing class '{}': {}".format(name, e), state)
    state.sort_classes()
    pattern = re.compile(args.filter)
    # Create the output folder recursively if it doesn't already exist.
    os.makedirs(args.output, exist_ok=True)
    for class_name, class_def in state.classes.items():
        if args.filter and not pattern.search(class_def.filepath):
            continue
        state.current_class = class_name
        make_rst_class(class_def, state, args.dry_run, args.output)
    if not state.errored:
        print("No errors found.")
        if not args.dry_run:
            print("Wrote reStructuredText files for each class to: %s" % args.output)
    else:
        print("Errors were found in the class reference XML. Please check the messages above.")
        exit(1)
def make_rst_class(class_def, state, dry_run, output_dir):  # type: (ClassDef, State, bool, str) -> None
    """Write the reStructuredText documentation page for one class.

    FIX: the output file is now opened in a ``with`` block, so the handle is
    always flushed and closed even if a write fails part-way through
    (previously the handle was left for the garbage collector to close).
    Dry runs still exercise every formatting path by writing to os.devnull.
    """
    class_name = class_def.name
    if dry_run:
        out_path = os.devnull
    else:
        out_path = os.path.join(output_dir, "class_" + class_name.lower() + ".rst")
    with open(out_path, "w", encoding="utf-8") as f:
        # Warn contributors not to edit this file directly
        f.write(":github_url: hide\n\n")
        f.write(".. Generated automatically by doc/tools/makerst.py in Godot's source tree.\n")
        f.write(".. DO NOT EDIT THIS FILE, but the " + class_name + ".xml source instead.\n")
        f.write(".. The source is found in doc/classes or modules/<name>/doc_classes.\n\n")
        f.write(".. _class_" + class_name + ":\n\n")
        f.write(make_heading(class_name, "="))
        # Inheritance tree
        # Ascendants: walk up the inherits chain while it stays resolvable.
        if class_def.inherits:
            inh = class_def.inherits.strip()
            f.write("**Inherits:** ")
            first = True
            while inh in state.classes:
                if not first:
                    f.write(" **<** ")
                else:
                    first = False
                f.write(make_type(inh, state))
                inode = state.classes[inh].inherits
                if inode:
                    inh = inode.strip()
                else:
                    break
            f.write("\n\n")
        # Descendents: every class that names us as its direct parent.
        inherited = []
        for c in state.classes.values():
            if c.inherits and c.inherits.strip() == class_name:
                inherited.append(c.name)
        if len(inherited):
            f.write("**Inherited By:** ")
            for i, child in enumerate(inherited):
                if i > 0:
                    f.write(", ")
                f.write(make_type(child, state))
            f.write("\n\n")
        # Brief description
        if class_def.brief_description is not None:
            f.write(rstize_text(class_def.brief_description.strip(), state) + "\n\n")
        # Class description
        if class_def.description is not None and class_def.description.strip() != "":
            f.write(make_heading("Description", "-"))
            f.write(rstize_text(class_def.description.strip(), state) + "\n\n")
        # Online tutorials
        if len(class_def.tutorials) > 0:
            f.write(make_heading("Tutorials", "-"))
            for url, title in class_def.tutorials:
                f.write("- " + make_link(url, title) + "\n\n")
        # Properties overview
        if len(class_def.properties) > 0:
            f.write(make_heading("Properties", "-"))
            ml = []  # type: List[Tuple[str, str, str]]
            for property_def in class_def.properties.values():
                type_rst = property_def.type_name.to_rst(state)
                default = property_def.default_value
                if default is not None and property_def.overridden:
                    # Overridden properties get no anchor of their own.
                    ml.append((type_rst, property_def.name, default + " *(parent override)*"))
                else:
                    ref = ":ref:`{0}<class_{1}_property_{0}>`".format(property_def.name, class_name)
                    ml.append((type_rst, ref, default))
            format_table(f, ml, True)
        # Methods overview
        if len(class_def.methods) > 0:
            f.write(make_heading("Methods", "-"))
            ml = []
            for method_list in class_def.methods.values():
                for m in method_list:
                    ml.append(make_method_signature(class_def, m, True, state))
            format_table(f, ml)
        # Theme properties
        if len(class_def.theme_items) > 0:
            f.write(make_heading("Theme Properties", "-"))
            pl = []
            for theme_item_def in class_def.theme_items.values():
                ref = ":ref:`{0}<class_{2}_theme_{1}_{0}>`".format(
                    theme_item_def.name, theme_item_def.data_name, class_name
                )
                pl.append((theme_item_def.type_name.to_rst(state), ref, theme_item_def.default_value))
            format_table(f, pl, True)
        # Signals
        if len(class_def.signals) > 0:
            f.write(make_heading("Signals", "-"))
            index = 0
            for signal in class_def.signals.values():
                if index != 0:
                    f.write("----\n\n")
                f.write(".. _class_{}_signal_{}:\n\n".format(class_name, signal.name))
                _, signature = make_method_signature(class_def, signal, False, state)
                f.write("- {}\n\n".format(signature))
                if signal.description is not None and signal.description.strip() != "":
                    f.write(rstize_text(signal.description.strip(), state) + "\n\n")
                index += 1
        # Enums
        if len(class_def.enums) > 0:
            f.write(make_heading("Enumerations", "-"))
            index = 0
            for e in class_def.enums.values():
                if index != 0:
                    f.write("----\n\n")
                f.write(".. _enum_{}_{}:\n\n".format(class_name, e.name))
                # Sphinx seems to divide the bullet list into individual <ul> tags if we weave the labels into it.
                # As such I'll put them all above the list. Won't be perfect but better than making the list visually broken.
                # As to why I'm not modifying the reference parser to directly link to the _enum label:
                # If somebody gets annoyed enough to fix it, all existing references will magically improve.
                for value in e.values.values():
                    f.write(".. _class_{}_constant_{}:\n\n".format(class_name, value.name))
                f.write("enum **{}**:\n\n".format(e.name))
                for value in e.values.values():
                    f.write("- **{}** = **{}**".format(value.name, value.value))
                    if value.text is not None and value.text.strip() != "":
                        f.write(" --- " + rstize_text(value.text.strip(), state))
                    f.write("\n\n")
                index += 1
        # Constants
        if len(class_def.constants) > 0:
            f.write(make_heading("Constants", "-"))
            # Sphinx seems to divide the bullet list into individual <ul> tags if we weave the labels into it.
            # As such I'll put them all above the list. Won't be perfect but better than making the list visually broken.
            for constant in class_def.constants.values():
                f.write(".. _class_{}_constant_{}:\n\n".format(class_name, constant.name))
            for constant in class_def.constants.values():
                f.write("- **{}** = **{}**".format(constant.name, constant.value))
                if constant.text is not None and constant.text.strip() != "":
                    f.write(" --- " + rstize_text(constant.text.strip(), state))
                f.write("\n\n")
        # Property descriptions (overridden properties appear only in the
        # overview table).  FIX: dropped the original's redundant
        # "any(...) > 0" bool-to-int comparison; the truth value is the same.
        if any(not p.overridden for p in class_def.properties.values()):
            f.write(make_heading("Property Descriptions", "-"))
            index = 0
            for property_def in class_def.properties.values():
                if property_def.overridden:
                    continue
                if index != 0:
                    f.write("----\n\n")
                f.write(".. _class_{}_property_{}:\n\n".format(class_name, property_def.name))
                f.write("- {} **{}**\n\n".format(property_def.type_name.to_rst(state), property_def.name))
                info = []
                if property_def.default_value is not None:
                    info.append(("*Default*", property_def.default_value))
                # Private (underscore-prefixed) accessors are not advertised.
                if property_def.setter is not None and not property_def.setter.startswith("_"):
                    info.append(("*Setter*", property_def.setter + "(value)"))
                if property_def.getter is not None and not property_def.getter.startswith("_"):
                    info.append(("*Getter*", property_def.getter + "()"))
                if len(info) > 0:
                    format_table(f, info)
                if property_def.text is not None and property_def.text.strip() != "":
                    f.write(rstize_text(property_def.text.strip(), state) + "\n\n")
                index += 1
        # Method descriptions
        if len(class_def.methods) > 0:
            f.write(make_heading("Method Descriptions", "-"))
            index = 0
            for method_list in class_def.methods.values():
                for i, m in enumerate(method_list):
                    if index != 0:
                        f.write("----\n\n")
                    # Only the first overload carries the anchor.
                    if i == 0:
                        f.write(".. _class_{}_method_{}:\n\n".format(class_name, m.name))
                    ret_type, signature = make_method_signature(class_def, m, False, state)
                    f.write("- {} {}\n\n".format(ret_type, signature))
                    if m.description is not None and m.description.strip() != "":
                        f.write(rstize_text(m.description.strip(), state) + "\n\n")
                    index += 1
        # Theme property descriptions
        if len(class_def.theme_items) > 0:
            f.write(make_heading("Theme Property Descriptions", "-"))
            index = 0
            for theme_item_def in class_def.theme_items.values():
                if index != 0:
                    f.write("----\n\n")
                f.write(".. _class_{}_theme_{}_{}:\n\n".format(class_name, theme_item_def.data_name, theme_item_def.name))
                f.write("- {} **{}**\n\n".format(theme_item_def.type_name.to_rst(state), theme_item_def.name))
                info = []
                if theme_item_def.default_value is not None:
                    info.append(("*Default*", theme_item_def.default_value))
                if len(info) > 0:
                    format_table(f, info)
                if theme_item_def.text is not None and theme_item_def.text.strip() != "":
                    f.write(rstize_text(theme_item_def.text.strip(), state) + "\n\n")
                index += 1
        f.write(make_footer())
def escape_rst(text, until_pos=-1):  # type: (str, int) -> str
    """Escape RST metacharacters (``\\``, ``*`` and word-final ``_``) in text.

    Only the region ``[0, until_pos)`` is scanned; the default ``-1`` keeps
    the historical ``str.find`` semantics (the last character is excluded).
    """
    # Escape \ character, otherwise it ends up as an escape character in rst
    pos = 0
    while True:
        pos = text.find("\\", pos, until_pos)
        if pos == -1:
            break
        text = text[:pos] + "\\\\" + text[pos + 1 :]
        pos += 2  # skip both inserted backslashes
    # Escape * character to avoid interpreting it as emphasis
    pos = 0
    while True:
        pos = text.find("*", pos, until_pos)
        if pos == -1:
            break
        text = text[:pos] + "\*" + text[pos + 1 :]
        pos += 2
    # Escape _ character at the end of a word to avoid interpreting it as an inline hyperlink
    pos = 0
    while True:
        pos = text.find("_", pos, until_pos)
        if pos == -1:
            break
        # Guard against an IndexError when the underscore is the very last
        # character of the text (reachable when until_pos >= len(text)).
        if pos + 1 == len(text) or not text[pos + 1].isalnum():  # don't escape within a snake_case word
            text = text[:pos] + "\_" + text[pos + 1 :]
            pos += 2
        else:
            pos += 1
    return text
def format_codeblock(code_type, post_text, indent_level, state):  # type: (str, str, int, State) -> Optional[List]
    """Re-indent the body of a BBCode ``[codeblock]``-style section for RST.

    Returns ``[updated_text, consumed_length]`` where ``updated_text`` is the
    opening tag plus the re-indented code body (and the remaining text), and
    ``consumed_length`` tells the caller how far to advance its scan position.
    Returns ``None`` when the closing tag is missing (error already reported).
    """
    end_pos = post_text.find("[/" + code_type + "]")
    if end_pos == -1:
        print_error("[" + code_type + "] without a closing tag, file: {}".format(state.current_class), state)
        return None
    # Text between the opening and closing tags; post_text keeps the closing tag.
    code_text = post_text[len("[" + code_type + "]") : end_pos]
    post_text = post_text[end_pos:]
    # Remove extraneous tabs
    code_pos = 0
    while True:
        code_pos = code_text.find("\n", code_pos)
        if code_pos == -1:
            break
        # Count the tabs that follow this newline (the XML indentation).
        to_skip = 0
        while code_pos + to_skip + 1 < len(code_text) and code_text[code_pos + to_skip + 1] == "\t":
            to_skip += 1
        if to_skip > indent_level:
            print_error(
                "Four spaces should be used for indentation within ["
                + code_type
                + "], file: {}".format(state.current_class),
                state,
            )
        if len(code_text[code_pos + to_skip + 1 :]) == 0:
            # Blank line: keep only the newline, drop the tabs.
            code_text = code_text[:code_pos] + "\n"
            code_pos += 1
        else:
            # Replace the leading tabs with the fixed four-space RST indent.
            code_text = code_text[:code_pos] + "\n    " + code_text[code_pos + to_skip + 1 :]
            code_pos += 5 - to_skip
    return ["\n[" + code_type + "]" + code_text + post_text, len("\n[" + code_type + "]" + code_text)]
def rstize_text(text, state):  # type: (str, State) -> str
    """Convert Godot's BBCode-like XML documentation markup into RST.

    First normalizes linebreaks/indentation (delegating [codeblock]-style
    sections to ``format_codeblock``), then rewrites inline ``[tags]`` —
    class/member/method references, URLs, and formatting commands — while
    escaping RST metacharacters in the surrounding text. Unresolved
    references are reported via ``print_error``.
    """
    # Linebreak + tabs in the XML should become two line breaks unless in a "codeblock"
    pos = 0
    while True:
        pos = text.find("\n", pos)
        if pos == -1:
            break
        pre_text = text[:pos]
        indent_level = 0
        while text[pos + 1] == "\t":
            pos += 1
            indent_level += 1
        post_text = text[pos + 1 :]
        # Handle codeblocks
        if (
            post_text.startswith("[codeblock]")
            or post_text.startswith("[gdscript]")
            or post_text.startswith("[csharp]")
        ):
            block_type = post_text[1:].split("]")[0]
            result = format_codeblock(block_type, post_text, indent_level, state)
            if result is None:
                return ""
            text = pre_text + result[0]
            pos += result[1]
        # Handle normal text
        else:
            text = pre_text + "\n\n" + post_text
            pos += 2
    # Escape RST specials only up to the first tag; tags are handled below.
    next_brac_pos = text.find("[")
    text = escape_rst(text, next_brac_pos)
    # Handle [tags]
    inside_code = False
    inside_url = False
    url_has_name = False
    url_link = ""
    pos = 0
    tag_depth = 0
    previous_pos = 0
    while True:
        pos = text.find("[", pos)
        # Any text between [url=...] and the next tag counts as the link name.
        if inside_url and (pos > previous_pos):
            url_has_name = True
        if pos == -1:
            break
        endq_pos = text.find("]", pos + 1)
        if endq_pos == -1:
            break
        pre_text = text[:pos]
        post_text = text[endq_pos + 1 :]
        tag_text = text[pos + 1 : endq_pos]
        escape_post = False
        if tag_text in state.classes:
            if tag_text == state.current_class:
                # We don't want references to the same class
                tag_text = "``{}``".format(tag_text)
            else:
                tag_text = make_type(tag_text, state)
            escape_post = True
        else:  # command
            cmd = tag_text
            space_pos = tag_text.find(" ")
            if cmd == "/codeblock" or cmd == "/gdscript" or cmd == "/csharp":
                tag_text = ""
                tag_depth -= 1
                inside_code = False
                # Strip newline if the tag was alone on one
                if pre_text[-1] == "\n":
                    pre_text = pre_text[:-1]
            elif cmd == "/code":
                tag_text = "``"
                tag_depth -= 1
                inside_code = False
                escape_post = True
            elif inside_code:
                # Inside a code block: tags are literal text, not markup.
                tag_text = "[" + tag_text + "]"
            elif cmd.find("html") == 0:
                param = tag_text[space_pos + 1 :]
                tag_text = param
            elif (
                cmd.startswith("method")
                or cmd.startswith("member")
                or cmd.startswith("signal")
                or cmd.startswith("constant")
            ):
                param = tag_text[space_pos + 1 :]
                if param.find(".") != -1:
                    # Qualified reference, e.g. [method SomeClass.some_method].
                    ss = param.split(".")
                    if len(ss) > 2:
                        print_error("Bad reference: '{}', file: {}".format(param, state.current_class), state)
                    class_param, method_param = ss
                else:
                    class_param = state.current_class
                    method_param = param
                ref_type = ""
                if class_param in state.classes:
                    class_def = state.classes[class_param]
                    if cmd.startswith("method"):
                        if method_param not in class_def.methods:
                            print_error("Unresolved method '{}', file: {}".format(param, state.current_class), state)
                        ref_type = "_method"
                    elif cmd.startswith("member"):
                        if method_param not in class_def.properties:
                            print_error("Unresolved member '{}', file: {}".format(param, state.current_class), state)
                        ref_type = "_property"
                    elif cmd.startswith("signal"):
                        if method_param not in class_def.signals:
                            print_error("Unresolved signal '{}', file: {}".format(param, state.current_class), state)
                        ref_type = "_signal"
                    elif cmd.startswith("constant"):
                        found = False
                        # Search in the current class
                        search_class_defs = [class_def]
                        if param.find(".") == -1:
                            # Also search in @GlobalScope as a last resort if no class was specified
                            search_class_defs.append(state.classes["@GlobalScope"])
                        for search_class_def in search_class_defs:
                            if method_param in search_class_def.constants:
                                class_param = search_class_def.name
                                found = True
                            else:
                                # Constants may also live inside one of the class's enums.
                                for enum in search_class_def.enums.values():
                                    if method_param in enum.values:
                                        class_param = search_class_def.name
                                        found = True
                                        break
                        if not found:
                            print_error("Unresolved constant '{}', file: {}".format(param, state.current_class), state)
                        ref_type = "_constant"
                else:
                    print_error(
                        "Unresolved type reference '{}' in method reference '{}', file: {}".format(
                            class_param, param, state.current_class
                        ),
                        state,
                    )
                repl_text = method_param
                if class_param != state.current_class:
                    repl_text = "{}.{}".format(class_param, method_param)
                tag_text = ":ref:`{}<class_{}{}_{}>`".format(repl_text, class_param, ref_type, method_param)
                escape_post = True
            elif cmd.find("image=") == 0:
                tag_text = ""  # ''
            elif cmd.find("url=") == 0:
                url_link = cmd[4:]
                tag_text = "`"
                tag_depth += 1
                inside_url = True
                url_has_name = False
            elif cmd == "/url":
                # Use the URL itself as the display text when no name was given.
                tag_text = ("" if url_has_name else url_link) + " <" + url_link + ">`_"
                tag_depth -= 1
                escape_post = True
                inside_url = False
                url_has_name = False
            elif cmd == "center":
                tag_depth += 1
                tag_text = ""
            elif cmd == "/center":
                tag_depth -= 1
                tag_text = ""
            elif cmd == "codeblock":
                tag_depth += 1
                tag_text = "\n::\n"
                inside_code = True
            elif cmd == "gdscript":
                tag_depth += 1
                tag_text = "\n .. code-tab:: gdscript\n"
                inside_code = True
            elif cmd == "csharp":
                tag_depth += 1
                tag_text = "\n .. code-tab:: csharp\n"
                inside_code = True
            elif cmd == "codeblocks":
                tag_depth += 1
                tag_text = "\n.. tabs::"
            elif cmd == "/codeblocks":
                tag_depth -= 1
                tag_text = ""
            elif cmd == "br":
                # Make a new paragraph instead of a linebreak, rst is not so linebreak friendly
                tag_text = "\n\n"
                # Strip potential leading spaces
                while post_text[0] == " ":
                    post_text = post_text[1:]
            elif cmd == "i" or cmd == "/i":
                if cmd == "/i":
                    tag_depth -= 1
                else:
                    tag_depth += 1
                tag_text = "*"
            elif cmd == "b" or cmd == "/b":
                if cmd == "/b":
                    tag_depth -= 1
                else:
                    tag_depth += 1
                tag_text = "**"
            elif cmd == "u" or cmd == "/u":
                if cmd == "/u":
                    tag_depth -= 1
                else:
                    tag_depth += 1
                tag_text = ""
            elif cmd == "code":
                tag_text = "``"
                tag_depth += 1
                inside_code = True
            elif cmd == "kbd":
                tag_text = ":kbd:`"
                tag_depth += 1
            elif cmd == "/kbd":
                tag_text = "`"
                tag_depth -= 1
            elif cmd.startswith("enum "):
                tag_text = make_enum(cmd[5:], state)
                escape_post = True
            else:
                # Fallback: treat the tag as a plain class-name reference.
                tag_text = make_type(tag_text, state)
                escape_post = True
        # Properly escape things like `[Node]s`
        if escape_post and post_text and (post_text[0].isalnum() or post_text[0] == "("):  # not punctuation, escape
            post_text = "\ " + post_text
        # Escape RST specials in the plain text up to the next tag.
        next_brac_pos = post_text.find("[", 0)
        iter_pos = 0
        while not inside_code:
            iter_pos = post_text.find("*", iter_pos, next_brac_pos)
            if iter_pos == -1:
                break
            post_text = post_text[:iter_pos] + "\*" + post_text[iter_pos + 1 :]
            iter_pos += 2
        iter_pos = 0
        while not inside_code:
            iter_pos = post_text.find("_", iter_pos, next_brac_pos)
            if iter_pos == -1:
                break
            if not post_text[iter_pos + 1].isalnum():  # don't escape within a snake_case word
                post_text = post_text[:iter_pos] + "\_" + post_text[iter_pos + 1 :]
                iter_pos += 2
            else:
                iter_pos += 1
        text = pre_text + tag_text + post_text
        pos = len(pre_text) + len(tag_text)
        previous_pos = pos
    if tag_depth > 0:
        print_error("Tag depth mismatch: too many/little open/close tags, file: {}".format(state.current_class), state)
    return text
def format_table(f, data, remove_empty_columns=False):  # type: (TextIO, Iterable[Tuple[str, ...]], bool) -> None
    """Write ``data`` to ``f`` as a reStructuredText grid table.

    Column widths follow the widest cell; when ``remove_empty_columns`` is
    set, columns whose every cell is empty are dropped entirely.
    """
    if len(data) == 0:
        return
    # Widest cell per column determines each column's width.
    widths = [0] * len(data[0])
    for row in data:
        for col, cell in enumerate(row):
            widths[col] = max(widths[col], len(cell or ""))
    def _keep(col):
        return widths[col] > 0 or not remove_empty_columns
    # Horizontal rule shared by the header and every row separator.
    sep = "".join("+" + "-" * (w + 2) for col, w in enumerate(widths) if _keep(col)) + "+\n"
    f.write(sep)
    for row in data:
        cells = (
            " " + (cell or "").ljust(widths[col]) + " |"
            for col, cell in enumerate(row)
            if _keep(col)
        )
        f.write("|" + "".join(cells) + "\n")
        f.write(sep)
    f.write("\n")
def make_type(klass, state):  # type: (str, State) -> str
    """Return an RST cross-reference for ``klass``, or the raw name if unknown."""
    # Typed arrays such as "Node[]" link to the contained type.
    target = klass[:-2] if klass.endswith("[]") else klass
    if target in state.classes:
        return ":ref:`{}<class_{}>`".format(klass, target)
    print_error("Unresolved type '{}', file: {}".format(klass, state.current_class), state)
    return klass
def make_enum(t, state):  # type: (str, State) -> str
    """Return an RST cross-reference for an enum name such as ``Class.Enum``."""
    dot = t.find(".")
    if dot >= 0:
        c, e = t[:dot], t[dot + 1 :]
        # Variant enums live in GlobalScope but still use periods.
        if c == "Variant":
            c = "@GlobalScope"
            e = "Variant." + e
    else:
        # Unqualified name: assume the current class, falling back to
        # @GlobalScope when the current class has no such enum.
        c, e = state.current_class, t
        if c in state.classes and e not in state.classes[c].enums:
            c = "@GlobalScope"
    if not c in state.classes and c.startswith("_"):
        c = c[1:]  # Remove the underscore prefix
    if c in state.classes and e in state.classes[c].enums:
        return ":ref:`{0}<enum_{1}_{0}>`".format(e, c)
    # Don't fail for `Vector3.Axis`, as this enum is a special case which is expected not to be resolved.
    if "{}.{}".format(c, e) != "Vector3.Axis":
        print_error("Unresolved enum '{}', file: {}".format(t, state.current_class), state)
    return t
def make_method_signature(
    class_def, method_def, make_ref, state
):  # type: (ClassDef, Union[MethodDef, SignalDef], bool, State) -> Tuple[str, str]
    """Build the (return type, signature) RST pair for a method or signal.

    Methods get their declared return type and qualifier substitutions;
    signals get a blank return type. When ``make_ref`` is set the name is
    emitted as a cross-reference instead of bold text.
    """
    is_method = isinstance(method_def, MethodDef)
    ret_type = method_def.return_type.to_rst(state) if is_method else " "
    ref_type = "method" if is_method else "signal"
    if make_ref:
        out = ":ref:`{0}<class_{1}_{2}_{0}>` ".format(method_def.name, class_def.name, ref_type)
    else:
        out = "**{}** ".format(method_def.name)
    out += "**(**"
    for index, arg in enumerate(method_def.parameters):
        out += ", " if index > 0 else " "
        out += "{} {}".format(arg.type_name.to_rst(state), arg.name)
        if arg.default_value is not None:
            out += "=" + arg.default_value
    qualifiers = method_def.qualifiers if is_method else None
    if qualifiers is not None and "vararg" in qualifiers:
        out += ", ..." if len(method_def.parameters) > 0 else " ..."
    out += " **)**"
    if qualifiers is not None:
        # Use substitutions for abbreviations. This is used to display tooltips on hover.
        # See `make_footer()` for descriptions.
        for qualifier in qualifiers.split():
            out += " |" + qualifier + "|"
    return ret_type, out
def make_heading(title, underline):  # type: (str, str) -> str
    """Return an RST heading: the title underlined to its full width."""
    return "{}\n{}\n\n".format(title, underline * len(title))
def make_footer():  # type: () -> str
    """Return the abbreviation-substitution footer appended to every page.

    Generating the substitutions once avoids bloating the generated rST
    with duplicate abbreviation definitions; each renders as a tooltip.
    """
    abbreviations = (
        ("virtual", "This method should typically be overridden by the user to have any effect."),
        ("const", "This method has no side effects. It doesn't modify any of the instance's member variables."),
        ("vararg", "This method accepts any number of arguments after the ones described here."),
        ("constructor", "This method is used to construct a type."),
        ("operator", "This method describes a valid operator to use with this type as left-hand operand."),
    )
    return "".join(
        ".. |{0}| replace:: :abbr:`{0} ({1})`\n".format(name, description)
        for name, description in abbreviations
    )
def make_link(url, title):  # type: (str, str) -> str
    """Turn ``url`` into RST markup, special-casing official-docs links."""
    match = GODOT_DOCS_PATTERN.search(url)
    if match:
        groups = match.groups()
        if match.lastindex == 2:
            # Doc reference with fragment identifier: emit direct link to section with reference to page, for example:
            # `#calling-javascript-from-script in Exporting For Web`
            return "`{1} <../{0}.html{1}>`_ in :doc:`../{0}`".format(groups[0], groups[1])
            # Commented out alternative: Instead just emit:
            # `Subsection in Exporting For Web`
            # return "`Subsection <../" + groups[0] + ".html" + groups[1] + ">`__ in :doc:`../" + groups[0] + "`"
        elif match.lastindex == 1:
            # Doc reference, for example:
            # `Math`
            return ":doc:`../{}`".format(groups[0])
    else:
        # External link, for example:
        # `http://enet.bespin.org/usergroup0.html`
        if title != "":
            return "`{} <{}>`_".format(title, url)
        return "`{0} <{0}>`_".format(url)
if __name__ == "__main__":
    # Script entry point; delegates to ``main()`` (defined elsewhere in this file).
    main()
| |
"""Models for the ``django-tinylinks`` app."""
import socket
from http.cookiejar import CookieJar
from urllib.request import HTTPCookieProcessor, Request, build_opener, urlopen
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from urllib3 import PoolManager
from urllib3.exceptions import HTTPError, MaxRetryError, TimeoutError
User = get_user_model()
def get_url_response(pool, link, url):
    """
    Open ``url`` through the given urllib3 pool and return the response.

    On failure this returns ``False`` and records a human-readable message
    on ``link.validation_error``. The link is pre-marked as broken; the
    caller is expected to clear ``is_broken`` once the response checks out.
    """
    # Assume the worst until the caller validates the response.
    link.is_broken = True
    link.redirect_location = ""
    # Try to encode e.g. chinese letters
    try:
        encoded = url.encode("utf-8")
    except UnicodeEncodeError:
        link.validation_error = _("Unicode error. Check URL characters.")
        return False
    try:
        return pool.urlopen("GET", encoded.decode(), retries=2, timeout=8.0)
    except TimeoutError:
        link.validation_error = _("Timeout after 8 seconds.")
    except MaxRetryError:
        link.validation_error = _("Failed after retrying twice.")
    except (HTTPError, socket.gaierror):
        link.validation_error = _("Not found.")
    return False
def validate_long_url(link):
    """
    Validate ``link.long_url`` by requesting it with urllib3.

    Updates ``is_broken``, ``validation_error``, ``redirect_location`` and
    ``last_checked`` on the link, saves it, and returns the link.
    """
    http = PoolManager()
    response = get_url_response(http, link, link.long_url)
    if response and response.status == 200:
        link.is_broken = False
    elif response and response.status == 302:
        # If link is redirected, validate the redirect location.
        if link.long_url.endswith(".pdf"):
            # Non-safe pdf exception, to avoid relative path redirects.
            link.is_broken = False
        else:
            redirect_location = response.get_redirect_location()
            redirect = get_url_response(http, link, redirect_location)
            link.redirect_location = redirect_location
            # BUGFIX: ``redirect`` is False when the redirect target could not
            # be fetched at all; accessing ``.status`` on it raised an
            # AttributeError. get_url_response already set validation_error.
            if redirect and redirect.status == 200:
                link.is_broken = False
            elif redirect and redirect.status == 302:
                # Seems like an infinite loop. Maybe the server is looking for
                # a cookie?
                cj = CookieJar()
                opener = build_opener(HTTPCookieProcessor(cj))
                request = Request(response.get_redirect_location())
                response = opener.open(request)
                if response.code == 200:
                    link.is_broken = False
    elif response and response.status == 502:
        # Sometimes urllib3 responds with a 502. Those pages might respond
        # with a 200 in the browser, so re-check with urllib.
        try:
            response = urlopen(link.long_url, timeout=8.0)
        except HTTPError:
            link.validation_error = _("URL not accessible.")
        else:
            link.is_broken = False
    else:
        link.validation_error = _("URL not accessible.")
    link.last_checked = timezone.now()
    link.save()
    return link
class Tinylink(models.Model):
    """
    Model to 'translate' long URLs into small ones.
    :user: The author of the tinylink.
    :long_url: Long URL version.
    :short_url: Shortened URL.
    :is_broken: Set if the given long URL couldn't be validated.
    :validation_error: Description of the occurred error.
    :last_checked: Datetime of the last validation process.
    :amount_of_views: Field to count the redirect views.
    :redirect_location: Redirect location if the long_url is redirected.
    """
    user = models.ForeignKey(
        User,
        verbose_name=_("Author"),
        related_name="tinylinks",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
    )
    long_url = models.CharField(
        max_length=2500,
        verbose_name=_("Long URL"),
    )
    short_url = models.CharField(
        max_length=32,
        verbose_name=_("Short URL"),
        unique=True,
    )
    is_broken = models.BooleanField(
        default=False,
        verbose_name=_("Status"),
    )
    validation_error = models.CharField(
        max_length=100,
        verbose_name=_("Validation Error"),
        default="",
    )
    last_checked = models.DateTimeField(
        default=timezone.now,
        verbose_name=_("Last validation"),
    )
    amount_of_views = models.PositiveIntegerField(
        default=0,
        verbose_name=_("Amount of views"),
    )
    redirect_location = models.CharField(
        max_length=2500,
        verbose_name=_("Redirect location"),
        default="",
    )
    def get_short_url(self) -> str:
        """Return the full short URL (configured prefix + short_url)."""
        return "/".join(
            [getattr(settings, "TINYLINK_SHORT_URL_PREFIX", ""), str(self.short_url)]
        )
    def __str__(self):
        # BUGFIX: ``__unicode__`` is a Python 2 hook and is never called under
        # Python 3 (this module already uses Python-3-only imports), so the
        # model had no readable string representation. ``__str__`` restores it.
        return self.short_url
    class Meta:
        ordering = ["-id"]
    def can_be_validated(self):
        """
        URL can only be validated if the last validation was at least 1
        hour ago
        """
        if self.last_checked < timezone.now() - timezone.timedelta(minutes=60):
            return True
        return False
class TinylinkLog(models.Model):
    """
    Model to log the usage of the short links
    """
    # The link that was followed; nullable so log rows survive link deletion.
    tinylink = models.ForeignKey(
        "Tinylink",
        verbose_name=_("Tinylink"),
        blank=True,
        null=True,
        on_delete=models.SET_NULL,
    )
    # HTTP referrer of the request, if any.
    referrer = models.URLField(
        blank=True,
        max_length=512,
    )
    # Raw User-Agent header of the visitor.
    user_agent = models.TextField()
    # Presumably a visitor cookie value used to distinguish clients --
    # TODO(review): confirm against the view that writes these logs.
    cookie = models.CharField(
        max_length=127,
        blank=True,
        default="",
    )
    # IP address of the visitor (IPv4 or IPv6).
    remote_ip = models.GenericIPAddressField()
    # Set automatically when the log entry is created.
    datetime = models.DateTimeField(auto_now_add=True)
    # Flag consumed by whatever post-processes these logs -- TODO confirm.
    tracked = models.BooleanField(default=False)
    class Meta:
        ordering = ("-datetime",)
| |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import serial
import time
import re
def parse_visa_resource_string(resource_string):
    """Parse an ASRL VISA resource string into its components.

    Returns a dict with keys ``type``, ``prefix``, ``arg1`` and ``suffix``,
    or ``None`` when the string is not a valid ASRL resource.

    Valid resource strings:
    ASRL1::INSTR
    ASRL::COM1::INSTR
    ASRL::COM1,9600::INSTR
    ASRL::COM1,9600,8n1::INSTR
    ASRL::/dev/ttyUSB0::INSTR
    ASRL::/dev/ttyUSB0,9600::INSTR
    ASRL::/dev/ttyUSB0,9600,8n1::INSTR
    """
    # Raw string: ``\d``/``\s`` in a plain literal are invalid escape
    # sequences (a DeprecationWarning, and an error in future Pythons).
    m = re.match(r'^(?P<prefix>(?P<type>ASRL)\d*)(::(?P<arg1>[^\s:]+))?(::(?P<suffix>INSTR))$',
                 resource_string, re.I)
    if m is None:
        return None
    return dict(
        type=m.group('type').upper(),
        prefix=m.group('prefix'),
        arg1=m.group('arg1'),
        suffix=m.group('suffix'),
    )
class SerialInstrument:
    """Serial instrument interface client.

    Wraps a pyserial ``Serial`` port and provides VISA-style write/read/ask
    helpers with a configurable termination character.
    """
    def __init__(self, port=None, baudrate=9600, bytesize=8, paritymode=0, stopbits=1, timeout=None,
                 xonxoff=False, rtscts=False, dsrdtr=False):
        """Open the instrument.

        ``port`` may be a device name, an integer index, or a VISA resource
        string such as ``ASRL::COM1,9600::INSTR``. ``paritymode`` is
        0=none, 1=odd, 2=even, 3=mark, 4=space.
        """
        # BUGFIX: only strings can carry a VISA resource prefix; calling
        # ``port.upper()`` crashed with AttributeError for the default
        # ``port=None`` (and for integer port indices).
        if isinstance(port, str) and port.upper().startswith("ASRL") and '::' in port:
            res = parse_visa_resource_string(port)
            if res is None:
                raise IOError("Invalid resource string")
            index = res['prefix'][4:]
            if len(index) > 0:
                # ASRL<n>::INSTR form: the digits select the serial port.
                port = int(index)
            else:
                # port[,baud[,nps]]
                # n = data bits (5,6,7,8)
                # p = parity (n,o,e,m,s)
                # s = stop bits (1,1.5,2)
                t = res['arg1'].split(',')
                port = t[0]
                if len(t) > 1:
                    baudrate = int(t[1])
        self.serial = serial.Serial(port)
        self.term_char = '\n'
        self.port = port
        self.baudrate = baudrate
        self.bytesize = bytesize
        self.paritymode = paritymode
        self.stopbits = stopbits
        self.timeout = timeout
        self.xonxoff = xonxoff
        self.rtscts = rtscts
        self.dsrdtr = dsrdtr
        self.wait_dsr = False
        self.message_delay = 0
        self.update_settings()
    def update_settings(self):
        """Push the stored configuration onto the underlying pyserial port."""
        self.serial.baudrate = self.baudrate
        if self.bytesize == 5:
            self.serial.bytesize = serial.FIVEBITS
        elif self.bytesize == 6:
            self.serial.bytesize = serial.SIXBITS
        elif self.bytesize == 7:
            self.serial.bytesize = serial.SEVENBITS
        else:
            self.serial.bytesize = serial.EIGHTBITS
        # BUGFIX: pyserial exposes this setting as ``Serial.parity``; the
        # original assigned to a nonexistent ``paritymode`` attribute, so the
        # requested parity was silently ignored and the port kept its default.
        if self.paritymode == 1:
            self.serial.parity = serial.PARITY_ODD
        elif self.paritymode == 2:
            self.serial.parity = serial.PARITY_EVEN
        elif self.paritymode == 3:
            self.serial.parity = serial.PARITY_MARK
        elif self.paritymode == 4:
            self.serial.parity = serial.PARITY_SPACE
        else:
            self.serial.parity = serial.PARITY_NONE
        if self.stopbits == 1.5:
            self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
        elif self.stopbits == 2:
            self.serial.stopbits = serial.STOPBITS_TWO
        else:
            self.serial.stopbits = serial.STOPBITS_ONE
        self.serial.timeout = self.timeout
        self.serial.xonxoff = self.xonxoff
        self.serial.rtscts = self.rtscts
        self.serial.dsrdtr = self.dsrdtr
        if self.dsrdtr:
            # Hardware handshaking: give the device settling time and poll DSR.
            self.wait_dsr = True
            self.message_delay = 0.1
    def write_raw(self, data):
        "Write binary data to instrument"
        if self.term_char is not None:
            # Append the first byte of the (UTF-8 encoded) termination char.
            data += str(self.term_char).encode('utf-8')[0:1]
        self.serial.write(data)
        if self.message_delay > 0:
            time.sleep(self.message_delay)
        if self.wait_dsr:
            # Block until the instrument signals readiness via DSR.
            while not self.serial.getDSR():
                time.sleep(0.01)
    def read_raw(self, num=-1):
        "Read binary data from instrument"
        # Read until the termination character or until ``num`` bytes arrive
        # (``num < 0`` reads until termination only).
        data = b''
        term_char = str(self.term_char).encode('utf-8')[0:1]
        while True:
            c = self.serial.read(1)
            data += c
            num -= 1
            if c == term_char:
                break
            if num == 0:
                break
        return data
    def ask_raw(self, data, num=-1):
        "Write then read binary data"
        self.write_raw(data)
        return self.read_raw(num)
    def write(self, message, encoding='utf-8'):
        "Write string to instrument"
        if type(message) is tuple or type(message) is list:
            # recursive call for a list of commands
            for message_i in message:
                self.write(message_i, encoding)
            return
        self.write_raw(str(message).encode(encoding))
    def read(self, num=-1, encoding='utf-8'):
        "Read string from instrument"
        return self.read_raw(num).decode(encoding).rstrip('\r\n')
    def ask(self, message, num=-1, encoding='utf-8'):
        "Write then read string"
        if type(message) is tuple or type(message) is list:
            # recursive call for a list of commands
            val = list()
            for message_i in message:
                val.append(self.ask(message_i, num, encoding))
            return val
        self.write(message, encoding)
        return self.read(num, encoding)
    def read_stb(self):
        "Read status byte"
        raise NotImplementedError()
    def trigger(self):
        "Send trigger command"
        self.write("*TRG")
    def clear(self):
        "Send clear command"
        self.write("*CLS")
    def remote(self):
        "Send remote command"
        raise NotImplementedError()
    def local(self):
        "Send local command"
        raise NotImplementedError()
    def lock(self):
        "Send lock command"
        raise NotImplementedError()
    def unlock(self):
        "Send unlock command"
        raise NotImplementedError()
| |
from django import forms
from django.utils.datastructures import MultiValueDict
from . labels import get_label_choices
from . models import (
BinaryQuestion,
BinaryResponse,
LikertQuestion,
LikertResponse,
MultipleChoiceQuestion,
MultipleChoiceResponse,
OpenEndedQuestion,
OpenEndedResponse,
SurveyResult
)
class ArrayFieldSelectMultiple(forms.SelectMultiple):
    """This is a Form Widget for use with a Postgres ArrayField. It implements
    a multi-select interface that can be given a set of `choices`.
    You can provide a `delimiter` keyword argument to specify the delimeter used.
    """
    def __init__(self, *args, **kwargs):
        # The delimiter is ours; pop it before delegating to SelectMultiple.
        self.delimiter = kwargs.pop("delimiter", ",")
        super(ArrayFieldSelectMultiple, self).__init__(*args, **kwargs)
    def render_options(self, choices, value):
        # The value *should* already be a list, but it may arrive as a
        # delimited string; split it so option pre-selection works.
        if isinstance(value, str):
            value = value.split(self.delimiter)
        return super(ArrayFieldSelectMultiple, self).render_options(choices, value)
    def value_from_datadict(self, data, files, name):
        if not isinstance(data, MultiValueDict):
            return data.get(name, None)
        # Normally, we'd want a list here, but the SimpleArrayField
        # expects to get a string.
        return self.delimiter.join(data.getlist(name))
class BaseQuestionForm(forms.ModelForm):
    """A Base form for all question types. This Form includes the widgets and
    Media definitions for labels (ArrayFields).
    """
    class Meta:
        # Render the ``labels`` ArrayField with the multi-select widget,
        # enhanced client-side by the "chosen" jQuery plugin.
        widgets = {
            "labels": ArrayFieldSelectMultiple(
                choices=get_label_choices(), attrs={'class': 'chosen'}),
        }
    class Media:
        # Static assets required by the "chosen" widget.
        css = {
            "all": ("js/chosen/chosen.min.css", )
        }
        js = ("js/chosen/chosen.jquery.min.js", )
class BinaryQuestionForm(BaseQuestionForm):
    """ModelForm for ``BinaryQuestion`` instances."""
    class Meta(BaseQuestionForm.Meta):
        model = BinaryQuestion
        fields = [
            'order', 'subscale', 'text', 'instructions', 'available',
            'labels', 'instruments',
        ]
class LikertQuestionForm(BaseQuestionForm):
    """ModelForm for ``LikertQuestion`` instances (adds scale/priority)."""
    class Meta(BaseQuestionForm.Meta):
        model = LikertQuestion
        fields = [
            'order', 'subscale', 'text', 'instructions', 'available', 'scale',
            'priority', 'labels', 'instruments'
        ]
class MultipleChoiceQuestionForm(BaseQuestionForm):
    """ModelForm for ``MultipleChoiceQuestion`` instances."""
    class Meta(BaseQuestionForm.Meta):
        model = MultipleChoiceQuestion
        fields = [
            'order', 'subscale', 'text', 'instructions', 'available',
            'labels', 'instruments',
        ]
class OpenEndedQuestionForm(BaseQuestionForm):
    """ModelForm for ``OpenEndedQuestion`` instances (adds input_type)."""
    class Meta(BaseQuestionForm.Meta):
        model = OpenEndedQuestion
        fields = [
            'order', 'subscale', 'input_type', 'text', 'instructions',
            'available', 'labels', 'instruments',
        ]
class SurveyResponseForm(forms.Form):
    """EXPERIMENTAL! This is a dynamically generated form containing all
    questions/options for a given instrument.
    """
    # Question types that answer via a ChoiceField, mapped to their
    # response model (all store the answer on ``selected_option``).
    _CHOICE_RESPONSE_MODELS = {
        "LikertQuestion": LikertResponse,
        "BinaryQuestion": BinaryResponse,
        "MultipleChoiceQuestion": MultipleChoiceResponse,
    }
    def __init__(self, *args, **kwargs):
        # Keep a dict mapping each dynamic field name to its question model
        # and response model so we can create responses on save.
        self._models = {}
        self.instrument = kwargs.pop("instrument")
        if self.instrument is None:
            raise TypeError("SurveyResponseForm requires an instrument argument")
        super(SurveyResponseForm, self).__init__(*args, **kwargs)
        self._build_fields()
    def _build_fields(self):
        """Create one form field per question on the instrument."""
        self.fields = {}
        accepted_question_types = {
            "LikertQuestion", 'BinaryQuestion',
            'MultipleChoiceQuestion', 'OpenEndedQuestion'
        }
        question_types = set(t for t, q in self.instrument.questions)
        # BUGFIX: the original condition was inverted -- it raised the
        # "unsupported" error exactly when every question type WAS supported.
        # Reject only instruments that contain unsupported question types.
        if not question_types.issubset(accepted_question_types):
            msg = ("Only Likert, Binary, Multiple Choice, and Open Ended "
                   "Questions are supported")
            self.cleaned_data = {}  # hack so add_error doesn't fail
            self.add_error(None, msg)
            # raise forms.ValidationError(msg, code="invalid")
        for qtype, question in self.instrument.questions:
            question_key = "question_{0}".format(question.id)
            if qtype in self._CHOICE_RESPONSE_MODELS:
                self._models[question_key] = {
                    'question': question,
                    'response_model': self._CHOICE_RESPONSE_MODELS[qtype],
                    'response_field': 'selected_option',
                }
                self.fields[question_key] = forms.ChoiceField(
                    label=question.text,
                    choices=((o['id'], o['text']) for o in question.options),
                    help_text=question.instructions
                )
            elif qtype == "OpenEndedQuestion":
                self._models[question_key] = {
                    'question': question,
                    'response_model': OpenEndedResponse,
                    'response_field': 'response',
                }
                self.fields[question_key] = forms.CharField(
                    label=question.text,
                    help_text=question.instructions
                )
    def save_responses(self, user):
        """Create a response object per answered question for ``user``,
        then generate and return the SurveyResult objects.

        cleaned_data will look like this:

        {'question_1': 'False',
         'question_2': 'asdf',
         'question_28': '1',
         'question_8': '47'}
        """
        for question_key, value in self.cleaned_data.items():
            # Create a survey response of the appropriate type.
            spec = self._models[question_key]
            kwargs = {
                'user': user,
                'question': spec['question'],
                spec['response_field']: value,
            }
            spec['response_model'].objects.create(**kwargs)
        # create SurveyResult object(s)
        return SurveyResult.objects.create_objects(user, self.instrument)
| |
#!/usr/bin/env python
# system modules
import os
import logging
import random
import cPickle as pck
# libraries
import h5py
import numpy as np
np.seterr(divide='ignore')
try:
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
except ImportError:
logging.warning('scikit-learn not found.')
sklearn_available = False
else:
sklearn_available = True
try:
from vigra.learning import RandomForest as BaseVigraRandomForest
from vigra.__version__ import version as vigra_version
vigra_version = tuple(map(int, vigra_version.split('.')))
except ImportError:
logging.warning('Vigra library not available.')
vigra_available = False
else:
vigra_available = True
# local imports
import iterprogress as ip
from .adaboost import AdaBoost
def h5py_stack(fn):
    """Load the 'stack' dataset of an HDF5 file into a numpy array.

    Any error is printed and re-raised so callers see the original traceback.
    """
    try:
        a = np.array(h5py.File(fn, 'r')['stack'])
    except Exception as except_inst:
        # Parenthesized call works under both Python 2 and Python 3 (the
        # bare ``print`` statement is a syntax error on Python 3).
        print(except_inst)
        raise
    return a
def load_classifier(fn):
    """Load a classifier previously saved to disk, given a filename.
    Supported classifier types are:
    - scikit-learn classifiers saved using either pickle or joblib persistence
    - vigra random forest classifiers saved in HDF5 format
    Parameters
    ----------
    fn : string
        Filename in which the classifier is stored.
    Returns
    -------
    cl : classifier object
        cl is one of the supported classifier types; these support at least
        the standard scikit-learn interface of `fit()` and `predict_proba()`
    """
    if not os.path.exists(fn):
        raise IOError("No such file or directory: '%s'" % fn)
    # Try plain pickle first. Pickle streams are binary, so the file must be
    # opened in 'rb' mode: text mode corrupts the stream on Windows under
    # Python 2 and fails outright under Python 3.
    try:
        with open(fn, 'rb') as f:
            cl = pck.load(f)
        return cl
    except pck.UnpicklingError:
        pass
    # Fall back to joblib persistence (used by scikit-learn).
    if sklearn_available:
        try:
            cl = joblib.load(fn)
            return cl
        except KeyError:
            pass
    # Finally, try a vigra random forest stored as HDF5.
    if vigra_available:
        cl = VigraRandomForest()
        try:
            cl.load_from_disk(fn)
            return cl
        except IOError:
            pass
        except RuntimeError:
            pass
    raise IOError("File '%s' does not appear to be a valid classifier file"
                  % fn)
def save_classifier(cl, fn, use_joblib=True, **kwargs):
    """Save a classifier to disk.
    Parameters
    ----------
    cl : classifier object
        Pickleable object or a classify.VigraRandomForest object.
    fn : string
        Writeable path/filename.
    use_joblib : bool, optional
        Whether to prefer joblib persistence to pickle.
    kwargs : keyword arguments
        Keyword arguments to be passed on to either `pck.dump` or
        `joblib.dump`.
    Returns
    -------
    None
    Notes
    -----
    For joblib persistence, `compress=3` is the default.
    """
    if isinstance(cl, VigraRandomForest):
        cl.save_to_disk(fn)
    elif use_joblib and sklearn_available:
        # ``dict.has_key`` was removed in Python 3; ``in`` works everywhere.
        if 'compress' not in kwargs:
            kwargs['compress'] = 3
        joblib.dump(cl, fn, **kwargs)
    else:
        # Pickle streams are binary; 'w' (text mode) corrupts them on
        # Windows under Python 2 and fails under Python 3.
        with open(fn, 'wb') as f:
            pck.dump(cl, f, protocol=kwargs.get('protocol', -1))
def get_classifier(name='random forest', *args, **kwargs):
    """Return a new classifier instance selected by a human-readable name.

    Parameters
    ----------
    name : string, optional
        Classifier name, matched case-insensitively. Any name containing
        both 'random' and 'forest' selects a random forest.
    *args, **kwargs :
        Passed through to the selected classifier's constructor.

    Returns
    -------
    classifier object

    Raises
    ------
    NotImplementedError
        If the named classifier is unavailable or unknown.
    """
    name = name.lower()
    is_random_forest = name.find('random') > -1 and name.find('forest') > -1
    if vigra_available and is_random_forest:
        return VigraRandomForest(*args, **kwargs)
    elif sklearn_available and is_random_forest:
        return DefaultRandomForest(*args, **kwargs)
    else:
        # bug fix: the original never interpolated `name`, so the raised
        # message contained a literal "%s"
        raise NotImplementedError('Classifier "%s" is either not installed '
                                  'or not implemented in Ray.' % name)
class DefaultRandomForest(RandomForestClassifier):
    """Random forest with project-default hyperparameters.

    Defaults (100 trees, entropy criterion, max depth 20, no bootstrap)
    are applied only when the caller did not supply the corresponding
    positional or keyword argument, so any explicit setting wins.
    """
    def __init__(self, *args, **kwargs):
        # `dict.has_key` is Python-2-only; `in` works on both 2 and 3.
        # The len(args) guards avoid clobbering positionally-passed values.
        if len(args) < 1 and 'n_estimators' not in kwargs:
            kwargs['n_estimators'] = 100
        if len(args) < 2 and 'criterion' not in kwargs:
            kwargs['criterion'] = 'entropy'
        if len(args) < 3 and 'max_depth' not in kwargs:
            kwargs['max_depth'] = 20
        if 'bootstrap' not in kwargs:
            kwargs['bootstrap'] = False
        super(DefaultRandomForest, self).__init__(*args, **kwargs)
class VigraRandomForest(object):
    """Wrapper around vigra's random forest exposing a scikit-learn-style
    `fit` / `predict_proba` / `predict` interface.

    Parameters
    ----------
    ntrees : int, optional
        Number of trees in the forest.
    use_feature_importance : bool, optional
        If True, train with feature selection and store per-feature
        importances on the instance as `feature_importance`.
    sample_classes_individually : bool, optional
        Passed through to vigra's random forest constructor.
    """
    def __init__(self, ntrees=255, use_feature_importance=False,
            sample_classes_individually=False):
        self.rf = BaseVigraRandomForest(treeCount=ntrees,
            sample_classes_individually=sample_classes_individually)
        self.use_feature_importance = use_feature_importance
        self.sample_classes_individually = sample_classes_individually

    def fit(self, features, labels):
        """Train the forest; the out-of-bag error is kept in `self.oob`."""
        features = self.check_features_vector(features)
        labels = self.check_labels_vector(labels)
        if self.use_feature_importance:
            self.oob, self.feature_importance = \
                self.rf.learnRFWithFeatureSelection(features, labels)
        else:
            self.oob = self.rf.learnRF(features, labels)
        return self

    def predict_proba(self, features):
        """Return class probability estimates for each feature vector."""
        features = self.check_features_vector(features)
        return self.rf.predictProbabilities(features)

    def predict(self, features):
        """Return the predicted class label for each feature vector."""
        features = self.check_features_vector(features)
        return self.rf.predictLabels(features)

    def check_features_vector(self, features):
        """Coerce `features` to the float32, 2D layout vigra expects."""
        if features.dtype != np.float32:
            features = features.astype(np.float32)
        if features.ndim == 1:
            # a single sample becomes a one-row matrix
            features = features[np.newaxis, :]
        return features

    def check_labels_vector(self, labels):
        """Coerce `labels` to a non-negative uint32 column vector.

        A single distinct negative label (with no zeros present) is mapped
        to 0 in place; otherwise all labels are shifted so the smallest
        becomes 0 before the unsigned cast.
        """
        if labels.dtype != np.uint32:
            if len(np.unique(labels[labels < 0])) == 1 \
                and not (labels==0).any():
                # e.g. {-1, 1}-style labels: map the negative class to 0
                labels[labels < 0] = 0
            else:
                # bug fix: the original added the minimum, which pushes
                # negative labels further below zero before the unsigned
                # cast; subtracting anchors the smallest label at 0
                labels = labels - labels.min()
            labels = labels.astype(np.uint32)
        labels = labels.reshape((labels.size, 1))
        return labels

    def save_to_disk(self, fn, rfgroupname='rf', overwrite=True):
        """Write the forest and any fitted metadata to an HDF5 file."""
        self.rf.writeHDF5(fn, rfgroupname, overwrite)
        attr_list = ['oob', 'feature_importance', 'use_feature_importance',
            'feature_description']
        # append mode: the forest itself was just written above
        f = h5py.File(fn, 'a')
        try:
            for attr in attr_list:
                if hasattr(self, attr):
                    f[attr] = getattr(self, attr)
        finally:
            f.close()  # bug fix: the handle was previously leaked

    def load_from_disk(self, fn, rfgroupname='rf'):
        """Load a forest and metadata previously saved by `save_to_disk`."""
        self.rf = BaseVigraRandomForest(str(fn), rfgroupname)
        f = h5py.File(fn, 'r')
        try:
            groups = []
            f.visit(groups.append)
            # everything outside the forest group is restored as metadata
            attrs = [g for g in groups if not g.startswith(rfgroupname)]
            for attr in attrs:
                setattr(self, attr, np.array(f[attr]))
        finally:
            f.close()  # bug fix: the handle was previously leaked
def read_rf_info(fn):
    """Read the out-of-bag error and feature importance arrays stored
    alongside a saved vigra random forest.

    Parameters
    ----------
    fn : string
        Path to the HDF5 file written by `VigraRandomForest.save_to_disk`.

    Returns
    -------
    [oob, feature_importance] : list of np.ndarray
    """
    # bug fix: open read-only (the default mode could create/modify the
    # file) and close the handle instead of leaking it
    f = h5py.File(fn, 'r')
    try:
        return [np.array(f['oob']), np.array(f['feature_importance'])]
    finally:
        f.close()
def concatenate_data_elements(alldata):
    """Return one big learning set from a list of learning sets.
    A learning set is a list/tuple of length 4 containing features, labels,
    weights, and node merge history.

    Parameters
    ----------
    alldata : list of (features, labels, weights, history) tuples

    Returns
    -------
    list of np.ndarray
        The four element-wise concatenations, in the same order.
    """
    # Materialize as a list: identical to map() under Python 2, and keeps
    # the result indexable/re-iterable under Python 3 as well.
    return [np.concatenate(elems) for elems in zip(*alldata)]
def unique_learning_data_elements(alldata):
    """Merge learning sets and drop duplicate feature vectors.

    Each learning set is a (features, labels, weights, history) tuple.
    For every distinct feature vector, the row at its first occurrence
    (as reported by np.unique's return_index) supplies the surviving
    label, weight and history.
    """
    # accept a single learning set as well as a list of them
    if type(alldata[0]) not in (list, tuple): alldata = [alldata]
    f, l, w, h = concatenate_data_elements(alldata)
    # view each feature row as one fixed-width byte string so np.unique
    # can deduplicate whole rows in a single pass
    af = f.view('|S%d'%(f.itemsize*(len(f[0]))))
    _, uids, iids = np.unique(af, return_index=True, return_inverse=True)
    # report how often feature vectors repeat (diagnostic only)
    bcs = np.bincount(iids)
    logging.debug(
        'repeat feature vec min %d, mean %.2f, median %.2f, max %d.' %
        (bcs.min(), np.mean(bcs), np.median(bcs), bcs.max())
    )
    def get_uniques(ar): return ar[uids]
    return map(get_uniques, [f, l, w, h])
def sample_training_data(features, labels, num_samples=None):
    """Get a random sample from a classification training dataset.

    Parameters
    ----------
    features: np.ndarray [M x N]
        The M (number of samples) by N (number of features) feature matrix.
    labels: np.ndarray [M] or [M x 1]
        The training label for each feature vector.
    num_samples: int, optional
        The size of the training sample to draw. Return full dataset if `None`
        or if num_samples >= M.

    Returns
    -------
    feat: np.ndarray [num_samples x N]
        The sampled feature vectors.
    lab: np.ndarray [num_samples] or [num_samples x 1]
        The sampled training labels
    """
    total = len(features)
    # no sampling requested, or the request covers everything: pass through
    if num_samples is None or num_samples >= total:
        return features, labels
    chosen = random.sample(range(total), num_samples)
    return features[chosen], labels[chosen]
def save_training_data_to_disk(data, fn, names=None, info='N/A'):
    """Write a learning set to an HDF5 file, one dataset per element.

    Parameters
    ----------
    data : sequence of arrays
        Typically (features, labels, weights, history).
    fn : string
        Output filename (overwritten if present).
    names : list of string, optional
        Dataset name for each element of `data`; defaults to the standard
        four learning-set names.
    info : string, optional
        Free-form description stored as a file attribute.
    """
    if names is None:
        names = ['features', 'labels', 'weights', 'history']
    out = h5py.File(fn, 'w')
    for name, element in zip(names, data):
        out[name] = element
    out.attrs['info'] = info
    out.close()
def load_training_data_from_disk(fn, names=None, info='N/A'):
    """Load a learning set previously written by save_training_data_to_disk.

    Parameters
    ----------
    fn : string
        Input HDF5 filename.
    names : list of string, optional
        Dataset names to read, in order; defaults to the standard four
        learning-set names.
    info : string, optional
        Unused; kept for signature compatibility with the save function.

    Returns
    -------
    data : list of np.ndarray
        One array per requested name, in the same order.
    """
    fin = h5py.File(fn, 'r')
    try:
        # materialize every dataset before the file is closed
        data = [np.array(fin[name]) for name in names
                if True] if names is not None else None
        if data is None:
            names = ['features', 'labels', 'weights', 'history']
            data = [np.array(fin[name]) for name in names]
    finally:
        fin.close()  # bug fix: the original leaked the open file handle
    return data
def boundary_overlap_threshold(boundary_idxs, gt, tol_false, tol_true):
    """Return -1, 0 or 1 by thresholding overlaps between boundaries.

    Pixels where the flattened ground truth is 0 count as true boundary;
    the fraction of `boundary_idxs` landing on such pixels is compared
    against the two tolerances (strictly greater-than).
    """
    num_idxs = len(boundary_idxs)
    # gt == 0 marks boundary, so invert the boolean view
    on_gt_boundary = 1 - gt.ravel()[boundary_idxs].astype(bool)
    fraction_true = on_gt_boundary.astype(np.double).sum() / num_idxs
    if fraction_true > tol_true:
        return 1
    if fraction_true > tol_false:
        return 0
    return -1
def make_thresholded_boundary_overlap_loss(tol_false, tol_true):
    """Return a merge loss function based on boundary overlaps.

    The returned `loss(g, n1, n2, gt)` looks up the boundary pixels of the
    (n1, n2) edge in graph `g` and scores them against the ground truth
    with the captured tolerances.
    """
    def loss(g, n1, n2, gt):
        edge_boundary = list(g[n1][n2]['boundary'])
        return boundary_overlap_threshold(edge_boundary, gt,
                                          tol_false, tol_true)
    return loss
def label_merges(g, merge_history, feature_map_function, gt, loss_function):
    """Replay an agglomeration history and label the loss of each merge.

    Parameters
    ----------
    g : agglomeration graph (mutated: nodes are merged in history order)
    merge_history : sequence of (n1, n2) node pairs
    feature_map_function : callable(g, n1, n2) -> feature vector
    gt : np.ndarray ground-truth volume
    loss_function : callable(g, n1, n2, gt) -> scalar loss

    Returns
    -------
    (features, labels, labeled_image) : per-merge feature matrix, per-merge
    loss labels, and an image of merged boundaries painted with 2 + loss.
    """
    labels = np.zeros(len(merge_history))
    # probe one edge to size the feature matrix (Python 2 iterator .next())
    number_of_features = feature_map_function(g, *g.edges_iter().next()).size
    features = np.zeros((len(merge_history), number_of_features))
    labeled_image = np.zeros(gt.shape, np.double)
    for i, nodes in enumerate(ip.with_progress(
                            merge_history, title='Replaying merge history...',
                            pbar=ip.StandardProgressBar())):
        n1, n2 = nodes
        # features must be computed before the merge mutates the graph
        features[i,:] = feature_map_function(g, n1, n2)
        labels[i] = loss_function(g, n1, n2, gt)
        # paint this edge's boundary with 2+loss so -1/0/1 map to 1/2/3
        labeled_image.ravel()[list(g[n1][n2]['boundary'])] = 2+labels[i]
        g.merge_nodes(n1,n2)
    return features, labels, labeled_image
def select_classifier(cname, features=None, labels=None, **kwargs):
    """Build (and optionally train) a classifier selected by name prefix.

    Parameters
    ----------
    cname : string
        Any prefix of 'svm', 'logistic-regression', 'linear-regression',
        'random-forest' or 'adaboost'.
    features, labels : arrays, optional
        If both are given, the classifier is fit before being returned.
    kwargs : keyword arguments
        Passed to the classifier constructor (svm/adaboost) and to `fit`.

    Returns
    -------
    c : classifier object

    Raises
    ------
    ValueError
        If `cname` matches no known classifier.
    """
    if 'svm'.startswith(cname):
        # drop class_weight if present; the original unconditional `del`
        # raised KeyError when the key was absent
        kwargs.pop('class_weight', None)
        c = SVC(probability=True, **kwargs)
    elif 'logistic-regression'.startswith(cname):
        c = LogisticRegression()
    elif 'linear-regression'.startswith(cname):
        c = LinearRegression()
    elif 'random-forest'.startswith(cname):
        if sklearn_available:
            c = DefaultRandomForest()
        elif vigra_available:
            c = VigraRandomForest()
        else:
            raise RuntimeError('tried to use random forest classifier, ' +
                'but neither scikit-learn nor vigra are available.')
    elif 'adaboost'.startswith(cname):
        c = AdaBoost(**kwargs)
    else:
        # bug fix: the original fell through here and crashed later with
        # an UnboundLocalError on `c`
        raise ValueError('Unknown classifier name: %s' % cname)
    if features is not None and labels is not None:
        c = c.fit(features, labels, **kwargs)
    return c
| |
import cgi
from eventlet import greenthread
import eventlet
import errno
import os
import socket
import sys
from tests import skipped, LimitedTestCase, skip_with_pyevent
from unittest import main
from eventlet import greenio
from eventlet import event
from eventlet.green import socket as greensocket
from eventlet import wsgi
from eventlet.support import get_errno
from tests import find_command
httplib = eventlet.import_patched('httplib')
certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt')
private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key')
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def hello_world(env, start_response):
    """Minimal WSGI app used by most tests.

    Responds 404/'not found' when PATH_INFO is exactly 'notexist', and
    200/'hello world' for every other path.
    """
    missing = env['PATH_INFO'] == 'notexist'
    status = '404 Not Found' if missing else '200 OK'
    start_response(status, [('Content-type', 'text/plain')])
    return ["not found"] if missing else ["hello world"]
def chunked_app(env, start_response):
    """Generator app: yields three short pieces so the server must use
    chunked transfer encoding (no Content-Length is computable)."""
    start_response('200 OK', [('Content-type', 'text/plain')])
    for piece in ("this", "is", "chunked"):
        yield piece
def big_chunks(env, start_response):
    """Generator app yielding ten identical 8192-character lines."""
    start_response('200 OK', [('Content-type', 'text/plain')])
    payload = 'a' * 8192
    for _ in range(10):
        yield payload
def use_write(env, start_response):
    """App exercising the legacy write() callable from start_response.

    /a writes with an explicit Content-Length header; /b writes without
    one (so the server must fall back to chunked encoding). Both paths
    return an empty iterable.
    """
    path = env['PATH_INFO']
    if path == '/a':
        write = start_response('200 OK', [('Content-type', 'text/plain'),
                                          ('Content-Length', '5')])
        write('abcde')
    if path == '/b':
        write = start_response('200 OK', [('Content-type', 'text/plain')])
        write('abcde')
    return []
def chunked_post(env, start_response):
    """Echo the request body, reading it a different way per path:
    /a in one big read, /b in 4 KiB chunks, /c one character at a time.
    Other paths return None (no body).
    """
    start_response('200 OK', [('Content-type', 'text/plain')])
    body = env['wsgi.input']
    path = env['PATH_INFO']
    if path == '/a':
        return [body.read()]
    if path == '/b':
        return list(iter(lambda: body.read(4096), ''))
    if path == '/c':
        return list(iter(lambda: body.read(1), ''))
def already_handled(env, start_response):
    """App returning eventlet's ALREADY_HANDLED sentinel.

    NOTE(review): per its use here, wsgi.ALREADY_HANDLED appears to tell
    the server the app took over the response itself — confirm against
    eventlet's wsgi documentation.
    """
    start_response('200 OK', [('Content-type', 'text/plain')])
    return wsgi.ALREADY_HANDLED
class Site(object):
    """Callable WSGI site whose application can be swapped per-test."""
    def __init__(self):
        # default application; tests overwrite self.application as needed
        self.application = hello_world
    def __call__(self, env, start_response):
        return self.application(env, start_response)
class IterableApp(object):
    """Configurable WSGI app that records the environ it was called with.

    If `send_start_response` is true, start_response is invoked before
    returning `return_val` unchanged.
    """
    def __init__(self, send_start_response=False, return_val=wsgi.ALREADY_HANDLED):
        self.send_start_response = send_start_response
        self.return_val = return_val
        # last environ seen by __call__
        self.env = {}
    def __call__(self, env, start_response):
        self.env = env
        if self.send_start_response:
            start_response('200 OK', [('Content-type', 'text/plain')])
        return self.return_val
class IterableSite(Site):
    """Site that re-yields its application's result, forcing the server to
    consume the response as an iterator."""
    def __call__(self, env, start_response):
        it = self.application(env, start_response)
        for i in it:
            yield i
CONTENT_LENGTH = 'content-length'
"""
HTTP/1.1 200 OK
Date: foo
Content-length: 11
hello world
"""
class ConnectionClosed(Exception):
    """Raised by read_http when the peer closes before sending a status
    line (or the read aborts with the Windows connection-aborted errno)."""
    pass
def read_http(sock):
    """Read one HTTP response from `sock`.

    Returns
    -------
    (response_line, headers, body) : tuple
        `headers` maps lower-cased header names to values. The body is
        read to Content-Length when that header is present, otherwise
        until EOF.

    Raises
    ------
    ConnectionClosed
        If the peer closed before sending a status line, or errno 10053
        (Windows connection-aborted) surfaced while reading it.
    """
    fd = sock.makefile()
    try:
        response_line = fd.readline()
    except socket.error as exc:  # modernized from Py2-only `except E, e`
        if get_errno(exc) == 10053:
            raise ConnectionClosed
        raise
    if not response_line:
        raise ConnectionClosed
    header_lines = []
    while True:
        line = fd.readline()
        if line == '\r\n':
            break
        else:
            header_lines.append(line)
    headers = dict()
    for x in header_lines:
        x = x.strip()
        if not x:
            continue
        key, value = x.split(': ', 1)
        # a duplicated header would silently shadow the earlier value
        assert key.lower() not in headers, "%s header duplicated" % key
        headers[key.lower()] = value
    if CONTENT_LENGTH in headers:
        num = int(headers[CONTENT_LENGTH])
        body = fd.read(num)
    else:
        # read until EOF
        body = fd.read()
    return response_line, headers, body
class _TestBase(LimitedTestCase):
    """Shared fixture: spawns a wsgi server per test and kills it after."""
    def setUp(self):
        super(_TestBase, self).setUp()
        # server log is captured in-memory so tests can assert on it
        self.logfile = StringIO()
        self.site = Site()
        self.killer = None
        self.set_site()
        self.spawn_server()
    def tearDown(self):
        greenthread.kill(self.killer)
        eventlet.sleep(0)
        super(_TestBase, self).tearDown()
    def spawn_server(self, **kwargs):
        """Spawns a new wsgi server with the given arguments.
        Sets self.port to the port of the server, and self.killer is the greenlet
        running it.
        Kills any previously-running server."""
        eventlet.sleep(0)  # give previous server a chance to start
        if self.killer:
            greenthread.kill(self.killer)
            eventlet.sleep(0)  # give killer a chance to kill
        new_kwargs = dict(max_size=128,
                          log=self.logfile,
                          site=self.site)
        new_kwargs.update(kwargs)
        if 'sock' not in new_kwargs:
            new_kwargs['sock'] = eventlet.listen(('localhost', 0))
        self.port = new_kwargs['sock'].getsockname()[1]
        self.killer = eventlet.spawn_n(
            wsgi.server,
            **new_kwargs)
    def set_site(self):
        # subclasses must install self.site before spawn_server runs
        raise NotImplementedError
class TestHttpd(_TestBase):
    def set_site(self):
        # install the default hello_world site for this test class
        self.site = Site()
    def test_001_server(self):
        """Basic GET over HTTP/1.0 returns the hello world body."""
        sock = eventlet.connect(
            ('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('GET / HTTP/1.0\r\nHost: localhost\r\n\r\n')
        fd.flush()
        result = fd.read()
        fd.close()
        ## The server responds with the maximum version it supports
        self.assert_(result.startswith('HTTP'), result)
        self.assert_(result.endswith('hello world'))
    def test_002_keepalive(self):
        """Two HTTP/1.1 requests succeed on a single kept-alive socket."""
        sock = eventlet.connect(
            ('localhost', self.port))
        fd = sock.makefile('w')
        fd.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
        fd.flush()
        read_http(sock)
        fd.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
        fd.flush()
        read_http(sock)
        fd.close()
    def test_003_passing_non_int_to_read(self):
        """read() with a non-int argument raises TypeError, not a hang."""
        # This should go in greenio_test
        sock = eventlet.connect(
            ('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
        fd.flush()
        # timeout guards against read() blocking instead of raising
        cancel = eventlet.Timeout(1, RuntimeError)
        self.assertRaises(TypeError, fd.read, "This shouldn't work")
        cancel.cancel()
        fd.close()
    def test_004_close_keepalive(self):
        """Connection: close ends keep-alive; a further request fails."""
        sock = eventlet.connect(
            ('localhost', self.port))
        fd = sock.makefile('w')
        fd.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
        fd.flush()
        read_http(sock)
        fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
        fd.flush()
        read_http(sock)
        fd.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
        fd.flush()
        self.assertRaises(ConnectionClosed, read_http, sock)
        fd.close()
    @skipped
    def test_005_run_apachebench(self):
        """Smoke-test under apachebench load (skipped by default)."""
        url = 'http://localhost:12346/'
        # ab is apachebench
        from eventlet.green import subprocess
        subprocess.call([find_command('ab'),
                         '-c','64','-n','1024', '-k', url],
                        stdout=subprocess.PIPE)
    def test_006_reject_long_urls(self):
        """An overlong request URL is rejected with 414."""
        sock = eventlet.connect(
            ('localhost', self.port))
        path_parts = []
        for ii in range(3000):
            path_parts.append('path')
        path = '/'.join(path_parts)
        request = 'GET /%s HTTP/1.0\r\nHost: localhost\r\n\r\n' % path
        fd = sock.makefile('rw')
        fd.write(request)
        fd.flush()
        result = fd.readline()
        if result:
            # windows closes the socket before the data is flushed,
            # so we never get anything back
            status = result.split(' ')[1]
            self.assertEqual(status, '414')
        fd.close()
    def test_007_get_arg(self):
        """POST body is readable and parseable; trailing junk is ignored."""
        # define a new handler that does a get_arg as well as a read_body
        def new_app(env, start_response):
            body = env['wsgi.input'].read()
            a = cgi.parse_qs(body).get('a', [1])[0]
            start_response('200 OK', [('Content-type', 'text/plain')])
            return ['a is %s, body is %s' % (a, body)]
        self.site.application = new_app
        sock = eventlet.connect(
            ('localhost', self.port))
        request = '\r\n'.join((
            'POST / HTTP/1.0',
            'Host: localhost',
            'Content-Length: 3',
            '',
            'a=a'))
        fd = sock.makefile('w')
        fd.write(request)
        fd.flush()
        # send some junk after the actual request
        fd.write('01234567890123456789')
        reqline, headers, body = read_http(sock)
        self.assertEqual(body, 'a is a, body is a=a')
        fd.close()
    def test_008_correctresponse(self):
        """A 404 in between does not corrupt later 200 responses."""
        sock = eventlet.connect(
            ('localhost', self.port))
        fd = sock.makefile('w')
        fd.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
        fd.flush()
        response_line_200,_,_ = read_http(sock)
        fd.write('GET /notexist HTTP/1.1\r\nHost: localhost\r\n\r\n')
        fd.flush()
        response_line_404,_,_ = read_http(sock)
        fd.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
        fd.flush()
        response_line_test,_,_ = read_http(sock)
        self.assertEqual(response_line_200,response_line_test)
        fd.close()
    def test_009_chunked_response(self):
        """HTTP/1.1 generator responses are sent with chunked encoding."""
        self.site.application = chunked_app
        sock = eventlet.connect(
            ('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
        fd.flush()
        self.assert_('Transfer-Encoding: chunked' in fd.read())
    def test_010_no_chunked_http_1_0(self):
        """Chunked encoding is never used for HTTP/1.0 clients."""
        self.site.application = chunked_app
        sock = eventlet.connect(
            ('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('GET / HTTP/1.0\r\nHost: localhost\r\nConnection: close\r\n\r\n')
        fd.flush()
        self.assert_('Transfer-Encoding: chunked' not in fd.read())
    def test_011_multiple_chunks(self):
        """A multi-yield app produces several chunks and a proper trailer."""
        self.site.application = big_chunks
        sock = eventlet.connect(
            ('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
        fd.flush()
        headers = ''
        while True:
            line = fd.readline()
            if line == '\r\n':
                break
            else:
                headers += line
        self.assert_('Transfer-Encoding: chunked' in headers)
        chunks = 0
        # each chunk is "<hex length>\r\n<data>\r\n"; a 0 length ends the body
        chunklen = int(fd.readline(), 16)
        while chunklen:
            chunks += 1
            chunk = fd.read(chunklen)
            fd.readline()  # CRLF
            chunklen = int(fd.readline(), 16)
        self.assert_(chunks > 1)
        response = fd.read()
        # Require a CRLF to close the message body
        self.assertEqual(response, '\r\n')
    def test_012_ssl_server(self):
        """POST over SSL round-trips the request body."""
        def wsgi_app(environ, start_response):
            start_response('200 OK', {})
            return [environ['wsgi.input'].read()]
        certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt')
        private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key')
        server_sock = eventlet.wrap_ssl(eventlet.listen(('localhost', 0)),
                                        certfile=certificate_file,
                                        keyfile=private_key_file,
                                        server_side=True)
        self.spawn_server(sock=server_sock, site=wsgi_app)
        sock = eventlet.connect(('localhost', self.port))
        sock = eventlet.wrap_ssl(sock)
        sock.write('POST /foo HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\nContent-length:3\r\n\r\nabc')
        result = sock.read(8192)
        self.assertEquals(result[-3:], 'abc')
    def test_013_empty_return(self):
        """An empty-body SSL response still terminates with CRLFCRLF."""
        def wsgi_app(environ, start_response):
            start_response("200 OK", [])
            return [""]
        certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt')
        private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key')
        server_sock = eventlet.wrap_ssl(eventlet.listen(('localhost', 0)),
                                        certfile=certificate_file,
                                        keyfile=private_key_file,
                                        server_side=True)
        self.spawn_server(sock=server_sock, site=wsgi_app)
        sock = eventlet.connect(('localhost', server_sock.getsockname()[1]))
        sock = eventlet.wrap_ssl(sock)
        sock.write('GET /foo HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
        result = sock.read(8192)
        self.assertEquals(result[-4:], '\r\n\r\n')
    def test_014_chunked_post(self):
        """Chunked request bodies decode correctly via read(), read(n)
        and read(1) (paths /a, /b, /c of chunked_post)."""
        self.site.application = chunked_post
        sock = eventlet.connect(('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('PUT /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                 'Transfer-Encoding: chunked\r\n\r\n'
                 '2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n')
        fd.flush()
        while True:
            if fd.readline() == '\r\n':
                break
        response = fd.read()
        self.assert_(response == 'oh hai', 'invalid response %s' % response)
        sock = eventlet.connect(('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('PUT /b HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                 'Transfer-Encoding: chunked\r\n\r\n'
                 '2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n')
        fd.flush()
        while True:
            if fd.readline() == '\r\n':
                break
        response = fd.read()
        self.assert_(response == 'oh hai', 'invalid response %s' % response)
        sock = eventlet.connect(('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('PUT /c HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n'
                 'Transfer-Encoding: chunked\r\n\r\n'
                 '2\r\noh\r\n4\r\n hai\r\n0\r\n\r\n')
        fd.flush()
        while True:
            if fd.readline() == '\r\n':
                break
        response = fd.read(8192)
        self.assert_(response == 'oh hai', 'invalid response %s' % response)
    def test_015_write(self):
        """write() with Content-Length keeps it; without, server chunks."""
        self.site.application = use_write
        sock = eventlet.connect(('localhost', self.port))
        fd = sock.makefile('w')
        fd.write('GET /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
        fd.flush()
        response_line, headers, body = read_http(sock)
        self.assert_('content-length' in headers)
        sock = eventlet.connect(('localhost', self.port))
        fd = sock.makefile('w')
        fd.write('GET /b HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
        fd.flush()
        response_line, headers, body = read_http(sock)
        self.assert_('transfer-encoding' in headers)
        self.assert_(headers['transfer-encoding'] == 'chunked')
    def test_016_repeated_content_length(self):
        """
        content-length header was being doubled up if it was set in
        start_response and could also be inferred from the iterator
        """
        def wsgi_app(environ, start_response):
            start_response('200 OK', [('Content-Length', '7')])
            return ['testing']
        self.site.application = wsgi_app
        sock = eventlet.connect(('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('GET /a HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
        fd.flush()
        header_lines = []
        while True:
            line = fd.readline()
            if line == '\r\n':
                break
            else:
                header_lines.append(line)
        # exactly one Content-Length header must appear
        self.assertEquals(1, len([l for l in header_lines
                if l.lower().startswith('content-length')]))
    def test_017_ssl_zeroreturnerror(self):
        """A client shutting down an SSL connection early must not crash
        the request-processing path."""
        def server(sock, site, log):
            try:
                serv = wsgi.Server(sock, sock.getsockname(), site, log)
                client_socket = sock.accept()
                serv.process_request(client_socket)
                return True
            except:
                import traceback
                traceback.print_exc()
                return False
        def wsgi_app(environ, start_response):
            start_response('200 OK', [])
            return [environ['wsgi.input'].read()]
        certificate_file = os.path.join(os.path.dirname(__file__), 'test_server.crt')
        private_key_file = os.path.join(os.path.dirname(__file__), 'test_server.key')
        sock = eventlet.wrap_ssl(eventlet.listen(('localhost', 0)),
                                 certfile=certificate_file,
                                 keyfile=private_key_file,
                                 server_side=True)
        server_coro = eventlet.spawn(server, sock, wsgi_app, self.logfile)
        client = eventlet.connect(('localhost', sock.getsockname()[1]))
        client = eventlet.wrap_ssl(client)
        client.write('X') # non-empty payload so that SSL handshake occurs
        greenio.shutdown_safe(client)
        client.close()
        success = server_coro.wait()
        self.assert_(success)
    def test_018_http_10_keepalive(self):
        """HTTP/1.0 with Connection: keep-alive keeps the socket open."""
        # verify that if an http/1.0 client sends connection: keep-alive
        # that we don't close the connection
        sock = eventlet.connect(
            ('localhost', self.port))
        fd = sock.makefile('w')
        fd.write('GET / HTTP/1.0\r\nHost: localhost\r\nConnection: keep-alive\r\n\r\n')
        fd.flush()
        response_line, headers, body = read_http(sock)
        self.assert_('connection' in headers)
        self.assertEqual('keep-alive', headers['connection'])
        # repeat request to verify connection is actually still open
        fd.write('GET / HTTP/1.0\r\nHost: localhost\r\nConnection: keep-alive\r\n\r\n')
        fd.flush()
        response_line, headers, body = read_http(sock)
        self.assert_('connection' in headers)
        self.assertEqual('keep-alive', headers['connection'])
    def test_019_fieldstorage_compat(self):
        """cgi.FieldStorage can consume a chunked wsgi.input stream."""
        def use_fieldstorage(environ, start_response):
            import cgi
            fs = cgi.FieldStorage(fp=environ['wsgi.input'],
                                  environ=environ)
            start_response('200 OK', [('Content-type', 'text/plain')])
            return ['hello!']
        self.site.application = use_fieldstorage
        sock = eventlet.connect(
            ('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('POST / HTTP/1.1\r\n'
                 'Host: localhost\r\n'
                 'Connection: close\r\n'
                 'Transfer-Encoding: chunked\r\n\r\n'
                 '2\r\noh\r\n'
                 '4\r\n hai\r\n0\r\n\r\n')
        fd.flush()
        self.assert_('hello!' in fd.read())
    def test_020_x_forwarded_for(self):
        """X-Forwarded-For chain is logged, unless the option is off."""
        sock = eventlet.connect(('localhost', self.port))
        sock.sendall('GET / HTTP/1.1\r\nHost: localhost\r\nX-Forwarded-For: 1.2.3.4, 5.6.7.8\r\n\r\n')
        sock.recv(1024)
        sock.close()
        self.assert_('1.2.3.4,5.6.7.8,127.0.0.1' in self.logfile.getvalue())
        # turning off the option should work too
        self.logfile = StringIO()
        self.spawn_server(log_x_forwarded_for=False)
        sock = eventlet.connect(('localhost', self.port))
        sock.sendall('GET / HTTP/1.1\r\nHost: localhost\r\nX-Forwarded-For: 1.2.3.4, 5.6.7.8\r\n\r\n')
        sock.recv(1024)
        sock.close()
        self.assert_('1.2.3.4' not in self.logfile.getvalue())
        self.assert_('5.6.7.8' not in self.logfile.getvalue())
        self.assert_('127.0.0.1' in self.logfile.getvalue())
    def test_socket_remains_open(self):
        """Killing the server closes its dup'd socket but not the original
        listener, which can then host a fresh server."""
        greenthread.kill(self.killer)
        server_sock = eventlet.listen(('localhost', 0))
        server_sock_2 = server_sock.dup()
        self.spawn_server(sock=server_sock_2)
        # do a single req/response to verify it's up
        sock = eventlet.connect(('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('GET / HTTP/1.0\r\nHost: localhost\r\n\r\n')
        fd.flush()
        result = fd.read(1024)
        fd.close()
        self.assert_(result.startswith('HTTP'), result)
        self.assert_(result.endswith('hello world'))
        # shut down the server and verify the server_socket fd is still open,
        # but the actual socketobject passed in to wsgi.server is closed
        greenthread.kill(self.killer)
        eventlet.sleep(0) # make the kill go through
        try:
            server_sock_2.accept()
            # shouldn't be able to use this one anymore
        except socket.error, exc:
            self.assertEqual(get_errno(exc), errno.EBADF)
        self.spawn_server(sock=server_sock)
        sock = eventlet.connect(('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('GET / HTTP/1.0\r\nHost: localhost\r\n\r\n')
        fd.flush()
        result = fd.read(1024)
        fd.close()
        self.assert_(result.startswith('HTTP'), result)
        self.assert_(result.endswith('hello world'))
    def test_021_environ_clobbering(self):
        """An app that nulls out required environ keys must not break the
        server's own response handling."""
        def clobberin_time(environ, start_response):
            for environ_var in ['wsgi.version', 'wsgi.url_scheme',
                'wsgi.input', 'wsgi.errors', 'wsgi.multithread',
                'wsgi.multiprocess', 'wsgi.run_once', 'REQUEST_METHOD',
                'SCRIPT_NAME', 'PATH_INFO', 'QUERY_STRING', 'CONTENT_TYPE',
                'CONTENT_LENGTH', 'SERVER_NAME', 'SERVER_PORT',
                'SERVER_PROTOCOL']:
                environ[environ_var] = None
            start_response('200 OK', [('Content-type', 'text/plain')])
            return []
        self.site.application = clobberin_time
        sock = eventlet.connect(('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('GET / HTTP/1.1\r\n'
                 'Host: localhost\r\n'
                 'Connection: close\r\n'
                 '\r\n\r\n')
        fd.flush()
        self.assert_('200 OK' in fd.read())
    def test_022_custom_pool(self):
        """Server accepts a caller-supplied GreenPool."""
        # just test that it accepts the parameter for now
        # TODO: test that it uses the pool and that you can waitall() to
        # ensure that all clients finished
        from eventlet import greenpool
        p = greenpool.GreenPool(5)
        self.spawn_server(custom_pool=p)
        # this stuff is copied from test_001_server, could be better factored
        sock = eventlet.connect(
            ('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('GET / HTTP/1.0\r\nHost: localhost\r\n\r\n')
        fd.flush()
        result = fd.read()
        fd.close()
        self.assert_(result.startswith('HTTP'), result)
        self.assert_(result.endswith('hello world'))
    def test_023_bad_content_length(self):
        """A non-numeric Content-Length yields 400, not a 500 crash."""
        sock = eventlet.connect(
            ('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('GET / HTTP/1.0\r\nHost: localhost\r\nContent-length: argh\r\n\r\n')
        fd.flush()
        result = fd.read()
        fd.close()
        self.assert_(result.startswith('HTTP'), result)
        self.assert_('400 Bad Request' in result)
        self.assert_('500' not in result)
    def test_024_expect_100_continue(self):
        """Expect: 100-continue gets 417 when the app rejects, or an
        interim 100 Continue followed by the final 200."""
        def wsgi_app(environ, start_response):
            if int(environ['CONTENT_LENGTH']) > 1024:
                start_response('417 Expectation Failed', [('Content-Length', '7')])
                return ['failure']
            else:
                text = environ['wsgi.input'].read()
                start_response('200 OK', [('Content-Length', str(len(text)))])
                return [text]
        self.site.application = wsgi_app
        sock = eventlet.connect(('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 1025\r\nExpect: 100-continue\r\n\r\n')
        fd.flush()
        response_line, headers, body = read_http(sock)
        self.assert_(response_line.startswith('HTTP/1.1 417 Expectation Failed'))
        self.assertEquals(body, 'failure')
        fd.write('PUT / HTTP/1.1\r\nHost: localhost\r\nContent-length: 7\r\nExpect: 100-continue\r\n\r\ntesting')
        fd.flush()
        header_lines = []
        while True:
            line = fd.readline()
            if line == '\r\n':
                break
            else:
                header_lines.append(line)
        self.assert_(header_lines[0].startswith('HTTP/1.1 100 Continue'))
        header_lines = []
        while True:
            line = fd.readline()
            if line == '\r\n':
                break
            else:
                header_lines.append(line)
        self.assert_(header_lines[0].startswith('HTTP/1.1 200 OK'))
        self.assertEquals(fd.read(7), 'testing')
        fd.close()
    def test_025_accept_errors(self):
        """accept() errors (listen never called) are surfaced on stderr
        rather than silently looping."""
        from eventlet import debug
        debug.hub_exceptions(True)
        listener = greensocket.socket()
        listener.bind(('localhost', 0))
        # NOT calling listen, to trigger the error
        self.logfile = StringIO()
        self.spawn_server(sock=listener)
        old_stderr = sys.stderr
        try:
            sys.stderr = self.logfile
            eventlet.sleep(0) # need to enter server loop
            try:
                eventlet.connect(('localhost', self.port))
                self.fail("Didn't expect to connect")
            except socket.error, exc:
                self.assertEquals(get_errno(exc), errno.ECONNREFUSED)
            self.assert_('Invalid argument' in self.logfile.getvalue(),
                self.logfile.getvalue())
        finally:
            sys.stderr = old_stderr
            debug.hub_exceptions(False)
    def test_026_log_format(self):
        """Custom log_format template is honored for access logging."""
        self.spawn_server(log_format="HI %(request_line)s HI")
        sock = eventlet.connect(('localhost', self.port))
        sock.sendall('GET /yo! HTTP/1.1\r\nHost: localhost\r\n\r\n')
        sock.recv(1024)
        sock.close()
        self.assert_('\nHI GET /yo! HTTP/1.1 HI\n' in self.logfile.getvalue(), self.logfile.getvalue())
    def test_close_chunked_with_1_0_client(self):
        """Generator body to an HTTP/1.0 client: no chunking, and the
        connection is closed to delimit the body."""
        # verify that if we return a generator from our app
        # and we're not speaking with a 1.1 client, that we
        # close the connection
        self.site.application = chunked_app
        sock = eventlet.connect(('localhost', self.port))
        sock.sendall('GET / HTTP/1.0\r\nHost: localhost\r\nConnection: keep-alive\r\n\r\n')
        response_line, headers, body = read_http(sock)
        self.assertEqual(headers['connection'], 'close')
        self.assertNotEqual(headers.get('transfer-encoding'), 'chunked')
        self.assertEquals(body, "thisischunked")
    def test_026_http_10_nokeepalive(self):
        """With keepalive disabled the server answers Connection: close."""
        # verify that if an http/1.0 client sends connection: keep-alive
        # and the server doesn't accept keep-alives, we close the connection
        self.spawn_server(keepalive=False)
        sock = eventlet.connect(
            ('localhost', self.port))
        sock.sendall('GET / HTTP/1.0\r\nHost: localhost\r\nConnection: keep-alive\r\n\r\n')
        response_line, headers, body = read_http(sock)
        self.assertEqual(headers['connection'], 'close')
    def test_027_keepalive_chunked(self):
        """Multiple chunked PUTs on one kept-alive connection succeed."""
        self.site.application = chunked_post
        sock = eventlet.connect(('localhost', self.port))
        fd = sock.makefile('w')
        fd.write('PUT /a HTTP/1.1\r\nHost: localhost\r\nTransfer-Encoding: chunked\r\n\r\n10\r\n0123456789abcdef\r\n0\r\n\r\n')
        fd.flush()
        read_http(sock)
        fd.write('PUT /b HTTP/1.1\r\nHost: localhost\r\nTransfer-Encoding: chunked\r\n\r\n10\r\n0123456789abcdef\r\n0\r\n\r\n')
        fd.flush()
        read_http(sock)
        fd.write('PUT /c HTTP/1.1\r\nHost: localhost\r\nTransfer-Encoding: chunked\r\n\r\n10\r\n0123456789abcdef\r\n0\r\n\r\n')
        fd.flush()
        read_http(sock)
        fd.write('PUT /a HTTP/1.1\r\nHost: localhost\r\nTransfer-Encoding: chunked\r\n\r\n10\r\n0123456789abcdef\r\n0\r\n\r\n')
        fd.flush()
        read_http(sock)
    def test_028_ssl_handshake_errors(self):
        """A failed SSL handshake (plain HTTP or premature close) must not
        kill or crash the wsgi server loop."""
        errored = [False]
        def server(sock):
            try:
                wsgi.server(sock=sock, site=hello_world, log=self.logfile)
                errored[0] = 'SSL handshake error caused wsgi.server to exit.'
            except greenthread.greenlet.GreenletExit:
                pass
            except Exception, e:
                errored[0] = 'SSL handshake error raised exception %s.' % e
        for data in ('', 'GET /non-ssl-request HTTP/1.0\r\n\r\n'):
            srv_sock = eventlet.wrap_ssl(eventlet.listen(('localhost', 0)),
                                         certfile=certificate_file,
                                         keyfile=private_key_file,
                                         server_side=True)
            port = srv_sock.getsockname()[1]
            g = eventlet.spawn_n(server, srv_sock)
            client = eventlet.connect(('localhost', port))
            if data: # send non-ssl request
                client.sendall(data)
            else: # close sock prematurely
                client.close()
            eventlet.sleep(0) # let context switch back to server
            self.assert_(not errored[0], errored[0])
            # make another request to ensure the server's still alive
            try:
                from eventlet.green import ssl
                client = ssl.wrap_socket(eventlet.connect(('localhost', port)))
                client.write('GET / HTTP/1.0\r\nHost: localhost\r\n\r\n')
                result = client.read()
                self.assert_(result.startswith('HTTP'), result)
                self.assert_(result.endswith('hello world'))
            except ImportError:
                pass # TODO: should test with OpenSSL
            greenthread.kill(g)
    def test_029_posthooks(self):
        """eventlet.posthooks registered by an app run after the response,
        with their positional and keyword arguments applied."""
        posthook1_count = [0]
        posthook2_count = [0]
        def posthook1(env, value, multiplier=1):
            self.assertEquals(env['local.test'], 'test_029_posthooks')
            posthook1_count[0] += value * multiplier
        def posthook2(env, value, divisor=1):
            self.assertEquals(env['local.test'], 'test_029_posthooks')
            posthook2_count[0] += value / divisor
        def one_posthook_app(env, start_response):
            env['local.test'] = 'test_029_posthooks'
            if 'eventlet.posthooks' not in env:
                start_response('500 eventlet.posthooks not supported',
                               [('Content-Type', 'text/plain')])
            else:
                env['eventlet.posthooks'].append(
                    (posthook1, (2,), {'multiplier': 3}))
                start_response('200 OK', [('Content-Type', 'text/plain')])
            yield ''
        self.site.application = one_posthook_app
        sock = eventlet.connect(('localhost', self.port))
        fp = sock.makefile('rw')
        fp.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
        fp.flush()
        self.assertEquals(fp.readline(), 'HTTP/1.1 200 OK\r\n')
        fp.close()
        sock.close()
        # 2 * 3 from the single registered hook
        self.assertEquals(posthook1_count[0], 6)
        self.assertEquals(posthook2_count[0], 0)
        def two_posthook_app(env, start_response):
            env['local.test'] = 'test_029_posthooks'
            if 'eventlet.posthooks' not in env:
                start_response('500 eventlet.posthooks not supported',
                               [('Content-Type', 'text/plain')])
            else:
                env['eventlet.posthooks'].append(
                    (posthook1, (4,), {'multiplier': 5}))
                env['eventlet.posthooks'].append(
                    (posthook2, (100,), {'divisor': 4}))
                start_response('200 OK', [('Content-Type', 'text/plain')])
            yield ''
        self.site.application = two_posthook_app
        sock = eventlet.connect(('localhost', self.port))
        fp = sock.makefile('rw')
        fp.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
        fp.flush()
        self.assertEquals(fp.readline(), 'HTTP/1.1 200 OK\r\n')
        fp.close()
        sock.close()
        # 6 (previous) + 4 * 5; 100 / 4
        self.assertEquals(posthook1_count[0], 26)
        self.assertEquals(posthook2_count[0], 25)
def test_zero_length_chunked_response(self):
    # A chunked response whose only chunk is the empty string must still
    # produce a well-formed chunked body: a single zero-size terminator.
    def zero_chunked_app(env, start_response):
        start_response('200 OK', [('Content-type', 'text/plain')])
        yield ""
    self.site.application = zero_chunked_app
    sock = eventlet.connect(
        ('localhost', self.port))
    fd = sock.makefile('rw')
    fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
    fd.flush()
    response = fd.read().split('\r\n')
    headers = []
    # collect header lines up to (and including) the blank separator line
    while True:
        h = response.pop(0)
        headers.append(h)
        if h == '':
            break
    self.assert_('Transfer-Encoding: chunked' in ''.join(headers))
    # should only be one chunk of zero size with two blank lines
    # (one terminates the chunk, one terminates the body)
    self.assertEqual(response, ['0', '', ''])
def test_aborted_chunked_post(self):
    # Close the client socket midway through a chunked PUT body; the
    # server-side wsgi.input read should raise IOError rather than hang.
    read_content = event.Event()
    blew_up = [False]
    def chunk_reader(env, start_response):
        try:
            content = env['wsgi.input'].read(1024)
        except IOError:
            # expected: the body was truncated by the early close
            blew_up[0] = True
            content = 'ok'
        read_content.send(content)
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [content]
    self.site.application = chunk_reader
    expected_body = 'a bunch of stuff'
    # 'def' parses as a hex chunk size (0xdef bytes) — far more than the
    # data actually sent before the socket is closed below.
    data = "\r\n".join(['PUT /somefile HTTP/1.0',
                        'Transfer-Encoding: chunked',
                        '',
                        'def',
                        expected_body])
    # start PUT-ing some chunked data but close prematurely
    sock = eventlet.connect(('127.0.0.1', self.port))
    sock.sendall(data)
    sock.close()
    # the test passes if we successfully get here, and read all the data
    # in spite of the early close
    self.assertEqual(read_content.wait(), 'ok')
    self.assert_(blew_up[0])
def test_exceptions_close_connection(self):
    # An unhandled exception in the app should produce a 500 response
    # carrying 'Connection: close' and no Transfer-Encoding header.
    def wsgi_app(environ, start_response):
        raise RuntimeError("intentional error")
    self.site.application = wsgi_app
    sock = eventlet.connect(('localhost', self.port))
    fd = sock.makefile('rw')
    fd.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
    fd.flush()
    response_line, headers, body = read_http(sock)
    self.assert_(response_line.startswith('HTTP/1.1 500 Internal Server Error'))
    self.assertEqual(headers['connection'], 'close')
    self.assert_('transfer-encoding' not in headers)
def test_unicode_raises_error(self):
    # Yielding non-encodable unicode from the app body should surface as
    # a 500 error mentioning 'unicode' and close the connection.
    def wsgi_app(environ, start_response):
        start_response("200 OK", [])
        yield u"oh hai"
        yield u"non-encodable unicode: \u0230"
    self.site.application = wsgi_app
    sock = eventlet.connect(('localhost', self.port))
    fd = sock.makefile('rw')
    fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
    fd.flush()
    response_line, headers, body = read_http(sock)
    self.assert_(response_line.startswith('HTTP/1.1 500 Internal Server Error'))
    self.assertEqual(headers['connection'], 'close')
    self.assert_('unicode' in body)
def read_headers(sock):
    """Read an HTTP status line and headers from *sock*.

    Returns a ``(response_line, headers)`` pair where *headers* maps
    lower-cased header names to their values.  Raises ConnectionClosed
    if the peer closed the connection before sending a status line.
    """
    fd = sock.makefile()
    try:
        response_line = fd.readline()
    except socket.error, exc:
        # 10053 is WSAECONNABORTED (Windows connection-abort errno)
        if get_errno(exc) == 10053:
            raise ConnectionClosed
        raise
    if not response_line:
        raise ConnectionClosed
    header_lines = []
    # headers end at the bare CRLF separator line
    while True:
        line = fd.readline()
        if line == '\r\n':
            break
        else:
            header_lines.append(line)
    headers = dict()
    for x in header_lines:
        x = x.strip()
        if not x:
            continue
        key, value = x.split(': ', 1)
        # fail loudly on duplicates; this helper keeps a single value per key
        assert key.lower() not in headers, "%s header duplicated" % key
        headers[key.lower()] = value
    return response_line, headers
class IterableAlreadyHandledTest(_TestBase):
    # Exercises the server against an IterableApp(True) application
    # (presumably one that has already handled the response itself —
    # TODO confirm against IterableApp's definition elsewhere in the file).

    def set_site(self):
        self.site = IterableSite()

    def get_app(self):
        return IterableApp(True)

    def test_iterable_app_keeps_socket_open_unless_connection_close_sent(self):
        self.site.application = self.get_app()
        sock = eventlet.connect(
            ('localhost', self.port))
        fd = sock.makefile('rw')
        fd.write('GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
        fd.flush()
        # first keep-alive request: no Connection header should come back
        response_line, headers = read_headers(sock)
        self.assertEqual(response_line, 'HTTP/1.1 200 OK\r\n')
        self.assert_('connection' not in headers)
        # second request asks the server to close; body comes back chunked
        fd.write('GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n')
        fd.flush()
        response_line, headers, body = read_http(sock)
        self.assertEqual(response_line, 'HTTP/1.1 200 OK\r\n')
        self.assertEqual(headers.get('transfer-encoding'), 'chunked')
        self.assertEqual(body, '0\r\n\r\n') # Still coming back chunked
class ProxiedIterableAlreadyHandledTest(IterableAlreadyHandledTest):
    # same thing as the previous test but ensuring that it works with tpooled
    # results as well as regular ones
    @skip_with_pyevent
    def get_app(self):
        from eventlet import tpool
        return tpool.Proxy(super(ProxiedIterableAlreadyHandledTest, self).get_app())

    def tearDown(self):
        # kill tpool worker threads so they do not leak between tests
        from eventlet import tpool
        tpool.killall()
        super(ProxiedIterableAlreadyHandledTest, self).tearDown()
class TestChunkedInput(_TestBase):
    # Server-side handling of chunked request bodies ('transfer-encoding:
    # chunked'), including chunk-size extensions ("dirt") and early closes.
    dirt = ""        # text appended after each chunk-size in chunk_encode
    validator = None

    def application(self, env, start_response):
        # Dispatch on PATH_INFO:
        #   /short-read : read only the first 10 bytes of the body
        #   /lines      : echo the body back line by line
        #   /ping       : drain the body and answer "pong"
        input = env['wsgi.input']
        response = []
        pi = env["PATH_INFO"]
        if pi=="/short-read":
            d=input.read(10)
            response = [d]
        elif pi=="/lines":
            for x in input:
                response.append(x)
        elif pi=="/ping":
            input.read()
            response.append("pong")
        else:
            raise RuntimeError("bad path")
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return response

    def connect(self):
        return eventlet.connect(('localhost', self.port))

    def set_site(self):
        self.site = Site()
        self.site.application = self.application

    def chunk_encode(self, chunks, dirt=None):
        # Encode each chunk as "<hex-size><dirt>\r\n<data>\r\n"; the final
        # empty chunk in the input list produces the "0" terminator.
        if dirt is None:
            dirt = self.dirt
        b = ""
        for c in chunks:
            b += "%x%s\r\n%s\r\n" % (len(c), dirt, c)
        return b

    def body(self, dirt=None):
        # Standard fixture body; decoded it reads
        # "this is chunked\nline 2\nline3"
        return self.chunk_encode(["this", " is ", "chunked", "\nline", " 2", "\n", "line3", ""], dirt=dirt)

    def ping(self, fd):
        # Verify the server is still responsive on the same connection.
        fd.sendall("GET /ping HTTP/1.1\r\n\r\n")
        self.assertEquals(read_http(fd)[-1], "pong")

    def test_short_read_with_content_length(self):
        body = self.body()
        req = "POST /short-read HTTP/1.1\r\ntransfer-encoding: Chunked\r\nContent-Length:1000\r\n\r\n" + body
        fd = self.connect()
        fd.sendall(req)
        self.assertEquals(read_http(fd)[-1], "this is ch")
        self.ping(fd)

    def test_short_read_with_zero_content_length(self):
        body = self.body()
        req = "POST /short-read HTTP/1.1\r\ntransfer-encoding: Chunked\r\nContent-Length:0\r\n\r\n" + body
        fd = self.connect()
        fd.sendall(req)
        self.assertEquals(read_http(fd)[-1], "this is ch")
        self.ping(fd)

    def test_short_read(self):
        body = self.body()
        req = "POST /short-read HTTP/1.1\r\ntransfer-encoding: Chunked\r\n\r\n" + body
        fd = self.connect()
        fd.sendall(req)
        self.assertEquals(read_http(fd)[-1], "this is ch")
        self.ping(fd)

    def test_dirt(self):
        # Chunk-size lines may carry trailing extensions; the server must
        # still decode the body and answer the /ping correctly.
        body = self.body(dirt="; here is dirt\0bla")
        req = "POST /ping HTTP/1.1\r\ntransfer-encoding: Chunked\r\n\r\n" + body
        fd = self.connect()
        fd.sendall(req)
        self.assertEquals(read_http(fd)[-1], "pong")
        self.ping(fd)

    def test_chunked_readline(self):
        # Lines must reassemble correctly across chunk boundaries.
        body = self.body()
        req = "POST /lines HTTP/1.1\r\nContent-Length: %s\r\ntransfer-encoding: Chunked\r\n\r\n%s" % (len(body), body)
        fd = self.connect()
        fd.sendall(req)
        self.assertEquals(read_http(fd)[-1], 'this is chunked\nline 2\nline3')

    def test_close_before_finished(self):
        # A truncated chunked body must not make the server spin forever;
        # SIGALRM aborts the test if nothing progresses within a second.
        import signal
        got_signal = []
        def handler(*args):
            got_signal.append(1)
            raise KeyboardInterrupt()
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(1)
        try:
            body = '4\r\nthi'
            req = "POST /short-read HTTP/1.1\r\ntransfer-encoding: Chunked\r\n\r\n" + body
            fd = self.connect()
            fd.sendall(req)
            fd.close()
            eventlet.sleep(0.0)
        finally:
            # always restore the default alarm handler
            signal.alarm(0)
            signal.signal(signal.SIGALRM, signal.SIG_DFL)
        assert not got_signal, "caught alarm signal. infinite loop detected."

    def test_ipv6(self):
        try:
            sock = eventlet.listen(('::1', 0), family=socket.AF_INET6)
        except (socket.gaierror, socket.error): # probably no ipv6
            return
        log = StringIO()
        # first thing the server does is try to log the IP it's bound to
        def run_server():
            try:
                server = wsgi.server(sock=sock, log=log, site=Site())
            except ValueError:
                log.write('broked')
        eventlet.spawn_n(run_server)
        # busy-wait (yielding to the hub) until the first log line appears
        logval = log.getvalue()
        while not logval:
            eventlet.sleep(0.0)
            logval = log.getvalue()
        if 'broked' in logval:
            self.fail('WSGI server raised exception with ipv6 socket')
# Allow running this test module directly.
if __name__ == '__main__':
    main()
| |
from __future__ import print_function, division, absolute_import
from contextlib import contextmanager
from collections import namedtuple, defaultdict
import sys
import copy
import warnings
import traceback
from .tracing import event
from numba import (bytecode, interpreter, funcdesc, postproc,
typing, typeinfer, lowering, pylowering, utils, config,
errors, types, ir, rewrites, transforms)
from numba.targets import cpu, callconv
from numba.annotations import type_annotations
from numba.parfor import PreParforPass, ParforPass, Parfor, ParforDiagnostics
from numba.inline_closurecall import InlineClosureCallPass
from numba.errors import CompilerError
from numba.ir_utils import raise_on_unsupported_feature, warn_deprecated
from numba.compiler_lock import global_compiler_lock
from numba.analysis import dead_branch_prune
# terminal color markup (used by _PipelineManager._patch_error)
_termcolor = errors.termcolor()
class Flags(utils.ConfigOptions):
    # These options are all false by default, but the defaults are
    # different with the @jit decorator (see targets.options.TargetOptions).
    OPTIONS = {
        # Enable loop-lifting
        'enable_looplift': False,
        # Enable pyobject mode (in general)
        'enable_pyobject': False,
        # Enable pyobject mode inside lifted loops
        'enable_pyobject_looplift': False,
        # Force pyobject mode inside the whole function
        'force_pyobject': False,
        # Release GIL inside the native function
        'release_gil': False,
        'no_compile': False,
        # Emit debug info (becomes 'enable_debuginfo'; see _make_subtarget)
        'debuginfo': False,
        # Enable bounds checking (becomes 'enable_boundcheck')
        'boundcheck': False,
        'forceinline': False,
        'no_cpython_wrapper': False,
        # Enable automatic parallel optimization, can be fine-tuned by taking
        # a dictionary of sub-options instead of a boolean, see parfor.py for
        # detail.
        'auto_parallel': cpu.ParallelOptions(False),
        # Enable the Numba runtime (becomes 'enable_nrt')
        'nrt': False,
        # Disable the IR rewrite passes (see add_pre_typing_stage)
        'no_rewrites': False,
        'error_model': 'python',
        'fastmath': cpu.FastMathOptions(False),
        'noalias': False,
    }
# Default compilation flags: NRT enabled, everything else at its default.
DEFAULT_FLAGS = Flags()
DEFAULT_FLAGS.set('nrt')
# Field names of a CompileResult (see CompileResult / compile_result below).
CR_FIELDS = ["typing_context",
             "target_context",
             "entry_point",
             "typing_error",
             "type_annotation",
             "signature",
             "objectmode",
             "lifted",
             "fndesc",
             "interpmode",
             "library",
             "call_helper",
             "environment",
             "metadata",
             # List of functions to call to initialize on unserialization
             # (i.e cache load).
             "reload_init",
             ]
class CompileResult(namedtuple("_CompileResult", CR_FIELDS)):
    """Immutable record of a finished compilation; fields are CR_FIELDS."""
    __slots__ = ()

    def _reduce(self):
        """
        Reduce a CompileResult to picklable components.
        """
        libdata = self.library.serialize_using_object_code()
        # Make it (un)picklable efficiently
        typeann = str(self.type_annotation)
        fndesc = self.fndesc
        # Those don't need to be pickled and may fail
        # NOTE: this mutates the fndesc object in place, clearing its
        # typemap/calltypes for any other holder of the same object.
        fndesc.typemap = fndesc.calltypes = None
        return (libdata, self.fndesc, self.environment, self.signature,
                self.objectmode, self.interpmode, self.lifted, typeann,
                self.reload_init)

    @classmethod
    def _rebuild(cls, target_context, libdata, fndesc, env,
                 signature, objectmode, interpmode, lifted, typeann,
                 reload_init):
        """Recreate a CompileResult from the components of _reduce()."""
        if reload_init:
            # Re-run all
            for fn in reload_init:
                fn()
        library = target_context.codegen().unserialize_library(libdata)
        cfunc = target_context.get_executable(library, fndesc, env)
        cr = cls(target_context=target_context,
                 typing_context=target_context.typing_context,
                 library=library,
                 environment=env,
                 entry_point=cfunc,
                 fndesc=fndesc,
                 type_annotation=typeann,
                 signature=signature,
                 objectmode=objectmode,
                 interpmode=interpmode,
                 lifted=lifted,
                 typing_error=None,
                 call_helper=None,
                 metadata=None, # Do not store, arbitrary and potentially large!
                 reload_init=reload_init,
                 )
        return cr
# Bundle of outputs produced by a lowering stage.
_LowerResult = namedtuple("_LowerResult", [
    "fndesc",
    "call_helper",
    "cfunc",
    "env",
])
def compile_result(**kws):
    """Build a CompileResult from keyword arguments.

    Unknown field names raise NameError; any CR_FIELDS entry that was not
    supplied defaults to None.  On Python 3 the traceback is stripped from
    ``typing_error`` so it does not keep stack frames alive.
    """
    provided = set(kws)
    known = set(CR_FIELDS)
    unknown = provided - known
    if unknown:
        raise NameError(*unknown)
    for field in known - provided:
        kws[field] = None
    # Avoid keeping alive traceback variables
    if sys.version_info >= (3,):
        err = kws['typing_error']
        if err is not None:
            kws['typing_error'] = err.with_traceback(None)
    return CompileResult(**kws)
def compile_isolated(func, args, return_type=None, flags=DEFAULT_FLAGS,
                     locals=None):
    """
    Compile the function in an isolated environment (typing and target
    context).
    Good for testing.

    Parameters
    ----------
    func : the Python function to compile
    args : tuple of argument types
    return_type : explicit return type, or None to leave it to inference
    flags : Flags instance controlling compilation options
    locals : optional mapping of local variable names to explicit types

    Returns
    -------
    The result of compile_extra() run under freshly created typing and
    target contexts.
    """
    # Use a None sentinel instead of a shared mutable ``{}`` default
    # argument; behavior is unchanged for all callers.
    if locals is None:
        locals = {}
    from .targets.registry import cpu_target
    typingctx = typing.Context()
    targetctx = cpu.CPUContext(typingctx)
    # Register the contexts in case for nested @jit or @overload calls
    with cpu_target.nested_context(typingctx, targetctx):
        return compile_extra(typingctx, targetctx, func, args, return_type,
                             flags, locals)
def run_frontend(func):
    """
    Run the compiler frontend over the given Python function, and return
    the function's canonical Numba IR.
    """
    # XXX make this a dedicated Pipeline?
    identity = bytecode.FunctionIdentity.from_function(func)
    frontend = interpreter.Interpreter(identity)
    code = bytecode.ByteCode(func_id=identity)
    # Translate bytecode to IR, then normalize it with the post-processor.
    result_ir = frontend.interpret(code)
    postproc.PostProcessor(result_ir).run()
    return result_ir
class _CompileStatus(object):
"""
Used like a C record
"""
__slots__ = ['fail_reason', 'can_fallback', 'can_giveup']
def __init__(self, can_fallback, can_giveup):
self.fail_reason = None
self.can_fallback = can_fallback
self.can_giveup = can_giveup
def __repr__(self):
vals = []
for k in self.__slots__:
vals.append("{k}={v}".format(k=k, v=getattr(self, k)))
return ', '.join(vals)
class _EarlyPipelineCompletion(Exception):
    # Raised by a pipeline stage to stop the pipeline run immediately and
    # make `result` the compilation result (see _PipelineManager.run).
    def __init__(self, result):
        self.result = result
class _PipelineManager(object):
    """Holds named pipelines of (stage-function, description) pairs and
    runs them in order, falling back to the next pipeline on failure."""

    def __init__(self):
        self.pipeline_order = []    # pipeline names, in execution order
        self.pipeline_stages = {}   # name -> list of (func, description)
        self._finalized = False

    def create_pipeline(self, pipeline_name):
        assert not self._finalized, "Pipelines can no longer be added"
        self.pipeline_order.append(pipeline_name)
        self.pipeline_stages[pipeline_name] = []
        self.current = pipeline_name

    def add_stage(self, stage_function, stage_description):
        """Append a stage to the most recently created pipeline."""
        assert not self._finalized, "Stages can no longer be added."
        current_pipeline_name = self.pipeline_order[-1]
        func_desc_tuple = (stage_function, stage_description)
        self.pipeline_stages[current_pipeline_name].append(func_desc_tuple)

    def finalize(self):
        # After finalization, create_pipeline/add_stage assert out.
        self._finalized = True

    def _patch_error(self, desc, exc):
        """
        Patches the error to show the stage that it arose in.
        """
        newmsg = "{desc}\n{exc}".format(desc=desc, exc=exc)
        # For python2, attach the traceback of the previous exception.
        if not utils.IS_PY3 and config.FULL_TRACEBACKS:
            # strip the new message to just print the error string and not
            # the marked up source etc (this is handled already).
            stripped = _termcolor.errmsg(newmsg.split('\n')[1])
            fmt = "Caused By:\n{tb}\n{newmsg}"
            newmsg = fmt.format(tb=traceback.format_exc(), newmsg=stripped)
        exc.args = (newmsg,)
        return exc

    @global_compiler_lock
    def run(self, status):
        """Run the pipelines in order.

        Returns None when a pipeline completes all its stages, or the
        result carried by an _EarlyPipelineCompletion.  A failing stage
        aborts its pipeline (recording the patched exception on *status*)
        and falls through to the next; a failure in the final pipeline
        re-raises the patched exception.
        """
        assert self._finalized, "PM must be finalized before run()"
        for pipeline_name in self.pipeline_order:
            event("Pipeline: %s" % pipeline_name)
            is_final_pipeline = pipeline_name == self.pipeline_order[-1]
            for stage, stage_name in self.pipeline_stages[pipeline_name]:
                try:
                    event("-- %s" % stage_name)
                    stage()
                except _EarlyPipelineCompletion as e:
                    return e.result
                except BaseException as e:
                    msg = "Failed in %s mode pipeline (step: %s)" % \
                        (pipeline_name, stage_name)
                    patched_exception = self._patch_error(msg, e)
                    # No more fallback pipelines?
                    if is_final_pipeline:
                        raise patched_exception
                    # Go to next fallback pipeline
                    else:
                        status.fail_reason = patched_exception
                        break
            else:
                # for/else: every stage of this pipeline succeeded
                return None
        # TODO save all error information
        raise CompilerError("All pipelines have failed")
class BasePipeline(object):
"""
Stores and manages states for the compiler pipeline
"""
def __init__(self, typingctx, targetctx, library, args, return_type, flags,
locals):
# Make sure the environment is reloaded
config.reload_config()
typingctx.refresh()
targetctx.refresh()
self.typingctx = typingctx
self.targetctx = _make_subtarget(targetctx, flags)
self.library = library
self.args = args
self.return_type = return_type
self.flags = flags
self.locals = locals
# Results of various steps of the compilation pipeline
self.bc = None
self.func_id = None
self.func_ir = None
self.lifted = None
self.lifted_from = None
self.typemap = None
self.calltypes = None
self.type_annotation = None
self.metadata = {} # holds arbitrary inter-pipeline stage meta data
self.reload_init = []
# parfor diagnostics info, add to metadata
self.parfor_diagnostics = ParforDiagnostics()
self.metadata['parfor_diagnostics'] = self.parfor_diagnostics
self.status = _CompileStatus(
can_fallback=self.flags.enable_pyobject,
can_giveup=config.COMPATIBILITY_MODE
)
@contextmanager
def fallback_context(self, msg):
"""
Wraps code that would signal a fallback to object mode
"""
try:
yield
except BaseException as e:
if not self.status.can_fallback:
raise
else:
if utils.PYVERSION >= (3,):
# Clear all references attached to the traceback
e = e.with_traceback(None)
# this emits a warning containing the error message body in the
# case of fallback from npm to objmode
loop_lift = '' if self.flags.enable_looplift else 'OUT'
msg_rewrite = ("\nCompilation is falling back to object mode "
"WITH%s looplifting enabled because %s"
% (loop_lift, msg))
warnings.warn_explicit('%s due to: %s' % (msg_rewrite, e),
errors.NumbaWarning,
self.func_id.filename,
self.func_id.firstlineno)
raise
@contextmanager
def giveup_context(self, msg):
"""
Wraps code that would signal a fallback to interpreter mode
"""
try:
yield
except BaseException as e:
if not self.status.can_giveup:
raise
else:
if utils.PYVERSION >= (3,):
# Clear all references attached to the traceback
e = e.with_traceback(None)
warnings.warn_explicit('%s: %s' % (msg, e),
errors.NumbaWarning,
self.func_id.filename,
self.func_id.firstlineno)
raise
def extract_bytecode(self, func_id):
"""
Extract bytecode from function
"""
bc = bytecode.ByteCode(func_id)
if config.DUMP_BYTECODE:
print(bc.dump())
return bc
def compile_extra(self, func):
self.func_id = bytecode.FunctionIdentity.from_function(func)
try:
bc = self.extract_bytecode(self.func_id)
except BaseException as e:
if self.status.can_giveup:
self.stage_compile_interp_mode()
return self.cr
else:
raise e
self.bc = bc
self.lifted = ()
self.lifted_from = None
return self._compile_bytecode()
def compile_ir(self, func_ir, lifted=(), lifted_from=None):
self.func_id = func_ir.func_id
self.lifted = lifted
self.lifted_from = lifted_from
self._set_and_check_ir(func_ir)
return self._compile_ir()
def stage_analyze_bytecode(self):
"""
Analyze bytecode and translating to Numba IR
"""
func_ir = translate_stage(self.func_id, self.bc)
self._set_and_check_ir(func_ir)
def _set_and_check_ir(self, func_ir):
self.func_ir = func_ir
self.nargs = self.func_ir.arg_count
if not self.args and self.flags.force_pyobject:
# Allow an empty argument types specification when object mode
# is explicitly requested.
self.args = (types.pyobject,) * self.nargs
elif len(self.args) != self.nargs:
raise TypeError("Signature mismatch: %d argument types given, "
"but function takes %d arguments"
% (len(self.args), self.nargs))
def stage_process_ir(self):
ir_processing_stage(self.func_ir)
def frontend_looplift(self):
"""
Loop lifting analysis and transformation
"""
loop_flags = self.flags.copy()
outer_flags = self.flags.copy()
# Do not recursively loop lift
outer_flags.unset('enable_looplift')
loop_flags.unset('enable_looplift')
if not self.flags.enable_pyobject_looplift:
loop_flags.unset('enable_pyobject')
main, loops = transforms.loop_lifting(self.func_ir,
typingctx=self.typingctx,
targetctx=self.targetctx,
locals=self.locals,
flags=loop_flags)
if loops:
# Some loops were extracted
if config.DEBUG_FRONTEND or config.DEBUG:
for loop in loops:
print("Lifting loop", loop.get_source_location())
cres = compile_ir(self.typingctx, self.targetctx, main,
self.args, self.return_type,
outer_flags, self.locals,
lifted=tuple(loops), lifted_from=None,
is_lifted_loop=True)
return cres
def stage_frontend_withlift(self):
"""
Extract with-contexts
"""
main, withs = transforms.with_lifting(
func_ir=self.func_ir,
typingctx=self.typingctx,
targetctx=self.targetctx,
flags=self.flags,
locals=self.locals,
)
if withs:
cres = compile_ir(self.typingctx, self.targetctx, main,
self.args, self.return_type,
self.flags, self.locals,
lifted=tuple(withs), lifted_from=None,
pipeline_class=type(self))
raise _EarlyPipelineCompletion(cres)
def stage_objectmode_frontend(self):
"""
Front-end: Analyze bytecode, generate Numba IR, infer types
"""
if self.flags.enable_looplift:
assert not self.lifted
cres = self.frontend_looplift()
if cres is not None:
raise _EarlyPipelineCompletion(cres)
# Fallback typing: everything is a python object
self.typemap = defaultdict(lambda: types.pyobject)
self.calltypes = defaultdict(lambda: types.pyobject)
self.return_type = types.pyobject
def stage_dead_branch_prune(self):
"""
This prunes dead branches, a dead branch is one which is derivable as
not taken at compile time purely based on const/literal evaluation.
"""
assert self.func_ir
msg = ('Internal error in pre-inference dead branch pruning '
'pass encountered during compilation of '
'function "%s"' % (self.func_id.func_name,))
with self.fallback_context(msg):
dead_branch_prune(self.func_ir, self.args)
if config.DEBUG or config.DUMP_IR:
print('branch_pruned_ir'.center(80, '-'))
print(self.func_ir.dump())
print('end branch_pruned_ir'.center(80, '-'))
def stage_nopython_frontend(self):
"""
Type inference and legalization
"""
with self.fallback_context('Function "%s" failed type inference'
% (self.func_id.func_name,)):
# Type inference
typemap, return_type, calltypes = type_inference_stage(
self.typingctx,
self.func_ir,
self.args,
self.return_type,
self.locals)
self.typemap = typemap
self.return_type = return_type
self.calltypes = calltypes
with self.fallback_context('Function "%s" has invalid return type'
% (self.func_id.func_name,)):
legalize_return_type(self.return_type, self.func_ir,
self.targetctx)
def stage_generic_rewrites(self):
"""
Perform any intermediate representation rewrites before type
inference.
"""
assert self.func_ir
msg = ('Internal error in pre-inference rewriting '
'pass encountered during compilation of '
'function "%s"' % (self.func_id.func_name,))
with self.fallback_context(msg):
rewrites.rewrite_registry.apply('before-inference',
self, self.func_ir)
def stage_nopython_rewrites(self):
"""
Perform any intermediate representation rewrites after type
inference.
"""
# Ensure we have an IR and type information.
assert self.func_ir
assert isinstance(getattr(self, 'typemap', None), dict)
assert isinstance(getattr(self, 'calltypes', None), dict)
msg = ('Internal error in post-inference rewriting '
'pass encountered during compilation of '
'function "%s"' % (self.func_id.func_name,))
with self.fallback_context(msg):
rewrites.rewrite_registry.apply('after-inference',
self, self.func_ir)
def stage_pre_parfor_pass(self):
"""
Preprocessing for data-parallel computations.
"""
# Ensure we have an IR and type information.
assert self.func_ir
preparfor_pass = PreParforPass(
self.func_ir,
self.type_annotation.typemap,
self.type_annotation.calltypes, self.typingctx,
self.flags.auto_parallel,
self.parfor_diagnostics.replaced_fns
)
preparfor_pass.run()
def stage_parfor_pass(self):
"""
Convert data-parallel computations into Parfor nodes
"""
# Ensure we have an IR and type information.
assert self.func_ir
parfor_pass = ParforPass(self.func_ir, self.type_annotation.typemap,
self.type_annotation.calltypes, self.return_type, self.typingctx,
self.flags.auto_parallel, self.flags, self.parfor_diagnostics)
parfor_pass.run()
# check the parfor pass worked and warn if it didn't
has_parfor = False
for blk in self.func_ir.blocks.values():
for stmnt in blk.body:
if isinstance(stmnt, Parfor):
has_parfor = True
break
else:
continue
break
if not has_parfor:
# parfor calls the compiler chain again with a string
if not self.func_ir.loc.filename == '<string>':
url = ("http://numba.pydata.org/numba-doc/latest/user/"
"parallel.html#diagnostics")
msg = ("\nThe keyword argument 'parallel=True' was specified "
"but no transformation for parallel execution was "
"possible.\n\nTo find out why, try turning on parallel "
"diagnostics, see %s for help." % url)
warnings.warn(errors.NumbaPerformanceWarning(msg,
self.func_ir.loc))
# Add reload function to initialize the parallel backend.
self.reload_init.append(_reload_parfors)
def stage_inline_pass(self):
"""
Inline calls to locally defined closures.
"""
# Ensure we have an IR and type information.
assert self.func_ir
# if the return type is a pyobject, there's no type info available and
# no ability to resolve certain typed function calls in the array
# inlining code, use this variable to indicate
typed_pass = not isinstance(self.return_type, types.misc.PyObject)
inline_pass = InlineClosureCallPass(self.func_ir,
self.flags.auto_parallel,
self.parfor_diagnostics.replaced_fns,
typed_pass)
inline_pass.run()
# Remove all Dels, and re-run postproc
post_proc = postproc.PostProcessor(self.func_ir)
post_proc.run()
if config.DEBUG or config.DUMP_IR:
name = self.func_ir.func_id.func_qualname
print(("IR DUMP: %s" % name).center(80, "-"))
self.func_ir.dump()
def stage_annotate_type(self):
"""
Create type annotation after type inference
"""
self.type_annotation = type_annotations.TypeAnnotation(
func_ir=self.func_ir,
typemap=self.typemap,
calltypes=self.calltypes,
lifted=self.lifted,
lifted_from=self.lifted_from,
args=self.args,
return_type=self.return_type,
html_output=config.HTML)
if config.ANNOTATE:
print("ANNOTATION".center(80, '-'))
print(self.type_annotation)
print('=' * 80)
if config.HTML:
with open(config.HTML, 'w') as fout:
self.type_annotation.html_annotate(fout)
def stage_dump_diagnostics(self):
if self.flags.auto_parallel.enabled:
if config.PARALLEL_DIAGNOSTICS:
if self.parfor_diagnostics is not None:
self.parfor_diagnostics.dump(config.PARALLEL_DIAGNOSTICS)
else:
raise RuntimeError("Diagnostics failed.")
def backend_object_mode(self):
"""
Object mode compilation
"""
with self.giveup_context("Function %s failed at object mode lowering"
% (self.func_id.func_name,)):
if len(self.args) != self.nargs:
# append missing
self.args = (tuple(self.args) + (types.pyobject,) *
(self.nargs - len(self.args)))
return py_lowering_stage(self.targetctx,
self.library,
self.func_ir,
self.flags)
def backend_nopython_mode(self):
"""Native mode compilation"""
msg = ("Function %s failed at nopython "
"mode lowering" % (self.func_id.func_name,))
with self.fallback_context(msg):
return native_lowering_stage(
self.targetctx,
self.library,
self.func_ir,
self.typemap,
self.return_type,
self.calltypes,
self.flags,
self.metadata)
def _backend(self, lowerfn, objectmode):
"""
Back-end: Generate LLVM IR from Numba IR, compile to machine code
"""
if self.library is None:
codegen = self.targetctx.codegen()
self.library = codegen.create_library(self.func_id.func_qualname)
# Enable object caching upfront, so that the library can
# be later serialized.
self.library.enable_object_caching()
lowered = lowerfn()
signature = typing.signature(self.return_type, *self.args)
self.cr = compile_result(
typing_context=self.typingctx,
target_context=self.targetctx,
entry_point=lowered.cfunc,
typing_error=self.status.fail_reason,
type_annotation=self.type_annotation,
library=self.library,
call_helper=lowered.call_helper,
signature=signature,
objectmode=objectmode,
interpmode=False,
lifted=self.lifted,
fndesc=lowered.fndesc,
environment=lowered.env,
metadata=self.metadata,
reload_init=self.reload_init,
)
def stage_objectmode_backend(self):
"""
Lowering for object mode
"""
lowerfn = self.backend_object_mode
self._backend(lowerfn, objectmode=True)
# Warn, deprecated behaviour, code compiled in objmode without
# force_pyobject indicates fallback from nopython mode
if not self.flags.force_pyobject:
# first warn about object mode and yes/no to lifted loops
if len(self.lifted) > 0:
warn_msg = ('Function "%s" was compiled in object mode without'
' forceobj=True, but has lifted loops.' %
(self.func_id.func_name,))
else:
warn_msg = ('Function "%s" was compiled in object mode without'
' forceobj=True.' % (self.func_id.func_name,))
warnings.warn(errors.NumbaWarning(warn_msg,
self.func_ir.loc))
url = ("http://numba.pydata.org/numba-doc/latest/reference/"
"deprecation.html#deprecation-of-object-mode-fall-"
"back-behaviour-when-using-jit")
msg = ("\nFall-back from the nopython compilation path to the "
"object mode compilation path has been detected, this is "
"deprecated behaviour.\n\nFor more information visit %s" %
url)
warnings.warn(errors.NumbaDeprecationWarning(msg, self.func_ir.loc))
if self.flags.release_gil:
warn_msg = ("Code running in object mode won't allow parallel"
" execution despite nogil=True.")
warnings.warn_explicit(warn_msg, errors.NumbaWarning,
self.func_id.filename,
self.func_id.firstlineno)
def stage_nopython_backend(self):
"""
Do lowering for nopython
"""
lowerfn = self.backend_nopython_mode
self._backend(lowerfn, objectmode=False)
def stage_compile_interp_mode(self):
"""
Just create a compile result for interpreter mode
"""
args = [types.pyobject] * len(self.args)
signature = typing.signature(types.pyobject, *args)
self.cr = compile_result(typing_context=self.typingctx,
target_context=self.targetctx,
entry_point=self.func_id.func,
typing_error=self.status.fail_reason,
type_annotation="<Interpreter mode function>",
signature=signature,
objectmode=False,
interpmode=True,
lifted=(),
fndesc=None,)
def stage_ir_legalization(self):
raise_on_unsupported_feature(self.func_ir, self.typemap)
warn_deprecated(self.func_ir, self.typemap)
def stage_cleanup(self):
"""
Cleanup intermediate results to release resources.
"""
def define_pipelines(self, pm):
"""Child classes override this to customize the pipeline.
"""
raise NotImplementedError()
def add_preprocessing_stage(self, pm):
"""Add the preprocessing stage that analyzes the bytecode to prepare
the Numba IR.
"""
if self.func_ir is None:
pm.add_stage(self.stage_analyze_bytecode, "analyzing bytecode")
pm.add_stage(self.stage_process_ir, "processing IR")
def add_pre_typing_stage(self, pm):
"""Add any stages that go before type-inference.
The current stages contain type-agnostic rewrite passes.
"""
if not self.flags.no_rewrites:
pm.add_stage(self.stage_generic_rewrites, "nopython rewrites")
pm.add_stage(self.stage_dead_branch_prune, "dead branch pruning")
pm.add_stage(self.stage_inline_pass,
"inline calls to locally defined closures")
def add_typing_stage(self, pm):
"""Add the type-inference stage necessary for nopython mode.
"""
pm.add_stage(self.stage_nopython_frontend, "nopython frontend")
pm.add_stage(self.stage_annotate_type, "annotate type")
def add_optimization_stage(self, pm):
"""Add optimization stages.
"""
if self.flags.auto_parallel.enabled:
pm.add_stage(self.stage_pre_parfor_pass,
"Preprocessing for parfors")
if not self.flags.no_rewrites:
pm.add_stage(self.stage_nopython_rewrites, "nopython rewrites")
if self.flags.auto_parallel.enabled:
pm.add_stage(self.stage_parfor_pass, "convert to parfors")
def add_lowering_stage(self, pm):
"""Add the lowering (code-generation) stage for nopython-mode
"""
pm.add_stage(self.stage_nopython_backend, "nopython mode backend")
def add_cleanup_stage(self, pm):
"""Add the clean-up stage to remove intermediate results.
"""
pm.add_stage(self.stage_cleanup, "cleanup intermediate results")
def add_with_handling_stage(self, pm):
pm.add_stage(self.stage_frontend_withlift, "Handle with contexts")
def define_nopython_pipeline(self, pm, name='nopython'):
"""Add the nopython-mode pipeline to the pipeline manager
"""
pm.create_pipeline(name)
self.add_preprocessing_stage(pm)
self.add_with_handling_stage(pm)
self.add_pre_typing_stage(pm)
self.add_typing_stage(pm)
self.add_optimization_stage(pm)
pm.add_stage(self.stage_ir_legalization,
"ensure IR is legal prior to lowering")
self.add_lowering_stage(pm)
pm.add_stage(self.stage_dump_diagnostics, "dump diagnostics")
self.add_cleanup_stage(pm)
def define_objectmode_pipeline(self, pm, name='object'):
    """Add the object-mode pipeline to the pipeline manager.

    Unlike the nopython pipeline, no type inference or optimization
    stages are registered -- only the object-mode frontend/backend pair.
    """
    pm.create_pipeline(name)
    self.add_preprocessing_stage(pm)
    pm.add_stage(self.stage_objectmode_frontend,
                 "object mode frontend")
    pm.add_stage(self.stage_inline_pass,
                 "inline calls to locally defined closures")
    pm.add_stage(self.stage_annotate_type, "annotate type")
    pm.add_stage(self.stage_ir_legalization,
                 "ensure IR is legal prior to lowering")
    pm.add_stage(self.stage_objectmode_backend, "object mode backend")
    self.add_cleanup_stage(pm)
def define_interpreted_pipeline(self, pm, name="interp"):
    """Add the interpreted-mode (fallback) pipeline to the pipeline manager.
    """
    pm.create_pipeline(name)
    # Last-resort pipeline: execute through the interpreter directly.
    pm.add_stage(self.stage_compile_interp_mode,
                 "compiling with interpreter mode")
    self.add_cleanup_stage(pm)
def _compile_core(self):
    """Build the pipeline manager, run it, and return the compile result.

    A non-None value from ``pm.run`` signals early pipeline completion
    and is returned directly; otherwise the result stored on ``self.cr``
    is used.
    """
    manager = _PipelineManager()
    self.define_pipelines(manager)
    manager.finalize()
    early_result = manager.run(self.status)
    if early_result is not None:
        return early_result
    assert self.cr is not None
    return self.cr
def _compile_bytecode(self):
    """
    Populate and run pipeline for bytecode input
    """
    # Bytecode input and IR input are mutually exclusive.
    assert self.func_ir is None
    return self._compile_core()
def _compile_ir(self):
    """
    Populate and run pipeline for IR input
    """
    # IR must have been supplied up front for this entry point.
    assert self.func_ir is not None
    return self._compile_core()
class Pipeline(BasePipeline):
    """The default compiler pipeline: nopython mode first, object mode
    as a fallback, and interpreted mode as a last resort.
    """

    def define_pipelines(self, pm):
        force_pyobject = self.flags.force_pyobject
        if not force_pyobject:
            self.define_nopython_pipeline(pm)
        if force_pyobject or self.status.can_fallback:
            self.define_objectmode_pipeline(pm)
        if self.status.can_giveup:
            self.define_interpreted_pipeline(pm)
def _make_subtarget(targetctx, flags):
    """Derive a new target context from *targetctx* configured by *flags*.

    Boolean options are forwarded only when their flag is truthy; the
    error model is always present.
    """
    options = {}
    for flag_name, option_name in (('debuginfo', 'enable_debuginfo'),
                                   ('boundcheck', 'enable_boundcheck'),
                                   ('nrt', 'enable_nrt')):
        if getattr(flags, flag_name):
            options[option_name] = True
    if flags.auto_parallel:
        options['auto_parallel'] = flags.auto_parallel
    if flags.fastmath:
        options['fastmath'] = flags.fastmath
    options['error_model'] = callconv.create_error_model(flags.error_model,
                                                         targetctx)
    return targetctx.subtarget(**options)
def compile_extra(typingctx, targetctx, func, args, return_type, flags,
                  locals, library=None, pipeline_class=Pipeline):
    """Compiler entry point

    Parameters
    ----------
    typingctx :
        typing context
    targetctx :
        target context
    func : function
        the python function to be compiled
    args : tuple, list
        argument types
    return_type :
        Use ``None`` to indicate void return
    flags : numba.compiler.Flags
        compiler flags
    locals : dict
        mapping of local variable names to forced types
    library : numba.codegen.CodeLibrary
        Used to store the compiled code.
        If it is ``None``, a new CodeLibrary is used.
    pipeline_class : type like numba.compiler.BasePipeline
        compiler pipeline
    """
    pipeline = pipeline_class(typingctx, targetctx, library,
                              args, return_type, flags, locals)
    return pipeline.compile_extra(func)
def compile_ir(typingctx, targetctx, func_ir, args, return_type, flags,
               locals, lifted=(), lifted_from=None, is_lifted_loop=False,
               library=None, pipeline_class=Pipeline):
    """
    Compile a function with the given IR.

    For internal use only.

    When *is_lifted_loop* is true, the IR is first compiled with rewrites
    disabled, then a second attempt is made with the original flags; the
    rewritten result is preferred when that attempt succeeds.
    """
    # This is a special branch that should only run on IR from a lifted loop
    if is_lifted_loop:
        # This code is pessimistic and costly, but it is a not often trodden
        # path and it will go away once IR is made immutable. The problem is
        # that the rewrite passes can mutate the IR into a state that makes
        # it possible for invalid tokens to be transmitted to lowering which
        # then trickle through into LLVM IR and causes RuntimeErrors as LLVM
        # cannot compile it. As a result the following approach is taken:
        # 1. Create some new flags that copy the original ones but switch
        #    off rewrites.
        # 2. Compile with 1. to get a compile result
        # 3. Try and compile another compile result but this time with the
        #    original flags (and IR being rewritten).
        # 4. If 3 was successful, use the result, else use 2.

        # create flags with no rewrites
        norw_flags = copy.deepcopy(flags)
        norw_flags.no_rewrites = True

        def compile_local(the_ir, the_flags):
            # A fresh pipeline per attempt, so no state is shared between
            # the no-rewrites and with-rewrites compilations.
            pipeline = pipeline_class(typingctx, targetctx, library,
                                      args, return_type, the_flags, locals)
            return pipeline.compile_ir(func_ir=the_ir, lifted=lifted,
                                       lifted_from=lifted_from)

        # compile with rewrites off, IR shouldn't be mutated irreparably
        norw_cres = compile_local(func_ir.copy(), norw_flags)

        # try and compile with rewrites on if no_rewrites was not set in the
        # original flags, IR might get broken but we've got a CompileResult
        # that's usable from above.
        rw_cres = None
        if not flags.no_rewrites:
            # Suppress warnings in compilation retry
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", errors.NumbaWarning)
                try:
                    rw_cres = compile_local(func_ir.copy(), flags)
                except Exception:
                    pass
        # if the rewrite variant of compilation worked, use it, else use
        # the norewrites backup
        if rw_cres is not None:
            cres = rw_cres
        else:
            cres = norw_cres
        return cres

    else:
        pipeline = pipeline_class(typingctx, targetctx, library,
                                  args, return_type, flags, locals)
        return pipeline.compile_ir(func_ir=func_ir, lifted=lifted,
                                   lifted_from=lifted_from)
def compile_internal(typingctx, targetctx, library,
                     func, args, return_type, flags, locals):
    """Compile *func* with the default :class:`Pipeline`.

    For internal use only.
    """
    default_pipeline = Pipeline(typingctx, targetctx, library,
                                args, return_type, flags, locals)
    return default_pipeline.compile_extra(func)
def legalize_return_type(return_type, interp, targetctx):
    """Validate *return_type* against nopython-mode restrictions.

    Without NRT, an array may only be returned when it was passed into
    the function as an argument.  Function objects and phantom types may
    never be returned.  Raises TypeError on violation.
    """
    if not targetctx.enable_nrt and isinstance(return_type, types.Array):
        # Walk the IR once, collecting every returned variable, every
        # cast assignment, and every argument-bound variable.
        retstmts = []
        caststmts = {}
        argvars = set()
        for blk in interp.blocks.values():
            for inst in blk.body:
                if isinstance(inst, ir.Return):
                    retstmts.append(inst.value.name)
                elif isinstance(inst, ir.Assign):
                    value = inst.value
                    if isinstance(value, ir.Expr) and value.op == 'cast':
                        caststmts[inst.target.name] = value
                    elif isinstance(value, ir.Arg):
                        argvars.add(inst.target.name)

        assert retstmts, "No return statements?"

        # Every returned value must be a cast of an argument variable.
        for var in retstmts:
            cast = caststmts.get(var)
            if cast is None or cast.value.name not in argvars:
                raise TypeError("Only accept returning of array passed into "
                                "the function as argument")

    elif isinstance(return_type, (types.Function, types.Phantom)):
        msg = "Can't return function object ({}) in nopython mode"
        raise TypeError(msg.format(return_type))
def translate_stage(func_id, bytecode):
    """Translate *bytecode* into Numba IR via the interpreter frontend."""
    return interpreter.Interpreter(func_id).interpret(bytecode)
def ir_processing_stage(func_ir):
    """Run IR post-processing over *func_ir* and return it.

    When ``config.DEBUG`` or ``config.DUMP_IR`` is set, the IR (and, for
    generators, the generator info) is printed with banner headers.
    """
    postproc.PostProcessor(func_ir).run()

    dump_wanted = config.DEBUG or config.DUMP_IR
    if dump_wanted:
        qualname = func_ir.func_id.func_qualname
        print(("IR DUMP: %s" % qualname).center(80, "-"))
        func_ir.dump()
        if func_ir.is_generator:
            print(("GENERATOR INFO: %s" % qualname).center(80, "-"))
            func_ir.dump_generator_info()
    return func_ir
def type_inference_stage(typingctx, interp, args, return_type, locals=None):
    """Infer types for the function IR *interp* given argument types.

    Parameters
    ----------
    typingctx :
        typing context
    interp :
        the function IR to type
    args : sequence
        argument types; must match the function's arity
    return_type :
        the known return type, or ``None`` to infer it
    locals : dict, optional
        mapping of local variable names to forced types

    Returns
    -------
    (typemap, return_type, calltypes)

    Raises
    ------
    TypeError
        If the number of argument types does not match the arity.
    """
    # Fix: the default used to be a shared mutable ``{}``; use None as the
    # sentinel instead (call-compatible, avoids the mutable-default pitfall).
    if locals is None:
        locals = {}
    if len(args) != interp.arg_count:
        raise TypeError("Mismatch number of argument types")

    # Renamed from ``warnings`` so the stdlib warnings module, used
    # elsewhere in this file, is not shadowed.
    warn_fixer = errors.WarningsFixer(errors.NumbaWarning)

    infer = typeinfer.TypeInferer(typingctx, interp, warn_fixer)
    with typingctx.callstack.register(infer, interp.func_id, args):
        # Seed argument types
        for index, (name, ty) in enumerate(zip(interp.arg_names, args)):
            infer.seed_argument(name, index, ty)

        # Seed return type
        if return_type is not None:
            infer.seed_return(return_type)

        # Seed local types
        for k, v in locals.items():
            infer.seed_type(k, v)

        infer.build_constraint()
        infer.propagate()
        typemap, restype, calltypes = infer.unify()

    # Output all Numba warnings
    warn_fixer.flush()

    return typemap, restype, calltypes
def native_lowering_stage(targetctx, library, interp, typemap, restype,
                          calltypes, flags, metadata):
    """Lower typed IR to native code.

    Returns a ``_LowerResult``; ``cfunc`` is ``None`` when ``no_compile``
    is set in *flags*.
    """
    # Lowering
    fndesc = funcdesc.PythonFunctionDescriptor.from_specialized_function(
        interp, typemap, restype, calltypes, mangler=targetctx.mangler,
        inline=flags.forceinline, noalias=flags.noalias)

    with targetctx.push_code_library(library):
        lower = lowering.Lower(targetctx, library, fndesc, interp,
                               metadata=metadata)
        lower.lower()
        if not flags.no_cpython_wrapper:
            lower.create_cpython_wrapper(flags.release_gil)
        env = lower.env
        call_helper = lower.call_helper
        # Drop the lowerer so its state can be reclaimed promptly.
        del lower

    if flags.no_compile:
        return _LowerResult(fndesc, call_helper, cfunc=None, env=env)
    else:
        # Prepare for execution
        cfunc = targetctx.get_executable(library, fndesc, env)
        # Insert native function for use by other jitted-functions.
        # We also register its library to allow for inlining.
        targetctx.insert_user_function(cfunc, fndesc, [library])
        return _LowerResult(fndesc, call_helper, cfunc=cfunc, env=env)
def py_lowering_stage(targetctx, library, interp, flags):
    """Lower IR to object-mode (CPython API based) code.

    Returns a ``_LowerResult``; ``cfunc`` is ``None`` when ``no_compile``
    is set in *flags*.
    """
    fndesc = funcdesc.PythonFunctionDescriptor.from_object_mode_function(
        interp
    )
    with targetctx.push_code_library(library):
        lower = pylowering.PyLower(targetctx, library, fndesc, interp)
        lower.lower()
        if not flags.no_cpython_wrapper:
            lower.create_cpython_wrapper()
        env = lower.env
        call_helper = lower.call_helper
        # Drop the lowerer so its state can be reclaimed promptly.
        del lower

    if flags.no_compile:
        return _LowerResult(fndesc, call_helper, cfunc=None, env=env)
    else:
        # Prepare for execution
        cfunc = targetctx.get_executable(library, fndesc, env)
        return _LowerResult(fndesc, call_helper, cfunc=cfunc, env=env)
def _reload_parfors():
    """Reloader for cached parfors.

    NOTE(review): presumably invoked when a parfor-compiled function is
    loaded from the on-disk cache -- confirm against the cache machinery.
    """
    # Re-initialize the parallel backend when load from cache.
    from numba.npyufunc.parallel import _launch_threads
    _launch_threads()
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dataproc_v1.types import workflow_templates
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
# Resolve the installed google-cloud-dataproc version for the user-agent
# string; fall back to version-less client info when the distribution
# metadata cannot be found.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class WorkflowTemplateServiceTransport(abc.ABC):
    """Abstract transport class for WorkflowTemplateService."""

    # OAuth scopes required by every RPC on this service.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)

    # Default API endpoint; ":443" is appended in __init__ when no port
    # is given.
    DEFAULT_HOST: str = "dataproc.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are supplied.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        # All RPCs retry on ServiceUnavailable with exponential backoff
        # (0.1s initial, x1.3 multiplier, 60s cap, 600s overall deadline);
        # the read-only RPCs (get/list) additionally retry on
        # DeadlineExceeded and InternalServerError.
        self._wrapped_methods = {
            self.create_workflow_template: gapic_v1.method.wrap_method(
                self.create_workflow_template,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.get_workflow_template: gapic_v1.method.wrap_method(
                self.get_workflow_template,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.instantiate_workflow_template: gapic_v1.method.wrap_method(
                self.instantiate_workflow_template,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.instantiate_inline_workflow_template: gapic_v1.method.wrap_method(
                self.instantiate_inline_workflow_template,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.update_workflow_template: gapic_v1.method.wrap_method(
                self.update_workflow_template,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.list_workflow_templates: gapic_v1.method.wrap_method(
                self.list_workflow_templates,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.DeadlineExceeded,
                        core_exceptions.InternalServerError,
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
            self.delete_workflow_template: gapic_v1.method.wrap_method(
                self.delete_workflow_template,
                default_retry=retries.Retry(
                    initial=0.1,
                    maximum=60.0,
                    multiplier=1.3,
                    predicate=retries.if_exception_type(
                        core_exceptions.ServiceUnavailable,
                    ),
                    deadline=600.0,
                ),
                default_timeout=600.0,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    @property
    def create_workflow_template(
        self,
    ) -> Callable[
        [workflow_templates.CreateWorkflowTemplateRequest],
        Union[
            workflow_templates.WorkflowTemplate,
            Awaitable[workflow_templates.WorkflowTemplate],
        ],
    ]:
        """RPC stub for CreateWorkflowTemplate; provided by subclasses."""
        raise NotImplementedError()

    @property
    def get_workflow_template(
        self,
    ) -> Callable[
        [workflow_templates.GetWorkflowTemplateRequest],
        Union[
            workflow_templates.WorkflowTemplate,
            Awaitable[workflow_templates.WorkflowTemplate],
        ],
    ]:
        """RPC stub for GetWorkflowTemplate; provided by subclasses."""
        raise NotImplementedError()

    @property
    def instantiate_workflow_template(
        self,
    ) -> Callable[
        [workflow_templates.InstantiateWorkflowTemplateRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        """RPC stub for InstantiateWorkflowTemplate; provided by subclasses."""
        raise NotImplementedError()

    @property
    def instantiate_inline_workflow_template(
        self,
    ) -> Callable[
        [workflow_templates.InstantiateInlineWorkflowTemplateRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        """RPC stub for InstantiateInlineWorkflowTemplate; provided by subclasses."""
        raise NotImplementedError()

    @property
    def update_workflow_template(
        self,
    ) -> Callable[
        [workflow_templates.UpdateWorkflowTemplateRequest],
        Union[
            workflow_templates.WorkflowTemplate,
            Awaitable[workflow_templates.WorkflowTemplate],
        ],
    ]:
        """RPC stub for UpdateWorkflowTemplate; provided by subclasses."""
        raise NotImplementedError()

    @property
    def list_workflow_templates(
        self,
    ) -> Callable[
        [workflow_templates.ListWorkflowTemplatesRequest],
        Union[
            workflow_templates.ListWorkflowTemplatesResponse,
            Awaitable[workflow_templates.ListWorkflowTemplatesResponse],
        ],
    ]:
        """RPC stub for ListWorkflowTemplates; provided by subclasses."""
        raise NotImplementedError()

    @property
    def delete_workflow_template(
        self,
    ) -> Callable[
        [workflow_templates.DeleteWorkflowTemplateRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        """RPC stub for DeleteWorkflowTemplate; provided by subclasses."""
        raise NotImplementedError()
# Public API of this module.
__all__ = ("WorkflowTemplateServiceTransport",)
| |
# Copyright (C) 2011 Midokura KK
# Copyright (C) 2011 Nicira, Inc
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VIF drivers for libvirt."""
import copy
from oslo.config import cfg
from nova import exception
from nova.network import linux_net
from nova.network import model as network_model
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
LOG = logging.getLogger(__name__)

# libvirt VIF configuration options, registered under the [libvirt] group.
libvirt_vif_opts = [
    cfg.BoolOpt('use_virtio_for_bridges',
                default=True,
                help='Use virtio for bridge interfaces with KVM/QEMU',
                deprecated_group='DEFAULT',
                deprecated_name='libvirt_use_virtio_for_bridges'),
]

CONF = cfg.CONF
CONF.register_opts(libvirt_vif_opts, 'libvirt')
CONF.import_opt('virt_type', 'nova.virt.libvirt.driver', group='libvirt')
CONF.import_opt('use_ipv6', 'nova.netconf')

# Since libvirt 0.9.11, <interface type='bridge'>
# supports OpenVSwitch natively.
LIBVIRT_OVS_VPORT_VERSION = 9011

# Prefix used when deriving an ethernet device name from a VIF devname.
DEV_PREFIX_ETH = 'eth'
def is_vif_model_valid_for_virt(virt_type, vif_model):
    """Return True if *vif_model* may be used with *virt_type*.

    A ``None`` model is always valid: libvirt / the hypervisor will pick
    a default.  ``lxc`` and ``uml`` accept no explicit models.

    :raises: exception.UnsupportedVirtType for an unknown *virt_type*
        (only when an explicit model was requested).
    """
    # qemu and kvm accept the same set of NIC models; define it once
    # instead of duplicating the list.
    _qemu_kvm_models = ['virtio', 'ne2k_pci', 'pcnet', 'rtl8139', 'e1000']
    valid_models = {
        'qemu': _qemu_kvm_models,
        'kvm': _qemu_kvm_models,
        'xen': ['netfront', 'ne2k_pci', 'pcnet', 'rtl8139', 'e1000'],
        'lxc': [],
        'uml': [],
    }

    if vif_model is None:
        return True

    if virt_type not in valid_models:
        raise exception.UnsupportedVirtType(virt=virt_type)

    return vif_model in valid_models[virt_type]
class LibvirtBaseVIFDriver(object):
    """Base class for libvirt VIF drivers.

    Builds the guest-side interface config (NIC model / driver selection)
    common to all VIF types; host-side plug/unplug are no-ops here.
    """

    def __init__(self, get_connection):
        # Callable returning the libvirt connection; only invoked lazily
        # when the libvirt version is first queried.
        self.get_connection = get_connection
        self.libvirt_version = None

    def has_libvirt_version(self, want):
        """Return True if the connected libvirt version is >= *want*."""
        if self.libvirt_version is None:
            # Query once and cache for subsequent checks.
            conn = self.get_connection()
            self.libvirt_version = conn.getLibVersion()

        if self.libvirt_version >= want:
            return True

        return False

    def get_vif_devname(self, vif):
        """Return the host-side device name for *vif*.

        Falls back to a "nic"-prefixed name derived from the VIF id when
        the VIF carries no explicit ``devname``.
        """
        if 'devname' in vif:
            return vif['devname']
        return ("nic" + vif['id'])[:network_model.NIC_NAME_LEN]

    def get_vif_devname_with_prefix(self, vif, prefix):
        """Return the VIF devname with its 3-char prefix replaced by *prefix*."""
        devname = self.get_vif_devname(vif)
        return prefix + devname[3:]

    def get_config(self, instance, vif, image_meta, inst_type):
        """Build the base guest interface config (frontend model/driver).

        :raises: exception.UnsupportedHardware when the chosen model is
            invalid for the configured virt type.
        """
        conf = vconfig.LibvirtConfigGuestInterface()

        # Default to letting libvirt / the hypervisor choose the model
        model = None
        driver = None

        # If the user has specified a 'vif_model' against the
        # image then honour that model
        if image_meta:
            vif_model = image_meta.get('properties',
                                       {}).get('hw_vif_model')
            if vif_model is not None:
                model = vif_model

        # Else if the virt type is KVM/QEMU, use virtio according
        # to the global config parameter
        if (model is None and
                CONF.libvirt.virt_type in ('kvm', 'qemu') and
                CONF.libvirt.use_virtio_for_bridges):
            model = "virtio"

        # Workaround libvirt bug, where it mistakenly
        # enables vhost mode, even for non-KVM guests
        if model == "virtio" and CONF.libvirt.virt_type == "qemu":
            driver = "qemu"

        if not is_vif_model_valid_for_virt(CONF.libvirt.virt_type,
                                           model):
            raise exception.UnsupportedHardware(model=model,
                                                virt=CONF.libvirt.virt_type)

        designer.set_vif_guest_frontend_config(
            conf, vif['address'], model, driver)

        return conf

    def plug(self, instance, vif):
        # No host-side plumbing in the base class.
        pass

    def unplug(self, instance, vif):
        # No host-side teardown in the base class.
        pass
class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
"""Generic VIF driver for libvirt networking."""
def get_bridge_name(self, vif):
    """Return the name of the bridge backing *vif*'s network."""
    network = vif['network']
    return network['bridge']
def get_ovs_interfaceid(self, vif):
    """Return the OVS interface id for *vif*, falling back to the VIF id."""
    interfaceid = vif.get('ovs_interfaceid')
    return interfaceid if interfaceid else vif['id']
def get_br_name(self, iface_id):
    """Return the per-VIF qbr linux-bridge name for *iface_id*."""
    bridge_name = "qbr" + iface_id
    return bridge_name[:network_model.NIC_NAME_LEN]
def get_veth_pair_names(self, iface_id):
    """Return the (qvb, qvo) veth pair device names for *iface_id*."""
    limit = network_model.NIC_NAME_LEN
    return (("qvb%s" % iface_id)[:limit],
            ("qvo%s" % iface_id)[:limit])
def get_firewall_required(self, vif):
    """Return True when nova must apply its own firewall rules.

    Not required when neutron already filters the port, nor when the
    noop firewall driver is configured.
    """
    if vif.is_neutron_filtering_enabled():
        return False
    return CONF.firewall_driver != "nova.virt.firewall.NoopFirewallDriver"
def get_config_bridge(self, instance, vif, image_meta, inst_type):
    """Build the guest interface config for a linux-bridge VIF."""
    conf = super(LibvirtGenericVIFDriver, self).get_config(
        instance, vif, image_meta, inst_type)

    designer.set_vif_host_backend_bridge_config(
        conf, self.get_bridge_name(vif), self.get_vif_devname(vif))

    # nwfilter name derived from the instance name and the VIF MAC.
    mac_id = vif['address'].replace(':', '')
    filter_name = "nova-instance-" + instance['name'] + "-" + mac_id
    if self.get_firewall_required(vif):
        conf.filtername = filter_name
    designer.set_vif_bandwidth_config(conf, inst_type)

    return conf
def get_config_ovs_ethernet(self, instance, vif, image_meta, inst_type):
    """Build the guest interface config for a plain OVS ethernet VIF."""
    conf = super(LibvirtGenericVIFDriver, self).get_config(
        instance, vif, image_meta, inst_type)
    designer.set_vif_host_backend_ethernet_config(
        conf, self.get_vif_devname(vif))
    return conf
def get_config_ovs_bridge(self, instance, vif, image_meta, inst_type):
    """Build the interface config for a natively-handled OVS port."""
    conf = super(LibvirtGenericVIFDriver, self).get_config(
        instance, vif, image_meta, inst_type)
    designer.set_vif_host_backend_ovs_config(
        conf,
        self.get_bridge_name(vif),
        self.get_ovs_interfaceid(vif),
        self.get_vif_devname(vif))
    designer.set_vif_bandwidth_config(conf, inst_type)
    return conf
def get_config_ovs_hybrid(self, instance, vif, image_meta, inst_type):
    """Build a bridge-style config pointing at the per-VIF qbr bridge.

    The original *vif* is left untouched; a deep copy gets its bridge
    swapped for the hybrid linux bridge.
    """
    hybrid_vif = copy.deepcopy(vif)
    hybrid_vif['network']['bridge'] = self.get_br_name(vif['id'])
    return self.get_config_bridge(instance, hybrid_vif,
                                  image_meta, inst_type)
def get_config_ovs(self, instance, vif, image_meta, inst_type):
    """Dispatch to the OVS config strategy matching this deployment.

    Hybrid when nova firewalling (or hybrid plugging) is in play; native
    OVS bridge when libvirt is new enough; tap ethernet otherwise.
    """
    if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
        return self.get_config_ovs_hybrid(instance, vif, image_meta,
                                          inst_type)
    if self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION):
        return self.get_config_ovs_bridge(instance, vif, image_meta,
                                          inst_type)
    return self.get_config_ovs_ethernet(instance, vif, image_meta,
                                        inst_type)
def get_config_ivs_hybrid(self, instance, vif, image_meta, inst_type):
    """Build a bridge-style config pointing at the per-VIF qbr bridge.

    The original *vif* is left untouched; a deep copy gets its bridge
    swapped for the hybrid linux bridge.
    """
    hybrid_vif = copy.deepcopy(vif)
    hybrid_vif['network']['bridge'] = self.get_br_name(vif['id'])
    return self.get_config_bridge(instance, hybrid_vif,
                                  image_meta, inst_type)
def get_config_ivs_ethernet(self, instance, vif, image_meta, inst_type):
    """Build the guest interface config for a plain IVS ethernet VIF."""
    conf = super(LibvirtGenericVIFDriver, self).get_config(
        instance, vif, image_meta, inst_type)
    devname = self.get_vif_devname(vif)
    designer.set_vif_host_backend_ethernet_config(conf, devname)
    return conf
def get_config_ivs(self, instance, vif, image_meta, inst_type):
    """Choose the hybrid or the plain ethernet IVS config for *vif*."""
    wants_hybrid = (self.get_firewall_required(vif)
                    or vif.is_hybrid_plug_enabled())
    builder = (self.get_config_ivs_hybrid if wants_hybrid
               else self.get_config_ivs_ethernet)
    return builder(instance, vif, image_meta, inst_type)
def get_config_802qbg(self, instance, vif, image_meta, inst_type):
    """Build the guest interface config for an 802.1Qbg VIF.

    Backend parameters are taken from the VIF's ``qbg_params`` mapping.
    """
    conf = super(LibvirtGenericVIFDriver,
                 self).get_config(instance, vif,
                                  image_meta, inst_type)

    params = vif["qbg_params"]
    designer.set_vif_host_backend_802qbg_config(
        conf, vif['network'].get_meta('interface'),
        params['managerid'],
        params['typeid'],
        params['typeidversion'],
        params['instanceid'])

    designer.set_vif_bandwidth_config(conf, inst_type)

    return conf
def get_config_802qbh(self, instance, vif, image_meta, inst_type):
    """Build the guest interface config for an 802.1Qbh VIF.

    Backend parameters are taken from the VIF's ``qbh_params`` mapping.
    """
    conf = super(LibvirtGenericVIFDriver,
                 self).get_config(instance, vif,
                                  image_meta, inst_type)

    params = vif["qbh_params"]
    designer.set_vif_host_backend_802qbh_config(
        conf, vif['network'].get_meta('interface'),
        params['profileid'])

    designer.set_vif_bandwidth_config(conf, inst_type)

    return conf
def get_config_iovisor(self, instance, vif, image_meta, inst_type):
    """Build the guest interface config for an IOvisor-backed VIF."""
    conf = super(LibvirtGenericVIFDriver, self).get_config(
        instance, vif, image_meta, inst_type)
    devname = self.get_vif_devname(vif)
    designer.set_vif_host_backend_ethernet_config(conf, devname)
    designer.set_vif_bandwidth_config(conf, inst_type)
    return conf
def get_config_midonet(self, instance, vif, image_meta, inst_type):
    """Build the guest interface config for a MidoNet VIF."""
    conf = super(LibvirtGenericVIFDriver, self).get_config(
        instance, vif, image_meta, inst_type)
    devname = self.get_vif_devname(vif)
    designer.set_vif_host_backend_ethernet_config(conf, devname)
    return conf
def get_config_mlnx_direct(self, instance, vif, image_meta, inst_type):
    """Build the guest interface config for a Mellanox direct VIF."""
    conf = super(LibvirtGenericVIFDriver, self).get_config(
        instance, vif, image_meta, inst_type)
    eth_devname = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH)
    designer.set_vif_host_backend_direct_config(conf, eth_devname)
    designer.set_vif_bandwidth_config(conf, inst_type)
    return conf
def get_config(self, instance, vif, image_meta, inst_type):
    """Route *vif* to the config builder matching its ``type``.

    Raises NovaException when the type is missing or unknown.
    """
    vif_type = vif['type']

    LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s '
                'vif=%(vif)s'),
              {'vif_type': vif_type, 'instance': instance,
               'vif': vif})

    if vif_type is None:
        raise exception.NovaException(
            _("vif_type parameter must be present "
              "for this vif_driver implementation"))

    # Dispatch table replaces the long if/elif chain; every entry is a
    # bound method with the same (instance, vif, image_meta, inst_type)
    # signature.
    builders = {
        network_model.VIF_TYPE_BRIDGE: self.get_config_bridge,
        network_model.VIF_TYPE_OVS: self.get_config_ovs,
        network_model.VIF_TYPE_802_QBG: self.get_config_802qbg,
        network_model.VIF_TYPE_802_QBH: self.get_config_802qbh,
        network_model.VIF_TYPE_IVS: self.get_config_ivs,
        network_model.VIF_TYPE_IOVISOR: self.get_config_iovisor,
        network_model.VIF_TYPE_MLNX_DIRECT: self.get_config_mlnx_direct,
        network_model.VIF_TYPE_MIDONET: self.get_config_midonet,
    }
    builder = builders.get(vif_type)
    if builder is None:
        raise exception.NovaException(
            _("Unexpected vif_type=%s") % vif_type)
    return builder(instance, vif, image_meta, inst_type)
def plug_bridge(self, instance, vif):
    """Ensure that the bridge exists, and add VIF to it."""
    super(LibvirtGenericVIFDriver,
          self).plug(instance, vif)

    network = vif['network']
    # Only create host-side plumbing for single-host networks that
    # explicitly ask for a bridge.
    if (not network.get_meta('multi_host', False) and
            network.get_meta('should_create_bridge', False)):
        if network.get_meta('should_create_vlan', False):
            # VLAN case: the configured vlan_interface wins over the
            # network's own bridge interface.
            iface = CONF.vlan_interface or \
                network.get_meta('bridge_interface')
            LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'),
                      {'vlan': network.get_meta('vlan'),
                       'bridge': self.get_bridge_name(vif)},
                      instance=instance)
            linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
                network.get_meta('vlan'),
                self.get_bridge_name(vif),
                iface)
        else:
            # Flat case: analogous fallback for the interface choice.
            iface = CONF.flat_interface or \
                network.get_meta('bridge_interface')
            LOG.debug(_("Ensuring bridge %s"),
                      self.get_bridge_name(vif), instance=instance)
            linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
                self.get_bridge_name(vif),
                iface)
def plug_ovs_ethernet(self, instance, vif):
    """Create a tap device for *vif* and attach it as an OVS port.

    Used when libvirt is too old to attach OVS ports natively.
    """
    super(LibvirtGenericVIFDriver,
          self).plug(instance, vif)

    # Fix: dropped the unused local ``network = vif['network']`` that the
    # previous revision bound here without ever reading.
    iface_id = self.get_ovs_interfaceid(vif)
    dev = self.get_vif_devname(vif)
    linux_net.create_tap_dev(dev)
    linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
                                  dev, iface_id, vif['address'],
                                  instance['uuid'])
def plug_ovs_bridge(self, instance, vif):
    """No manual plugging required."""
    # libvirt attaches the OVS port itself (LIBVIRT_OVS_VPORT_VERSION+).
    super(LibvirtGenericVIFDriver,
          self).plug(instance, vif)
def plug_ovs_hybrid(self, instance, vif):
    """Plug using hybrid strategy

    Create a per-VIF linux bridge, then link that bridge to the OVS
    integration bridge via a veth device, setting up the other end
    of the veth device just like a normal OVS port. Then boot the
    VIF on the linux bridge using standard libvirt mechanisms.
    """
    super(LibvirtGenericVIFDriver,
          self).plug(instance, vif)

    iface_id = self.get_ovs_interfaceid(vif)
    br_name = self.get_br_name(vif['id'])
    v1_name, v2_name = self.get_veth_pair_names(vif['id'])

    if not linux_net.device_exists(br_name):
        # New qbr bridge: zero forward delay, STP off, and multicast
        # snooping disabled (tee is best-effort: exit code 1 tolerated).
        utils.execute('brctl', 'addbr', br_name, run_as_root=True)
        utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
        utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
        utils.execute('tee',
                      ('/sys/class/net/%s/bridge/multicast_snooping' %
                       br_name),
                      process_input='0',
                      run_as_root=True,
                      check_exit_code=[0, 1])

    if not linux_net.device_exists(v2_name):
        # Wire the veth pair: qvb side onto the qbr bridge, qvo side
        # onto the OVS integration bridge.
        linux_net._create_veth_pair(v1_name, v2_name)
        utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
        utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
        linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
                                      v2_name, iface_id, vif['address'],
                                      instance['uuid'])
def plug_ovs(self, instance, vif):
    """Plug *vif* via hybrid, native OVS bridge, or tap ethernet."""
    needs_hybrid = (self.get_firewall_required(vif)
                    or vif.is_hybrid_plug_enabled())
    if needs_hybrid:
        plug = self.plug_ovs_hybrid
    elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION):
        plug = self.plug_ovs_bridge
    else:
        plug = self.plug_ovs_ethernet
    plug(instance, vif)
def plug_ivs_ethernet(self, instance, vif):
    """Create a tap device for *vif* and attach it as an IVS port."""
    super(LibvirtGenericVIFDriver,
          self).plug(instance, vif)

    iface_id = self.get_ovs_interfaceid(vif)
    dev = self.get_vif_devname(vif)
    linux_net.create_tap_dev(dev)
    linux_net.create_ivs_vif_port(dev, iface_id, vif['address'],
                                  instance['uuid'])
def plug_ivs_hybrid(self, instance, vif):
    """Plug using hybrid strategy (same as OVS)

    Create a per-VIF linux bridge, then link that bridge to the OVS
    integration bridge via a veth device, setting up the other end
    of the veth device just like a normal IVS port. Then boot the
    VIF on the linux bridge using standard libvirt mechanisms.
    """
    super(LibvirtGenericVIFDriver,
          self).plug(instance, vif)
    iface_id = self.get_ovs_interfaceid(vif)
    br_name = self.get_br_name(vif['id'])
    v1_name, v2_name = self.get_veth_pair_names(vif['id'])
    # Idempotent bridge creation, mirroring plug_ovs_hybrid.
    if not linux_net.device_exists(br_name):
        utils.execute('brctl', 'addbr', br_name, run_as_root=True)
        # Single uplink: no loops possible, so no STP / forward delay.
        utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
        utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
        # Write '0' to disable multicast snooping; exit code 1 is
        # tolerated for kernels without this sysfs knob.
        utils.execute('tee',
                      ('/sys/class/net/%s/bridge/multicast_snooping' %
                       br_name),
                      process_input='0',
                      run_as_root=True,
                      check_exit_code=[0, 1])
    # Wire the bridge to IVS via a veth pair.
    if not linux_net.device_exists(v2_name):
        linux_net._create_veth_pair(v1_name, v2_name)
        utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
        utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
        linux_net.create_ivs_vif_port(v2_name, iface_id, vif['address'],
                                      instance['uuid'])
def plug_ivs(self, instance, vif):
    """Select and run the appropriate IVS plug strategy for this vif."""
    # Firewalling requires the intermediate linux-bridge setup.
    if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
        self.plug_ivs_hybrid(instance, vif)
        return
    self.plug_ivs_ethernet(instance, vif)
def plug_mlnx_direct(self, instance, vif):
    """Plug a Mellanox direct-assigned vif via the ebrctl helper."""
    super(LibvirtGenericVIFDriver,
          self).plug(instance, vif)
    network = vif['network']
    vnic_mac = vif['address']
    device_id = instance['uuid']
    # The Mellanox fabric name is carried in the network metadata.
    fabric = network['meta']['physical_network']
    dev_name = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH)
    try:
        utils.execute('ebrctl', 'add-port', vnic_mac, device_id, fabric,
                      network_model.VIF_TYPE_MLNX_DIRECT, dev_name,
                      run_as_root=True)
    except processutils.ProcessExecutionError:
        # Failure is logged rather than propagated.
        LOG.exception(_("Failed while plugging vif"), instance=instance)
def plug_802qbg(self, instance, vif):
    """802.1Qbg ports need no host-side setup beyond base bookkeeping."""
    super(LibvirtGenericVIFDriver,
          self).plug(instance, vif)
def plug_802qbh(self, instance, vif):
    """802.1Qbh ports need no host-side setup beyond base bookkeeping."""
    super(LibvirtGenericVIFDriver,
          self).plug(instance, vif)
def plug_midonet(self, instance, vif):
    """Plug into MidoNet's network port

    Bind the vif to a MidoNet virtual port.
    """
    super(LibvirtGenericVIFDriver,
          self).plug(instance, vif)
    dev = self.get_vif_devname(vif)
    port_id = vif['id']
    try:
        # Create the tap device, then hand it to MidoNet's mm-ctl tool
        # to bind it to the virtual port.
        linux_net.create_tap_dev(dev)
        utils.execute('mm-ctl', '--bind-port', port_id, dev,
                      run_as_root=True)
    except processutils.ProcessExecutionError:
        LOG.exception(_("Failed while plugging vif"), instance=instance)
def plug_iovisor(self, instance, vif):
    """Plug using PLUMgrid IO Visor Driver

    Connect a network device to their respective
    Virtual Domain in PLUMgrid Platform.
    """
    super(LibvirtGenericVIFDriver,
          self).plug(instance, vif)
    dev = self.get_vif_devname(vif)
    iface_id = vif['id']
    linux_net.create_tap_dev(dev)
    net_id = vif['network']['id']
    tenant_id = instance["project_id"]
    try:
        # Register the port with the IO Visor gateway, then bring it up
        # tagged with the network and tenant identifiers.
        utils.execute('ifc_ctl', 'gateway', 'add_port', dev,
                      run_as_root=True)
        utils.execute('ifc_ctl', 'gateway', 'ifup', dev,
                      'access_vm',
                      vif['network']['label'] + "_" + iface_id,
                      vif['address'], 'pgtag2=%s' % net_id,
                      'pgtag1=%s' % tenant_id, run_as_root=True)
    except processutils.ProcessExecutionError:
        LOG.exception(_("Failed while plugging vif"), instance=instance)
def plug(self, instance, vif):
    """Dispatch to the plug_* handler matching the vif's type."""
    vif_type = vif['type']
    LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s '
                'vif=%(vif)s'),
              {'vif_type': vif_type, 'instance': instance,
               'vif': vif})
    if vif_type is None:
        raise exception.NovaException(
            _("vif_type parameter must be present "
              "for this vif_driver implementation"))
    # Table-driven dispatch instead of an if/elif chain.
    handlers = {
        network_model.VIF_TYPE_BRIDGE: self.plug_bridge,
        network_model.VIF_TYPE_OVS: self.plug_ovs,
        network_model.VIF_TYPE_802_QBG: self.plug_802qbg,
        network_model.VIF_TYPE_802_QBH: self.plug_802qbh,
        network_model.VIF_TYPE_IVS: self.plug_ivs,
        network_model.VIF_TYPE_IOVISOR: self.plug_iovisor,
        network_model.VIF_TYPE_MLNX_DIRECT: self.plug_mlnx_direct,
        network_model.VIF_TYPE_MIDONET: self.plug_midonet,
    }
    handler = handlers.get(vif_type)
    if handler is None:
        raise exception.NovaException(
            _("Unexpected vif_type=%s") % vif_type)
    handler(instance, vif)
def unplug_bridge(self, instance, vif):
    """No manual unplugging required."""
    # Libvirt tears down the bridge attachment itself.
    super(LibvirtGenericVIFDriver,
          self).unplug(instance, vif)
def unplug_ovs_ethernet(self, instance, vif):
    """Unplug the VIF by deleting the port from the bridge."""
    super(LibvirtGenericVIFDriver,
          self).unplug(instance, vif)
    try:
        linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
                                      self.get_vif_devname(vif))
    except processutils.ProcessExecutionError:
        # Best-effort teardown: log the failure and carry on.
        LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_ovs_bridge(self, instance, vif):
    """No manual unplugging required."""
    # Libvirt detaches its OVS virtual port on its own.
    super(LibvirtGenericVIFDriver,
          self).unplug(instance, vif)
def unplug_ovs_hybrid(self, instance, vif):
    """UnPlug using hybrid strategy

    Unhook port from OVS, unhook port from bridge, delete
    bridge, and delete both veth devices.
    """
    super(LibvirtGenericVIFDriver,
          self).unplug(instance, vif)
    try:
        br_name = self.get_br_name(vif['id'])
        v1_name, v2_name = self.get_veth_pair_names(vif['id'])
        # Only tear down the linux bridge if it still exists; the OVS
        # port below is removed regardless, so a half-torn-down VIF
        # still gets fully cleaned up.
        if linux_net.device_exists(br_name):
            utils.execute('brctl', 'delif', br_name, v1_name,
                          run_as_root=True)
            utils.execute('ip', 'link', 'set', br_name, 'down',
                          run_as_root=True)
            utils.execute('brctl', 'delbr', br_name,
                          run_as_root=True)
        linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
                                      v2_name)
    except processutils.ProcessExecutionError:
        LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_ovs(self, instance, vif):
    """Select and run the OVS unplug strategy matching the plug path."""
    if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
        self.unplug_ovs_hybrid(instance, vif)
        return
    if self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION):
        self.unplug_ovs_bridge(instance, vif)
        return
    self.unplug_ovs_ethernet(instance, vif)
def unplug_ivs_ethernet(self, instance, vif):
    """Unplug the VIF by deleting the port from the bridge."""
    super(LibvirtGenericVIFDriver,
          self).unplug(instance, vif)
    try:
        linux_net.delete_ivs_vif_port(self.get_vif_devname(vif))
    except processutils.ProcessExecutionError:
        # Best-effort teardown: log the failure and carry on.
        LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_ivs_hybrid(self, instance, vif):
    """UnPlug using hybrid strategy (same as OVS)

    Unhook port from IVS, unhook port from bridge, delete
    bridge, and delete both veth devices.
    """
    super(LibvirtGenericVIFDriver,
          self).unplug(instance, vif)
    try:
        br_name = self.get_br_name(vif['id'])
        v1_name, v2_name = self.get_veth_pair_names(vif['id'])
        # Mirror unplug_ovs_hybrid: only touch the linux bridge if it
        # still exists.  Previously a missing bridge made 'brctl delif'
        # raise, the exception was swallowed below, and
        # delete_ivs_vif_port() never ran -- leaking the IVS port.
        if linux_net.device_exists(br_name):
            utils.execute('brctl', 'delif', br_name, v1_name,
                          run_as_root=True)
            utils.execute('ip', 'link', 'set', br_name, 'down',
                          run_as_root=True)
            utils.execute('brctl', 'delbr', br_name, run_as_root=True)
        linux_net.delete_ivs_vif_port(v2_name)
    except processutils.ProcessExecutionError:
        LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_ivs(self, instance, vif):
    """Select and run the IVS unplug strategy matching the plug path."""
    if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
        self.unplug_ivs_hybrid(instance, vif)
        return
    self.unplug_ivs_ethernet(instance, vif)
def unplug_mlnx_direct(self, instance, vif):
    """Remove a Mellanox direct-assigned vif via the ebrctl helper."""
    super(LibvirtGenericVIFDriver,
          self).unplug(instance, vif)
    network = vif['network']
    vnic_mac = vif['address']
    fabric = network['meta']['physical_network']
    try:
        utils.execute('ebrctl', 'del-port', fabric,
                      vnic_mac, run_as_root=True)
    except processutils.ProcessExecutionError:
        LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_802qbg(self, instance, vif):
    """802.1Qbg ports need no host-side teardown beyond base bookkeeping."""
    super(LibvirtGenericVIFDriver,
          self).unplug(instance, vif)
def unplug_802qbh(self, instance, vif):
    """802.1Qbh ports need no host-side teardown beyond base bookkeeping."""
    super(LibvirtGenericVIFDriver,
          self).unplug(instance, vif)
def unplug_midonet(self, instance, vif):
    """Unplug from MidoNet network port

    Unbind the vif from a MidoNet virtual port.
    """
    super(LibvirtGenericVIFDriver,
          self).unplug(instance, vif)
    dev = self.get_vif_devname(vif)
    port_id = vif['id']
    try:
        # Unbind first, then remove the now-orphaned tap device.
        utils.execute('mm-ctl', '--unbind-port', port_id,
                      run_as_root=True)
        linux_net.delete_net_dev(dev)
    except processutils.ProcessExecutionError:
        LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug_iovisor(self, instance, vif):
    """Unplug using PLUMgrid IO Visor Driver

    Delete network device and to their respective
    connection to the Virtual Domain in PLUMgrid Platform.
    """
    super(LibvirtGenericVIFDriver,
          self).unplug(instance, vif)
    iface_id = vif['id']
    dev = self.get_vif_devname(vif)
    try:
        # Reverse of plug_iovisor: ifdown, deregister the port, then
        # delete the tap device.
        utils.execute('ifc_ctl', 'gateway', 'ifdown',
                      dev, 'access_vm',
                      vif['network']['label'] + "_" + iface_id,
                      vif['address'], run_as_root=True)
        utils.execute('ifc_ctl', 'gateway', 'del_port', dev,
                      run_as_root=True)
        linux_net.delete_net_dev(dev)
    except processutils.ProcessExecutionError:
        LOG.exception(_("Failed while unplugging vif"), instance=instance)
def unplug(self, instance, vif):
    """Dispatch to the unplug_* handler matching the vif's type."""
    vif_type = vif['type']
    LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s '
                'vif=%(vif)s'),
              {'vif_type': vif_type, 'instance': instance,
               'vif': vif})
    if vif_type is None:
        raise exception.NovaException(
            _("vif_type parameter must be present "
              "for this vif_driver implementation"))
    # Table-driven dispatch mirroring plug().
    handlers = {
        network_model.VIF_TYPE_BRIDGE: self.unplug_bridge,
        network_model.VIF_TYPE_OVS: self.unplug_ovs,
        network_model.VIF_TYPE_802_QBG: self.unplug_802qbg,
        network_model.VIF_TYPE_802_QBH: self.unplug_802qbh,
        network_model.VIF_TYPE_IVS: self.unplug_ivs,
        network_model.VIF_TYPE_IOVISOR: self.unplug_iovisor,
        network_model.VIF_TYPE_MLNX_DIRECT: self.unplug_mlnx_direct,
        network_model.VIF_TYPE_MIDONET: self.unplug_midonet,
    }
    handler = handlers.get(vif_type)
    if handler is None:
        raise exception.NovaException(
            _("Unexpected vif_type=%s") % vif_type)
    handler(instance, vif)
# The following classes were removed in the transition from Havana to
# Icehouse, but may still be referenced in configuration files. The
# following stubs allow those configurations to work while logging a
# deprecation warning.
class _LibvirtDeprecatedDriver(LibvirtGenericVIFDriver):
    """Base class for deprecated VIF-driver aliases.

    Logs a deprecation warning at construction time, then behaves
    exactly like LibvirtGenericVIFDriver.
    """

    def __init__(self, *args, **kwargs):
        # Warn at construction so operators notice as soon as the
        # configured driver class is instantiated.
        LOG.warn('VIF driver \"%s\" is marked as deprecated and will be '
                 'removed in the Juno release.',
                 self.__class__.__name__)
        super(_LibvirtDeprecatedDriver, self).__init__(*args, **kwargs)
class LibvirtBridgeDriver(_LibvirtDeprecatedDriver):
    """Deprecated alias kept so old configuration files still load."""
    pass
class LibvirtOpenVswitchDriver(_LibvirtDeprecatedDriver):
    """Deprecated alias kept so old configuration files still load."""
    pass
class LibvirtHybridOVSBridgeDriver(_LibvirtDeprecatedDriver):
    """Deprecated alias kept so old configuration files still load."""
    pass
class LibvirtOpenVswitchVirtualPortDriver(_LibvirtDeprecatedDriver):
    """Deprecated alias kept so old configuration files still load."""
    pass
class NeutronLinuxBridgeVIFDriver(_LibvirtDeprecatedDriver):
    """Deprecated alias kept so old configuration files still load."""
    pass
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generator for C style prototypes and definitions """
import glob
import os
import re
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_node import IDLAttribute, IDLNode
from idl_ast import IDLAst
from idl_option import GetOption, Option, ParseOptions
from idl_outfile import IDLOutFile
from idl_parser import ParseFiles
from idl_c_proto import CGen, GetNodeComments, CommentLines, Comment
from idl_generator import Generator, GeneratorByFile
# Command-line options for this generator, registered with the shared
# idl_option machinery and read back via GetOption().
Option('dstroot', 'Base directory of output', default=os.path.join('..', 'c'))
Option('guard', 'Include guard prefix', default=os.path.join('ppapi', 'c'))
Option('out', 'List of output files', default='')
def GetOutFileName(filenode, relpath=None, prefix=None):
  """Derive the output header filename for an IDL file node.

  The IDL file's extension becomes '.h'; an optional prefix is prepended
  to the basename and an optional relpath is prepended to the whole path.
  """
  dirname, basename = os.path.split(filenode.GetProperty('NAME'))
  basename = os.path.splitext(basename)[0] + '.h'
  if prefix:
    basename = prefix + basename
  if dirname:
    basename = os.path.join(dirname, basename)
  if relpath:
    basename = os.path.join(relpath, basename)
  return basename
def WriteGroupMarker(out, node, last_group):
  """Emit doxygen @addtogroup/@} markers around runs of same-class nodes.

  Returns the name of the group left open (or None), to be passed back in
  as last_group on the next call.
  """
  grouped = ('Typedef', 'Interface', 'Struct', 'Enum')
  # Close the previous group when the node class changes.
  if last_group and last_group != node.cls:
    text = CommentLines(['*', ' @}', '']) + '\n'
  else:
    text = '\n'
  if node.cls in grouped:
    # Open a new group only on a class transition.
    if last_group != node.cls:
      text += CommentLines(['*', ' @addtogroup %ss' % node.cls, ' @{', ''])
    last_group = node.cls
  else:
    last_group = None
  out.Write(text)
  return last_group
def GenerateHeader(out, filenode, releases):
cgen = CGen()
pref = ''
do_comments = True
# Generate definitions.
last_group = None
top_types = ['Typedef', 'Interface', 'Struct', 'Enum', 'Inline']
for node in filenode.GetListOf(*top_types):
# Skip if this node is not in this release
if not node.InReleases(releases):
print "Skiping %s" % node
continue
# End/Start group marker
if do_comments:
last_group = WriteGroupMarker(out, node, last_group)
if node.IsA('Inline'):
item = node.GetProperty('VALUE')
# If 'C++' use __cplusplus wrapper
if node.GetName() == 'cc':
item = '#ifdef __cplusplus\n%s\n#endif // __cplusplus\n\n' % item
# If not C++ or C, then skip it
elif not node.GetName() == 'c':
continue
if item: out.Write(item)
continue
#
# Otherwise we are defining a file level object, so generate the
# correct document notation.
#
item = cgen.Define(node, releases, prefix=pref, comment=True)
if not item: continue
asize = node.GetProperty('assert_size()')
if asize:
name = '%s%s' % (pref, node.GetName())
if node.IsA('Struct'):
form = 'PP_COMPILE_ASSERT_STRUCT_SIZE_IN_BYTES(%s, %s);\n'
elif node.IsA('Enum'):
if node.GetProperty('notypedef'):
form = 'PP_COMPILE_ASSERT_ENUM_SIZE_IN_BYTES(%s, %s);\n'
else:
form = 'PP_COMPILE_ASSERT_SIZE_IN_BYTES(%s, %s);\n'
else:
form = 'PP_COMPILE_ASSERT_SIZE_IN_BYTES(%s, %s);\n'
item += form % (name, asize[0])
if item: out.Write(item)
if last_group:
out.Write(CommentLines(['*',' @}', '']) + '\n')
class HGen(GeneratorByFile):
  """Generator that writes one C header (.h) per IDL file."""

  def __init__(self):
    Generator.__init__(self, 'C Header', 'cgen', 'Generate the C headers.')

  def GenerateFile(self, filenode, releases, options):
    """Generate (or remove) the header for a single IDL file.

    Returns the result of IDLOutFile.Close(), or False when the file is
    entirely outside the requested release range.
    """
    savename = GetOutFileName(filenode, GetOption('dstroot'))
    my_min, my_max = filenode.GetMinMax(releases)
    # Out of range: delete any previously generated header so stale
    # output does not linger in the tree.
    if my_min > releases[-1] or my_max < releases[0]:
      if os.path.isfile(savename):
        print "Removing stale %s for this range." % filenode.GetName()
        os.remove(os.path.realpath(savename))
      return False
    out = IDLOutFile(savename)
    self.GenerateHead(out, filenode, releases, options)
    self.GenerateBody(out, filenode, releases, options)
    self.GenerateTail(out, filenode, releases, options)
    return out.Close()

  def GenerateHead(self, out, filenode, releases, options):
    """Write copyright, include guard, #includes and interface macros."""
    __pychecker__ = 'unusednames=options'
    cgen = CGen()
    gpath = GetOption('guard')
    # Include guard derived from the output path, e.g. PPAPI_C_FOO_H_.
    def_guard = GetOutFileName(filenode, relpath=gpath)
    def_guard = def_guard.replace(os.sep, '_').replace('.', '_').upper() + '_'
    # By convention the first two children are the license block and the
    # file-level comment (asserted here).
    cright_node = filenode.GetChildren()[0]
    assert(cright_node.IsA('Copyright'))
    fileinfo = filenode.GetChildren()[1]
    assert(fileinfo.IsA('Comment'))
    out.Write('%s\n' % cgen.Copyright(cright_node))
    # Wrap the From ... modified ... comment if it would be >80 characters.
    from_text = 'From %s' % (
        filenode.GetProperty('NAME').replace(os.sep, '/'))
    modified_text = 'modified %s.' % (
        filenode.GetProperty('DATETIME'))
    if len(from_text) + len(modified_text) < 74:
      out.Write('/* %s %s */\n\n' % (from_text, modified_text))
    else:
      out.Write('/* %s,\n * %s\n */\n\n' % (from_text, modified_text))
    out.Write('#ifndef %s\n#define %s\n\n' % (def_guard, def_guard))
    # Generate set of includes
    deps = set()
    for release in releases:
      deps |= filenode.GetDeps(release)
    includes = set([])
    for dep in deps:
      depfile = dep.GetProperty('FILE')
      if depfile:
        includes.add(depfile)
    includes = [GetOutFileName(
        include, relpath=gpath).replace(os.sep, '/') for include in includes]
    includes.append('ppapi/c/pp_macros.h')
    # Assume we need stdint if we "include" C or C++ code
    if filenode.GetListOf('Include'):
      includes.append('ppapi/c/pp_stdint.h')
    includes = sorted(set(includes))
    # Never include the header we are generating.
    cur_include = GetOutFileName(filenode, relpath=gpath).replace(os.sep, '/')
    for include in includes:
      if include == cur_include: continue
      out.Write('#include "%s"\n' % include)
    # If we are generating a single release, then create a macro for the
    # highest available release number.
    if filenode.GetProperty('NAME').endswith('pp_macros.idl'):
      releasestr = GetOption('release')
      if releasestr:
        release_numbers = re.findall('\d+', releasestr)
        if release_numbers:
          out.Write('\n#define PPAPI_RELEASE %s\n' % release_numbers[0])
    # Generate all interface defines
    out.Write('\n')
    for node in filenode.GetListOf('Interface'):
      idefs = ''
      macro = cgen.GetInterfaceMacro(node)
      unique = node.GetUniqueReleases(releases)
      # Skip this interface if there are no matching versions
      if not unique: continue
      # One versioned define per release, plus an unversioned alias for
      # the newest version.
      for rel in unique:
        version = node.GetVersion(rel)
        name = cgen.GetInterfaceString(node, version)
        strver = str(version).replace('.', '_')
        idefs += cgen.GetDefine('%s_%s' % (macro, strver), '"%s"' % name)
      idefs += cgen.GetDefine(macro, '%s_%s' % (macro, strver)) + '\n'
      out.Write(idefs)
    # Generate the @file comment
    out.Write('%s\n' % Comment(fileinfo, prefix='*\n @file'))

  def GenerateBody(self, out, filenode, releases, options):
    """Write the per-node C definitions (delegates to GenerateHeader)."""
    __pychecker__ = 'unusednames=options'
    GenerateHeader(out, filenode, releases)

  def GenerateTail(self, out, filenode, releases, options):
    """Close the include guard opened by GenerateHead."""
    __pychecker__ = 'unusednames=options,releases'
    gpath = GetOption('guard')
    def_guard = GetOutFileName(filenode, relpath=gpath)
    def_guard = def_guard.replace(os.sep, '_').replace('.', '_').upper() + '_'
    out.Write('#endif  /* %s */\n\n' % def_guard)
# Module-level generator instance used by Main() below; presumably it also
# self-registers with the generator framework via Generator.__init__ --
# NOTE(review): confirm against idl_generator.
hgen = HGen()
def Main(args):
  """Self-test entry point: regenerate and diff the golden test headers.

  Returns 0 when both golden-file sets pass, 1 otherwise.
  """
  # Default invocation will verify the golden files are unchanged.
  failed = 0
  if not args:
    args = ['--wnone', '--diff', '--test', '--dstroot=.']
  ParseOptions(args)
  # First golden set: single-release generation (M14).
  idldir = os.path.split(sys.argv[0])[0]
  idldir = os.path.join(idldir, 'test_cgen', '*.idl')
  filenames = glob.glob(idldir)
  ast = ParseFiles(filenames)
  if hgen.GenerateRelease(ast, 'M14', {}):
    print "Golden file for M14 failed."
    failed = 1
  else:
    print "Golden file for M14 passed."
  # Second golden set: release-range generation (M13-M15).
  idldir = os.path.split(sys.argv[0])[0]
  idldir = os.path.join(idldir, 'test_cgen_range', '*.idl')
  filenames = glob.glob(idldir)
  ast = ParseFiles(filenames)
  if hgen.GenerateRange(ast, ['M13', 'M14', 'M15'], {}):
    print "Golden file for M13-M15 failed."
    failed = 1
  else:
    print "Golden file for M13-M15 passed."
  return failed
# Run the golden-file self test when invoked directly.
if __name__ == '__main__':
  sys.exit(Main(sys.argv[1:]))
| |
import codecs
import os
import bisect
import re
import copy
import logging
from collections import OrderedDict
# webcolors is optional: it is only needed to translate *named* colors in
# SRT <font color="..."> tags.  Catch ImportError specifically -- the old
# bare 'except:' silently masked any error raised inside the module (and
# even KeyboardInterrupt).
try:
    import webcolors
except ImportError:
    webcolors = None
from tools import Timecodes
from common import PrassError, zip, map, itervalues, iterkeys, iteritems, py2_unicode_compatible
# Canonical ASS section header names, used as keys when assembling and
# looking up script sections.
STYLES_SECTION = u"[V4+ Styles]"
EVENTS_SECTION = u"[Events]"
SCRIPT_INFO_SECTION = u"[Script Info]"
def parse_ass_time(string):
    """Convert an ASS timestamp (H:MM:SS.CC) to integer milliseconds."""
    match = re.match(r"(\d+):(\d+):(\d+)\.(\d+)", string)
    hours, minutes, seconds, centiseconds = [int(part) for part in match.groups()]
    return ((hours * 60 + minutes) * 60 + seconds) * 1000 + centiseconds * 10
def parse_srt_time(string):
    """Convert an SRT timestamp (HH:MM:SS,mmm) to integer milliseconds."""
    match = re.match(r"(\d+):(\d+):(\d+)\,(\d+)", string)
    hours, minutes, seconds, milliseconds = [int(part) for part in match.groups()]
    return ((hours * 60 + minutes) * 60 + seconds) * 1000 + milliseconds
def srt_line_to_ass(line, box=False):
    """Convert one SRT cue's text to ASS dialogue text.

    Newlines become \\N, <i>/<b>/<u>/<s> pairs become ASS override tags,
    and <font color="..."> becomes a \\c color override (named colors
    require the optional webcolors module; without it a warning is logged
    and the tag is left as-is).

    The *box* argument (an extended-SRT positioning box, or False) is
    accepted for callers that parsed one but is currently unused.

    Fix: color-override strings are now raw literals -- '\\c' is an
    invalid escape sequence in a plain string and warns on modern
    CPython; the produced text is byte-identical.
    """
    line = line.replace('\n', r'\N')
    if '<' in line:
        for tag in ('i', 'b', 'u', 's'):
            line = line.replace('<%s>' % tag, '{\\%s1}' % tag)
            line = line.replace('</%s>' % tag, '{\\%s0}' % tag)
        while '<font color="' in line:
            pre, color, post = re.match(r'(.*)\<font color="(.*?)"\>(.*)', line).groups()
            if color.startswith('#'):
                # Hex colors: #RRGGBB.
                r, g, b = color[1:3], color[3:5], color[5:]
            elif webcolors:
                r, g, b = map(lambda x: "%02X" % x, webcolors.name_to_rgb(color))
            else:
                logging.warning('Can\'t parse color "%s", please install webcolors module.' % color)
                break
            # ASS colors are stored as &HBBGGRR&.
            line = pre + r'{\c&H%s%s%s&}' % (b, g, r) + post
        # Closing a font tag resets the primary color to white.
        line = line.replace('</font>', r'{\c&HFFFFFF&}')
    return line
def format_time(ms):
    """Format milliseconds as an ASS timestamp (H:MM:SS.CC)."""
    total_cs = int(ms / 10.0)
    hours, remainder = divmod(total_cs, 360000)
    minutes, remainder = divmod(remainder, 6000)
    seconds, centiseconds = divmod(remainder, 100)
    return u'{0}:{1:02d}:{2:02d}.{3:02d}'.format(
        int(hours), int(minutes), int(seconds), int(centiseconds))
class AssStyle(object):
    """A single ASS style: its name plus the raw comma-separated field list."""

    def __init__(self, name, definition):
        self.name = name
        self.definition = definition

    @classmethod
    def from_string(cls, text):
        """Parse the payload of a 'Style:' line ("Name,field,field,...")."""
        style_name, style_def = text.split(',', 1)
        return cls(name=style_name.strip(), definition=style_def.strip())

    def resample(self, from_width, from_height, to_width, to_height, scale_border_and_shadow=True):
        """Rescale size/spacing/margin fields from one script resolution to another."""
        height_ratio = to_height / float(from_height)
        width_ratio = to_width / float(from_width)
        source_ar = from_width / float(from_height)
        target_ar = to_width / float(to_height)
        stretch = 1.0
        # Aspect ratio changed by more than 1%: compensate with a
        # horizontal stretch of ScaleX.
        if abs(source_ar - target_ar) / target_ar > 0.01:
            stretch = target_ar / source_ar
        fields = self.definition.split(",")
        fields[1] = "%i" % (round(int(fields[1]) * height_ratio))  # font size
        fields[10] = "%g" % (float(fields[10]) * stretch)  # scale x
        fields[12] = "%g" % (float(fields[12]) * width_ratio)  # spacing
        if scale_border_and_shadow:
            fields[15] = "%g" % (float(fields[15]) * height_ratio)  # outline
            fields[16] = "%g" % (float(fields[16]) * height_ratio)  # shadow
        fields[18] = "%i" % (round(float(fields[18]) * width_ratio))  # margin l
        fields[19] = "%i" % (round(float(fields[19]) * width_ratio))  # margin r
        fields[20] = "%i" % (round(float(fields[20]) * height_ratio))  # margin v
        self.definition = u",".join(fields)
@py2_unicode_compatible
class AssEvent(object):
    """A single [Events] line (Dialogue or Comment) of an ASS script."""

    # __slots__ keeps per-event memory small; scripts commonly contain
    # thousands of events.
    __slots__ = (
        "kind",
        "layer",
        "start",
        "end",
        "style",
        "actor",
        "margin_left",
        "margin_right",
        "margin_vertical",
        "effect",
        "text"
    )

    def __init__(self, start, end, text, kind='Dialogue', layer=0, style='Default', actor='',
                 margin_left=0, margin_right=0, margin_vertical=0, effect=''):
        # start/end are integer milliseconds (see parse_ass_time).
        self.kind = kind
        self.layer = layer
        self.start = start
        self.end = end
        self.style = style
        self.actor = actor
        self.margin_left = margin_left
        self.margin_right = margin_right
        self.margin_vertical = margin_vertical
        self.effect = effect
        self.text = text

    @classmethod
    def from_text(cls, text):
        """Parse a raw 'Dialogue:'/'Comment:' script line.

        NOTE(review): margins and effect are kept as strings here, while
        __init__ defaults them to ints -- apparently harmless since they
        are only formatted back out, but confirm before comparing them
        numerically.
        """
        kind, _, rest = text.partition(u":")
        # The text field may itself contain commas, hence maxsplit=9.
        split = [x.strip() for x in rest.split(',', 9)]
        return cls(
            kind=kind,
            layer=int(split[0]),
            start=parse_ass_time(split[1]),
            end=parse_ass_time(split[2]),
            style=split[3],
            actor=split[4],
            margin_left=split[5],
            margin_right=split[6],
            margin_vertical=split[7],
            effect=split[8],
            text=split[9]
        )

    def __str__(self):
        # Must round-trip with from_text / the [Events] Format line.
        return u'{0}: {1},{2},{3},{4},{5},{6},{7},{8},{9},{10}'.format(self.kind, self.layer,
                                                                       format_time(self.start),
                                                                       format_time(self.end),
                                                                       self.style, self.actor,
                                                                       self.margin_left, self.margin_right,
                                                                       self.margin_vertical, self.effect,
                                                                       self.text)

    @property
    def is_comment(self):
        return self.kind.lower() == u'comment'

    def collides_with(self, other):
        """Return True when the two events' time ranges overlap."""
        if self.start < other.start:
            return self.end > other.start
        return self.start < other.end
class StylesSection(object):
    """[V4+ Styles] section: styles keyed by name, insertion-ordered."""

    def __init__(self):
        self.styles = OrderedDict()

    def parse_line(self, text):
        """Parse a 'Style:' line; 'Format:' headers are ignored because the
        field layout is assumed to be the standard V4+ one."""
        if text.startswith(u'Format:'):
            return
        style = AssStyle.from_string(text.partition(u":")[2])
        # Later styles with the same name silently replace earlier ones.
        self.styles[style.name] = style

    def format_section(self):
        """Render the fixed Format header followed by every style line."""
        lines = [u'Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding']
        lines.extend(u'Style: {0},{1}'.format(style.name, style.definition) for style in itervalues(self.styles))
        return lines
class EventsSection(object):
    """[Events] section: an ordered list of AssEvent objects."""

    def __init__(self):
        self.events = []

    def parse_line(self, text):
        """Append a parsed Dialogue/Comment line; 'Format:' headers are ignored."""
        if text.startswith(u'Format:'):
            return
        self.events.append(AssEvent.from_text(text))

    def format_section(self):
        """Render the fixed Format header followed by every event."""
        header = u'Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text'
        return [header] + [u"%s" % event for event in self.events]
class ScriptInfoSection(object):
    """[Script Info] section: ordered name/value properties plus comments."""

    class PropertyLine(object):
        """One section line: either a 'Name: value' property or a ';' comment."""

        def __init__(self, name, value):
            self.name = name
            # value is None for comment lines; the full comment text then
            # lives in name.
            self.value = value

        @classmethod
        def from_string(cls, string_value):
            if string_value.startswith(';'):
                return cls(string_value, None)
            else:
                name, _, value = string_value.partition(':')
                return cls(name, value.strip())

        def to_string(self):
            if self.value is None:
                return self.name
            return u"{0}: {1}".format(self.name, self.value)

    def __init__(self):
        # Keyed by property name (comments key on their full text),
        # preserving first-seen order for round-tripping.
        self._lines_dict = OrderedDict()

    def parse_line(self, text):
        """Parse one line; a repeated property name overwrites in place."""
        prop = self.PropertyLine.from_string(text)
        self._lines_dict[prop.name] = prop

    def format_section(self):
        return [x.to_string() for x in itervalues(self._lines_dict)]

    def get_property(self, name):
        """Return the value of *name*; raises KeyError when absent."""
        if name not in self._lines_dict:
            raise KeyError("Property {0} not found".format(name))
        return self._lines_dict[name].value

    def set_property(self, name, value):
        """Set *name* to str(value), appending the line if new."""
        if name not in self._lines_dict:
            self._lines_dict[name] = self.PropertyLine(name, str(value))
        else:
            self._lines_dict[name].value = str(value)

    def get_resolution(self):
        """Return (PlayResX, PlayResY) as ints, or (None, None) when unset."""
        try:
            width = int(self.get_property("PlayResX"))
            height = int(self.get_property("PlayResY"))
            return width, height
        except KeyError:
            return None, None

    def set_resolution(self, width, height):
        self.set_property("PlayResX", width)
        self.set_property("PlayResY", height)

    def get_scaled_border_property(self):
        # Missing property defaults to True (borders scale with
        # resolution) -- NOTE(review): default chosen by this code;
        # confirm it matches the target renderer's behavior.
        try:
            return self.get_property("ScaledBorderAndShadow") == "yes"
        except KeyError:
            return True
class GenericSection(object):
    """Fallback handler for unknown sections: stores lines verbatim."""

    def __init__(self):
        self.lines = []

    def parse_line(self, line):
        self.lines.append(line)

    def format_section(self):
        # Lines are emitted exactly as read, in original order.
        return self.lines
class AttachmentSection(GenericSection):
    """Handler for [Fonts]/[Graphics] sections containing UU-encoded attachments."""

    def parse_line(self, line):
        """Consume one line; return True while the attachment continues.

        A falsy return tells the parser the attachment ended and the line
        stream should fall back to normal section handling.
        """
        if not line:
            return False
        self.lines.append(line)
        # Heuristic borrowed from Aegisub: data lines are 1..80 chars, and
        # a new embedded file may start with a fontname/filename header.
        continues_data = 0 < len(line) <= 80
        starts_new_file = line.startswith(("fontname: ", "filename: "))
        return continues_data or starts_new_file
class AssScript(object):
    """An ASS script modeled as an ordered list of (section name, section).

    Fixes in this revision:
    - parse errors were reported via ``e.message``, which was removed in
      Python 3 (deprecated since 2.6) and raised AttributeError instead of
      the intended PrassError; the exception is now formatted directly.
    - ``from_srt_stream`` parsed the extended-SRT positioning box and then
      dropped it; it is now forwarded to ``srt_line_to_ass``.
    """

    def __init__(self, sections_list):
        super(AssScript, self).__init__()
        self._sections_list = sections_list

    @property
    def _events(self):
        return self._find_section(EVENTS_SECTION).events

    @_events.setter
    def _events(self, value):
        self._find_section(EVENTS_SECTION).events = value

    @property
    def _styles(self):
        return self._find_section(STYLES_SECTION).styles

    def _find_section(self, name):
        """Return the section object registered under *name*, or None."""
        return next((section for section_name, section in self._sections_list if section_name == name), None)

    @classmethod
    def from_ass_stream(cls, file_object):
        """Parse an ASS script from an iterable of text lines."""
        sections = []
        current_section = None
        force_last_section = False
        for idx, line in enumerate(file_object):
            line = line.strip()
            # required because a line might be both a part of an attachment
            # and a valid header
            if force_last_section:
                try:
                    force_last_section = current_section.parse_line(line)
                    continue
                except Exception as e:
                    # Format the exception itself: BaseException.message no
                    # longer exists on Python 3.
                    raise PrassError(u"That's some invalid ASS script: {0}".format(e))
            if not line:
                continue
            low = line.lower()
            if low == u'[v4+ styles]':
                current_section = StylesSection()
                sections.append((line, current_section))
            elif low == u'[events]':
                current_section = EventsSection()
                sections.append((line, current_section))
            elif low == u'[script info]':
                current_section = ScriptInfoSection()
                sections.append((line, current_section))
            elif low == u'[graphics]' or low == u'[fonts]':
                current_section = AttachmentSection()
                sections.append((line, current_section))
            elif re.match(r'^\s*\[.+?\]\s*$', low):
                # Unknown section header: keep its lines verbatim.
                current_section = GenericSection()
                sections.append((line, current_section))
            elif not current_section:
                raise PrassError(u"That's some invalid ASS script (no parse function at line {0})".format(idx))
            else:
                try:
                    force_last_section = current_section.parse_line(line)
                except Exception as e:
                    # Same Python 3 fix as above.
                    raise PrassError(u"That's some invalid ASS script: {0}".format(e))
        return cls(sections)

    @classmethod
    def from_ass_file(cls, path):
        """Load an ASS script from *path* (UTF-8, BOM tolerated)."""
        try:
            with codecs.open(path, encoding='utf-8-sig') as script:
                return cls.from_ass_stream(script)
        except IOError:
            raise PrassError("Script {0} not found".format(path))

    @classmethod
    def from_srt_stream(cls, file_object):
        """Build an ASS script from an SRT stream, adding a Default style."""
        styles_section = StylesSection()
        events_section = EventsSection()
        for srt_event in file_object.read().replace('\r\n', '\n').split('\n\n'):
            if not srt_event:
                continue
            # lines[0] is the cue number, lines[1] the timing line,
            # lines[2] the (possibly multi-line) cue text.  Assumes every
            # cue has a text line -- a malformed cue still raises.
            lines = srt_event.split('\n', 2)
            times = lines[1].split('-->')
            if 'X' in times[1] or 'Y' in times[1]:
                # Extended SRT carries a positioning box after the end time.
                times[1], box = times[1].strip().split(' ', 1)
            else:
                box = False
            # Forward the parsed box (it used to be parsed and discarded).
            text = srt_line_to_ass(lines[2], box)
            events_section.events.append(AssEvent(
                start=parse_srt_time(times[0].rstrip()),
                end=parse_srt_time(times[1].lstrip()),
                text=text
            ))
        styles_section.styles[u'Default'] = AssStyle(u'Default', 'Arial,20,&H00FFFFFF,&H000000FF,&H00000000,&H00000000,0,0,0,0,100,100,0,0,1,2,2,2,10,10,10,1')
        script_info = ScriptInfoSection()
        script_info.parse_line(u'; Script converted by Prass')
        script_info.set_resolution(384, 288)
        return cls([
            (SCRIPT_INFO_SECTION, script_info),
            (STYLES_SECTION, styles_section),
            (EVENTS_SECTION, events_section),
        ])

    def to_ass_stream(self, file_object):
        """Serialize every section, in order, to *file_object*."""
        lines = []
        for name, section in self._sections_list:
            lines.append(name)
            lines.extend(section.format_section())
            lines.append(u"")
        file_object.write("\n".join(lines))

    def to_ass_file(self, path):
        with codecs.open(path, encoding='utf-8-sig', mode='w') as script:
            self.to_ass_stream(script)

    def scale_to_reference(self, reference, forced_resolution=None):
        """Resample all styles to the reference script's (or forced) resolution."""
        src_width, src_height = self._find_section(SCRIPT_INFO_SECTION).get_resolution()
        scale_border_and_shadow = self._find_section(SCRIPT_INFO_SECTION).get_scaled_border_property()
        if forced_resolution:
            dst_width, dst_height = forced_resolution
        else:
            dst_width, dst_height = reference._find_section(SCRIPT_INFO_SECTION).get_resolution()
        # Only resample when both resolutions are fully known and non-zero.
        if all((src_width, src_height, dst_width, dst_height)):
            for style in itervalues(self._styles):
                style.resample(src_width, src_height, dst_width, dst_height, scale_border_and_shadow)
            self._find_section(SCRIPT_INFO_SECTION).set_resolution(dst_width, dst_height)
        else:
            logging.info("Couldn't determine resolution, resampling disabled")

    def append_styles(self, other_script, clean, resample, forced_resolution=None):
        """Copy styles from *other_script*, optionally clearing/resampling first."""
        if clean:
            self._styles.clear()
        if resample:
            # Resample a deep copy so the donor script is left untouched.
            other_script_resampled = copy.deepcopy(other_script)
            other_script_resampled.scale_to_reference(self, forced_resolution)
            if forced_resolution:
                self.scale_to_reference(self, forced_resolution)
        else:
            other_script_resampled = other_script
        for style in itervalues(other_script_resampled._styles):
            self._styles[style.name] = copy.deepcopy(style)

    def sort_events(self, key, descending):
        self._events.sort(key=key, reverse=descending)

    def tpp(self, styles, lead_in, lead_out, max_overlap, max_gap, adjacent_bias,
            keyframes_list, timecodes, kf_before_start, kf_after_start, kf_before_end, kf_after_end):
        """Timing post-processor: lead-in/out, gap/overlap snapping, keyframe snapping.

        All times are milliseconds; *adjacent_bias* is a percentage.
        """
        def get_closest_kf(frame, keyframes):
            # Nearest keyframe by frame number (ties go to the later one).
            idx = bisect.bisect_left(keyframes, frame)
            if idx == len(keyframes):
                return keyframes[-1]
            if idx == 0 or keyframes[idx] - frame < frame - (keyframes[idx - 1]):
                return keyframes[idx]
            return keyframes[idx - 1]

        events_iter = (e for e in self._events if not e.is_comment)
        if styles:
            styles = set(s.lower() for s in styles)
            events_iter = (e for e in events_iter if e.style.lower() in styles)
        events_list = sorted(events_iter, key=lambda x: x.start)
        broken = next((e for e in events_list if e.start > e.end), None)
        if broken:
            raise PrassError("One of the lines in the file ({0}) has negative duration. Aborting.".format(broken))

        if lead_in:
            # Extend starts backwards, but never into a non-overlapping
            # earlier event.
            sorted_by_end = sorted(events_list, key=lambda x: x.end)
            for idx, event in enumerate(sorted_by_end):
                initial = max(event.start - lead_in, 0)
                for other in reversed(sorted_by_end[:idx]):
                    if other.end <= initial:
                        break
                    if not event.collides_with(other):
                        initial = max(initial, other.end)
                event.start = initial

        if lead_out:
            # Extend ends forwards, but never into a non-overlapping later
            # event.
            for idx, event in enumerate(events_list):
                initial = event.end + lead_out
                for other in events_list[idx:]:
                    if other.start > initial:
                        break
                    if not event.collides_with(other):
                        initial = min(initial, other.start)
                event.end = initial

        if max_overlap or max_gap:
            # Snap small gaps/overlaps between adjacent lines to a single
            # point, biased between the two by adjacent_bias percent.
            bias = adjacent_bias / 100.0
            for previous, current in zip(events_list, events_list[1:]):
                distance = current.start - previous.end
                if (distance < 0 and -distance <= max_overlap) or (distance > 0 and distance <= max_gap):
                    new_time = previous.end + distance * bias
                    current.start = new_time
                    previous.end = new_time

        if kf_before_start or kf_after_start or kf_before_end or kf_after_end:
            for event in events_list:
                start_frame = timecodes.get_frame_number(event.start, timecodes.TIMESTAMP_START)
                end_frame = timecodes.get_frame_number(event.end, timecodes.TIMESTAMP_END)

                closest_frame = get_closest_kf(start_frame, keyframes_list)
                closest_time = timecodes.get_frame_time(closest_frame, timecodes.TIMESTAMP_START)
                if (end_frame > closest_frame >= start_frame and closest_time - event.start <= kf_after_start) or \
                        (closest_frame <= start_frame and event.start - closest_time <= kf_before_start):
                    event.start = max(0, closest_time)

                # End snaps to the frame just before the keyframe.
                closest_frame = get_closest_kf(end_frame, keyframes_list) - 1
                closest_time = timecodes.get_frame_time(closest_frame, timecodes.TIMESTAMP_END)
                if (start_frame < closest_frame <= end_frame and event.end - closest_time <= kf_before_end) or \
                        (closest_frame >= end_frame and closest_time - event.end <= kf_after_end):
                    event.end = closest_time

    def cleanup(self, drop_comments, drop_empty_lines, drop_unused_styles, drop_actors, drop_effects, drop_spacing, drop_sections):
        """Apply the requested cleanup passes to events, styles and sections."""
        if drop_comments:
            self._events = [e for e in self._events if not e.is_comment]
        if drop_empty_lines:
            self._events = [e for e in self._events if e.text]
        if drop_unused_styles:
            used_styles = set()
            for event in self._events:
                used_styles.add(event.style)
                # \r<style> overrides inside the text also count as usage.
                for override_block in re.findall(r"{([^{}]*\\r[^{}]*)}", event.text):
                    for style in re.findall(r"\\r([^}\\]+)", override_block):
                        used_styles.add(style)
            for style_name in list(iterkeys(self._styles)):
                if style_name not in used_styles:
                    del self._styles[style_name]
        if drop_actors:
            for event in self._events:
                event.actor = ''
        if drop_effects:
            for event in self._events:
                event.effect = ''
        if drop_spacing:
            for event in self._events:
                event.text = re.sub(r"(\s|\\N|\\n)+", " ", event.text)
        if drop_sections:
            self._sections_list = [x for x in self._sections_list if x[0] not in set(drop_sections)]

    def shift(self, shift, shift_start, shift_end, multiplier):
        """Shift and/or scale event times; shifted times are clamped at 0."""
        for event in self._events:
            if shift_start:
                event.start = max(event.start + shift, 0)
            if shift_end:
                event.end = max(event.end + shift, 0)
            if multiplier != 1:
                event.start *= multiplier
                event.end *= multiplier
| |
"""Expression Intrinsics and math functions in TVM."""
# pylint: disable=redefined-builtin
from __future__ import absolute_import as _abs
from ._ffi.function import register_func as _register_func
from . import make as _make
from .api import convert, const
from .expr import Call as _Call
from .schedule import Buffer as _Buffer
def _pack_buffer(buf):
    """Pack a Buffer into a tvm_stack_make_array intrinsic call.

    Builds the shape stack (and the strides stack when the buffer has
    explicit strides, 0 otherwise), then emits the array-packing
    intrinsic expected by packed-function callees.
    """
    assert buf.shape
    shape_expr = _make.Call("handle", "tvm_stack_make_shape", buf.shape,
                            _Call.Intrinsic, None, 0)
    if buf.strides:
        strides_expr = _make.Call("handle", "tvm_stack_make_shape", buf.strides,
                                  _Call.Intrinsic, None, 0)
    else:
        strides_expr = 0
    return _make.Call("handle", "tvm_stack_make_array",
                      [buf.data, shape_expr, strides_expr, len(buf.shape),
                       const(0, dtype=buf.dtype), buf.elem_offset],
                      _Call.Intrinsic, None, 0)
def call_packed(*args):
    """Build expression by call an external packed function.

    Arguments may be Expr or Buffer. An Expr is passed through as the
    corresponding POD value; a Buffer is packed so the PackedFunc
    receives a TVMArrayHandle valid for the duration of the callback
    (an NDArray when the PackedFunc is a Python callback).

    Parameters
    ----------
    args : list of Expr or Buffer.
        Positional arguments.

    Returns
    -------
    call : Expr
        The call expression.

    See Also
    --------
    tvm.extern : Create tensor with extern function call.
    """
    packed_args = [_pack_buffer(arg) if isinstance(arg, _Buffer) else arg
                   for arg in args]
    return _make.Call("int32", "tvm_call_packed", packed_args,
                      _Call.Intrinsic, None, 0)
def call_pure_intrin(dtype, func_name, *args):
    """Build expression by calling a pure intrinsic function.

    Intrinsics can be overloaded with multiple data types via
    the intrinsic translation rule.

    Parameters
    ----------
    dtype : str
        The data type of the result.
    func_name: str
        The intrinsic function name.
    args : list
        Positional arguments.

    Returns
    -------
    call : Expr
        The call expression.
    """
    # A single convert() suffices; the original converted the tuple twice.
    args = convert(args)
    return _make.Call(
        dtype, func_name, args, _Call.PureIntrinsic, None, 0)
def call_intrin(dtype, func_name, *args):
    """Build expression by calling an intrinsic function.

    Intrinsics can be overloaded with multiple data types via
    the intrinsic translation rule.

    Parameters
    ----------
    dtype : str
        The data type of the result.
    func_name: str
        The intrinsic function name.
    args : list
        Positional arguments.

    Returns
    -------
    call : Expr
        The call expression.
    """
    # A single convert() suffices; the original converted the tuple twice.
    args = convert(args)
    return _make.Call(
        dtype, func_name, args, _Call.Intrinsic, None, 0)
def call_pure_extern(dtype, func_name, *args):
    """Build expression by calling a pure extern function.

    Parameters
    ----------
    dtype : str
        The data type of the result.
    func_name: str
        The extern function name.
    args : list
        Positional arguments.

    Returns
    -------
    call : Expr
        The call expression.
    """
    call_args = convert(args)
    return _make.Call(dtype, func_name, call_args, _Call.PureExtern, None, 0)
def call_extern(dtype, func_name, *args):
    """Build expression by calling a extern function.

    Parameters
    ----------
    dtype : str
        The data type of the result.
    func_name: str
        The extern function name.
    args : list
        Positional arguments.

    Returns
    -------
    call : Expr
        The call expression.
    """
    call_args = convert(args)
    return _make.Call(dtype, func_name, call_args, _Call.Extern, None, 0)
def call_llvm_intrin(dtype, name, *args):
    """Build expression by calling an llvm intrinsic function

    Parameters
    ----------
    dtype : str
        The data type of the result.
    name : str
        The name of the llvm intrinsic function.
    args : list
        Positional arguments.

    Returns
    -------
    call : Expr
        The call expression.
    """
    # imported lazily to avoid a circular module dependency
    import tvm
    intrin_id = tvm.codegen.llvm_lookup_intrinsic_id(name)
    assert intrin_id != 0, "%s is not an LLVM intrinsic" % name
    return call_pure_intrin(
        dtype, 'llvm_intrin', tvm.const(intrin_id, 'uint32'), *args)
def exp(x):
    """Compute the element-wise exponential of x.

    Parameters
    ----------
    x : Expr
        Input argument.

    Returns
    -------
    y : Expr
        The result.
    """
    return call_pure_intrin(x.dtype, "exp", x)
def tanh(x):
    """Compute the element-wise hyperbolic tangent of x.

    Parameters
    ----------
    x : Expr
        Input argument.

    Returns
    -------
    y : Expr
        The result.
    """
    return call_pure_intrin(x.dtype, "tanh", x)
def sigmoid(x):
    """Compute the element-wise sigmoid of x.

    Parameters
    ----------
    x : Expr
        Input argument.

    Returns
    -------
    y : Expr
        The result.
    """
    return call_pure_intrin(x.dtype, "sigmoid", x)
def log(x):
    """Compute the element-wise natural logarithm of x.

    Parameters
    ----------
    x : Expr
        Input argument.

    Returns
    -------
    y : Expr
        The result.
    """
    return call_pure_intrin(x.dtype, "log", x)
def sqrt(x):
    """Take square root of input x.

    (Original docstring said "Take log", a copy-paste error.)

    Parameters
    ----------
    x : Expr
        Input argument.

    Returns
    -------
    y : Expr
        The result.
    """
    return call_pure_intrin(x.dtype, "sqrt", x)
def floor(x):
    """Compute the element-wise floor of a float input x.

    Parameters
    ----------
    x : Expr
        Input argument.

    Returns
    -------
    y : Expr
        The result.
    """
    return call_pure_intrin(x.dtype, "floor", x)
def ceil(x):
    """Compute the element-wise ceiling of a float input x.

    Parameters
    ----------
    x : Expr
        Input argument.

    Returns
    -------
    y : Expr
        The result.
    """
    return call_pure_intrin(x.dtype, "ceil", x)
def trunc(x):
    """Truncate x toward zero, element-wise.

    The truncated value of a scalar is the nearest integer that is
    closer to zero than the scalar itself.

    Parameters
    ----------
    x : Expr
        Input argument.

    Returns
    -------
    y : Expr
        The result.
    """
    return call_pure_intrin(x.dtype, "trunc", x)
def abs(x):
    """Compute the element-wise absolute value of x.

    Note: intentionally shadows the ``abs`` builtin inside this module.

    Parameters
    ----------
    x : Expr
        Input argument.

    Returns
    -------
    y : Expr
        The result.
    """
    return _make.abs(x)
def round(x):
    """Round x to the nearest integer, element-wise.

    Note: intentionally shadows the ``round`` builtin inside this module.

    Parameters
    ----------
    x : Expr
        Input argument.

    Returns
    -------
    y : Expr
        The result.
    """
    return call_pure_intrin(x.dtype, "round", x)
def power(x, y):
    """Raise x to the power y, element-wise.

    Parameters
    ----------
    x : Expr
        Input argument.
    y : Expr
        The exponent

    Returns
    -------
    z : Expr
        The result.
    """
    return call_pure_intrin(x.dtype, "pow", x, y)
def popcount(x):
    """Count the set bits of x, element-wise.

    Parameters
    ----------
    x : Expr
        Input argument.

    Returns
    -------
    y : Expr
        The result.
    """
    return call_pure_intrin(x.dtype, "popcount", x)
# Intrinsic rule related code
def register_intrin_rule(target, intrin, f=None, override=False):
    """Register an intrinsic function generation rule.

    Intrinsic generation rules are callbacks the code generator uses to
    obtain device-specific calls; this is shorthand for
    :code:`register_func("tvm.intrin.rule.%s.%s" % (target, intrin), f, override)`.
    TVM may pre-register rules in the backend; use this to change the
    translation behavior or add new rules at runtime.

    Parameters
    ----------
    target : str
        The name of codegen target.
    intrin : str
        The name of the instrinsic.
    f : function, optional
        The function to be registered.
    override: boolean optional
        Whether override existing entry.

    Returns
    -------
    fregister : function
        Register function if f is not specified.

    Examples
    --------
    The following code registers exp expansion rule for opencl.

    .. code-block:: python

        register_intrin_rule("opencl", "exp", my_exp_rule, override=True)
    """
    key = "tvm.intrin.rule.%s.%s" % (target, intrin)
    return _register_func(key, f, override)
def _rule_float_suffix(op):
    """Intrinsic rule: Add float suffix if it is float32.

    This is an example intrinsic generation rule.

    Parameters
    ----------
    op : Expr
        The call expression of original intrinsic.

    Returns
    -------
    ret : Expr
        The translated intrinsic rule.
        Return same op if no translation is possible.

    See Also
    --------
    register_intrin_rule : The registeration function for intrin rule.
    """
    if op.dtype == "float64":
        return call_pure_extern(op.dtype, op.name, *op.args)
    if op.dtype == "float32":
        # C math convention: float32 variants carry an 'f' suffix (expf, ...)
        return call_pure_extern(op.dtype, "%sf" % op.name, *op.args)
    return op
def _rule_float_direct(op):
    """Intrinsic rule: Directly call pure extern function for floats.

    This is an example intrinsic generation rule.

    Parameters
    ----------
    op : Expr
        The call expression of original intrinsic.

    Returns
    -------
    ret : Expr or None
        The translated intrinsic rule.
        Returns None if no translation is possible (the original
        docstring incorrectly claimed the same op was returned).

    See Also
    --------
    register_intrin_rule : The registeration function for intrin rule.
    """
    if str(op.dtype).startswith("float"):
        return call_pure_extern(op.dtype, op.name, *op.args)
    return None
# opencl: exp maps directly onto the extern float function of the same name
register_intrin_rule("opencl", "exp", _rule_float_direct, override=True)
# default: exp gets the C math 'f' suffix for float32 (exp -> expf)
register_intrin_rule("default", "exp", _rule_float_suffix, override=True)
# default: expand sigmoid into 1 / (1 + exp(-x))
register_intrin_rule("default", "sigmoid", lambda op: 1.0 / (1.0 + exp(-op.args[0])))
| |
import sys, os, inspect
from abc import ABCMeta, abstractmethod
from ctypes import *
import math
MY_DIR = os.path.dirname(os.path.abspath(inspect.getframeinfo(inspect.currentframe())[0]))
HELPER_DIR = os.path.abspath(os.path.join(MY_DIR, '..', 'helpers'))
sys.path.append(HELPER_DIR)
import dhlog
from dhcore import *
from dhapp import *
class _API:
    """ctypes bridge to the native dheng engine shared library.

    All native entry points are loaded and bound once by init() and
    exposed as static attributes of this class.
    """
    # becomes True after the shared library is loaded and all bindings set
    is_init = False
    @staticmethod
    def init(debug = False):
        """Load the engine shared library and declare its C signatures.

        Parameters
        ----------
        debug : bool
            When True, load the debug build ('-dbg' suffixed binary).
        """
        if _API.is_init:
            return
        postfix = ''
        if debug:
            postfix = '-dbg'
        # NOTE(review): only win32 and linux are handled; on any other
        # platform 'shlib' stays unbound and the load below raises
        # NameError -- confirm whether more platforms should be supported.
        if sys.platform == 'win32':
            shlib = 'dheng' + postfix + '.dll'
        elif sys.platform == 'linux':
            shlib = 'libdheng' + postfix + '.so'
        # load library
        try:
            dhenglib = cdll.LoadLibrary(shlib)
        except:
            # load failure is fatal: log the underlying error and exit
            dhlog.Log.warn(str(sys.exc_info()[1]))
            dhlog.Log.fatal('could not load dynamic library %s' % shlib)
            sys.exit(-1)
        dhlog.Log.msgline('module "%s" loaded' % shlib, dhlog.TERM_GREEN)
        # engine.h
        _API.eng_init = dhenglib.eng_init
        _API.eng_init.restype = c_int
        _API.eng_init.argtypes = [POINTER(InitParams)]
        _API.eng_release = dhenglib.eng_release
        _API.eng_update = dhenglib.eng_update
        _API.eng_send_guimsgs = dhenglib.eng_send_guimsgs
        _API.eng_send_guimsgs.argtypes = [c_byte, c_uint]
        _API.eng_get_frametime = dhenglib.eng_get_frametime
        _API.eng_get_frametime.restype = c_float
        _API.eng_get_sharedir = dhenglib.eng_get_sharedir
        _API.eng_get_sharedir.restype = c_char_p
        # scene-mgr.h
        _API.scn_create_scene = dhenglib.scn_create_scene
        _API.scn_create_scene.restype = c_uint
        _API.scn_create_scene.argtypes = [c_char_p]
        _API.scn_destroy_scene = dhenglib.scn_destroy_scene
        _API.scn_destroy_scene.argtypes = [c_uint]
        _API.scn_findscene = dhenglib.scn_findscene
        _API.scn_findscene.restype = c_uint
        _API.scn_findscene.argtypes = [c_char_p]
        _API.scn_create_obj = dhenglib.scn_create_obj
        _API.scn_create_obj.restype = c_void_p
        _API.scn_create_obj.argtypes = [c_uint, c_char_p, c_uint]
        _API.scn_destroy_obj = dhenglib.scn_destroy_obj
        _API.scn_destroy_obj.argtypes = [c_void_p]
        _API.scn_findobj = dhenglib.scn_findobj
        _API.scn_findobj.restype = c_uint
        _API.scn_findobj.argtypes = [c_uint, c_char_p]
        _API.scn_getobj = dhenglib.scn_getobj
        _API.scn_getobj.restype = c_void_p
        _API.scn_getobj.argtypes = [c_uint, c_uint]
        _API.scn_clear = dhenglib.scn_clear
        _API.scn_clear.argtypes = [c_uint]
        _API.scn_setactive = dhenglib.scn_setactive
        _API.scn_setactive.argtypes = [c_uint]
        _API.scn_getactive = dhenglib.scn_getactive
        _API.scn_getactive.restype = c_uint
        _API.scn_setsize = dhenglib.scn_setsize
        _API.scn_setsize.argtypes = [c_uint, POINTER(Vec3), POINTER(Vec3)]
        _API.scn_getsize = dhenglib.scn_getsize
        _API.scn_getsize.argtypes = [c_uint, POINTER(Vec3), POINTER(Vec3)]
        # gfx.h
        _API.gfx_set_gridcallback = dhenglib.gfx_set_gridcallback
        _API.gfx_set_gridcallback.argtypes = [c_int]
        _API.gfx_resize = dhenglib.gfx_resize
        _API.gfx_resize.argtypes = [c_uint, c_uint]
        # cmp-mgr.h
        _API.cmp_findtype = dhenglib.cmp_findtype
        _API.cmp_findtype.restype = c_void_p
        _API.cmp_findtype.argtypes = [c_ushort]
        _API.cmp_getname = dhenglib.cmp_getname
        _API.cmp_getname.restype = c_char_p
        _API.cmp_getname.argtypes = [c_void_p]
        _API.cmp_create_instance = dhenglib.cmp_create_instance
        _API.cmp_create_instance.restype = c_ulonglong
        _API.cmp_create_instance.argtypes = [c_void_p, c_void_p, c_uint, c_ulonglong, c_uint]
        _API.cmp_destroy_instance = dhenglib.cmp_destroy_instance
        _API.cmp_destroy_instance.argtypes = [c_ulonglong]
        _API.cmp_findinstance_bytype_inobj = dhenglib.cmp_findinstance_bytype_inobj
        _API.cmp_findinstance_bytype_inobj.restype = c_ulonglong
        _API.cmp_findinstance_bytype_inobj.argtypes = [c_void_p, c_ushort]
        _API.cmp_debug_add = dhenglib.cmp_debug_add
        _API.cmp_debug_add.argtypes = [c_ulonglong]
        _API.cmp_debug_remove = dhenglib.cmp_debug_remove
        _API.cmp_debug_remove.argtypes = [c_ulonglong]
        # generic component value accessors: setters take (cmp, name, value),
        # getters take (out, cmp, name) except cmp_value_gets which also
        # takes the output buffer size
        _API.cmp_value_set4f = dhenglib.cmp_value_set4f
        _API.cmp_value_set4f.restype = c_int
        _API.cmp_value_set4f.argtypes = [c_ulonglong, c_char_p, POINTER(c_float)]
        _API.cmp_value_get4f = dhenglib.cmp_value_get4f
        _API.cmp_value_get4f.restype = c_int
        _API.cmp_value_get4f.argtypes = [POINTER(c_float), c_ulonglong, c_char_p]
        _API.cmp_value_setf = dhenglib.cmp_value_setf
        _API.cmp_value_setf.restype = c_int
        _API.cmp_value_setf.argtypes = [c_ulonglong, c_char_p, c_float]
        _API.cmp_value_getf = dhenglib.cmp_value_getf
        _API.cmp_value_getf.restype = c_int
        _API.cmp_value_getf.argtypes = [POINTER(c_float), c_ulonglong, c_char_p]
        _API.cmp_value_setb = dhenglib.cmp_value_setb
        _API.cmp_value_setb.restype = c_int
        _API.cmp_value_setb.argtypes = [c_ulonglong, c_char_p, c_int]
        _API.cmp_value_getb = dhenglib.cmp_value_getb
        _API.cmp_value_getb.restype = c_int
        _API.cmp_value_getb.argtypes = [POINTER(c_int), c_ulonglong, c_char_p]
        _API.cmp_value_setui = dhenglib.cmp_value_setui
        _API.cmp_value_setui.restype = c_int
        _API.cmp_value_setui.argtypes = [c_ulonglong, c_char_p, c_uint]
        _API.cmp_value_getui = dhenglib.cmp_value_getui
        _API.cmp_value_getui.restype = c_int
        _API.cmp_value_getui.argtypes = [POINTER(c_uint), c_ulonglong, c_char_p]
        _API.cmp_value_set3f = dhenglib.cmp_value_set3f
        _API.cmp_value_set3f.restype = c_int
        _API.cmp_value_set3f.argtypes = [c_ulonglong, c_char_p, POINTER(c_float)]
        _API.cmp_value_get3f = dhenglib.cmp_value_get3f
        _API.cmp_value_get3f.restype = c_int
        _API.cmp_value_get3f.argtypes = [POINTER(c_float), c_ulonglong, c_char_p]
        _API.cmp_value_set2f = dhenglib.cmp_value_set2f
        _API.cmp_value_set2f.restype = c_int
        _API.cmp_value_set2f.argtypes = [c_ulonglong, c_char_p, POINTER(c_float)]
        _API.cmp_value_get2f = dhenglib.cmp_value_get2f
        _API.cmp_value_get2f.restype = c_int
        _API.cmp_value_get2f.argtypes = [POINTER(c_float), c_ulonglong, c_char_p]
        _API.cmp_value_sets = dhenglib.cmp_value_sets
        _API.cmp_value_sets.restype = c_int
        _API.cmp_value_sets.argtypes = [c_ulonglong, c_char_p, c_char_p]
        _API.cmp_value_gets = dhenglib.cmp_value_gets
        _API.cmp_value_gets.restype = c_int
        _API.cmp_value_gets.argtypes = [c_char_p, c_uint, c_ulonglong, c_char_p]
        # cmp-xform.h
        _API.cmp_xform_setpos = dhenglib.cmp_xform_setpos
        _API.cmp_xform_setpos.argtypes = [c_void_p, POINTER(Vec3)]
        _API.cmp_xform_setrot_quat = dhenglib.cmp_xform_setrot_quat
        _API.cmp_xform_setrot_quat.argtypes = [c_void_p, POINTER(Quat)]
        _API.cmp_xform_getpos = dhenglib.cmp_xform_getpos
        _API.cmp_xform_getpos.restype = POINTER(Vec3)
        _API.cmp_xform_getpos.argtypes = [c_void_p, POINTER(Vec3)]
        _API.cmp_xform_getrot = dhenglib.cmp_xform_getrot
        _API.cmp_xform_getrot.restype = POINTER(Quat)
        _API.cmp_xform_getrot.argtypes = [c_void_p, POINTER(Quat)]
        # cmp-anim.h
        _API.cmp_anim_getclipname = dhenglib.cmp_anim_getclipname
        _API.cmp_anim_getclipname.restype = c_char_p
        _API.cmp_anim_getclipname.argtypes = [c_ulonglong, c_uint]
        _API.cmp_anim_isplaying = dhenglib.cmp_anim_isplaying
        _API.cmp_anim_isplaying.restype = c_uint
        _API.cmp_anim_isplaying.argtypes = [c_ulonglong]
        _API.cmp_anim_getclipcnt = dhenglib.cmp_anim_getclipcnt
        _API.cmp_anim_getclipcnt.restype = c_uint
        _API.cmp_anim_getclipcnt.argtypes = [c_ulonglong]
        _API.cmp_anim_getframecnt = dhenglib.cmp_anim_getframecnt
        _API.cmp_anim_getframecnt.restype = c_uint
        _API.cmp_anim_getframecnt.argtypes = [c_ulonglong]
        _API.cmp_anim_getfps = dhenglib.cmp_anim_getfps
        _API.cmp_anim_getfps.restype = c_uint
        _API.cmp_anim_getfps.argtypes = [c_ulonglong]
        _API.cmp_anim_getcurframe = dhenglib.cmp_anim_getcurframe
        _API.cmp_anim_getcurframe.restype = c_uint
        _API.cmp_anim_getcurframe.argtypes = [c_ulonglong]
        _API.cmp_anim_getbonecnt = dhenglib.cmp_anim_getbonecnt
        _API.cmp_anim_getbonecnt.restype = c_uint
        _API.cmp_anim_getbonecnt.argtypes = [c_ulonglong]
        _API.cmp_anim_getbonename = dhenglib.cmp_anim_getbonename
        _API.cmp_anim_getbonename.restype = c_char_p
        _API.cmp_anim_getbonename.argtypes = [c_ulonglong, c_uint]
        # cmp-animchar.h
        _API.cmp_animchar_getparamtype = dhenglib.cmp_animchar_getparamtype
        _API.cmp_animchar_getparamtype.restype = c_uint
        _API.cmp_animchar_getparamtype.argtypes = [c_ulonglong, c_char_p]
        _API.cmp_animchar_getparamb = dhenglib.cmp_animchar_getparamb
        _API.cmp_animchar_getparamb.restype = c_int
        _API.cmp_animchar_getparamb.argtypes = [c_ulonglong, c_char_p]
        _API.cmp_animchar_getparami = dhenglib.cmp_animchar_getparami
        _API.cmp_animchar_getparami.restype = c_int
        _API.cmp_animchar_getparami.argtypes = [c_ulonglong, c_char_p]
        _API.cmp_animchar_getparamf = dhenglib.cmp_animchar_getparamf
        _API.cmp_animchar_getparamf.restype = c_float
        _API.cmp_animchar_getparamf.argtypes = [c_ulonglong, c_char_p]
        _API.cmp_animchar_setparamb = dhenglib.cmp_animchar_setparamb
        _API.cmp_animchar_setparamb.argtypes = [c_ulonglong, c_char_p, c_int]
        _API.cmp_animchar_setparami = dhenglib.cmp_animchar_setparami
        _API.cmp_animchar_setparami.argtypes = [c_ulonglong, c_char_p, c_int]
        _API.cmp_animchar_setparamf = dhenglib.cmp_animchar_setparamf
        _API.cmp_animchar_setparamf.argtypes = [c_ulonglong, c_char_p, c_float]
        # world-mgr.h
        _API.wld_set_var = dhenglib.wld_set_var
        _API.wld_set_var.argtypes = [c_uint, c_uint, POINTER(Variant)]
        _API.wld_get_var = dhenglib.wld_get_var
        _API.wld_get_var.restype = POINTER(Variant)
        _API.wld_get_var.argtypes = [c_uint, c_uint]
        _API.wld_find_var = dhenglib.wld_find_var
        _API.wld_find_var.restype = c_uint
        _API.wld_find_var.argtypes = [c_uint, c_char_p]
        _API.wld_find_section = dhenglib.wld_find_section
        _API.wld_find_section.restype = c_uint
        _API.wld_find_section.argtypes = [c_char_p]
        _API.is_init = True
class Engine:
    """Top-level facade over the native engine loop and scene management."""
    is_init = False
    __active_scene = None

    @staticmethod
    def send_keys(ch, vkey):
        # forward raw keyboard input to the native GUI message queue
        _API.eng_send_guimsgs(ch, vkey)

    @staticmethod
    def update():
        """Advance one frame: input, world, active scene, then native update."""
        if not Engine.is_init:
            return
        frame_time = _API.eng_get_frametime()
        Input.update(frame_time)
        World.update_objects(frame_time)
        if Engine.__active_scene is not None:
            Engine.__active_scene.update_objects(frame_time)
        _API.eng_update()

    @staticmethod
    def init(conf):
        """Initialize the native engine with `conf` and register components."""
        status = _API.eng_init(conf.params)
        if IS_FAIL(status):
            raise Exception(Errors.last_error())
        _API.gfx_set_gridcallback(c_int(True))
        # register built-in component types with their native type ids
        builtin_cmps = (
            ('transform', 0x7887, Transform),
            ('camera', 0x8b72, Camera),
            ('bounds', 0x8bbd, Bounds),
            ('model', 0x4e9b, Model),
            ('animation', 0x068b, Animation),
            ('animator', 0x99e4, Animator),
            ('rigidbody', 0xbc2d, RigidBody),
            ('light', 0x4e0e, Light))
        for cmp_name, cmp_id, cmp_cls in builtin_cmps:
            Component.register(cmp_name, cmp_id, cmp_cls)
        Engine.is_init = True

    @staticmethod
    def release():
        _API.eng_release()
        Engine.is_init = False

    @staticmethod
    def set_active_scene(scene, caller_scene=False):
        # public callers go through scene.activate(); the scene calls back
        # here with caller_scene=True so we record it as active
        if caller_scene:
            Engine.__active_scene = scene
        else:
            scene.activate()

    @staticmethod
    def get_share_dir():
        return _API.eng_get_sharedir().decode()

    @staticmethod
    def resize_view(width, height):
        if Engine.is_init:
            _API.gfx_resize(c_uint(width), c_uint(height))
class Component:
    """Base wrapper around a native engine component instance.

    On construction, looks up the native component type, binds to an
    existing instance on the owner object, or creates a new one.
    """
    __cmps = dict()  # registry: name -> (native type id, wrapper class)

    def __init__(self, name, cmp_type, owner_obj):
        self._name = name
        self._type = cmp_type
        self._owner_obj = owner_obj
        c = _API.cmp_findtype(c_ushort(cmp_type))
        if c is None:
            raise Exception('specified component "%s" does not exist' % name)
        self._cmp = _API.cmp_findinstance_bytype_inobj(owner_obj.objptr, c_ushort(cmp_type))
        if self._cmp == INVALID_HANDLE:
            self._cmp = _API.cmp_create_instance(c, owner_obj.objptr, c_uint(0),
                c_ulonglong(INVALID_HANDLE), c_uint(0))
            if self._cmp == INVALID_HANDLE:
                raise Exception('could not create component "%s"' % name)

    def __get_internalname(self):
        # Fix: original read self.type, which does not exist on the base
        # class (and on Light resolves to the light-kind property, not
        # the component type id); the component type id is self._type.
        c = _API.cmp_findtype(c_ushort(self._type))
        if c is not None:
            return _API.cmp_getname(c)
    internal_name = property(__get_internalname)

    def __get_name(self):
        return self._name
    name = property(__get_name)

    def __get_internaltype(self):
        return self._type
    internal_type = property(__get_internaltype)

    def __get_ownerobj(self):
        return self._owner_obj
    owner_obj = property(__get_ownerobj)

    def destroy(self):
        """Destroy the native instance and invalidate the handle (idempotent)."""
        if self._cmp != INVALID_HANDLE:
            _API.cmp_destroy_instance(self._cmp)
            # Fix: original had 'self_cmp = INVALID_HANDLE' (a typo
            # creating an unused local), leaving the stale handle behind
            self._cmp = INVALID_HANDLE

    def debug(self, dbg = True):
        """Toggle native debug visualization for this component."""
        if dbg: _API.cmp_debug_add(self._cmp)
        else: _API.cmp_debug_remove(self._cmp)

    @staticmethod
    def register(name, cmp_type, cls_type):
        """Register a component wrapper class under `name`."""
        Component.__cmps[name] = (cmp_type, cls_type)

    @staticmethod
    def create(name, owner_obj):
        """Instantiate a registered component on `owner_obj` by name."""
        if name in Component.__cmps:
            citem = Component.__cmps[name]
            return citem[1](name, citem[0], owner_obj)
        else:
            raise Exception('component by name "%s" is not registered' % name)
class Transform(Component):
    """Spatial transform component (world position and rotation)."""

    def __init__(self, name, cmp_type, owner_obj):
        super().__init__(name, cmp_type, owner_obj)

    def __set_position(self, pos):
        _API.cmp_xform_setpos(self._owner_obj.objptr, byref(pos))

    def __get_position(self):
        result = Vec3()
        _API.cmp_xform_getpos(self._owner_obj.objptr, byref(result))
        return result
    position = property(__get_position, __set_position)

    def __set_rotation(self, quat):
        _API.cmp_xform_setrot_quat(self._owner_obj.objptr, byref(quat))

    def __get_rotation(self):
        result = Quat()
        _API.cmp_xform_getrot(self._owner_obj.objptr, byref(result))
        return result
    rotation = property(__get_rotation, __set_rotation)
class Bounds(Component):
    """Bounding-volume component exposing the bounding sphere (x, y, z, radius)."""

    def __init__(self, name, cmp_type, owner_obj):
        super().__init__(name, cmp_type, owner_obj)

    def __set_sphere(self, s):
        # Fix: original did 'sf[0].value = s.x', but elements of a
        # c_float array are plain floats -- assign by index instead.
        sf = (c_float*4)()
        sf[0] = s.x
        sf[1] = s.y
        sf[2] = s.z
        sf[3] = s.w
        _API.cmp_value_set4f(self._cmp, to_cstr('sphere'), sf)

    def __get_sphere(self):
        # Fix: original passed the c_float*4 *type* (never instantiated)
        # to the native call and read '.value' off float elements.
        sf = (c_float*4)()
        _API.cmp_value_get4f(sf, self._cmp, to_cstr('sphere'))
        return Vec4(sf[0], sf[1], sf[2], sf[3])
    sphere = property(__get_sphere, __set_sphere)
class Camera(Component):
    """Perspective camera component.

    Fix: the original near/far clip and min/max pitch setters referenced
    the undefined name ``fov`` instead of their own parameter, raising
    NameError on any assignment to those properties.
    """

    def __init__(self, name, cmp_type, owner_obj):
        super().__init__(name, cmp_type, owner_obj)

    def __get_fov(self):
        f = c_float()
        _API.cmp_value_getf(byref(f), self._cmp, to_cstr('fov'))
        return f.value

    def __set_fov(self, fov):
        _API.cmp_value_setf(self._cmp, to_cstr('fov'), c_float(fov))
    fov = property(__get_fov, __set_fov)

    def __get_nearclip(self):
        f = c_float()
        _API.cmp_value_getf(byref(f), self._cmp, to_cstr('near_distance'))
        return f.value

    def __set_nearclip(self, d):
        _API.cmp_value_setf(self._cmp, to_cstr('near_distance'), c_float(d))
    near_clip = property(__get_nearclip, __set_nearclip)

    def __get_farclip(self):
        f = c_float()
        _API.cmp_value_getf(byref(f), self._cmp, to_cstr('far_distance'))
        return f.value

    def __set_farclip(self, d):
        _API.cmp_value_setf(self._cmp, to_cstr('far_distance'), c_float(d))
    far_clip = property(__get_farclip, __set_farclip)

    def __get_maxpitch(self):
        f = c_float()
        _API.cmp_value_getf(byref(f), self._cmp, to_cstr('max_pitch'))
        return f.value

    def __set_maxpitch(self, pitch):
        _API.cmp_value_setf(self._cmp, to_cstr('max_pitch'), c_float(pitch))
    max_pitch = property(__get_maxpitch, __set_maxpitch)

    def __get_minpitch(self):
        f = c_float()
        _API.cmp_value_getf(byref(f), self._cmp, to_cstr('min_pitch'))
        return f.value

    def __set_minpitch(self, pitch):
        _API.cmp_value_setf(self._cmp, to_cstr('min_pitch'), c_float(pitch))
    min_pitch = property(__get_minpitch, __set_minpitch)

    def __get_active(self):
        b = c_int(0)
        _API.cmp_value_getb(byref(b), self._cmp, to_cstr('active'))
        return bool(b.value)

    def __set_active(self, value):
        _API.cmp_value_setb(self._cmp, to_cstr('active'), c_int(value))
    active = property(__get_active, __set_active)
class Model(Component):
    """Renderable mesh component."""

    def __init__(self, name, cmp_type, owner_obj):
        super().__init__(name, cmp_type, owner_obj)

    def __get_filepath(self):
        s = create_string_buffer(128)
        _API.cmp_value_gets(s, c_uint(128), self._cmp, to_cstr('filepath'))
        return s.value.decode()

    def __set_filepath(self, fpath):
        r = _API.cmp_value_sets(self._cmp, to_cstr('filepath'), to_cstr(fpath))
        if IS_FAIL(r):
            raise Exception(Errors.last_error())
    filepath = property(__get_filepath, __set_filepath)

    def __get_excludeshadows(self):
        b = c_int(0)
        _API.cmp_value_getb(byref(b), self._cmp, to_cstr('exclude_shadows'))
        return bool(b.value)

    def __set_excludeshadows(self, excl):
        # Fix: cmp_value_setb declares a c_int argument; the original
        # passed a c_uint instance, which ctypes rejects.
        _API.cmp_value_setb(self._cmp, to_cstr('exclude_shadows'), c_int(excl))
    exclude_shadows = property(__get_excludeshadows, __set_excludeshadows)
class Animation(Component):
    """Keyframe animation playback component."""

    def __init__(self, name, cmp_type, owner_obj):
        super().__init__(name, cmp_type, owner_obj)

    def __get_filepath(self):
        s = create_string_buffer(128)
        _API.cmp_value_gets(s, c_uint(128), self._cmp, to_cstr('filepath'))
        return s.value.decode()

    def __set_filepath(self, fpath):
        r = _API.cmp_value_sets(self._cmp, to_cstr('filepath'), to_cstr(fpath))
        if IS_FAIL(r):
            raise Exception(Errors.last_error())
    filepath = property(__get_filepath, __set_filepath)

    def __get_playrate(self):
        f = c_float()
        _API.cmp_value_getf(byref(f), self._cmp, to_cstr('play_rate'))
        return f.value

    def __set_playrate(self, rate):
        _API.cmp_value_setf(self._cmp, to_cstr('play_rate'), c_float(rate))
    play_rate = property(__get_playrate, __set_playrate)

    def __get_clipname(self):
        s = create_string_buffer(128)
        # Fix: original omitted the buffer-size argument required by
        # cmp_value_gets (argtypes: buf, size, cmp, name).
        _API.cmp_value_gets(s, c_uint(128), self._cmp, to_cstr('clip_name'))
        return s.value.decode()

    def __set_clipname(self, clip_name):
        _API.cmp_value_sets(self._cmp, to_cstr('clip_name'), to_cstr(clip_name))
    clip_name = property(__get_clipname, __set_clipname)

    def __get_frame(self):
        return _API.cmp_anim_getcurframe(self._cmp)

    def __set_frame(self, value):
        _API.cmp_value_setui(self._cmp, to_cstr('frame_idx'), c_uint(value))
    frame = property(__get_frame, __set_frame)

    def __get_isplaying(self):
        return bool(_API.cmp_anim_isplaying(self._cmp))
    is_playing = property(__get_isplaying)

    def __get_clips(self):
        # Fix: original queried the *bone* count instead of the clip
        # count, and never returned the list it built.
        clip_cnt = _API.cmp_anim_getclipcnt(self._cmp)
        clips = []
        for i in range(clip_cnt):
            clips.append(_API.cmp_anim_getclipname(self._cmp, c_uint(i)).decode())
        return clips
    clips = property(__get_clips)

    def __get_bones(self):
        # Fix: original never returned the list it built.
        bone_cnt = _API.cmp_anim_getbonecnt(self._cmp)
        bones = []
        for i in range(bone_cnt):
            bones.append(_API.cmp_anim_getbonename(self._cmp, c_uint(i)).decode())
        return bones
    bones = property(__get_bones)

    def __get_fps(self):
        return _API.cmp_anim_getfps(self._cmp)
    fps = property(__get_fps)

    def __get_framecnt(self):
        return _API.cmp_anim_getframecnt(self._cmp)
    frame_count = property(__get_framecnt)
class Animator(Component):
    """Animation controller component with typed named parameters."""
    class ParamType:
        # parameter type codes reported by the native engine
        UNKNOWN = 0
        INT = 1
        FLOAT = 2
        BOOLEAN = 3

    def __init__(self, name, cmp_type, owner_obj):
        super().__init__(name, cmp_type, owner_obj)

    def __get_filepath(self):
        buf = create_string_buffer(128)
        _API.cmp_value_gets(buf, c_uint(128), self._cmp, to_cstr('filepath'))
        return buf.value.decode()

    def __set_filepath(self, fpath):
        result = _API.cmp_value_sets(self._cmp, to_cstr('filepath'), to_cstr(fpath))
        if IS_FAIL(result):
            raise Exception(Errors.last_error())
    filepath = property(__get_filepath, __set_filepath)

    def get_param(self, name):
        """Read a controller parameter by name, typed per the engine."""
        cname = to_cstr(name)
        ptype = _API.cmp_animchar_getparamtype(self._cmp, cname)
        if ptype == Animator.ParamType.UNKNOWN:
            raise Exception('unknown parameter "%s"' % name)
        if ptype == Animator.ParamType.INT:
            return _API.cmp_animchar_getparami(self._cmp, cname)
        if ptype == Animator.ParamType.FLOAT:
            return _API.cmp_animchar_getparamf(self._cmp, cname)
        if ptype == Animator.ParamType.BOOLEAN:
            return bool(_API.cmp_animchar_getparamb(self._cmp, cname))

    def set_param(self, name, value):
        """Write a controller parameter by name, typed per the engine."""
        cname = to_cstr(name)
        ptype = _API.cmp_animchar_getparamtype(self._cmp, cname)
        if ptype == Animator.ParamType.UNKNOWN:
            raise Exception('unknown parameter "%s"' % name)
        if ptype == Animator.ParamType.INT:
            _API.cmp_animchar_setparami(self._cmp, cname, c_int(value))
        elif ptype == Animator.ParamType.FLOAT:
            return _API.cmp_animchar_setparamf(self._cmp, cname, c_float(value))
        elif ptype == Animator.ParamType.BOOLEAN:
            return _API.cmp_animchar_setparamb(self._cmp, cname, c_int(value))
class RigidBody(Component):
    """Physics rigid-body component."""

    def __init__(self, name, cmp_type, owner_obj):
        super().__init__(name, cmp_type, owner_obj)

    def __get_filepath(self):
        s = create_string_buffer(128)
        _API.cmp_value_gets(s, c_uint(128), self._cmp, to_cstr('filepath'))
        return s.value.decode()

    def __set_filepath(self, fpath):
        r = _API.cmp_value_sets(self._cmp, to_cstr('filepath'), to_cstr(fpath))
        if IS_FAIL(r):
            raise Exception(Errors.last_error())
    filepath = property(__get_filepath, __set_filepath)

    def __get_kinematic(self):
        b = c_int()
        _API.cmp_value_getb(byref(b), self._cmp, to_cstr('kinematic'))
        return bool(b.value)

    def __set_kinematic(self, value):
        _API.cmp_value_setb(self._cmp, to_cstr('kinematic'), c_int(value))
    kinematic = property(__get_kinematic, __set_kinematic)

    def __get_disablegravity(self):
        b = c_int()
        _API.cmp_value_getb(byref(b), self._cmp, to_cstr('disablegravity'))
        return bool(b.value)

    def __set_disablegravity(self, value):
        # Fix: cmp_value_setb declares a c_int argument; the original
        # passed a c_uint instance (inconsistent with __set_kinematic).
        _API.cmp_value_setb(self._cmp, to_cstr('disablegravity'), c_int(value))
    disable_gravity = property(__get_disablegravity, __set_disablegravity)
class Light(Component):
    """Dynamic light source component (point or spot)."""
    class Type:
        # native light-kind codes
        POINT = 2
        SPOT = 3

    def __init__(self, name, cmp_type, owner_obj):
        super().__init__(name, cmp_type, owner_obj)

    def __get_type(self):
        n = c_uint()
        _API.cmp_value_getui(byref(n), self._cmp, to_cstr('type'))
        return n.value

    def __set_type(self, t):
        _API.cmp_value_setui(self._cmp, to_cstr('type'), c_uint(t))
    type = property(__get_type, __set_type)

    def __get_color(self):
        # Fix: original passed the c_float*4 *type* (never instantiated)
        # to the native call and read '.value' off float elements.
        fv = (c_float*4)()
        _API.cmp_value_get4f(fv, self._cmp, to_cstr('color'))
        return Color(fv[0], fv[1], fv[2], fv[3])

    def __set_color(self, c):
        fv = (c_float*4)()
        fv[0] = c.r
        fv[1] = c.g
        fv[2] = c.b
        fv[3] = c.a
        _API.cmp_value_set4f(self._cmp, to_cstr('color'), fv)
    color = property(__get_color, __set_color)

    def __get_intensity(self):
        f = c_float()
        _API.cmp_value_getf(byref(f), self._cmp, to_cstr('intensity'))
        return f.value

    def __set_intensity(self, f):
        _API.cmp_value_setf(self._cmp, to_cstr('intensity'), c_float(f))
    intensity = property(__get_intensity, __set_intensity)

    def __get_attennear(self):
        f = c_float()
        _API.cmp_value_getf(byref(f), self._cmp, to_cstr('atten_near'))
        return f.value  # Fix: original dropped the return

    def __set_attennear(self, n):
        _API.cmp_value_setf(self._cmp, to_cstr('atten_near'), c_float(n))
    atten_near = property(__get_attennear, __set_attennear)

    def __get_attenfar(self):
        # Fix: original defined atten_far twice with identical bodies
        # (duplicate removed) and dropped the return value.
        f = c_float()
        _API.cmp_value_getf(byref(f), self._cmp, to_cstr('atten_far'))
        return f.value

    def __set_attenfar(self, n):
        _API.cmp_value_setf(self._cmp, to_cstr('atten_far'), c_float(n))
    atten_far = property(__get_attenfar, __set_attenfar)

    def __get_attennarrow(self):
        f = c_float()
        _API.cmp_value_getf(byref(f), self._cmp, to_cstr('atten_narrow'))
        return f.value  # Fix: original dropped the return

    def __set_attennarrow(self, n):
        _API.cmp_value_setf(self._cmp, to_cstr('atten_narrow'), c_float(n))
    atten_narrow = property(__get_attennarrow, __set_attennarrow)

    def __get_lodscheme(self):
        s = create_string_buffer(128)
        _API.cmp_value_gets(s, c_uint(128), self._cmp, to_cstr('lod_scheme'))
        return s.value.decode()

    def __set_lodscheme(self, fpath):
        r = _API.cmp_value_sets(self._cmp, to_cstr('lod_scheme'), to_cstr(fpath))
        if IS_FAIL(r):
            raise Exception(Errors.last_error())
    lod_scheme = property(__get_lodscheme, __set_lodscheme)
class Behavior(metaclass=ABCMeta):
    """Abstract base for scriptable per-object behaviors.

    Subclasses implement `init` (attachment) and `update` (per-frame step).
    """
    @abstractmethod
    def init(self, game_obj):
        """Bind the behavior to *game_obj*; return True to accept."""
        ...
    @abstractmethod
    def update(self, dt):
        """Advance the behavior by the frame delta *dt*."""
        ...
class OrbitCam(Behavior):
    # Orbit ("turntable") camera behavior.
    # Left mouse drag rotates around `target` (x drag = yaw, y drag = pitch);
    # right mouse drag (vertical movement) changes the orbit distance (zoom).
    def init(self, game_obj):
        # Attach to the object; cache its transform and reset drag state.
        self._obj = game_obj
        self._xform = game_obj.transform
        self.target = Vec3()        # world-space point the camera orbits
        self.sensivity = 0.2        # drag scale (sic: misspelling kept — public attribute)
        self._distance = 10         # orbit radius
        self._x = 0                 # accumulated yaw angle (degrees; fed to Math.to_rad)
        self._y = 0                 # accumulated pitch angle (degrees)
        self._lockpos = Vec2()      # cursor position at drag start / previous frame
        self._leftbtn_dwn = False
        self._rightbtn_dwn = False
        return True
    def update(self, dt):
        # Per-frame: accumulate mouse deltas, then rebuild the transform.
        if Input.is_mousedown(MouseKey.LEFT):
            mpos = Input.get_mousepos()
            if not self._leftbtn_dwn:
                # Drag just started: latch position and lock the cursor.
                self._leftbtn_dwn = True
                self._lockpos = mpos.copy()
                Input.lock_cursor()
            delta_pos = (mpos - self._lockpos)*self.sensivity
            self._x += delta_pos.x
            self._y += delta_pos.y
            self._lockpos = mpos.copy()
        else:
            self._leftbtn_dwn = False
        if Input.is_mousedown(MouseKey.RIGHT):
            mpos = Input.get_mousepos()
            if not self._rightbtn_dwn:
                self._rightbtn_dwn = True
                self._lockpos = mpos.copy()
                Input.lock_cursor()
            # Vertical drag adjusts the orbit radius.
            delta_pos = (mpos - self._lockpos)*self.sensivity
            self._distance += delta_pos.y
            self._lockpos = mpos.copy()
        else:
            self._rightbtn_dwn = False
        if (not self._rightbtn_dwn) and (not self._leftbtn_dwn):
            Input.unlock_cursor()
        # Compose yaw about world Y with pitch about X, in that order.
        q1 = Quat()
        q1.from_axis(Vec3(0, 1, 0), Math.to_rad(self._x))
        q2 = Quat()
        q2.from_axis(Vec3(1, 0, 0), Math.to_rad(self._y))
        q = q2*q1
        self._xform.rotation = q
        # Place the camera `distance` units from the target along the
        # rotated -Z axis.
        m = Matrix3()
        m.rotate_quat(q)
        self._xform.position = Vec3(0, 0, -self._distance)*m + self.target
class GameObject:
    """A scene object: a native engine object plus its Python-side
    components (`__cmps`) and attached behaviors (`__behaviors`)."""
    class Type:
        # Bit flags describing the native object type.
        MODEL = (1<<0)
        PARTICLE = (1<<1)
        LIGHT = (1<<2)
        DECAL = (1<<3)
        CAMERA = (1<<4)
        TRIGGER = (1<<5)
        ENV = (1<<6)
    def __init__(self, scene, obj_name, obj_type):
        """Create a native object in *scene* (or the global scene when
        *scene* is None). Raises on native-side failure."""
        self.__name = obj_name
        self.__cmps = dict()
        self.__behaviors = dict()
        self.__scene = scene
        if scene is not None:
            self.__obj = _API.scn_create_obj(c_uint(scene.ID), to_cstr(obj_name), c_uint(obj_type))
        else:
            # No owning scene: create in the engine's global scene.
            self.__obj = _API.scn_create_obj(c_uint(INVALID_INDEX), to_cstr(obj_name),
                                             c_uint(obj_type))
        if self.__obj is None:
            raise Exception('creating object failed')
        self.__create_components(obj_type)
    def __create_components(self, obj_type):
        # Every object gets transform + bounds; add a type-specific one.
        self.add_component('transform')
        self.add_component('bounds')
        if obj_type == GameObject.Type.CAMERA:
            self.add_component('camera')
        elif obj_type == GameObject.Type.MODEL:
            self.add_component('model')
        elif obj_type == GameObject.Type.LIGHT:
            self.add_component('light')
    def destroy(self, scene_caller=False):
        """Destroy the native object. When *scene_caller* is False the call
        is routed through the owning Scene/World registry so bookkeeping
        stays consistent."""
        if Engine.is_init and self.__obj is not None:
            if scene_caller:
                _API.scn_destroy_obj(self.__obj)
                self.__obj = None
            elif self.__scene is not None:
                self.__scene.destroy_object(self)
            else:
                World.destroy_object(self)
    def update_behaviors(self, dt):
        """Tick every attached behavior with frame delta *dt*."""
        for b in self.__behaviors.values():
            b.update(dt)
    def add_component(self, name):
        if self.__obj is None:
            raise Exception('object is NULL')
        self.__cmps[name] = Component.create(name, self)
    def add_behavior(self, behavior, name):
        # Attach only if the behavior accepts the object in init().
        if behavior.init(self):
            self.__behaviors[name] = behavior
    def get_behavior(self, name):
        # Raises KeyError for an unknown behavior name (the previous
        # try/except KeyError: raise was a no-op and has been removed).
        return self.__behaviors[name]
    def __getattr__(self, name):
        # Fallback attribute lookup: resolve unknown attributes as components.
        if self.__obj is None:
            raise Exception('object is NULL')
        try:
            return self.__cmps[name]
        except KeyError:
            # BUGFIX: previously referenced the undefined local `obj_name`,
            # which raised NameError instead of the intended AttributeError.
            raise AttributeError('component "%s" does not exist in GameObject "%s"' % (name, self.__name))
    def __get_name(self):
        if self.__obj is None:
            raise Exception('object is NULL')
        return self.__name
    name = property(__get_name)
    def __get_objptr(self):
        # Raw native object pointer (may be None after destroy()).
        return self.__obj
    objptr = property(__get_objptr)
    def __get_scene(self):
        return self.__scene
    scene = property(__get_scene)
class _WorldMeta(type):
    # Metaclass that gives the `World` class *class-level* properties which
    # proxy the engine's global world variables (light/ambient/physics).
    # Cache of resolved ids: 'section.name' -> (section_id, var_id).
    _vars = dict()
    def _find_var(self, section, name):
        # Resolve a world variable, caching the native (section, var) id
        # pair after the first successful lookup; returns the dereferenced
        # native variant object (supports get_value()/set_value()).
        fullname = str.join('.', (section, name))
        if fullname in self._vars:
            item = self._vars[fullname]
            v = _API.wld_get_var(c_uint(item[0]), c_uint(item[1]))
        else:
            sec_id = _API.wld_find_section(to_cstr(section))
            if sec_id == 0:
                raise Exception('section "%s" does not exist' % section)
            var_id = _API.wld_find_var(c_uint(sec_id), to_cstr(name))
            if var_id == 0:
                raise Exception('variable "%s" does not exist' % fullname)
            self._vars[fullname] = (sec_id, var_id)
            v = _API.wld_get_var(c_uint(sec_id), c_uint(var_id))
        return v.contents
    # --- directional light -------------------------------------------------
    def _get_lightdir(self):
        return self._find_var('light', 'dir').get_value()
    def _set_lightdir(self, v):
        self._find_var('light', 'dir').set_value(v)
    light_dir = property(_get_lightdir, _set_lightdir)
    def _get_lightcolor(self):
        return self._find_var('light', 'color').get_value()
    def _set_lightcolor(self, c):
        self._find_var('light', 'color').set_value(c)
    light_color = property(_get_lightcolor, _set_lightcolor)
    def _get_lightintensity(self):
        return self._find_var('light', 'intensity').get_value()
    def _set_lightintensity(self, i):
        self._find_var('light', 'intensity').set_value(i)
    light_intensity = property(_get_lightintensity, _set_lightintensity)
    # --- ambient (hemisphere) lighting -------------------------------------
    def _get_ambient_groundcolor(self):
        return self._find_var('ambient', 'ground-color').get_value()
    def _set_ambient_groundcolor(self, v):
        self._find_var('ambient', 'ground-color').set_value(v)
    ambient_groundcolor = property(_get_ambient_groundcolor, _set_ambient_groundcolor)
    def _get_ambient_skycolor(self):
        return self._find_var('ambient', 'sky-color').get_value()
    def _set_ambient_skycolor(self, v):
        self._find_var('ambient', 'sky-color').set_value(v)
    ambient_skycolor = property(_get_ambient_skycolor, _set_ambient_skycolor)
    def _get_ambient_skyvector(self):
        return self._find_var('ambient', 'sky-vector').get_value()
    def _set_ambient_skyvector(self, v):
        self._find_var('ambient', 'sky-vector').set_value(v)
    ambient_skyvector = property(_get_ambient_skyvector, _set_ambient_skyvector)
    def _get_ambient_intensity(self):
        return self._find_var('ambient', 'intensity').get_value()
    def _set_ambient_intensity(self, v):
        self._find_var('ambient', 'intensity').set_value(v)
    ambient_intensity = property(_get_ambient_intensity, _set_ambient_intensity)
    # --- physics ------------------------------------------------------------
    def _get_physics_gravity(self):
        return self._find_var('physics', 'gravity-vector').get_value()
    def _set_physics_gravity(self, v):
        self._find_var('physics', 'gravity-vector').set_value(v)
    physics_gravity = property(_get_physics_gravity, _set_physics_gravity)
class World(metaclass=_WorldMeta):
    """Global (scene-less) object registry; world-variable properties such
    as `World.light_dir` are provided by the `_WorldMeta` metaclass."""
    __objs = dict()   # name -> GameObject created without a scene
    @staticmethod
    def create_object(name, obj_type):
        """Create a GameObject in the global scene; names must be unique."""
        if name in World.__objs:
            raise Exception('object already exists')
        obj = GameObject(None, name, obj_type)
        World.__objs[name] = obj
        return obj
    @staticmethod
    def update_objects(dt):
        """Tick the behaviors of every registered object."""
        for obj in World.__objs.values():
            obj.update_behaviors(dt)
    @staticmethod
    def destroy_object(obj):
        """Destroy *obj* and drop it from the registry."""
        if Engine.is_init:
            if type(obj) is GameObject:
                if obj.name in World.__objs:
                    # BUGFIX: this is a staticmethod — the old code used
                    # `self` (NameError) and `del self.__objs` would have
                    # deleted the entire registry, not just this entry.
                    World.__objs[obj.name].destroy(scene_caller=True)
                    del World.__objs[obj.name]
            else:
                raise Exception('not a valid object type')
    @staticmethod
    def clear():
        """Clear all objects in the engine's global scene."""
        _API.scn_clear(INVALID_INDEX)
class Scene:
    """A named native scene owning a registry of GameObjects."""
    __scenes = dict()   # global registry: name -> Scene
    def __init__(self, name=None):
        # Create a new named scene; names must be unique.
        if name in Scene.__scenes:
            raise Exception('scene already exists')
        self.__id = _API.scn_create_scene(to_cstr(name))
        if self.__id == 0:
            raise Exception('could not create scene "%s"' % name)
        self.__objs = dict()
        self.__name = name
        Scene.__scenes[name] = self
    def destroy(self):
        """Destroy the native scene; the object becomes invalid (id 0)."""
        if Engine.is_init and self.__id != 0:
            _API.scn_destroy_scene(c_uint(self.__id))
            self.__id = 0
    def create_object(self, name, obj_type):
        """Create a GameObject owned by this scene; names must be unique."""
        if self.__id == 0:
            raise Exception('scene is not valid')
        if name in self.__objs:
            raise Exception('object already exists')
        obj = GameObject(self, name, obj_type)
        self.__objs[name] = obj
        return obj
    def create_model(self, name):
        """Convenience wrapper for creating a MODEL-type object."""
        if self.__id == 0:
            raise Exception('scene is not valid')
        return self.create_object(name, GameObject.Type.MODEL)
    def update_objects(self, dt):
        """Tick the behaviors of every object in this scene."""
        for obj in self.__objs.values():
            obj.update_behaviors(dt)
    def destroy_object(self, obj):
        """Destroy *obj* and drop it from this scene's registry."""
        if self.__id == 0:
            raise Exception('scene is not valid')
        if Engine.is_init:
            if type(obj) is GameObject:
                if obj.name in self.__objs:
                    self.__objs[obj.name].destroy(scene_caller=True)
                    # BUGFIX: delete only this object's entry; the old
                    # `del self.__objs` destroyed the whole registry dict.
                    del self.__objs[obj.name]
            else:
                raise Exception('not a valid object type')
    def clear(self):
        """Remove every object from the native scene."""
        if self.__id == 0:
            raise Exception('scene is not valid')
        _API.scn_clear(self.__id)
    def activate(self):
        """Make this the engine's active scene."""
        if self.__id == 0:
            raise Exception('scene is not valid')
        _API.scn_setactive(self.__id)
        Engine.set_active_scene(self, caller_scene=True)
    def __get_active(self):
        if self.__id == 0:
            raise Exception('scene is not valid')
        return _API.scn_getactive(self.__id) == self.__id
    active = property(__get_active)
    def __get_id(self):
        if self.__id == 0:
            raise Exception('scene is not valid')
        return self.__id
    ID = property(__get_id)
    def find_object(self, name):
        """Return this scene's object named *name* (KeyError if absent)."""
        if self.__id == 0:
            raise Exception('scene is not valid')
        return self.__objs[name]
    @staticmethod
    def find(name):
        """Return the Scene named *name* (KeyError if absent)."""
        # BUGFIX: the bare name `__scenes` was mangled to the nonexistent
        # global `_Scene__scenes`, raising NameError on every call.
        return Scene.__scenes[name]
_API.init(debug = ('--debug' in sys.argv))
| |
"""Tests for hermite_e module.
"""
from __future__ import division
import numpy as np
import numpy.polynomial.hermite_e as herme
from numpy.polynomial.polynomial import polyval
from numpy.testing import *
# Power-series (monomial basis) coefficients of the probabilists' Hermite
# polynomials He_0 .. He_9, used as reference data throughout the tests.
He0 = np.array([ 1 ])
He1 = np.array([ 0 , 1 ])
He2 = np.array([ -1 ,0 , 1 ])
He3 = np.array([ 0 , -3 ,0 , 1 ])
He4 = np.array([ 3 ,0 , -6 ,0 , 1 ])
He5 = np.array([ 0 , 15 ,0 , -10 ,0 , 1 ])
He6 = np.array([ -15 ,0 , 45 ,0 , -15 ,0 , 1 ])
He7 = np.array([ 0 , -105 ,0 , 105 ,0 , -21 ,0 , 1 ])
He8 = np.array([ 105 ,0 , -420 ,0 , 210 ,0 , -28 ,0 , 1 ])
He9 = np.array([ 0 , 945 ,0 , -1260 ,0 , 378 ,0 , -36 ,0 , 1 ])
# All reference polynomials, indexed by degree.
Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9]
def trim(x):
    """Trim trailing hermite_e coefficients whose magnitude is below 1e-6."""
    tol = 1e-6
    return herme.hermetrim(x, tol=tol)
class TestConstants(TestCase):
    """Sanity-check the module-level constant arrays exposed by hermite_e."""
    def test_hermedomain(self):
        expected = [-1, 1]
        assert_equal(herme.hermedomain, expected)
    def test_hermezero(self):
        expected = [0]
        assert_equal(herme.hermezero, expected)
    def test_hermeone(self):
        expected = [1]
        assert_equal(herme.hermeone, expected)
    def test_hermex(self):
        expected = [0, 1]
        assert_equal(herme.hermex, expected)
class TestArithmetic(TestCase) :
x = np.linspace(-3, 3, 100)
def test_hermeadd(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(max(i,j) + 1)
tgt[i] += 1
tgt[j] += 1
res = herme.hermeadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermesub(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(max(i,j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = herme.hermesub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermemulx(self):
assert_equal(herme.hermemulx([0]), [0])
assert_equal(herme.hermemulx([1]), [0,1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [i, 0, 1]
assert_equal(herme.hermemulx(ser), tgt)
def test_hermemul(self) :
# check values of result
for i in range(5) :
pol1 = [0]*i + [1]
val1 = herme.hermeval(self.x, pol1)
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
pol2 = [0]*j + [1]
val2 = herme.hermeval(self.x, pol2)
pol3 = herme.hermemul(pol1, pol2)
val3 = herme.hermeval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_hermediv(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = herme.hermeadd(ci, cj)
quo, rem = herme.hermediv(tgt, ci)
res = herme.hermeadd(herme.hermemul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase) :
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([4., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_hermeval(self) :
#check empty input
assert_equal(herme.hermeval([], [1]).size, 0)
#check normal input)
x = np.linspace(-1,1)
y = [polyval(x, c) for c in Helist]
for i in range(10) :
msg = "At i=%d" % i
ser = np.zeros
tgt = y[i]
res = herme.hermeval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3) :
dims = [2]*i
x = np.zeros(dims)
assert_equal(herme.hermeval(x, [1]).shape, dims)
assert_equal(herme.hermeval(x, [1,0]).shape, dims)
assert_equal(herme.hermeval(x, [1,0,0]).shape, dims)
def test_hermeval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = herme.hermeval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = herme.hermeval2d(z, z, self.c2d)
assert_(res.shape == (2,3))
def test_hermeval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = herme.hermeval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = herme.hermeval3d(z, z, z, self.c3d)
assert_(res.shape == (2,3))
def test_hermegrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = herme.hermegrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = herme.hermegrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_hermegrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = herme.hermegrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = herme.hermegrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
    # Antiderivatives via hermeint: argument validation, integration
    # constants `k`, lower bound `lbnd`, scaling `scl`, repeated
    # integration `m`, and the `axis` keyword.
    def test_hermeint(self) :
        # check exceptions
        assert_raises(ValueError, herme.hermeint, [0], .5)
        assert_raises(ValueError, herme.hermeint, [0], -1)
        assert_raises(ValueError, herme.hermeint, [0], 1, [0,0])
        # test integration of zero polynomial
        for i in range(2, 5):
            k = [0]*(i - 2) + [1]
            res = herme.hermeint([0], m=i, k=k)
            assert_almost_equal(res, [0, 1])
        # check single integration with integration constant
        for i in range(5) :
            scl = i + 1
            pol = [0]*i + [1]
            # integral of x**i is x**(i+1)/(i+1), plus the constant k=i
            tgt = [i] + [0]*i + [1/scl]
            hermepol = herme.poly2herme(pol)
            hermeint = herme.hermeint(hermepol, m=1, k=[i])
            res = herme.herme2poly(hermeint)
            assert_almost_equal(trim(res), trim(tgt))
        # check single integration with integration constant and lbnd
        for i in range(5) :
            scl = i + 1
            pol = [0]*i + [1]
            hermepol = herme.poly2herme(pol)
            hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1)
            # the antiderivative must take the value k at the lower bound
            assert_almost_equal(herme.hermeval(-1, hermeint), i)
        # check single integration with integration constant and scaling
        for i in range(5) :
            scl = i + 1
            pol = [0]*i + [1]
            tgt = [i] + [0]*i + [2/scl]
            hermepol = herme.poly2herme(pol)
            hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2)
            res = herme.herme2poly(hermeint)
            assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with default k
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                # m=j applied once must equal m=1 applied j times
                for k in range(j) :
                    tgt = herme.hermeint(tgt, m=1)
                res = herme.hermeint(pol, m=j)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with defined k
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j) :
                    tgt = herme.hermeint(tgt, m=1, k=[k])
                res = herme.hermeint(pol, m=j, k=range(j))
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with lbnd
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j) :
                    tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1)
                res = herme.hermeint(pol, m=j, k=range(j), lbnd=-1)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with scaling
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j) :
                    tgt = herme.hermeint(tgt, m=1, k=[k], scl=2)
                res = herme.hermeint(pol, m=j, k=range(j), scl=2)
                assert_almost_equal(trim(res), trim(tgt))
    def test_hermeint_axis(self):
        # check that axis keyword works: axis=0 integrates columns,
        # axis=1 integrates rows
        c2d = np.random.random((3, 4))
        tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T
        res = herme.hermeint(c2d, axis=0)
        assert_almost_equal(res, tgt)
        tgt = np.vstack([herme.hermeint(c) for c in c2d])
        res = herme.hermeint(c2d, axis=1)
        assert_almost_equal(res, tgt)
        tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d])
        res = herme.hermeint(c2d, k=3, axis=1)
        assert_almost_equal(res, tgt)
class TestDerivative(TestCase) :
def test_hermeder(self) :
# check exceptions
assert_raises(ValueError, herme.hermeder, [0], .5)
assert_raises(ValueError, herme.hermeder, [0], -1)
# check that zeroth deriviative does nothing
for i in range(5) :
tgt = [0]*i + [1]
res = herme.hermeder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5) :
for j in range(2,5) :
tgt = [0]*i + [1]
res = herme.hermeder(herme.hermeint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5) :
for j in range(2,5) :
tgt = [0]*i + [1]
res = herme.hermeder(herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_hermeder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T
res = herme.hermeder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeder(c) for c in c2d])
res = herme.hermeder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
    """Pseudo-Vandermonde matrix construction in 1, 2 and 3 dimensions."""
    # some random values in [-1, 1)
    x = np.random.random((3, 5))*2 - 1

    def test_hermevander(self):
        # 1d sample points: column i must equal He_i evaluated at the points
        pts = np.arange(3)
        van = herme.hermevander(pts, 3)
        assert_(van.shape == (3, 4))
        for deg in range(4):
            basis = [0]*deg + [1]
            assert_almost_equal(van[..., deg], herme.hermeval(pts, basis))
        # 2d sample points: the point-array shape becomes the leading dims
        pts = np.array([[1, 2], [3, 4], [5, 6]])
        van = herme.hermevander(pts, 3)
        assert_(van.shape == (3, 2, 4))
        for deg in range(4):
            basis = [0]*deg + [1]
            assert_almost_equal(van[..., deg], herme.hermeval(pts, basis))

    def test_hermevander2d(self):
        # also tests hermeval2d for non-square coefficient array
        x1, x2, x3 = self.x
        coef = np.random.random((2, 3))
        van = herme.hermevander2d(x1, x2, [1, 2])
        expected = herme.hermeval2d(x1, x2, coef)
        assert_almost_equal(np.dot(van, coef.flat), expected)
        # shape check: degrees (1, 2) give (1+1)*(2+1) = 6 columns
        van = herme.hermevander2d([x1], [x2], [1, 2])
        assert_(van.shape == (1, 5, 6))

    def test_hermevander3d(self):
        # also tests hermeval3d for non-square coefficient array
        x1, x2, x3 = self.x
        coef = np.random.random((2, 3, 4))
        van = herme.hermevander3d(x1, x2, x3, [1, 2, 3])
        expected = herme.hermeval3d(x1, x2, x3, coef)
        assert_almost_equal(np.dot(van, coef.flat), expected)
        # shape check: degrees (1, 2, 3) give 2*3*4 = 24 columns
        van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3])
        assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
    """Least-squares fitting with hermefit."""
    def test_hermefit(self):
        def f(x):
            # cubic with roots at 0, 1, 2
            return x*(x - 1)*(x - 2)
        # Test exceptions
        assert_raises(ValueError, herme.hermefit, [1], [1], -1)
        assert_raises(TypeError, herme.hermefit, [[1]], [1], 0)
        assert_raises(TypeError, herme.hermefit, [], [1], 0)
        assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0)
        assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0)
        assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0)
        assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]])
        assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1,1])
        # Test fit
        x = np.linspace(0,2)
        y = f(x)
        # exact degree reproduces the data
        coef3 = herme.hermefit(x, y, 3)
        assert_equal(len(coef3), 4)
        assert_almost_equal(herme.hermeval(x, coef3), y)
        # over-determined degree still reproduces the data
        coef4 = herme.hermefit(x, y, 4)
        assert_equal(len(coef4), 5)
        assert_almost_equal(herme.hermeval(x, coef4), y)
        # multiple right-hand-side columns are fit independently
        coef2d = herme.hermefit(x, np.array([y,y]).T, 3)
        assert_almost_equal(coef2d, np.array([coef3,coef3]).T)
        # test weighting: corrupt the zero-weight points of the weighted
        # copy; the fit must ignore them and still recover coef3.
        w = np.zeros_like(x)
        yw = y.copy()
        w[1::2] = 1
        # BUGFIX: was `y[0::2] = 0`, which mutated the pristine data and
        # left `yw` uncorrupted, making the weighting check vacuous.
        yw[0::2] = 0
        wcoef3 = herme.hermefit(x, yw, 3, w=w)
        assert_almost_equal(wcoef3, coef3)
        #
        wcoef2d = herme.hermefit(x, np.array([yw,yw]).T, 3, w=w)
        assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)
class TestGauss(TestCase):
    """Validate the 100-point Gauss-HermiteE quadrature rule."""
    def test_100(self):
        nodes, weights = herme.hermegauss(100)
        # Orthogonality: the weighted Gram matrix of the Vandermonde
        # columns must be diagonal.  Normalize first, since the raw inner
        # products of fast-growing functions are huge and confusing.
        vander = herme.hermevander(nodes, 99)
        gram = np.dot(vander.T * weights, vander)
        scale = 1/np.sqrt(gram.diagonal())
        gram = scale[:, None] * gram * scale
        assert_almost_equal(gram, np.eye(100))
        # The rule must integrate f(x)=1 exactly: the weight sum equals
        # the integral of the weight function, sqrt(2*pi).
        assert_almost_equal(weights.sum(), np.sqrt(2*np.pi))
class TestMisc(TestCase) :
def test_hermefromroots(self) :
res = herme.hermefromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1,5) :
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = herme.hermefromroots(roots)
res = herme.hermeval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(herme.herme2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_hermeroots(self) :
assert_almost_equal(herme.hermeroots([1]), [])
assert_almost_equal(herme.hermeroots([1, 1]), [-1])
for i in range(2,5) :
tgt = np.linspace(-1, 1, i)
res = herme.hermeroots(herme.hermefromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_hermetrim(self) :
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, herme.hermetrim, coef, -1)
# Test results
assert_equal(herme.hermetrim(coef), coef[:-1])
assert_equal(herme.hermetrim(coef, 1), coef[:-3])
assert_equal(herme.hermetrim(coef, 2), [0])
def test_hermeline(self) :
assert_equal(herme.hermeline(3,4), [3, 4])
def test_herme2poly(self) :
for i in range(10) :
assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i])
def test_poly2herme(self) :
for i in range(10) :
assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-5, 5, 11)
tgt = np.exp(-.5*x**2)
res = herme.hermeweight(x)
assert_almost_equal(res, tgt)
if __name__ == "__main__":
    # Run this test module directly through numpy's legacy test runner.
    run_module_suite()
| |
#!/usr/bin/env python3
# Copyright 2021 Anapaya Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import pathlib
import subprocess
import time
from typing import List
import sys
from http import client
from plumbum import cli
from acceptance.common import base
from acceptance.common import docker
from acceptance.common import scion
from python.lib import scion_addr
import toml
logger = logging.getLogger(__name__)
class Test(base.TestBase):
    """
    Test that in a topology with multiple ASes, every AS is capable of
    requesting renewed certificates. The test verifies that each AS has loaded
    the renewed certificate.

    The test is split into multiple steps:
    1. Start the topology.
    2. For each AS in the topology, create a new private key and request
       certificate chain renewal. The renewed chain is verified against the
       TRC.
    3. Remove the previous private key from the control servers.
    4. Ensure that the new private key and certificate are loaded by observing
       the http endpoint.
    5. Check connectivity with an end to end test.
    6. Stop all control servers and purge the state. This includes deleting
       all databases with cached data, including the path and trust database.
    7. Restart control servers and check connectivity again.
    """
    end2end = cli.SwitchAttr(
        "end2end_integration",
        str,
        default="./bin/end2end_integration",
        help="The end2end_integration binary " +
        "(default: ./bin/end2end_integration)",
    )

    def main(self):
        # plumbum entry point; skip setup/run when a nested subcommand
        # (e.g. explicit setup/teardown) was invoked instead.
        if not self.nested_command:
            try:
                self.setup()
                # Give some time for the topology to start.
                time.sleep(10)
                self._run()
            finally:
                self.teardown()

    def _run(self):
        """Execute the renewal scenario described in the class docstring."""
        isd_ases = scion.ASList.load("%s/gen/as_list.yml" %
                                     self.test_state.artifacts).all
        cs_configs = self._cs_configs()

        logger.info("==> Start renewal process")
        for isd_as in isd_ases:
            # Consistency fix: use the module logger (was `logging.info`,
            # which logged through the root logger).
            logger.info("===> Start renewal: %s" % isd_as)
            self._renewal_request(isd_as)

        logger.info("==> Check key and certificate reloads")
        self._check_key_cert(cs_configs)

        logger.info("==> Check connectivity")
        subprocess.run(
            [self.end2end, "-d", "-outDir", self.test_state.artifacts],
            check=True)

        logger.info("==> Shutting down control servers and purging caches")
        for container in self.list_containers("scion_sd.*"):
            self.test_state.dc("rm", container)
        for container in self.list_containers("scion_cs.*"):
            self.stop_container(container)
        for cs_config in cs_configs:
            # Purge every cached database (path, trust, ...) of this CS.
            files = list((pathlib.Path(self.test_state.artifacts) /
                          "gen-cache").glob("%s*" % cs_config.stem))
            for db_file in files:
                db_file.unlink()
            logger.info("Deleted files: %s" % [file.name for file in files])

        logger.info("==> Restart containers")
        self.setup_start()
        time.sleep(5)

        logger.info("==> Check connectivity")
        subprocess.run(
            [self.end2end, "-d", "-outDir", self.test_state.artifacts],
            check=True)

        logger.info("==> Backup mode")
        for isd_as in isd_ases:
            logger.info("===> Start renewal: %s" % isd_as)
            self._renewal_request(isd_as, mode="--backup")

    def _renewal_request(
        self,
        isd_as: scion_addr.ISD_AS,
        mode: str = "--force",
    ):
        """Run `scion-pki certificate renew` inside the AS's tester container
        and verify that both the chain and the key actually changed."""
        as_dir = self._to_as_dir(isd_as)
        docker_dir = pathlib.Path("/share") / self._rel(as_dir)

        def read_file(filename: str) -> str:
            # Read a crypto file from the host view of the AS directory.
            with open(as_dir / "crypto/as" / filename) as f:
                return f.read()

        chain_name = "ISD%s-AS%s.pem" % (isd_as.isd_str(),
                                         isd_as.as_file_fmt())
        old_chain = read_file(chain_name)
        old_key = read_file("cp-as.key")

        chain = docker_dir / "crypto/as" / chain_name
        args = [
            chain,
            docker_dir / "crypto/as/cp-as.key",
            mode,
            "--trc",
            docker_dir / "certs/ISD1-B1-S1.trc",
            "--sciond",
            self.execute("tester_%s" % isd_as.file_fmt(), "sh", "-c",
                         "echo $SCION_DAEMON").strip(),
            *self._local_flags(isd_as),
        ]
        logger.info("Requesting certificate chain renewal: %s" %
                    chain.relative_to(docker_dir))
        logger.info(
            self.execute("tester_%s" % isd_as.file_fmt(), "./bin/scion-pki",
                         "certificate", "renew", *args))

        logger.info("Verify renewed certificate chain")
        verify_out = self.execute("tester_%s" % isd_as.file_fmt(),
                                  "./bin/scion-pki", "certificate", "verify",
                                  chain, "--trc",
                                  "/share/gen/trcs/ISD1-B1-S1.trc")
        logger.info(str(verify_out).rstrip("\n"))

        # The renewal must have produced a fresh key and chain on disk.
        renewed_chain = read_file(chain_name)
        renewed_key = read_file("cp-as.key")
        if renewed_chain == old_chain:
            raise Exception(
                "renewed chain does not differ from previous chain")
        if renewed_key == old_key:
            raise Exception("renewed key does not differ from previous key")

    def _check_key_cert(self, cs_configs: List[pathlib.Path]):
        """Poll each control server's /signer endpoint until it reports the
        renewed certificate's subject key id (up to 5 attempts)."""
        not_ready = [*cs_configs]
        for _ in range(5):
            logger.info(
                "Checking if all control servers have reloaded the key and certificate..."
            )
            for cs_config in not_ready:
                conn = client.HTTPConnection(self._http_endpoint(cs_config))
                conn.request("GET", "/signer")
                resp = conn.getresponse()
                if resp.status != 200:
                    logger.info("Unexpected response: %d %s", resp.status,
                                resp.reason)
                    continue
                # cs_config stems look like "cs<ia>-<n>"; strip prefix/suffix
                # to recover the ISD-AS string.
                isd_as = scion_addr.ISD_AS(cs_config.stem[2:-2])
                as_dir = self._to_as_dir(isd_as)
                chain_name = "ISD%s-AS%s.pem" % (isd_as.isd_str(),
                                                 isd_as.as_file_fmt())
                pld = json.loads(resp.read().decode("utf-8"))
                if pld["subject_key_id"] != self._extract_skid(
                        as_dir / "crypto/as" / chain_name):
                    continue
                logger.info(
                    "Control server successfully loaded new key and certificate: %s"
                    % self._rel(cs_config))
                not_ready.remove(cs_config)
            if not not_ready:
                break
            time.sleep(3)
        else:
            logger.error(
                "Control servers without reloaded key and certificate: %s" %
                [cs_config.name for cs_config in not_ready])
            sys.exit(1)

    def _http_endpoint(self, cs_config: pathlib.Path):
        """Return the host:port of the CS's Prometheus/metrics endpoint."""
        with open(cs_config, "r") as f:
            cfg = toml.load(f)
        return cfg["metrics"]["prometheus"]

    def _extract_skid(self, file: pathlib.Path):
        """Return the Subject Key Identifier of the certificate in *file*
        as an upper-cased, space-separated hex string."""
        out = subprocess.check_output(
            ['openssl', 'x509', '-in', file, '-noout', '-text'])
        lines = out.splitlines()
        for i, v in enumerate(lines):
            if v.decode("utf-8").find("Subject Key Identifier") > -1:
                return lines[i + 1].decode("utf-8").split()[-1].replace(
                    ":", " ").upper()
        # BUGFIX: previously fell through and returned the unbound local
        # `skid`, raising a confusing NameError when the marker was absent.
        raise ValueError(
            "no Subject Key Identifier found in %s" % file)

    def _rel(self, path: pathlib.Path):
        # Path relative to the artifacts directory (for logging/sharing).
        return path.relative_to(pathlib.Path(self.test_state.artifacts))

    def _to_as_dir(self, isd_as: scion_addr.ISD_AS) -> pathlib.Path:
        # Host-side directory of the AS's generated configuration.
        return pathlib.Path("%s/gen/AS%s" %
                            (self.test_state.artifacts, isd_as.as_file_fmt()))

    def _cs_configs(self) -> List[pathlib.Path]:
        # All control-server TOML configs in the generated topology.
        return list(
            pathlib.Path("%s/gen" %
                         self.test_state.artifacts).glob("AS*/cs*.toml"))

    def _local_flags(self, isd_as: scion_addr.ISD_AS) -> List[str]:
        # `--local` address flag, read from the tester container's env.
        return [
            "--local",
            self.execute("tester_%s" % isd_as.file_fmt(), "sh", "-c",
                         "echo $SCION_LOCAL_ADDR").strip(),
        ]
if __name__ == "__main__":
    # Register the CLI subcommands and wire the docker-compose-backed test
    # state before handing control to the test runner.
    base.register_commands(Test)
    base.TestBase.test_state = base.TestState(scion.SCIONDocker(),
                                              docker.Compose())
    Test.run()
| |
# Copyright (c) 2015-2017 Blizzard Entertainment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from s2protocol.decoders import *
# Decoding instructions for each protocol type.
typeinfos = [
('_int',[(0,7)]), #0
('_int',[(0,4)]), #1
('_int',[(0,6)]), #2
('_int',[(0,14)]), #3
('_int',[(0,22)]), #4
('_int',[(0,32)]), #5
('_choice',[(0,2),{0:('m_uint6',2),1:('m_uint14',3),2:('m_uint22',4),3:('m_uint32',5)}]), #6
('_blob',[(0,8)]), #7
('_int',[(0,8)]), #8
('_struct',[[('m_flags',8,0),('m_major',8,1),('m_minor',8,2),('m_revision',8,3),('m_build',5,4),('m_baseBuild',5,5)]]), #9
('_int',[(0,3)]), #10
('_struct',[[('m_signature',7,0),('m_version',9,1),('m_type',10,2),('m_elapsedGameLoops',5,3)]]), #11
('_fourcc',[]), #12
('_blob',[(0,7)]), #13
('_int',[(0,64)]), #14
('_struct',[[('m_region',8,0),('m_programId',12,1),('m_realm',5,2),('m_name',13,3),('m_id',14,4)]]), #15
('_struct',[[('m_a',8,0),('m_r',8,1),('m_g',8,2),('m_b',8,3)]]), #16
('_int',[(0,2)]), #17
('_struct',[[('m_name',7,0),('m_toon',15,1),('m_race',7,2),('m_color',16,3),('m_control',8,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',17,7),('m_result',17,8)]]), #18
('_array',[(0,5),18]), #19
('_optional',[19]), #20
('_blob',[(0,10)]), #21
('_blob',[(0,11)]), #22
('_struct',[[('m_file',22,0)]]), #23
('_bool',[]), #24
('_int',[(-9223372036854775808,64)]), #25
('_blob',[(0,12)]), #26
('_blob',[(40,0)]), #27
('_array',[(0,6),27]), #28
('_optional',[28]), #29
('_array',[(0,6),22]), #30
('_optional',[30]), #31
('_struct',[[('m_playerList',20,0),('m_title',21,1),('m_difficulty',7,2),('m_thumbnail',23,3),('m_isBlizzardMap',24,4),('m_timeUTC',25,5),('m_timeLocalOffset',25,6),('m_description',26,7),('m_imageFilePath',22,8),('m_mapFileName',22,9),('m_cacheHandles',29,10),('m_miniSave',24,11),('m_gameSpeed',10,12),('m_defaultDifficulty',2,13),('m_modPaths',31,14)]]), #32
('_optional',[8]), #33
('_struct',[[('m_race',33,-1)]]), #34
('_struct',[[('m_team',33,-1)]]), #35
('_struct',[[('m_name',7,-8),('m_randomSeed',5,-7),('m_racePreference',34,-6),('m_teamPreference',35,-5),('m_testMap',24,-4),('m_testAuto',24,-3),('m_examine',24,-2),('m_observe',17,-1)]]), #36
('_array',[(0,5),36]), #37
('_struct',[[('m_lockTeams',24,-12),('m_teamsTogether',24,-11),('m_advancedSharedControl',24,-10),('m_randomRaces',24,-9),('m_battleNet',24,-8),('m_amm',24,-7),('m_ranked',24,-6),('m_noVictoryOrDefeat',24,-5),('m_fog',17,-4),('m_observers',17,-3),('m_userDifficulty',17,-2),('m_clientDebugFlags',14,-1)]]), #38
('_int',[(0,5)]), #39
('_int',[(1,4)]), #40
('_int',[(1,8)]), #41
('_bitarray',[(0,6)]), #42
('_bitarray',[(0,8)]), #43
('_bitarray',[(0,2)]), #44
('_struct',[[('m_allowedColors',42,-5),('m_allowedRaces',43,-4),('m_allowedDifficulty',42,-3),('m_allowedControls',43,-2),('m_allowedObserveTypes',44,-1)]]), #45
('_array',[(0,5),45]), #46
('_struct',[[('m_randomValue',5,-23),('m_gameCacheName',21,-22),('m_gameOptions',38,-21),('m_gameSpeed',10,-20),('m_gameType',10,-19),('m_maxUsers',39,-18),('m_maxObservers',39,-17),('m_maxPlayers',39,-16),('m_maxTeams',40,-15),('m_maxColors',2,-14),('m_maxRaces',41,-13),('m_maxControls',41,-12),('m_mapSizeX',8,-11),('m_mapSizeY',8,-10),('m_mapFileSyncChecksum',5,-9),('m_mapFileName',22,-8),('m_mapAuthorName',7,-7),('m_modFileSyncChecksum',5,-6),('m_slotDescriptions',46,-5),('m_defaultDifficulty',2,-4),('m_cacheHandles',28,-3),('m_isBlizzardMap',24,-2),('m_isPremadeFFA',24,-1)]]), #47
('_optional',[1]), #48
('_optional',[39]), #49
('_struct',[[('m_color',49,-1)]]), #50
('_array',[(0,5),5]), #51
('_array',[(0,9),5]), #52
('_struct',[[('m_control',8,-11),('m_userId',48,-10),('m_teamId',1,-9),('m_colorPref',50,-8),('m_racePref',34,-7),('m_difficulty',2,-6),('m_handicap',0,-5),('m_observe',17,-4),('m_rewards',51,-3),('m_toonHandle',13,-2),('m_licenses',52,-1)]]), #53
('_array',[(0,5),53]), #54
('_struct',[[('m_phase',10,-9),('m_maxUsers',39,-8),('m_maxObservers',39,-7),('m_slots',54,-6),('m_randomSeed',5,-5),('m_hostUserId',48,-4),('m_isSinglePlayer',24,-3),('m_gameDuration',5,-2),('m_defaultDifficulty',2,-1)]]), #55
('_struct',[[('m_userInitialData',37,-3),('m_gameDescription',47,-2),('m_lobbyState',55,-1)]]), #56
('_struct',[[('m_syncLobbyState',56,-1)]]), #57
('_struct',[[('m_name',13,-5)]]), #58
('_blob',[(0,6)]), #59
('_struct',[[('m_name',59,-5)]]), #60
('_struct',[[('m_name',59,-7),('m_type',5,-6),('m_data',13,-5)]]), #61
('_struct',[[('m_type',5,-7),('m_name',59,-6),('m_data',26,-5)]]), #62
('_array',[(0,5),8]), #63
('_struct',[[('m_signature',63,-5)]]), #64
('_struct',[[('m_gameFullyDownloaded',24,-10),('m_developmentCheatsEnabled',24,-9),('m_multiplayerCheatsEnabled',24,-8),('m_syncChecksummingEnabled',24,-7),('m_isMapToMapTransition',24,-6),('m_useAIBeacons',24,-5)]]), #65
('_struct',[[]]), #66
('_struct',[[('m_fileName',22,-9),('m_automatic',24,-8),('m_overwrite',24,-7),('m_name',7,-6),('m_description',21,-5)]]), #67
('_int',[(-2147483648,32)]), #68
('_struct',[[('x',68,-2),('y',68,-1)]]), #69
('_struct',[[('m_point',69,-4),('m_time',68,-3),('m_verb',21,-2),('m_arguments',21,-1)]]), #70
('_struct',[[('m_data',70,-5)]]), #71
('_int',[(0,20)]), #72
('_int',[(0,16)]), #73
('_struct',[[('m_abilLink',73,-3),('m_abilCmdIndex',39,-2),('m_abilCmdData',33,-1)]]), #74
('_optional',[74]), #75
('_null',[]), #76
('_struct',[[('x',72,-3),('y',72,-2),('z',68,-1)]]), #77
('_struct',[[('m_targetUnitFlags',8,-7),('m_timer',8,-6),('m_tag',5,-5),('m_snapshotUnitLink',73,-4),('m_snapshotControlPlayerId',48,-3),('m_snapshotUpkeepPlayerId',48,-2),('m_snapshotPoint',77,-1)]]), #78
('_choice',[(0,2),{0:('None',76),1:('TargetPoint',77),2:('TargetUnit',78),3:('Data',5)}]), #79
('_optional',[5]), #80
('_struct',[[('m_cmdFlags',72,-8),('m_abil',75,-7),('m_data',79,-6),('m_otherUnit',80,-5)]]), #81
('_int',[(0,9)]), #82
('_bitarray',[(0,9)]), #83
('_array',[(0,9),82]), #84
('_choice',[(0,2),{0:('None',76),1:('Mask',83),2:('OneIndices',84),3:('ZeroIndices',84)}]), #85
('_struct',[[('m_unitLink',73,-3),('m_intraSubgroupPriority',8,-2),('m_count',82,-1)]]), #86
('_array',[(0,9),86]), #87
('_struct',[[('m_subgroupIndex',82,-4),('m_removeMask',85,-3),('m_addSubgroups',87,-2),('m_addUnitTags',52,-1)]]), #88
('_struct',[[('m_controlGroupId',1,-6),('m_delta',88,-5)]]), #89
('_struct',[[('m_controlGroupIndex',1,-7),('m_controlGroupUpdate',17,-6),('m_mask',85,-5)]]), #90
('_struct',[[('m_count',82,-6),('m_subgroupCount',82,-5),('m_activeSubgroupIndex',82,-4),('m_unitTagsChecksum',5,-3),('m_subgroupIndicesChecksum',5,-2),('m_subgroupsChecksum',5,-1)]]), #91
('_struct',[[('m_controlGroupId',1,-6),('m_selectionSyncData',91,-5)]]), #92
('_array',[(0,3),68]), #93
('_struct',[[('m_recipientId',1,-6),('m_resources',93,-5)]]), #94
('_struct',[[('m_chatMessage',21,-5)]]), #95
('_int',[(-128,8)]), #96
('_struct',[[('x',68,-3),('y',68,-2),('z',68,-1)]]), #97
('_struct',[[('m_beacon',96,-13),('m_ally',96,-12),('m_flags',96,-11),('m_build',96,-10),('m_targetUnitTag',5,-9),('m_targetUnitSnapshotUnitLink',73,-8),('m_targetUnitSnapshotUpkeepPlayerId',96,-7),('m_targetUnitSnapshotControlPlayerId',96,-6),('m_targetPoint',97,-5)]]), #98
('_struct',[[('m_speed',10,-5)]]), #99
('_struct',[[('m_delta',96,-5)]]), #100
('_struct',[[('m_point',69,-7),('m_unit',5,-6),('m_pingedMinimap',24,-5)]]), #101
('_struct',[[('m_verb',21,-6),('m_arguments',21,-5)]]), #102
('_struct',[[('m_alliance',5,-6),('m_control',5,-5)]]), #103
('_struct',[[('m_unitTag',5,-5)]]), #104
('_struct',[[('m_unitTag',5,-6),('m_flags',8,-5)]]), #105
('_struct',[[('m_conversationId',68,-6),('m_replyId',68,-5)]]), #106
('_struct',[[('m_purchaseItemId',68,-5)]]), #107
('_struct',[[('m_difficultyLevel',68,-5)]]), #108
('_choice',[(0,3),{0:('None',76),1:('Checked',24),2:('ValueChanged',5),3:('SelectionChanged',68),4:('TextChanged',22)}]), #109
('_struct',[[('m_controlId',68,-7),('m_eventType',68,-6),('m_eventData',109,-5)]]), #110
('_struct',[[('m_soundHash',5,-6),('m_length',5,-5)]]), #111
('_array',[(0,8),5]), #112
('_struct',[[('m_soundHash',112,-2),('m_length',112,-1)]]), #113
('_struct',[[('m_syncInfo',113,-5)]]), #114
('_struct',[[('m_sound',5,-5)]]), #115
('_struct',[[('m_transmissionId',68,-6),('m_thread',5,-5)]]), #116
('_struct',[[('m_transmissionId',68,-5)]]), #117
('_struct',[[('x',73,-2),('y',73,-1)]]), #118
('_optional',[73]), #119
('_struct',[[('m_target',118,-8),('m_distance',119,-7),('m_pitch',119,-6),('m_yaw',119,-5)]]), #120
('_int',[(0,1)]), #121
('_struct',[[('m_skipType',121,-5)]]), #122
('_int',[(0,11)]), #123
('_struct',[[('x',123,-2),('y',123,-1)]]), #124
('_struct',[[('m_button',5,-8),('m_down',24,-7),('m_posUI',124,-6),('m_posWorld',77,-5)]]), #125
('_struct',[[('m_posUI',124,-6),('m_posWorld',77,-5)]]), #126
('_struct',[[('m_achievementLink',73,-5)]]), #127
('_struct',[[('m_soundtrack',5,-5)]]), #128
('_struct',[[('m_planetId',68,-5)]]), #129
('_struct',[[('m_key',96,-6),('m_flags',96,-5)]]), #130
('_struct',[[('m_resources',93,-5)]]), #131
('_struct',[[('m_fulfillRequestId',68,-5)]]), #132
('_struct',[[('m_cancelRequestId',68,-5)]]), #133
('_struct',[[('m_researchItemId',68,-5)]]), #134
('_struct',[[('m_laggingPlayerId',1,-5)]]), #135
('_struct',[[('m_mercenaryId',68,-5)]]), #136
('_struct',[[('m_battleReportId',68,-6),('m_difficultyLevel',68,-5)]]), #137
('_struct',[[('m_battleReportId',68,-5)]]), #138
('_int',[(0,19)]), #139
('_struct',[[('m_decrementMs',139,-5)]]), #140
('_struct',[[('m_portraitId',68,-5)]]), #141
('_struct',[[('m_functionName',13,-5)]]), #142
('_struct',[[('m_result',68,-5)]]), #143
('_struct',[[('m_gameMenuItemIndex',68,-5)]]), #144
('_struct',[[('m_reason',96,-5)]]), #145
('_struct',[[('m_purchaseCategoryId',68,-5)]]), #146
('_struct',[[('m_button',73,-5)]]), #147
('_struct',[[('m_cutsceneId',68,-6),('m_bookmarkName',13,-5)]]), #148
('_struct',[[('m_cutsceneId',68,-5)]]), #149
('_struct',[[('m_cutsceneId',68,-7),('m_conversationLine',13,-6),('m_altConversationLine',13,-5)]]), #150
('_struct',[[('m_cutsceneId',68,-6),('m_conversationLine',13,-5)]]), #151
('_struct',[[('m_recipient',10,-3),('m_string',22,-2)]]), #152
('_struct',[[('m_recipient',10,-3),('m_point',69,-2)]]), #153
('_struct',[[('m_progress',68,-2)]]), #154
]
# Map from protocol NNet.Game.*Event eventid to (typeid, name)
game_event_types = {
5: (66, 'NNet.Game.SUserFinishedLoadingSyncEvent'),
7: (58, 'NNet.Game.SBankFileEvent'),
8: (60, 'NNet.Game.SBankSectionEvent'),
9: (61, 'NNet.Game.SBankKeyEvent'),
10: (62, 'NNet.Game.SBankValueEvent'),
11: (64, 'NNet.Game.SBankSignatureEvent'),
12: (65, 'NNet.Game.SUserOptionsEvent'),
22: (67, 'NNet.Game.SSaveGameEvent'),
23: (66, 'NNet.Game.SSaveGameDoneEvent'),
25: (66, 'NNet.Game.SPlayerLeaveEvent'),
26: (71, 'NNet.Game.SGameCheatEvent'),
27: (81, 'NNet.Game.SCmdEvent'),
28: (89, 'NNet.Game.SSelectionDeltaEvent'),
29: (90, 'NNet.Game.SControlGroupUpdateEvent'),
30: (92, 'NNet.Game.SSelectionSyncCheckEvent'),
31: (94, 'NNet.Game.SResourceTradeEvent'),
32: (95, 'NNet.Game.STriggerChatMessageEvent'),
33: (98, 'NNet.Game.SAICommunicateEvent'),
34: (99, 'NNet.Game.SSetAbsoluteGameSpeedEvent'),
35: (100, 'NNet.Game.SAddAbsoluteGameSpeedEvent'),
36: (101, 'NNet.Game.STriggerPingEvent'),
37: (102, 'NNet.Game.SBroadcastCheatEvent'),
38: (103, 'NNet.Game.SAllianceEvent'),
39: (104, 'NNet.Game.SUnitClickEvent'),
40: (105, 'NNet.Game.SUnitHighlightEvent'),
41: (106, 'NNet.Game.STriggerReplySelectedEvent'),
44: (66, 'NNet.Game.STriggerSkippedEvent'),
45: (111, 'NNet.Game.STriggerSoundLengthQueryEvent'),
46: (115, 'NNet.Game.STriggerSoundOffsetEvent'),
47: (116, 'NNet.Game.STriggerTransmissionOffsetEvent'),
48: (117, 'NNet.Game.STriggerTransmissionCompleteEvent'),
49: (120, 'NNet.Game.SCameraUpdateEvent'),
50: (66, 'NNet.Game.STriggerAbortMissionEvent'),
51: (107, 'NNet.Game.STriggerPurchaseMadeEvent'),
52: (66, 'NNet.Game.STriggerPurchaseExitEvent'),
53: (108, 'NNet.Game.STriggerPlanetMissionLaunchedEvent'),
54: (66, 'NNet.Game.STriggerPlanetPanelCanceledEvent'),
55: (110, 'NNet.Game.STriggerDialogControlEvent'),
56: (114, 'NNet.Game.STriggerSoundLengthSyncEvent'),
57: (122, 'NNet.Game.STriggerConversationSkippedEvent'),
58: (125, 'NNet.Game.STriggerMouseClickedEvent'),
59: (126, 'NNet.Game.STriggerMouseMovedEvent'),
60: (127, 'NNet.Game.SAchievementAwardedEvent'),
63: (66, 'NNet.Game.STriggerPlanetPanelReplayEvent'),
64: (128, 'NNet.Game.STriggerSoundtrackDoneEvent'),
65: (129, 'NNet.Game.STriggerPlanetMissionSelectedEvent'),
66: (130, 'NNet.Game.STriggerKeyPressedEvent'),
67: (142, 'NNet.Game.STriggerMovieFunctionEvent'),
68: (66, 'NNet.Game.STriggerPlanetPanelBirthCompleteEvent'),
69: (66, 'NNet.Game.STriggerPlanetPanelDeathCompleteEvent'),
70: (131, 'NNet.Game.SResourceRequestEvent'),
71: (132, 'NNet.Game.SResourceRequestFulfillEvent'),
72: (133, 'NNet.Game.SResourceRequestCancelEvent'),
73: (66, 'NNet.Game.STriggerResearchPanelExitEvent'),
74: (66, 'NNet.Game.STriggerResearchPanelPurchaseEvent'),
75: (134, 'NNet.Game.STriggerResearchPanelSelectionChangedEvent'),
76: (135, 'NNet.Game.SLagMessageEvent'),
77: (66, 'NNet.Game.STriggerMercenaryPanelExitEvent'),
78: (66, 'NNet.Game.STriggerMercenaryPanelPurchaseEvent'),
79: (136, 'NNet.Game.STriggerMercenaryPanelSelectionChangedEvent'),
80: (66, 'NNet.Game.STriggerVictoryPanelExitEvent'),
81: (66, 'NNet.Game.STriggerBattleReportPanelExitEvent'),
82: (137, 'NNet.Game.STriggerBattleReportPanelPlayMissionEvent'),
83: (138, 'NNet.Game.STriggerBattleReportPanelPlaySceneEvent'),
84: (138, 'NNet.Game.STriggerBattleReportPanelSelectionChangedEvent'),
85: (108, 'NNet.Game.STriggerVictoryPanelPlayMissionAgainEvent'),
86: (66, 'NNet.Game.STriggerMovieStartedEvent'),
87: (66, 'NNet.Game.STriggerMovieFinishedEvent'),
88: (140, 'NNet.Game.SDecrementGameTimeRemainingEvent'),
89: (141, 'NNet.Game.STriggerPortraitLoadedEvent'),
90: (143, 'NNet.Game.STriggerCustomDialogDismissedEvent'),
91: (144, 'NNet.Game.STriggerGameMenuItemSelectedEvent'),
92: (145, 'NNet.Game.STriggerCameraMoveEvent'),
93: (107, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseItemChangedEvent'),
94: (146, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseCategoryChangedEvent'),
95: (147, 'NNet.Game.STriggerButtonPressedEvent'),
96: (66, 'NNet.Game.STriggerGameCreditsFinishedEvent'),
97: (148, 'NNet.Game.STriggerCutsceneBookmarkFiredEvent'),
98: (149, 'NNet.Game.STriggerCutsceneEndSceneFiredEvent'),
99: (150, 'NNet.Game.STriggerCutsceneConversationLineEvent'),
100: (151, 'NNet.Game.STriggerCutsceneConversationLineMissingEvent'),
}
# The typeid of the NNet.Game.EEventId enum.
game_eventid_typeid = 0
# Map from protocol NNet.Game.*Message eventid to (typeid, name)
message_event_types = {
    0: (152, 'NNet.Game.SChatMessage'),
    1: (153, 'NNet.Game.SPingMessage'),
    2: (154, 'NNet.Game.SLoadingProgressMessage'),
    3: (66, 'NNet.Game.SServerPingMessage'),
}
# The typeid of the NNet.Game.EMessageId enum.
message_eventid_typeid = 1
# Map from protocol NNet.Replay.Tracker.*Event eventid to (typeid, name)
# (empty: this protocol build predates the tracker event stream).
tracker_event_types = {
}
# NOTE: older builds may not support some types and the generated methods
# may fail to function properly, if specific backwards compatibility is
# needed these values should be tested against for None
# The typeid of the NNet.Replay.Tracker.EEventId enum.
tracker_eventid_typeid = None
# The typeid of NNet.SVarUint32 (the type used to encode gameloop deltas).
svaruint32_typeid = 6
# The typeid of NNet.Replay.SGameUserId (the type used to encode player ids).
# None: this build encodes no per-event user id structure of its own.
replay_userid_typeid = None
# The typeid of NNet.Replay.SHeader (the type used to store replay game version and length).
replay_header_typeid = 11
# The typeid of NNet.Game.SDetails (the type used to store overall replay details).
game_details_typeid = 32
# The typeid of NNet.Replay.SInitData (the type used to store the initial lobby).
replay_initdata_typeid = 57
def _varuint32_value(value):
# Returns the numeric value from a SVarUint32 instance.
for v in value.values():
return v
return 0
def _decode_event_stream(decoder, eventid_typeid, event_types, decode_user_id):
    """Decode and yield each event from an event stream.

    Every event on the wire is prefixed with a gameloop delta (SVarUint32)
    and, when decode_user_id is true, a user id. The decoded struct is
    annotated in place with bookkeeping keys: '_event' (type name),
    '_eventid', '_gameloop' (running total of deltas), '_userid' (only if
    decode_user_id), and '_bits' (stream bits consumed by this event).

    Raises CorruptedError when an unknown event id is encountered.
    """
    gameloop = 0
    while not decoder.done():
        start_bits = decoder.used_bits()
        # decode the gameloop delta before each event
        delta = _varuint32_value(decoder.instance(svaruint32_typeid))
        gameloop += delta
        # decode the userid before each event
        if decode_user_id:
            userid = decoder.instance(replay_userid_typeid)
        # decode the event id
        eventid = decoder.instance(eventid_typeid)
        typeid, typename = event_types.get(eventid, (None, None))
        if typeid is None:
            raise CorruptedError('eventid({}) at {}'.format(eventid, decoder))
        # decode the event struct instance
        event = decoder.instance(typeid)
        event['_event'] = typename
        event['_eventid'] = eventid
        # insert gameloop and userid
        event['_gameloop'] = gameloop
        if decode_user_id:
            event['_userid'] = userid
        # the next event is byte aligned
        decoder.byte_align()
        # insert bits used in stream
        event['_bits'] = decoder.used_bits() - start_bits
        yield event
def decode_replay_game_events(contents):
    """Decode the contents byte string and yield each game event."""
    stream = _decode_event_stream(BitPackedDecoder(contents, typeinfos),
                                  game_eventid_typeid,
                                  game_event_types,
                                  decode_user_id=True)
    for event in stream:
        yield event
def decode_replay_message_events(contents):
    """Decode the contents byte string and yield each message event."""
    stream = _decode_event_stream(BitPackedDecoder(contents, typeinfos),
                                  message_eventid_typeid,
                                  message_event_types,
                                  decode_user_id=True)
    for event in stream:
        yield event
def decode_replay_tracker_events(contents):
    """Decode the contents byte string and yield each tracker event.

    Tracker events use the versioned (not bit-packed) decoder and carry
    no user id prefix.
    """
    stream = _decode_event_stream(VersionedDecoder(contents, typeinfos),
                                  tracker_eventid_typeid,
                                  tracker_event_types,
                                  decode_user_id=False)
    for event in stream:
        yield event
def decode_replay_header(contents):
    """Decode and return the replay header (NNet.Replay.SHeader)."""
    return VersionedDecoder(contents, typeinfos).instance(replay_header_typeid)
def decode_replay_details(contents):
    """Decode and return the game details (NNet.Game.SDetails)."""
    return VersionedDecoder(contents, typeinfos).instance(game_details_typeid)
def decode_replay_initdata(contents):
    """Decode and return the replay init data (NNet.Replay.SInitData)."""
    return BitPackedDecoder(contents, typeinfos).instance(replay_initdata_typeid)
def decode_replay_attributes_events(contents):
    """Decode and return the attributes from the contents byte string.

    Result shape: {'source': int, 'mapNamespace': int,
                   'scopes': {scope: {attrid: [value, ...]}}}.
    Each value dict holds 'namespace', 'attrid' and a 4-byte 'value'
    (byte-reversed, NUL-stripped).
    """
    buffer = BitPackedBuffer(contents, 'little')
    attributes = {}
    if buffer.done():
        return attributes
    attributes['source'] = buffer.read_bits(8)
    attributes['mapNamespace'] = buffer.read_bits(32)
    # Attribute count: the value is unused, but the bits must be consumed
    # so the reads below stay aligned with the wire format.
    buffer.read_bits(32)
    scopes = attributes['scopes'] = {}
    while not buffer.done():
        value = {}
        value['namespace'] = buffer.read_bits(32)
        attrid = value['attrid'] = buffer.read_bits(32)
        scope = buffer.read_bits(8)
        value['value'] = buffer.read_aligned_bytes(4)[::-1].strip(b'\x00')
        scopes.setdefault(scope, {}).setdefault(attrid, []).append(value)
    return attributes
def unit_tag(unitTagIndex, unitTagRecycle):
    """Combine an index (upper bits) and recycle counter into a full unit tag."""
    shifted_index = unitTagIndex << 18
    return shifted_index + unitTagRecycle
def unit_tag_index(unitTag):
    """Extract the 14-bit index portion of a full unit tag."""
    index_bits = unitTag >> 18
    return index_bits & 0x00003fff
def unit_tag_recycle(unitTag):
    """Extract the 18-bit recycle-counter portion of a full unit tag."""
    recycle_mask = 0x0003ffff
    return unitTag & recycle_mask
| |
from mock import patch, call
from nose.tools import assert_equal, assert_raises, assert_is
import requests
from performanceplatform.utils import requests_with_backoff
def _make_good_response():
    """Build a successful (200) requests.Response with a small body.

    Returns a fresh Response each call so tests can assert identity.
    """
    good_response = requests.Response()
    good_response.status_code = 200
    # Redundant str() wrapper removed; the value is unchanged.
    # NOTE(review): requests normally stores body bytes in _content; a str
    # works here only because these tests never read .content — confirm.
    good_response._content = 'Hello'
    return good_response
def _make_bad_response(bad_status_code):
    """Build a requests.Response carrying the given error status code."""
    response = requests.Response()
    response.status_code = bad_status_code
    return response
@patch('time.sleep')
@patch('performanceplatform.utils.requests_with_backoff.request')
def _request_and_assert(request_call,
                        expected_call,
                        status_code,
                        mock_request,
                        mock_sleep):
    """
    Drive requests_with_backoff through two failing responses (with the
    given status_code) followed by a success, then verify that the wrapped
    requests package was invoked three times with identical parameters and
    that the backoff slept 10s then 20s between attempts.
    """
    bad_response = _make_bad_response(status_code)
    # A list side_effect yields one response per call, in order:
    # fail, fail, succeed.
    mock_request.side_effect = [bad_response,
                                bad_response,
                                _make_good_response()]
    request_call()
    assert_equal(
        [call(10), call(20)],
        mock_sleep.call_args_list)
    assert_equal(
        [expected_call] * 3,
        mock_request.call_args_list)
class TestRequestsWithBackoff(object):
    """Verify requests_with_backoff proxies GET/POST to requests and
    backs off (sleeping 10s, 20s, ...) on retryable status codes."""

    # Shared scenarios ------------------------------------------------

    def _assert_get_backs_off(self, status_code):
        # Two failures with status_code, then success; kwargs must be
        # forwarded unchanged on every attempt.
        _request_and_assert(
            lambda: requests_with_backoff.get('http://fake.com',
                                              kwarg1='a kwarg',
                                              kwarg2='another kwarg'),
            call('GET',
                 'http://fake.com',
                 kwarg1='a kwarg',
                 kwarg2='another kwarg'),
            status_code)

    def _assert_post_backs_off(self, status_code):
        _request_and_assert(
            lambda: requests_with_backoff.post('http://fake.com',
                                               data={},
                                               kwarg1='a kwarg',
                                               kwarg2='another kwarg'),
            call('POST',
                 'http://fake.com',
                 data={},
                 kwarg1='a kwarg',
                 kwarg2='another kwarg'),
            status_code)

    # GET -------------------------------------------------------------

    @patch('performanceplatform.utils.requests_with_backoff.request')
    def test_get_proxies_requests_get(self, mock_request):
        good_response = _make_good_response()
        mock_request.return_value = good_response
        response = requests_with_backoff.get('http://fake.com',
                                             kwarg1='a kwarg',
                                             kwarg2='another kwarg')
        assert_is(response, good_response)
        mock_request.assert_called_with('GET',
                                        'http://fake.com',
                                        kwarg1='a kwarg',
                                        kwarg2='another kwarg')

    def test_get_sleeps_correctly_on_503(self):
        self._assert_get_backs_off(503)

    def test_get_sleeps_correctly_on_502(self):
        self._assert_get_backs_off(502)

    def test_get_sleeps_correctly_on_403(self):
        self._assert_get_backs_off(403)

    @patch('performanceplatform.utils.requests_with_backoff.request')
    @patch('time.sleep')
    def test_get_does_not_sleep_on_404(self,
                                       mock_sleep,
                                       mock_request):
        # 404 is not retryable: the error propagates without any sleeping.
        mock_request.return_value = _make_bad_response(404)
        with assert_raises(requests.HTTPError) as e:
            requests_with_backoff.get('http://fake.com',
                                      kwarg1='a kwarg',
                                      kwarg2='another kwarg')
        assert_equal(e.exception.response.status_code, 404)
        assert_equal([], mock_sleep.call_args_list)
        mock_request.assert_called_with('GET',
                                        'http://fake.com',
                                        kwarg1='a kwarg',
                                        kwarg2='another kwarg')

    @patch('performanceplatform.utils.requests_with_backoff.request')
    @patch('time.sleep')
    def test_get_raises_error_after_5_retries(self,
                                              mock_sleep,
                                              mock_request):
        # Permanently unavailable: five doubling sleeps, then give up.
        mock_request.return_value = _make_bad_response(503)
        with assert_raises(requests.HTTPError) as e:
            requests_with_backoff.get('http://fake.com',
                                      kwarg1='a kwarg',
                                      kwarg2='another kwarg')
        assert_equal(e.exception.response.status_code, 503)
        assert_equal(
            [call(10), call(20), call(40), call(80), call(160)],
            mock_sleep.call_args_list)
        mock_request.assert_called_with('GET',
                                        'http://fake.com',
                                        kwarg1='a kwarg',
                                        kwarg2='another kwarg')

    # POST ------------------------------------------------------------

    @patch('performanceplatform.utils.requests_with_backoff.request')
    def test_post_proxies_requests_post(self, mock_request):
        good_response = _make_good_response()
        mock_request.return_value = good_response
        response = requests_with_backoff.post('http://fake.com',
                                              data={},
                                              kwarg1='a kwarg',
                                              kwarg2='another kwarg')
        assert_is(response, good_response)
        mock_request.assert_called_with('POST',
                                        'http://fake.com',
                                        data={},
                                        kwarg1='a kwarg',
                                        kwarg2='another kwarg')

    def test_post_sleeps_correctly_on_503(self):
        self._assert_post_backs_off(503)

    def test_post_sleeps_correctly_on_502(self):
        self._assert_post_backs_off(502)

    def test_post_sleeps_correctly_on_403(self):
        self._assert_post_backs_off(403)

    @patch('performanceplatform.utils.requests_with_backoff.request')
    @patch('time.sleep')
    def test_post_does_not_sleep_on_404(self,
                                        mock_sleep,
                                        mock_request):
        mock_request.return_value = _make_bad_response(404)
        with assert_raises(requests.HTTPError) as e:
            requests_with_backoff.post('http://fake.com',
                                       data={},
                                       kwarg1='a kwarg',
                                       kwarg2='another kwarg')
        assert_equal(e.exception.response.status_code, 404)
        assert_equal([], mock_sleep.call_args_list)
        mock_request.assert_called_with('POST',
                                        'http://fake.com',
                                        data={},
                                        kwarg1='a kwarg',
                                        kwarg2='another kwarg')

    @patch('performanceplatform.utils.requests_with_backoff.request')
    @patch('time.sleep')
    def test_post_raises_error_after_5_retries(self,
                                               mock_sleep,
                                               mock_request):
        # NOTE(review): unlike the other POST tests this one sends no
        # data kwarg — preserved as in the original.
        mock_request.return_value = _make_bad_response(503)
        with assert_raises(requests.HTTPError) as e:
            requests_with_backoff.post('http://fake.com',
                                       kwarg1='a kwarg',
                                       kwarg2='another kwarg')
        assert_equal(e.exception.response.status_code, 503)
        assert_equal(
            [call(10), call(20), call(40), call(80), call(160)],
            mock_sleep.call_args_list)
        mock_request.assert_called_with('POST',
                                        'http://fake.com',
                                        kwarg1='a kwarg',
                                        kwarg2='another kwarg')
| |
from __future__ import unicode_literals
import collections
import datetime
import decimal
import inspect
import math
import os
import re
import types
from importlib import import_module
from django.apps import apps
from django.db import migrations, models
from django.db.migrations.loader import MigrationLoader
from django.utils import datetime_safe, six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.functional import Promise
from django.utils.module_loading import module_dir
from django.utils.timezone import utc
from django.utils.version import get_docs_version
# The concrete type of a compiled regex pattern; obtained via type() because
# it is not directly importable here.
COMPILED_REGEX_TYPE = type(re.compile(''))
class SettingsReference(str):
    """
    Special subclass of string which actually references a current settings
    value. It's treated as the value in memory, but serializes out to a
    settings.NAME attribute reference.
    """
    def __new__(cls, value, setting_name):
        # __new__ receives the class, not an instance; the original code
        # misleadingly named this first argument "self".
        return str.__new__(cls, value)

    def __init__(self, value, setting_name):
        # Remember which settings attribute this value came from, so the
        # serializer can emit "settings.<setting_name>" instead of the value.
        self.setting_name = setting_name
class OperationWriter(object):
    """Serializes a single migration operation into indented source lines."""
    # Base indentation level, in 4-space units, for rendered lines.
    indentation = 2
    def __init__(self, operation):
        # The operation instance to serialize and the output line buffer.
        self.operation = operation
        self.buff = []
    def serialize(self):
        """Return (rendered_source, imports) for self.operation.

        Deconstructs the operation and renders a `migrations.Name(...)`
        (or `module.Name(...)`) call, collecting the set of import
        statements needed by the serialized arguments.
        """
        def _write(_arg_name, _arg_value):
            # Arguments named in serialization_expand_args that hold a
            # list/tuple/dict are expanded one item per line; everything
            # else is serialized inline. `imports` is the closure variable
            # defined below, shared with the outer body.
            if (_arg_name in self.operation.serialization_expand_args and
                    isinstance(_arg_value, (list, tuple, dict))):
                if isinstance(_arg_value, dict):
                    self.feed('%s={' % _arg_name)
                    self.indent()
                    for key, value in _arg_value.items():
                        key_string, key_imports = MigrationWriter.serialize(key)
                        arg_string, arg_imports = MigrationWriter.serialize(value)
                        self.feed('%s: %s,' % (key_string, arg_string))
                        imports.update(key_imports)
                        imports.update(arg_imports)
                    self.unindent()
                    self.feed('},')
                else:
                    self.feed('%s=[' % _arg_name)
                    self.indent()
                    for item in _arg_value:
                        arg_string, arg_imports = MigrationWriter.serialize(item)
                        self.feed('%s,' % arg_string)
                        imports.update(arg_imports)
                    self.unindent()
                    self.feed('],')
            else:
                arg_string, arg_imports = MigrationWriter.serialize(_arg_value)
                self.feed('%s=%s,' % (_arg_name, arg_string))
                imports.update(arg_imports)
        imports = set()
        name, args, kwargs = self.operation.deconstruct()
        argspec = inspect.getargspec(self.operation.__init__)
        # See if this operation is in django.db.migrations. If it is,
        # We can just use the fact we already have that imported,
        # otherwise, we need to add an import for the operation class.
        if getattr(migrations, name, None) == self.operation.__class__:
            self.feed('migrations.%s(' % name)
        else:
            imports.add('import %s' % (self.operation.__class__.__module__))
            self.feed('%s.%s(' % (self.operation.__class__.__module__, name))
        self.indent()
        # Start at one because argspec includes "self"
        for i, arg in enumerate(args, 1):
            arg_value = arg
            arg_name = argspec.args[i]
            _write(arg_name, arg_value)
        # Positional args occupy argspec.args[1..len(args)]; keyword-only
        # remainder starts after them.
        i = len(args)
        # Only iterate over remaining arguments
        for arg_name in argspec.args[i + 1:]:
            if arg_name in kwargs:  # Don't sort to maintain signature order
                arg_value = kwargs[arg_name]
                _write(arg_name, arg_value)
        self.unindent()
        self.feed('),')
        return self.render(), imports
    def indent(self):
        # Increase the nesting level for subsequent feed() calls.
        self.indentation += 1
    def unindent(self):
        # Decrease the nesting level for subsequent feed() calls.
        self.indentation -= 1
    def feed(self, line):
        # Append a line at the current indentation (4 spaces per level).
        self.buff.append(' ' * (self.indentation * 4) + line)
    def render(self):
        # Join buffered lines into the final source snippet.
        return '\n'.join(self.buff)
class MigrationWriter(object):
"""
Takes a Migration instance and is able to produce the contents
of the migration file from it.
"""
    def __init__(self, migration):
        # Migration instance to render. needs_manual_porting is flipped to
        # True by as_string() when imports of functions from other migration
        # files are detected (those must be copied in by hand).
        self.migration = migration
        self.needs_manual_porting = False
def as_string(self):
"""
Returns a string of the file contents.
"""
items = {
"replaces_str": "",
}
imports = {"from django.db import migrations, models"}
# Deconstruct operations
operations = []
for operation in self.migration.operations:
operation_string, operation_imports = OperationWriter(operation).serialize()
imports.update(operation_imports)
operations.append(operation_string)
items["operations"] = "\n".join(operations) + "\n" if operations else ""
# Format dependencies and write out swappable dependencies right
dependencies = []
for dependency in self.migration.dependencies:
if dependency[0] == "__setting__":
dependencies.append(" migrations.swappable_dependency(settings.%s)," % dependency[1])
imports.add("from django.conf import settings")
else:
# No need to output bytestrings for dependencies
dependency = tuple(force_text(s) for s in dependency)
dependencies.append(" %s," % self.serialize(dependency)[0])
items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else ""
# Format imports nicely, swapping imports of functions from migration files
# for comments
migration_imports = set()
for line in list(imports):
if re.match("^import (.*)\.\d+[^\s]*$", line):
migration_imports.add(line.split("import")[1].strip())
imports.remove(line)
self.needs_manual_porting = True
imports.discard("from django.db import models")
# Sort imports by the package / module to be imported (the part after
# "from" in "from ... import ..." or after "import" in "import ...").
sorted_imports = sorted(imports, key=lambda i: i.split()[1])
items["imports"] = "\n".join(sorted_imports) + "\n" if imports else ""
if migration_imports:
items["imports"] += (
"\n\n# Functions from the following migrations need manual "
"copying.\n# Move them and any dependencies into this file, "
"then update the\n# RunPython operations to refer to the local "
"versions:\n# %s"
) % "\n# ".join(sorted(migration_imports))
# If there's a replaces, make a string for it
if self.migration.replaces:
items['replaces_str'] = "\n replaces = %s\n" % self.serialize(self.migration.replaces)[0]
return (MIGRATION_TEMPLATE % items).encode("utf8")
@staticmethod
def serialize_datetime(value):
"""
Returns a serialized version of a datetime object that is valid,
executable python code. It converts timezone-aware values to utc with
an 'executable' utc representation of tzinfo.
"""
if value.tzinfo is not None and value.tzinfo != utc:
value = value.astimezone(utc)
value_repr = repr(value).replace("<UTC>", "utc")
if isinstance(value, datetime_safe.datetime):
value_repr = "datetime.%s" % value_repr
return value_repr
    @property
    def basedir(self):
        """Directory where this migration file should be written.

        Resolution order: (1) the importable migrations package itself,
        (2) a `migrations` subdirectory of the app, (3) for a custom
        MIGRATION_MODULES target that doesn't exist yet, create the missing
        package directories (with __init__.py files) under the deepest
        importable ancestor. Raises ValueError if no ancestor is importable.
        """
        migrations_package_name = MigrationLoader.migrations_module(self.migration.app_label)
        # See if we can import the migrations module directly
        try:
            migrations_module = import_module(migrations_package_name)
        except ImportError:
            pass
        else:
            try:
                return upath(module_dir(migrations_module))
            except ValueError:
                # Namespace/extension modules have no single directory.
                pass
        # Alright, see if it's a direct submodule of the app
        app_config = apps.get_app_config(self.migration.app_label)
        maybe_app_name, _, migrations_package_basename = migrations_package_name.rpartition(".")
        if app_config.name == maybe_app_name:
            return os.path.join(app_config.path, migrations_package_basename)
        # In case of using MIGRATION_MODULES setting and the custom package
        # doesn't exist, create one, starting from an existing package
        existing_dirs, missing_dirs = migrations_package_name.split("."), []
        while existing_dirs:
            # Peel one trailing component per iteration until an importable
            # ancestor package with a real directory is found.
            missing_dirs.insert(0, existing_dirs.pop(-1))
            try:
                base_module = import_module(".".join(existing_dirs))
            except ImportError:
                continue
            else:
                try:
                    base_dir = upath(module_dir(base_module))
                except ValueError:
                    continue
                else:
                    break
        else:
            # while/else: no ancestor could be imported at all.
            raise ValueError(
                "Could not locate an appropriate location to create "
                "migrations package %s. Make sure the toplevel "
                "package exists and can be imported." %
                migrations_package_name)
        final_dir = os.path.join(base_dir, *missing_dirs)
        if not os.path.isdir(final_dir):
            os.makedirs(final_dir)
        # Create each missing package level with an empty __init__.py so
        # the new directories are importable packages.
        for missing_dir in missing_dirs:
            base_dir = os.path.join(base_dir, missing_dir)
            with open(os.path.join(base_dir, "__init__.py"), "w"):
                pass
        return final_dir
@property
def filename(self):
    """Name of the migration file on disk, derived from the migration name."""
    return "{0}.py".format(self.migration.name)
@property
def path(self):
    """Full filesystem path where this migration file should be written."""
    directory = self.basedir
    return os.path.join(directory, self.filename)
@classmethod
def serialize_deconstructed(cls, path, args, kwargs):
    """
    Render a deconstructed object as a constructor call string.

    Serializes each positional and (sorted) keyword argument through
    cls.serialize, accumulating every import either of them needs, and
    returns ``("name(arg, kw=val, ...)", imports)``.
    """
    target, imports = cls._serialize_path(path)
    rendered = []
    for positional in args:
        text, extra_imports = cls.serialize(positional)
        imports.update(extra_imports)
        rendered.append(text)
    for key, val in sorted(kwargs.items()):
        text, extra_imports = cls.serialize(val)
        imports.update(extra_imports)
        rendered.append("%s=%s" % (key, text))
    return "%s(%s)" % (target, ", ".join(rendered)), imports
@classmethod
def _serialize_path(cls, path):
    """
    Turn a dotted object path into a (reference string, imports) pair.

    Paths inside django.db.models are shortened to the conventional
    ``models.X`` form; anything else is referenced by its full dotted
    path with a plain module import.
    """
    module, leaf = path.rsplit(".", 1)
    if module == "django.db.models":
        return "models.%s" % leaf, {"from django.db import models"}
    return path, {"import %s" % module}
@classmethod
def serialize(cls, value):
    """
    Serializes the value to a string that's parsable by Python, along
    with any needed imports to make that string work.
    More advanced than repr() as it can encode things
    like datetime.datetime.now.

    Returns a ``(code_string, imports)`` tuple where ``imports`` is a
    set of import statements the snippet needs in order to evaluate.
    Raises ValueError for values that cannot be written to a migration.
    """
    # FIXME: Ideally Promise would be reconstructible, but for now we
    # use force_text on them and defer to the normal string serialization
    # process.
    if isinstance(value, Promise):
        value = force_text(value)
    # Sequences
    if isinstance(value, (frozenset, list, set, tuple)):
        imports = set()
        strings = []
        for item in value:
            # Recurse per element, accumulating every import needed.
            item_string, item_imports = cls.serialize(item)
            imports.update(item_imports)
            strings.append(item_string)
        if isinstance(value, set):
            # Don't use the literal "{%s}" as it doesn't support empty set
            format = "set([%s])"
        elif isinstance(value, frozenset):
            format = "frozenset([%s])"
        elif isinstance(value, tuple):
            # When len(value)==0, the empty tuple should be serialized as
            # "()", not "(,)" because (,) is invalid Python syntax.
            format = "(%s)" if len(value) != 1 else "(%s,)"
        else:
            format = "[%s]"
        return format % (", ".join(strings)), imports
    # Dictionaries
    elif isinstance(value, dict):
        imports = set()
        strings = []
        # Sorting keeps the generated source deterministic between runs.
        for k, v in sorted(value.items()):
            k_string, k_imports = cls.serialize(k)
            v_string, v_imports = cls.serialize(v)
            imports.update(k_imports)
            imports.update(v_imports)
            strings.append((k_string, v_string))
        return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports
    # Datetimes
    elif isinstance(value, datetime.datetime):
        value_repr = cls.serialize_datetime(value)
        imports = ["import datetime"]
        if value.tzinfo is not None:
            imports.append("from django.utils.timezone import utc")
        return value_repr, set(imports)
    # Dates
    elif isinstance(value, datetime.date):
        value_repr = repr(value)
        if isinstance(value, datetime_safe.date):
            # datetime_safe reprs lack the "datetime." module prefix.
            value_repr = "datetime.%s" % value_repr
        return value_repr, {"import datetime"}
    # Times
    elif isinstance(value, datetime.time):
        value_repr = repr(value)
        if isinstance(value, datetime_safe.time):
            value_repr = "datetime.%s" % value_repr
        return value_repr, {"import datetime"}
    # Settings references
    elif isinstance(value, SettingsReference):
        return "settings.%s" % value.setting_name, {"from django.conf import settings"}
    # Simple types
    elif isinstance(value, float):
        if math.isnan(value) or math.isinf(value):
            # nan/inf have no literal form; float("nan") round-trips them.
            return 'float("{}")'.format(value), set()
        return repr(value), set()
    elif isinstance(value, six.integer_types + (bool, type(None))):
        return repr(value), set()
    elif isinstance(value, six.binary_type):
        value_repr = repr(value)
        if six.PY2:
            # Prepend the `b` prefix since we're importing unicode_literals
            value_repr = 'b' + value_repr
        return value_repr, set()
    elif isinstance(value, six.text_type):
        value_repr = repr(value)
        if six.PY2:
            # Strip the `u` prefix since we're importing unicode_literals
            value_repr = value_repr[1:]
        return value_repr, set()
    # Decimal
    elif isinstance(value, decimal.Decimal):
        return repr(value), {"from decimal import Decimal"}
    # Django fields
    elif isinstance(value, models.Field):
        attr_name, path, args, kwargs = value.deconstruct()
        return cls.serialize_deconstructed(path, args, kwargs)
    # Classes
    elif isinstance(value, type):
        special_cases = [
            (models.Model, "models.Model", []),
        ]
        for case, string, imports in special_cases:
            if case is value:
                return string, set(imports)
        if hasattr(value, "__module__"):
            module = value.__module__
            if module == six.moves.builtins.__name__:
                # Builtins (e.g. int) need no import and no qualification.
                return value.__name__, set()
            else:
                return "%s.%s" % (module, value.__name__), {"import %s" % module}
    elif isinstance(value, models.manager.BaseManager):
        as_manager, manager_path, qs_path, args, kwargs = value.deconstruct()
        if as_manager:
            name, imports = cls._serialize_path(qs_path)
            return "%s.as_manager()" % name, imports
        else:
            return cls.serialize_deconstructed(manager_path, args, kwargs)
    # Anything that knows how to deconstruct itself.
    elif hasattr(value, 'deconstruct'):
        return cls.serialize_deconstructed(*value.deconstruct())
    # Functions
    elif isinstance(value, (types.FunctionType, types.BuiltinFunctionType)):
        # @classmethod?
        if getattr(value, "__self__", None) and isinstance(value.__self__, type):
            klass = value.__self__
            module = klass.__module__
            return "%s.%s.%s" % (module, klass.__name__, value.__name__), {"import %s" % module}
        # Further error checking
        if value.__name__ == '<lambda>':
            raise ValueError("Cannot serialize function: lambda")
        if value.__module__ is None:
            raise ValueError("Cannot serialize function %r: No module" % value)
        # Python 3 is a lot easier, and only uses this branch if it's not local.
        if getattr(value, "__qualname__", None) and getattr(value, "__module__", None):
            if "<" not in value.__qualname__:  # Qualname can include <locals>
                return "%s.%s" % (value.__module__, value.__qualname__), {"import %s" % value.__module__}
        # Python 2/fallback version
        module_name = value.__module__
        # Make sure it's actually there and not an unbound method
        module = import_module(module_name)
        if not hasattr(module, value.__name__):
            raise ValueError(
                "Could not find function %s in %s.\n"
                "Please note that due to Python 2 limitations, you cannot "
                "serialize unbound method functions (e.g. a method "
                "declared and used in the same class body). Please move "
                "the function into the main module body to use migrations.\n"
                "For more information, see "
                "https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
                % (value.__name__, module_name, get_docs_version()))
        return "%s.%s" % (module_name, value.__name__), {"import %s" % module_name}
    # Other iterables
    elif isinstance(value, collections.Iterable):
        # NOTE(review): collections.Iterable is the Py2/early-Py3 alias of
        # collections.abc.Iterable; it was removed in Python 3.10 — confirm
        # the supported Python versions before upgrading.
        imports = set()
        strings = []
        for item in value:
            item_string, item_imports = cls.serialize(item)
            imports.update(item_imports)
            strings.append(item_string)
        # When len(strings)==0, the empty iterable should be serialized as
        # "()", not "(,)" because (,) is invalid Python syntax.
        format = "(%s)" if len(strings) != 1 else "(%s,)"
        return format % (", ".join(strings)), imports
    # Compiled regex
    elif isinstance(value, COMPILED_REGEX_TYPE):
        imports = {"import re"}
        regex_pattern, pattern_imports = cls.serialize(value.pattern)
        regex_flags, flag_imports = cls.serialize(value.flags)
        imports.update(pattern_imports)
        imports.update(flag_imports)
        args = [regex_pattern]
        # Only emit the flags argument when any flags are actually set.
        if value.flags:
            args.append(regex_flags)
        return "re.compile(%s)" % ', '.join(args), imports
    # Uh oh.
    else:
        raise ValueError(
            "Cannot serialize: %r\nThere are some values Django cannot serialize into "
            "migration files.\nFor more, see https://docs.djangoproject.com/en/%s/"
            "topics/migrations/#migration-serializing" % (value, get_docs_version())
        )
MIGRATION_TEMPLATE = """\
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
%(imports)s
class Migration(migrations.Migration):
%(replaces_str)s
dependencies = [
%(dependencies)s\
]
operations = [
%(operations)s\
]
"""
| |
"""
SQL standards reserved words
"""
RESERVED_WORDS = ["A",
"ABORT",
"ABS",
"ABSENT",
"ABSOLUTE",
"ACCESS",
"ACCORDING",
"ACTION",
"ADA",
"ADD",
"ADMIN",
"AFTER",
"AGGREGATE",
"ALL",
"ALLOCATE",
"ALSO",
"ALTER",
"ALWAYS",
"ANALYSE",
"ANALYZE",
"AND",
"ANY",
"ARE",
"ARRAY",
"ARRAY_AGG",
"ARRAY_MAX_CARDINALITY",
"AS",
"ASC",
"ASENSITIVE",
"ASSERTION",
"ASSIGNMENT",
"ASYMMETRIC",
"AT",
"ATOMIC",
"ATTRIBUTE",
"ATTRIBUTES",
"AUTHORIZATION",
"AVG",
"BACKWARD",
"BASE64",
"BEFORE",
"BEGIN",
"BEGIN_FRAME",
"BEGIN_PARTITION",
"BERNOULLI",
"BETWEEN",
"BIGINT",
"BINARY",
"BIT",
"BIT_LENGTH",
"BLOB",
"BLOCKED",
"BOM",
"BOOLEAN",
"BOTH",
"BREADTH",
"BY",
"C",
"CACHE",
"CALL",
"CALLED",
"CARDINALITY",
"CASCADE",
"CASCADED",
"CASE",
"CAST",
"CATALOG",
"CATALOG_NAME",
"CEIL",
"CEILING",
"CHAIN",
"CHAR",
"CHARACTER",
"CHARACTERISTICS",
"CHARACTERS",
"CHARACTER_LENGTH",
"CHARACTER_SET_CATALOG",
"CHARACTER_SET_NAME",
"CHARACTER_SET_SCHEMA",
"CHAR_LENGTH",
"CHECK",
"CHECKPOINT",
"CLASS",
"CLASS_ORIGIN",
"CLOB",
"CLOSE",
"CLUSTER",
"COALESCE",
"COBOL",
"COLLATE",
"COLLATION",
"COLLATION_CATALOG",
"COLLATION_NAME",
"COLLATION_SCHEMA",
"COLLECT",
"COLUMN",
"COLUMNS",
"COLUMN_NAME",
"COMMAND_FUNCTION",
"COMMAND_FUNCTION_CODE",
"COMMENT",
"COMMENTS",
"COMMIT",
"COMMITTED",
"CONCURRENTLY",
"CONDITION",
"CONDITION_NUMBER",
"CONFIGURATION",
"CONNECT",
"CONNECTION",
"CONNECTION_NAME",
"CONSTRAINT",
"CONSTRAINTS",
"CONSTRAINT_CATALOG",
"CONSTRAINT_NAME",
"CONSTRAINT_SCHEMA",
"CONSTRUCTOR",
"CONTAINS",
"CONTENT",
"CONTINUE",
"CONTROL",
"CONVERSION",
"CONVERT",
"COPY",
"CORR",
"CORRESPONDING",
"COST",
"COUNT",
"COVAR_POP",
"COVAR_SAMP",
"CREATE",
"CROSS",
"CSV",
"CUBE",
"CUME_DIST",
"CURRENT",
"CURRENT_CATALOG",
"CURRENT_DATE",
"CURRENT_DEFAULT_TRANSFORM_GROUP",
"CURRENT_PATH",
"CURRENT_ROLE",
"CURRENT_ROW",
"CURRENT_SCHEMA",
"CURRENT_TIME",
"CURRENT_TIMESTAMP",
"CURRENT_TRANSFORM_GROUP_FOR_TYPE",
"CURRENT_USER",
"CURSOR",
"CURSOR_NAME",
"CYCLE",
"DATA",
"DATABASE",
"DATALINK",
"DATE",
"DATETIME_INTERVAL_CODE",
"DATETIME_INTERVAL_PRECISION",
"DAY",
"DB",
"DEALLOCATE",
"DEC",
"DECIMAL",
"DECLARE",
"DEFAULT",
"DEFAULTS",
"DEFERRABLE",
"DEFERRED",
"DEFINED",
"DEFINER",
"DEGREE",
"DELETE",
"DELIMITER",
"DELIMITERS",
"DENSE_RANK",
"DEPTH",
"DEREF",
"DERIVED",
"DESC",
"DESCRIBE",
"DESCRIPTOR",
"DETERMINISTIC",
"DIAGNOSTICS",
"DICTIONARY",
"DISABLE",
"DISCARD",
"DISCONNECT",
"DISPATCH",
"DISTINCT",
"DLNEWCOPY",
"DLPREVIOUSCOPY",
"DLURLCOMPLETE",
"DLURLCOMPLETEONLY",
"DLURLCOMPLETEWRITE",
"DLURLPATH",
"DLURLPATHONLY",
"DLURLPATHWRITE",
"DLURLSCHEME",
"DLURLSERVER",
"DLVALUE",
"DO",
"DOCUMENT",
"DOMAIN",
"DOUBLE",
"DROP",
"DYNAMIC",
"DYNAMIC_FUNCTION",
"DYNAMIC_FUNCTION_CODE",
"EACH",
"ELEMENT",
"ELSE",
"EMPTY",
"ENABLE",
"ENCODING",
"ENCRYPTED",
"END",
"END-EXEC",
"END_FRAME",
"END_PARTITION",
"ENFORCED",
"ENUM",
"EQUALS",
"ESCAPE",
"EVENT",
"EVERY",
"EXCEPT",
"EXCEPTION",
"EXCLUDE",
"EXCLUDING",
"EXCLUSIVE",
"EXEC",
"EXECUTE",
"EXISTS",
"EXP",
"EXPLAIN",
"EXPRESSION",
"EXTENSION",
"EXTERNAL",
"EXTRACT",
"FALSE",
"FAMILY",
"FETCH",
"FILE",
"FILTER",
"FINAL",
"FIRST",
"FIRST_VALUE",
"FLAG",
"FLOAT",
"FLOOR",
"FOLLOWING",
"FOR",
"FORCE",
"FOREIGN",
"FORTRAN",
"FORWARD",
"FOUND",
"FRAME_ROW",
"FREE",
"FREEZE",
"FROM",
"FS",
"FULL",
"FUNCTION",
"FUNCTIONS",
"FUSION",
"G",
"GENERAL",
"GENERATED",
"GET",
"GLOBAL",
"GO",
"GOTO",
"GRANT",
"GRANTED",
"GREATEST",
"GROUP",
"GROUPING",
"GROUPS",
"HANDLER",
"HAVING",
"HEADER",
"HEX",
"HIERARCHY",
"HOLD",
"HOUR",
"ID",
"IDENTITY",
"IF",
"IGNORE",
"ILIKE",
"IMMEDIATE",
"IMMEDIATELY",
"IMMUTABLE",
"IMPLEMENTATION",
"IMPLICIT",
"IMPORT",
"IN",
"INCLUDING",
"INCREMENT",
"INDENT",
"INDEX",
"INDEXES",
"INDICATOR",
"INHERIT",
"INHERITS",
"INITIALLY",
"INLINE",
"INNER",
"INOUT",
"INPUT",
"INSENSITIVE",
"INSERT",
"INSTANCE",
"INSTANTIABLE",
"INSTEAD",
"INT",
"INTEGER",
"INTEGRITY",
"INTERSECT",
"INTERSECTION",
"INTERVAL",
"INTO",
"INVOKER",
"IS",
"ISNULL",
"ISOLATION",
"JOIN",
"K",
"KEY",
"KEY_MEMBER",
"KEY_TYPE",
"LABEL",
"LAG",
"LANGUAGE",
"LARGE",
"LAST",
"LAST_VALUE",
"LATERAL",
"LC_COLLATE",
"LC_CTYPE",
"LEAD",
"LEADING",
"LEAKPROOF",
"LEAST",
"LEFT",
"LENGTH",
"LEVEL",
"LIBRARY",
"LIKE",
"LIKE_REGEX",
"LIMIT",
"LINK",
"LISTEN",
"LN",
"LOAD",
"LOCAL",
"LOCALTIME",
"LOCALTIMESTAMP",
"LOCATION",
"LOCATOR",
"LOCK",
"LOWER",
"M",
"MAP",
"MAPPING",
"MATCH",
"MATCHED",
"MATERIALIZED",
"MAX",
"MAXVALUE",
"MAX_CARDINALITY",
"MEMBER",
"MERGE",
"MESSAGE_LENGTH",
"MESSAGE_OCTET_LENGTH",
"MESSAGE_TEXT",
"METHOD",
"MIN",
"MINUTE",
"MINVALUE",
"MOD",
"MODE",
"MODIFIES",
"MODULE",
"MONTH",
"MORE",
"MOVE",
"MULTISET",
"MUMPS",
"NAME",
"NAMES",
"NAMESPACE",
"NATIONAL",
"NATURAL",
"NCHAR",
"NCLOB",
"NESTING",
"NEW",
"NEXT",
"NFC",
"NFD",
"NFKC",
"NFKD",
"NIL",
"NO",
"NONE",
"NORMALIZE",
"NORMALIZED",
"NOT",
"NOTHING",
"NOTIFY",
"NOTNULL",
"NOWAIT",
"NTH_VALUE",
"NTILE",
"NULL",
"NULLABLE",
"NULLIF",
"NULLS",
"NUMBER",
"NUMERIC",
"OBJECT",
"OCCURRENCES_REGEX",
"OCTETS",
"OCTET_LENGTH",
"OF",
"OFF",
"OFFSET",
"OIDS",
"OLD",
"ON",
"ONLY",
"OPEN",
"OPERATOR",
"OPTION",
"OPTIONS",
"OR",
"ORDER",
"ORDERING",
"ORDINALITY",
"OTHERS",
"OUT",
"OUTER",
"OUTPUT",
"OVER",
"OVERLAPS",
"OVERLAY",
"OVERRIDING",
"OWNED",
"OWNER",
"P",
"PAD",
"PARAMETER",
"PARAMETER_MODE",
"PARAMETER_NAME",
"PARAMETER_ORDINAL_POSITION",
"PARAMETER_SPECIFIC_CATALOG",
"PARAMETER_SPECIFIC_NAME",
"PARAMETER_SPECIFIC_SCHEMA",
"PARSER",
"PARTIAL",
"PARTITION",
"PASCAL",
"PASSING",
"PASSTHROUGH",
"PASSWORD",
"PATH",
"PERCENT",
"PERCENTILE_CONT",
"PERCENTILE_DISC",
"PERCENT_RANK",
"PERIOD",
"PERMISSION",
"PLACING",
"PLANS",
"PLI",
"PORTION",
"POSITION",
"POSITION_REGEX",
"POWER",
"PRECEDES",
"PRECEDING",
"PRECISION",
"PREPARE",
"PREPARED",
"PRESERVE",
"PRIMARY",
"PRIOR",
"PRIVILEGES",
"PROCEDURAL",
"PROCEDURE",
"PROGRAM",
"PUBLIC",
"QUOTE",
"RANGE",
"RANK",
"READ",
"READS",
"REAL",
"REASSIGN",
"RECHECK",
"RECOVERY",
"RECURSIVE",
"REF",
"REFERENCES",
"REFERENCING",
"REFRESH",
"REGR_AVGX",
"REGR_AVGY",
"REGR_COUNT",
"REGR_INTERCEPT",
"REGR_R2",
"REGR_SLOPE",
"REGR_SXX",
"REGR_SXY",
"REGR_SYY",
"REINDEX",
"RELATIVE",
"RELEASE",
"RENAME",
"REPEATABLE",
"REPLACE",
"REPLICA",
"REQUIRING",
"RESET",
"RESPECT",
"RESTART",
"RESTORE",
"RESTRICT",
"RESULT",
"RETURN",
"RETURNED_CARDINALITY",
"RETURNED_LENGTH",
"RETURNED_OCTET_LENGTH",
"RETURNED_SQLSTATE",
"RETURNING",
"RETURNS",
"REVOKE",
"RIGHT",
"ROLE",
"ROLLBACK",
"ROLLUP",
"ROUTINE",
"ROUTINE_CATALOG",
"ROUTINE_NAME",
"ROUTINE_SCHEMA",
"ROW",
"ROWS",
"ROW_COUNT",
"ROW_NUMBER",
"RULE",
"SAVEPOINT",
"SCALE",
"SCHEMA",
"SCHEMA_NAME",
"SCOPE",
"SCOPE_CATALOG",
"SCOPE_NAME",
"SCOPE_SCHEMA",
"SCROLL",
"SEARCH",
"SECOND",
"SECTION",
"SECURITY",
"SELECT",
"SELECTIVE",
"SELF",
"SENSITIVE",
"SEQUENCE",
"SEQUENCES",
"SERIALIZABLE",
"SERVER",
"SERVER_NAME",
"SESSION",
"SESSION_USER",
"SET",
"SETOF",
"SETS",
"SHARE",
"SHOW",
"SIMILAR",
"SIMPLE",
"SIZE",
"SMALLINT",
"SNAPSHOT",
"SOME",
"SOURCE",
"SPACE",
"SPECIFIC",
"SPECIFICTYPE",
"SPECIFIC_NAME",
"SQL",
"SQLCODE",
"SQLERROR",
"SQLEXCEPTION",
"SQLSTATE",
"SQLWARNING",
"SQRT",
"STABLE",
"STANDALONE",
"START",
"STATE",
"STATEMENT",
"STATIC",
"STATISTICS",
"STDDEV_POP",
"STDDEV_SAMP",
"STDIN",
"STDOUT",
"STORAGE",
"STRICT",
"STRIP",
"STRUCTURE",
"STYLE",
"SUBCLASS_ORIGIN",
"SUBMULTISET",
"SUBSTRING",
"SUBSTRING_REGEX",
"SUCCEEDS",
"SUM",
"SYMMETRIC",
"SYSID",
"SYSTEM",
"SYSTEM_TIME",
"SYSTEM_USER",
"T",
"TABLE",
"TABLES",
"TABLESAMPLE",
"TABLESPACE",
"TABLE_NAME",
"TEMP",
"TEMPLATE",
"TEMPORARY",
"TEXT",
"THEN",
"TIES",
"TIME",
"TIMESTAMP",
"TIMEZONE_HOUR",
"TIMEZONE_MINUTE",
"TO",
"TOKEN",
"TOP_LEVEL_COUNT",
"TRAILING",
"TRANSACTION",
"TRANSACTIONS_COMMITTED",
"TRANSACTIONS_ROLLED_BACK",
"TRANSACTION_ACTIVE",
"TRANSFORM",
"TRANSFORMS",
"TRANSLATE",
"TRANSLATE_REGEX",
"TRANSLATION",
"TREAT",
"TRIGGER",
"TRIGGER_CATALOG",
"TRIGGER_NAME",
"TRIGGER_SCHEMA",
"TRIM",
"TRIM_ARRAY",
"TRUE",
"TRUNCATE",
"TRUSTED",
"TYPE",
"TYPES",
"UESCAPE",
"UNBOUNDED",
"UNCOMMITTED",
"UNDER",
"UNENCRYPTED",
"UNION",
"UNIQUE",
"UNKNOWN",
"UNLINK",
"UNLISTEN",
"UNLOGGED",
"UNNAMED",
"UNNEST",
"UNTIL",
"UNTYPED",
"UPDATE",
"UPPER",
"URI",
"USAGE",
"USER",
"USER_DEFINED_TYPE_CATALOG",
"USER_DEFINED_TYPE_CODE",
"USER_DEFINED_TYPE_NAME",
"USER_DEFINED_TYPE_SCHEMA",
"USING",
"VACUUM",
"VALID",
"VALIDATE",
"VALIDATOR",
"VALUE",
"VALUES",
"VALUE_OF",
"VARBINARY",
"VARCHAR",
"VARIADIC",
"VARYING",
"VAR_POP",
"VAR_SAMP",
"VERBOSE",
"VERSION",
"VERSIONING",
"VIEW",
"VOLATILE",
"WHEN",
"WHENEVER",
"WHERE",
"WHITESPACE",
"WIDTH_BUCKET",
"WINDOW",
"WITH",
"WITHIN",
"WITHOUT",
"WORK",
"WRAPPER",
"WRITE",
"XML",
"XMLAGG",
"XMLATTRIBUTES",
"XMLBINARY",
"XMLCAST",
"XMLCOMMENT",
"XMLCONCAT",
"XMLDECLARATION",
"XMLDOCUMENT",
"XMLELEMENT",
"XMLEXISTS",
"XMLFOREST",
"XMLITERATE",
"XMLNAMESPACES",
"XMLPARSE",
"XMLPI",
"XMLQUERY",
"XMLROOT",
"XMLSCHEMA",
"XMLSERIALIZE",
"XMLTABLE",
"XMLTEXT",
"XMLVALIDATE",
"YEAR",
"YES",
"ZONE"
]
| |
# -*- coding: utf-8 -*-
#==============================================================================
# Copyright: Hybrid Labs
# Licence: See LICENSE
#==============================================================================
import logging
import mimetypes
import os
import sys
from bottle import get, default_app, static_file
from bson import json_util as json
from greenlet import greenlet as Greenlet
from six import StringIO
from lxml import html
from lxml.html import builder as E
from sockjs.tornado import router as _router, SockJSRouter
from sockjs.tornado import SockJSConnection
from tornado import gen
from tornado.escape import xhtml_unescape as unescape
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, FallbackHandler
from tornado.wsgi import WSGIContainer
from . import build, client, compiler, _log
from .model import model
# SockJS route tuples, populated by the channel() decorator below.
_routes = []
# Directory containing this package (bundled client assets live under it).
_root_path = os.path.dirname(__file__)
# Default asset directories; overridable through serve().
_view_path = 'views'
_controller_path = 'controllers'
# When True, vendor scripts load from their CDN URL with a local fallback.
_cdn = True
# Vendor script bundle. 3-tuples are (JS global/check expression, CDN URL,
# local bundle path); 2-tuples are (JS global, local bundle path) and are
# always served locally.
_bundle_files = [
    (
        'SockJS',
        '//cdnjs.cloudflare.com/ajax/libs/sockjs-client/0.3.4/sockjs.min.js',
        'sockjs-0.3.4.min.js'
    ),
    (
        'jQuery',
        '//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js',
        'jquery-1.10.2/jquery.min.js'
    ),
    (
        'angular',
        '//ajax.googleapis.com/ajax/libs/angularjs/1.2.6/angular.min.js',
        'angular-1.2.6/angular.min.js'
    ),
    (
        'check(angular.module, ["ngAnimate"])',
        '//ajax.googleapis.com/ajax/libs/angularjs/1.2.6/'
        'angular-animate.min.js',
        'angular-1.2.6/angular-animate.min.js'
    ),
    (
        'angulate',
        'angulate-0.1.0/angulate.js'
    ),
    (
        'avalon',
        'avalon.js'
    )
]
# Make the SockJS client script resolve to our locally bundled copy.
_router.DEFAULT_SETTINGS['sockjs_url'] = '/bundle/sockjs-0.3.4.min.js'
# Server-callable RPC methods, populated by the method() decorator below.
_methods = {}
# Fix mimetypes
mimetypes.add_type('image/png', '.png', True)
mimetypes.add_type('audio/mpeg', '.mp3', True)
mimetypes.add_type('application/x-font-ttf', '.ttf', True)
mimetypes.add_type('application/x-font-woff', '.woff', True)
class ChannelConnection(SockJSConnection):
    """Base SockJS connection; channel() creates a subclass per route."""

    # Overridden on the per-route subclass built by channel().
    route = None
    func = None

    def __init__(self, *args, **kwargs):
        super(ChannelConnection, self).__init__(*args, **kwargs)
        # Connection info captured in on_open; read again in on_close.
        self.info = None

    def on_open(self, info):
        self.info = info
        _log.info('OPEN Channel {0} ({1})'.format(self.route, info.ip))

    @gen.coroutine
    def on_message(self, message):
        # Wrap the user handler in a coroutine and run it inside its own
        # greenlet — presumably so handlers may green-block without
        # stalling the IOLoop; confirm against the greenlet usage in
        # serve()'s controller imports. Exceptions are logged, not fatal.
        try:
            yield Greenlet(gen.coroutine(self.func)).switch(message)
        except Exception as e:
            _log.exception(e)

    def on_close(self):
        _log.info('CLOSE Channel {0} ({1})'.format(self.route, self.info.ip))
def channel(route):
    """Decorator: expose the wrapped handler as a SockJS channel at *route*."""
    def register(handler):
        # Build a per-route ChannelConnection subclass bound to handler and
        # mount its URLs on the module route table.
        subclass = type('ChannelConnection', (ChannelConnection,),
                        {'route': route, 'func': handler})
        _routes.extend(SockJSRouter(subclass, route).urls)
        return handler
    return register
def method(func_or_str=None):
    """Decorator registering a function as a server-callable RPC method.

    Usable bare (``@method``, name defaults to ``module.funcname``) or with
    an explicit name (``@method('custom.name')``). Returns the function
    unchanged apart from a ``__server_method__`` attribute, and records it
    in the module-level _methods registry. Asserts on duplicate names.
    """
    def _register(f, method_name):
        # Shared registration path for both decorator forms (previously
        # duplicated verbatim in each branch).
        assert method_name not in _methods, \
            "Server method '{0}' already exists".format(method_name)
        _methods[method_name] = f
        f.__server_method__ = method_name
        return f

    if callable(func_or_str):
        # Bare @method usage: func_or_str is the function itself.
        f = func_or_str
        return _register(f, '{0}.{1}'.format(f.__module__, f.__name__))

    def _d(f):
        name = func_or_str or '{0}.{1}'.format(f.__module__, f.__name__)
        return _register(f, name)
    return _d
@channel('/_avalon')
def _server(request, message):
    """Built-in avalon channel dispatching subscribe/update/rpc messages."""
    payload = json.loads(message)
    action = payload['method']
    params = payload['params']
    if action == 'subscribe':
        model.subscribe(request, *params)
    elif action == 'update':
        model[params[0]].update(query=params[1], **params[2])
    elif action == 'rpc':
        if not _methods.get(params[0]):
            raise ValueError('Method {0} not found'.format(params[0]))
        request.send(json.dumps({
            'id': payload['id'],
            'response': 'rpc',
            'result': _methods[params[0]](*params[1:])
        }))
@get('/')
def _index():
    """Render the single-page application root document.

    Walks the view directory compiling styles and templates, then builds
    one HTML document containing: compiled styles, the client runtime and
    compiled client functions, vendor bundles (CDN with local fallback),
    hoisted angulate templates, and the Angular bootstrap script.
    """
    # Gather, convert and process assets
    DOCTYPE = '<!DOCTYPE html>'
    style = StringIO()
    head = E.HEAD()
    body = E.BODY()
    templates = []
    template_names = []

    def visit(node, f):
        # Recursively hoist <template :name> elements out of the DOM and
        # re-emit them as x-angulate-template <script> tags.
        for c in node.getchildren():
            visit(c, f)
            if c.tag != 'template':
                continue
            # Template names are given as attributes spelled ":name".
            names = [n[1:] for n in c.keys() if n and n[0] == ':']
            if not names:
                _log.error('Unbound template found (%s)', f)
                continue
            for name in names:
                if name in template_names:
                    _log.error('Duplicate template "%s" found (%s)', name, f)
                    continue
                template = E.SCRIPT(
                    id='template-{0}'.format(name),
                    type='text/x-angulate-template'
                )
                template.text = c.text
                # NOTE(review): lxml extend() moves nodes, so a template
                # bound to several names may leave later copies without
                # children — confirm intended.
                template.extend(c.getchildren())
                templates.append(template)
            template_names.extend(names)
            node.remove(c)
        return

    for dirpath, dirnames, filenames in os.walk(_view_path):
        for filename in filenames:
            ext = os.path.splitext(filename)[-1]
            filename = os.path.join(dirpath, filename)
            handler = build.style_handler.get(ext)
            if handler:
                # Stylesheet sources are compiled into one inline blob.
                style.write(handler(filename))
                continue
            handler = build.template_handler.get(ext)
            if not handler:
                continue
            contents = handler(filename)
            if not contents:
                _log.warning('View is empty (%s)', filename)
                continue
            try:
                # The '<head></head>' prefix forces a document parse so
                # top-level head/body/template sections can be separated.
                dom = html.fromstring('<head></head>' + contents)
            except Exception as e:
                _log.error('Parse error (%s) %s', filename, e)
                continue
            for e in dom.getchildren():
                if e.tag == 'head':
                    head.extend(e.getchildren())
                elif e.tag == 'body':
                    visit(e, filename)
                    body.text = (body.text or '') + (e.text or '')
                    body.extend(e.getchildren())
                elif e.tag == 'template':
                    # Bare top-level template: wrap in a throwaway body.
                    visit(E.BODY(e), filename)
                else:
                    _log.error('View is invalid (%s)', filename)
                    continue
    # Register every hoisted template with angulate by its script id.
    s = 'angulate.registerTemplate("{0}", "{1}");'
    templates.append(
        E.SCRIPT(
            '\n'.join([
                s.format(name, 'template-{0}'.format(name))
                for name in template_names
            ]),
            type='text/javascript'))
    # Append styles
    head.append(E.STYLE(style.getvalue()))
    # Append compiled runtime and Javascript functions
    body.extend([
        E.SCRIPT(
            compiler.runtime(),
            type='text/javascript'),
        E.SCRIPT(
            '\n'.join(f for f in client.compiled()),
            type='text/javascript')
    ])
    # Append bundle
    for b in _bundle_files:
        assert len(b) in [2, 3], 'Invalid bundle file config'
        if len(b) == 2:
            # 2-tuple: (global, local path) — always served locally.
            body.append(E.SCRIPT(
                src='bundle/{0}'.format(b[1]),
                type='text/javascript'))
        elif _cdn:
            # CDN mode: load from the CDN, with a document.write fallback
            # to the local copy when the expected global (b[0]) is absent.
            link = html.tostring(E.SCRIPT(
                src='bundle/{0}'.format(b[2]),
                type='text/javascript'
            ), encoding='utf-8')
            # Escape the closing tag so the fallback can be embedded in JS.
            link = link.decode('utf-8').replace('</script>', '<\/script>')
            body.extend([
                E.SCRIPT(
                    src=b[1],
                    type='text/javascript'),
                E.SCRIPT(
                    "window.{0} || document.write('{1}')".format(b[0], link),
                    type='text/javascript')
            ])
        else:
            body.append(E.SCRIPT(
                src='bundle/{0}'.format(b[2]),
                type='text/javascript'))
    # Append templates
    body.extend(templates)
    # Bootstrap angular
    body.append(E.SCRIPT(
        '\n'.join([
            'window.app = angular.module("app", ["ngAnimate", "angulate"]);',
            'window.app.run(["$rootScope", function($rootScope) {',
            ' $rootScope._session = avalon.session;',
            # NOTE(review): no comma after the next string — it fuses with
            # '}])' via literal concatenation. The emitted JS is still
            # valid, but confirm this is intentional.
            ' avalon.scope = $rootScope;'
            '}])',
            'angular.bootstrap(document, ["app"]);'
        ]),
        type='text/javascript'))
    return unescape(html.tostring(E.HTML(head, body), doctype=DOCTYPE,
                                  encoding='utf-8'))
@get('/bundle/<filename:re:(?!\.).+>')
def _bundle(filename):
    """Serve a vendored asset out of the package's bundle directory."""
    bundle_root = os.path.join(_root_path, 'bundle')
    return static_file(filename, root=bundle_root)
@get('/<filename:re:(?!\.).+>')
def _static(filename):
    """Serve a static file straight from the configured view directory."""
    root_dir = _view_path
    return static_file(filename, root=root_dir)
def serve(db=None, mount_app=None, port=8080, verbose=False,
          view_path=None, controller_path=None, cdn=True):
    """Configure and start the avalon HTTP/SockJS server (blocking).

    db: connection argument forwarded to model.connect when truthy.
    mount_app: optional (mount_point, wsgi_app) pair served as a
        FallbackHandler ahead of the default bottle app.
    port: TCP port to listen on.
    verbose: when True, raise the module logger level to INFO.
    view_path / controller_path: override the default asset directories.
    cdn: serve vendor bundles from CDNs with local fallback when True.

    Never returns under normal operation — ends by starting the IOLoop.
    """
    global _view_path, _controller_path, _cdn
    _view_path = view_path or _view_path
    _controller_path = controller_path or _controller_path
    _cdn = cdn
    if verbose:
        _log.setLevel(logging.INFO)
    if mount_app:
        r = _routes + [(mount_app[0], FallbackHandler, {
            'fallback': WSGIContainer(mount_app[1])
        })]
    else:
        r = _routes
    # Connect to db
    if db:
        model.connect(db)
    # Anything not matched by a SockJS route falls through to bottle.
    wsgi_app = WSGIContainer(default_app())
    app = Application(r + [
        ('.*', FallbackHandler, {'fallback': wsgi_app})
    ])
    # Import controllers
    module_path = os.path.join(_controller_path, '..')
    if module_path not in sys.path:
        sys.path.append(module_path)
    for dirpath, dirnames, filenames in os.walk(_controller_path):
        for f in filenames:
            module, ext = os.path.splitext(f)
            if ext != '.py':
                continue
            # NOTE(review): '{0}.{1}'.format(dirpath, module) only forms a
            # valid dotted module name for the top-level controller dir;
            # nested directories would embed os.sep — confirm whether
            # nested controllers are supported. Import runs in a greenlet
            # so controller top-level code may green-block.
            Greenlet(__import__).switch('{0}.{1}'.format(dirpath, module))
    server = HTTPServer(app)
    server.listen(port)
    IOLoop.instance().start()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.