repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
jmhutch/dotvim | vim/autoload/conque_term/conque_subprocess.py | 30 | 5370 | # FILE: autoload/conque_term/conque_subprocess.py
# AUTHOR: Nico Raffo <nicoraffo@gmail.com>
# WEBSITE: http://conque.googlecode.com
# MODIFIED: 2011-09-02
# VERSION: 2.3, for Vim 7.0
# LICENSE:
# Conque - Vim terminal/console emulator
# Copyright (C) 2009-2011 Nico Raffo
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
ConqueSubprocess
Create and interact with a subprocess through a pty.
Usage:
p = ConqueSubprocess()
p.open('bash', {'TERM':'vt100'})
output = p.read()
p.write('cd ~/vim' + "\r")
p.write('ls -lha' + "\r")
output += p.read(timeout = 500)
p.close()
"""
import os
import signal
import pty
import tty
import select
import fcntl
import termios
import struct
import shlex
class ConqueSubprocess:
    """Create and interact with a subprocess through a pty.

    The child's stdin/stdout/stderr are attached to the slave side of a
    pseudo-terminal; this object keeps the master side file descriptor
    and the child's pid.
    """

    # process id of the child; 0 until open() succeeds
    pid = 0

    # stdout+stderr file descriptor (master side of the pty)
    fd = None

    def open(self, command, env=None):
        """Create the subprocess using forkpty().

        command -- shell-style command line, split with shlex
        env     -- optional dict of environment variables for the child
        Returns False if the fork fails; otherwise returns None in the
        parent (the child never returns, it exec()s the command).
        """
        # parse command
        command_arr = shlex.split(command)
        executable = command_arr[0]
        args = command_arr

        # try to fork a new pty
        try:
            self.pid, self.fd = pty.fork()
        except OSError:
            return False

        # child proc, replace with command after altering terminal attributes
        if self.pid == 0:
            # set requested environment variables
            # (env default was a shared mutable dict {}; use None sentinel)
            for k, v in (env or {}).items():
                os.environ[k] = v

            # set tty attributes; best effort, some platforms reject flags
            try:
                attrs = tty.tcgetattr(1)
                attrs[0] = attrs[0] ^ tty.IGNBRK
                attrs[0] = attrs[0] | tty.BRKINT | tty.IXANY | tty.IMAXBEL
                attrs[2] = attrs[2] | tty.HUPCL
                attrs[3] = attrs[3] | tty.ICANON | tty.ECHO | tty.ISIG | tty.ECHOKE
                attrs[6][tty.VMIN] = 1
                attrs[6][tty.VTIME] = 0
                tty.tcsetattr(1, tty.TCSANOW, attrs)
            except Exception:
                pass

            # replace this process with the subprocess
            os.execvp(executable, args)

        # else master, nothing more to do

    def read(self, timeout=1):
        """Read from the subprocess and return new output as a str.

        timeout -- how long select() waits for output, in milliseconds
        """
        output = ''
        read_timeout = float(timeout) / 1000
        read_ct = 0

        try:
            # read from fd until no more output
            while 1:
                s_read, s_write, s_error = select.select([self.fd], [], [], read_timeout)

                lines = b''
                for s_fd in s_read:
                    try:
                        # increase read buffer so huge reads don't slow down
                        if read_ct < 10:
                            lines = os.read(self.fd, 32)
                        elif read_ct < 50:
                            lines = os.read(self.fd, 512)
                        else:
                            lines = os.read(self.fd, 2048)
                        read_ct += 1
                    except OSError:
                        pass

                    output = output + lines.decode('utf-8')

                # BUG FIX: os.read() returns bytes on Python 3, so the old
                # comparison against the str '' never matched and the loop
                # only ended via the read_ct cap. b'' == '' on Python 2,
                # so this works on both versions.
                if lines == b'' or read_ct > 100:
                    break
        except Exception:
            pass

        return output

    def write(self, input):
        """Write new input (a str) to the subprocess."""
        try:
            # str.encode returns bytes on Python 3 (and str on Python 2),
            # both accepted by os.write, so no version check is needed.
            # The old code referenced CONQUE_PYTHON_VERSION, a name that
            # is not defined in this module, making write() a silent no-op
            # whenever the resulting NameError was swallowed.
            os.write(self.fd, input.encode('utf-8', 'ignore'))
        except Exception:
            pass

    def signal(self, signum):
        """Send the given signal number to the subprocess."""
        try:
            os.kill(self.pid, signum)
        except OSError:
            pass

    def close(self):
        """Terminate the subprocess with SIGTERM (15)."""
        self.signal(15)

    def is_alive(self):
        """Return True while the subprocess has not exited."""
        p_status = True
        try:
            # waitpid() returns (0, 0) while the child is still running
            if os.waitpid(self.pid, os.WNOHANG)[0]:
                p_status = False
        except OSError:
            p_status = False
        return p_status

    def window_resize(self, lines, columns):
        """Update the pty window size in the kernel, then SIGWINCH the child."""
        try:
            fcntl.ioctl(self.fd, termios.TIOCSWINSZ,
                        struct.pack("HHHH", lines, columns, 0, 0))
            os.kill(self.pid, signal.SIGWINCH)
        except (IOError, OSError):
            pass
# vim:foldmethod=marker
| isc |
jor-/scipy | scipy/optimize/tests/test_least_squares.py | 5 | 29339 | from __future__ import division
from itertools import product
import numpy as np
from numpy.linalg import norm
from numpy.testing import (assert_, assert_allclose,
assert_equal)
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from scipy.sparse import issparse, lil_matrix
from scipy.sparse.linalg import aslinearoperator
from scipy.optimize import least_squares
from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES
from scipy.optimize._lsq.common import EPS, make_strictly_feasible
def fun_trivial(x, a=0):
    """Residual of a parabola shifted by ``a``: (x - a)**2 + 5."""
    shifted = x - a
    return shifted * shifted + 5.0
def jac_trivial(x, a=0.0):
    """Derivative of fun_trivial with respect to x."""
    return (x - a) * 2
def fun_2d_trivial(x):
    """Residuals equal to the first two components of x."""
    components = [x[0], x[1]]
    return np.array(components)
def jac_2d_trivial(x):
    """Constant 2x2 identity Jacobian for fun_2d_trivial."""
    return np.eye(2)
def fun_rosenbrock(x):
    """Residual form of the classic Rosenbrock function."""
    r1 = 10 * (x[1] - x[0] ** 2)
    r2 = 1 - x[0]
    return np.array([r1, r2])
def jac_rosenbrock(x):
    """Analytic Jacobian of fun_rosenbrock."""
    top_row = [-20 * x[0], 10]
    bottom_row = [-1, 0]
    return np.array([top_row, bottom_row])
def jac_rosenbrock_bad_dim(x):
    """Rosenbrock Jacobian padded with a spurious extra row (wrong shape)."""
    rows = [[-20 * x[0], 10],
            [-1, 0],
            [0.0, 0.0]]
    return np.array(rows)
def fun_rosenbrock_cropped(x):
    """First Rosenbrock residual only, giving an m < n problem."""
    full_residual = fun_rosenbrock(x)
    return full_residual[0]
def jac_rosenbrock_cropped(x):
    """First row of the Rosenbrock Jacobian, matching fun_rosenbrock_cropped."""
    full_jacobian = jac_rosenbrock(x)
    return full_jacobian[0]
# When x is 1-d array, return is 2-d array.
def fun_wrong_dimensions(x):
    """Deliberately return a 2-D residual for 1-D x (invalid shape)."""
    powers = [x, x ** 2, x ** 3]
    return np.array(powers)
def jac_wrong_dimensions(x, a=0.0):
    """Deliberately return a 3-D Jacobian (invalid shape)."""
    flat_jacobian = jac_trivial(x, a=a)
    return np.atleast_3d(flat_jacobian)
def fun_bvp(x):
    """Residuals of a discrete 2-D boundary value problem (see gh-5556).

    x is a flattened n*n grid; the grid is zero-padded and the 5-point
    Laplacian stencil is applied together with a cubic source term.
    """
    n = int(np.sqrt(x.shape[0]))
    grid = x.reshape((n, n))
    padded = np.zeros((n + 2, n + 2))
    padded[1:-1, 1:-1] = grid
    neighbor_sum = (padded[:-2, 1:-1] + padded[2:, 1:-1]
                    + padded[1:-1, :-2] + padded[1:-1, 2:])
    return (neighbor_sum - 4 * grid + grid ** 3).ravel()
class BroydenTridiagonal(object):
    """Broyden tridiagonal test problem with randomized box constraints.

    ``mode`` selects how the Jacobian is exposed: 'sparse' (lil_matrix plus
    a sparsity pattern), 'operator' (LinearOperator) or 'dense' (ndarray).
    """
    def __init__(self, n=100, mode='sparse'):
        np.random.seed(0)
        self.n = n
        self.x0 = -np.ones(n)
        self.lb = np.linspace(-2, -1.5, n)
        self.ub = np.linspace(-0.8, 0.0, n)
        # Jitter bounds and starting point (the order of the randn calls
        # matters for reproducibility), then push x0 strictly inside.
        self.lb += 0.1 * np.random.randn(n)
        self.ub += 0.1 * np.random.randn(n)
        self.x0 += 0.1 * np.random.randn(n)
        self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub)
        if mode == 'sparse':
            pattern = lil_matrix((n, n), dtype=int)
            main = np.arange(n)
            pattern[main, main] = 1
            lower = np.arange(1, n)
            pattern[lower, lower - 1] = 1
            upper = np.arange(n - 1)
            pattern[upper, upper + 1] = 1
            self.sparsity = pattern
            self.jac = self._jac
        elif mode == 'operator':
            self.jac = lambda x: aslinearoperator(self._jac(x))
        elif mode == 'dense':
            self.sparsity = None
            self.jac = lambda x: self._jac(x).toarray()
        else:
            assert_(False)
    def fun(self, x):
        # f_i = (3 - x_i) x_i + 1 - x_{i-1} - 2 x_{i+1}, with the
        # out-of-range neighbor terms dropped at the two ends.
        f = x * (3 - x) + 1
        f[1:] -= x[:-1]
        f[:-1] -= 2 * x[1:]
        return f
    def _jac(self, x):
        # Tridiagonal Jacobian of fun, always built as a lil_matrix.
        J = lil_matrix((self.n, self.n))
        diag = np.arange(self.n)
        J[diag, diag] = 3 - 2 * x
        sub = np.arange(1, self.n)
        J[sub, sub - 1] = -1
        sup = np.arange(self.n - 1)
        J[sup, sup + 1] = -2
        return J
class ExponentialFittingProblem(object):
    """Provide data and function for exponential fitting in the form
    y = a + exp(b * x) + noise, with a few large positive outliers."""
    def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1),
                 n_points=11, random_seed=None):
        np.random.seed(random_seed)
        self.m = n_points
        self.n = 2
        self.p0 = np.zeros(2)
        self.x = np.linspace(x_range[0], x_range[1], n_points)
        # Clean signal, then additive Gaussian noise, then outliers.
        # (Random call order matters for reproducibility.)
        clean = a + np.exp(b * self.x)
        self.y = clean + noise * np.random.randn(self.m)
        outlier_idx = np.random.randint(0, self.m, n_outliers)
        self.y[outlier_idx] += 50 * noise * np.random.rand(n_outliers)
        self.p_opt = np.array([a, b])
    def fun(self, p):
        # Residuals of the model against the (noisy) data.
        predicted = np.exp(p[1] * self.x) + p[0]
        return predicted - self.y
    def jac(self, p):
        # Analytic Jacobian: d/da = 1, d/db = x * exp(b * x).
        J = np.ones((self.m, self.n))
        J[:, 1] = np.exp(p[1] * self.x) * self.x
        return J
def cubic_soft_l1(z):
    """Custom loss rho(z) = 3 * ((1 + z)**(1/3) - 1).

    Returns an array of shape (3, z.size): the loss value and its first
    and second derivatives, as least_squares expects from a callable loss.
    """
    t = 1 + z
    value = 3 * (t ** (1 / 3) - 1)
    first_deriv = t ** (-2 / 3)
    second_deriv = -2 / 3 * t ** (-5 / 3)
    return np.vstack((value, first_deriv, second_deriv))
LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1]
class BaseMixin(object):
    """Tests shared by every solver; subclasses define ``self.method``."""
    def test_basic(self):
        # Test that the basic calling sequence works.
        res = least_squares(fun_trivial, 2., method=self.method)
        assert_allclose(res.x, 0, atol=1e-4)
        assert_allclose(res.fun, fun_trivial(res.x))
    def test_args_kwargs(self):
        # Test that args and kwargs are passed correctly to the functions.
        a = 3.0
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
                res = least_squares(fun_trivial, 2.0, jac, args=(a,),
                                    method=self.method)
                res1 = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
                                     method=self.method)
            assert_allclose(res.x, a, rtol=1e-4)
            assert_allclose(res1.x, a, rtol=1e-4)
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      args=(3, 4,), method=self.method)
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      kwargs={'kaboom': 3}, method=self.method)
    def test_jac_options(self):
        # All accepted jac specifications converge; bogus ones are rejected.
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
                res = least_squares(fun_trivial, 2.0, jac, method=self.method)
            assert_allclose(res.x, 0, atol=1e-4)
        assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops',
                      method=self.method)
    def test_nfev_options(self):
        for max_nfev in [None, 20]:
            res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev,
                                method=self.method)
            assert_allclose(res.x, 0, atol=1e-4)
    def test_x_scale_options(self):
        # Valid x_scale forms work; invalid types/values raise ValueError.
        for x_scale in [1.0, np.array([0.5]), 'jac']:
            res = least_squares(fun_trivial, 2.0, x_scale=x_scale)
            assert_allclose(res.x, 0)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale='auto', method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=-1.0, method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=None, method=self.method)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, x_scale=1.0+2.0j, method=self.method)
    def test_diff_step(self):
        # res1 and res2 should be equivalent.
        # res2 and res3 should be different.
        res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1,
                             method=self.method)
        res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1,
                             method=self.method)
        res3 = least_squares(fun_trivial, 2.0,
                             diff_step=None, method=self.method)
        assert_allclose(res1.x, 0, atol=1e-4)
        assert_allclose(res2.x, 0, atol=1e-4)
        assert_allclose(res3.x, 0, atol=1e-4)
        assert_equal(res1.x, res2.x)
        assert_equal(res1.nfev, res2.nfev)
        assert_(res2.nfev != res3.nfev)
    def test_incorrect_options_usage(self):
        # least_squares takes no 'options' dict at all.
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      method=self.method, options={'no_such_option': 100})
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      method=self.method, options={'max_nfev': 100})
    def test_full_result(self):
        # MINPACK doesn't work very well with factor=100 on this problem,
        # thus using low 'atol'.
        res = least_squares(fun_trivial, 2.0, method=self.method)
        assert_allclose(res.x, 0, atol=1e-4)
        assert_allclose(res.cost, 12.5)
        assert_allclose(res.fun, 5)
        assert_allclose(res.jac, 0, atol=1e-4)
        assert_allclose(res.grad, 0, atol=1e-2)
        assert_allclose(res.optimality, 0, atol=1e-2)
        assert_equal(res.active_mask, 0)
        if self.method == 'lm':
            assert_(res.nfev < 30)
            assert_(res.njev is None)
        else:
            assert_(res.nfev < 10)
            assert_(res.njev < 10)
        assert_(res.status > 0)
        assert_(res.success)
    def test_full_result_single_fev(self):
        # MINPACK checks the number of nfev after the iteration,
        # so it's hard to tell what it is going to compute.
        if self.method == 'lm':
            return
        res = least_squares(fun_trivial, 2.0, method=self.method,
                            max_nfev=1)
        assert_equal(res.x, np.array([2]))
        assert_equal(res.cost, 40.5)
        assert_equal(res.fun, np.array([9]))
        assert_equal(res.jac, np.array([[4]]))
        assert_equal(res.grad, np.array([36]))
        assert_equal(res.optimality, 36)
        assert_equal(res.active_mask, np.array([0]))
        assert_equal(res.nfev, 1)
        assert_equal(res.njev, 1)
        assert_equal(res.status, 0)
        assert_equal(res.success, 0)
    def test_rosenbrock(self):
        # Full cartesian product of jac / x_scale / tr_solver options.
        x0 = [-2, 1]
        x_opt = [1, 1]
        for jac, x_scale, tr_solver in product(
                ['2-point', '3-point', 'cs', jac_rosenbrock],
                [1.0, np.array([1.0, 0.2]), 'jac'],
                ['exact', 'lsmr']):
            with suppress_warnings() as sup:
                sup.filter(UserWarning,
                           "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
                res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale,
                                    tr_solver=tr_solver, method=self.method)
            assert_allclose(res.x, x_opt)
    def test_rosenbrock_cropped(self):
        # m < n problem: only 'lm' must reject it.
        x0 = [-2, 1]
        if self.method == 'lm':
            assert_raises(ValueError, least_squares, fun_rosenbrock_cropped,
                          x0, method='lm')
        else:
            for jac, x_scale, tr_solver in product(
                    ['2-point', '3-point', 'cs', jac_rosenbrock_cropped],
                    [1.0, np.array([1.0, 0.2]), 'jac'],
                    ['exact', 'lsmr']):
                res = least_squares(
                    fun_rosenbrock_cropped, x0, jac, x_scale=x_scale,
                    tr_solver=tr_solver, method=self.method)
                assert_allclose(res.cost, 0, atol=1e-14)
    def test_fun_wrong_dimensions(self):
        assert_raises(ValueError, least_squares, fun_wrong_dimensions,
                      2.0, method=self.method)
    def test_jac_wrong_dimensions(self):
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, jac_wrong_dimensions, method=self.method)
    def test_fun_and_jac_inconsistent_dimensions(self):
        x0 = [1, 2]
        assert_raises(ValueError, least_squares, fun_rosenbrock, x0,
                      jac_rosenbrock_bad_dim, method=self.method)
    def test_x0_multidimensional(self):
        x0 = np.ones(4).reshape(2, 2)
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)
    def test_x0_complex_scalar(self):
        x0 = 2.0 + 0.0*1j
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)
    def test_x0_complex_array(self):
        x0 = [1.0, 2.0 + 0.0*1j]
        assert_raises(ValueError, least_squares, fun_trivial, x0,
                      method=self.method)
    def test_bvp(self):
        # This test was introduced with fix #5556. It turned out that
        # dogbox solver had a bug with trust-region radius update, which
        # could block its progress and create an infinite loop. And this
        # discrete boundary value problem is the one which triggers it.
        n = 10
        x0 = np.ones(n**2)
        if self.method == 'lm':
            max_nfev = 5000  # To account for Jacobian estimation.
        else:
            max_nfev = 100
        res = least_squares(fun_bvp, x0, ftol=1e-2, method=self.method,
                            max_nfev=max_nfev)
        assert_(res.nfev < max_nfev)
        assert_(res.cost < 0.5)
    def test_error_raised_when_all_tolerances_below_eps(self):
        # Test that all 0 tolerances are not allowed.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      method=self.method, ftol=None, xtol=None, gtol=None)
    def test_convergence_with_only_one_tolerance_enabled(self):
        x0 = [-2, 1]
        x_opt = [1, 1]
        for ftol, xtol, gtol in [(1e-8, None, None),
                                 (None, 1e-8, None),
                                 (None, None, 1e-8)]:
            res = least_squares(fun_rosenbrock, x0, jac=jac_rosenbrock,
                                ftol=ftol, gtol=gtol, xtol=xtol,
                                method=self.method)
            assert_allclose(res.x, x_opt)
class BoundsMixin(object):
    """Bound-constraint handling tests; subclasses define ``self.method``."""
    def test_inconsistent(self):
        # lb > ub must be rejected.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(10.0, 0.0), method=self.method)
    def test_infeasible(self):
        # x0 outside the bounds must be rejected.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(3., 4), method=self.method)
    def test_wrong_number(self):
        # bounds must be a 2-tuple.
        assert_raises(ValueError, least_squares, fun_trivial, 2.,
                      bounds=(1., 2, 3), method=self.method)
    def test_inconsistent_shape(self):
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(1.0, [2.0, 3.0]), method=self.method)
        # 1-D array won't be broadcasted
        assert_raises(ValueError, least_squares, fun_rosenbrock, [1.0, 2.0],
                      bounds=([0.0], [3.0, 4.0]), method=self.method)
    def test_in_bounds(self):
        # Unconstrained minimum inside the box vs. pushed onto the boundary
        # (active_mask -1 marks an active lower bound).
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            res = least_squares(fun_trivial, 2.0, jac=jac,
                                bounds=(-1.0, 3.0), method=self.method)
            assert_allclose(res.x, 0.0, atol=1e-4)
            assert_equal(res.active_mask, [0])
            assert_(-1 <= res.x <= 3)
            res = least_squares(fun_trivial, 2.0, jac=jac,
                                bounds=(0.5, 3.0), method=self.method)
            assert_allclose(res.x, 0.5, atol=1e-4)
            assert_equal(res.active_mask, [-1])
            assert_(0.5 <= res.x <= 3)
    def test_bounds_shape(self):
        # Scalar bounds are broadcast against vector x0.
        for jac in ['2-point', '3-point', 'cs', jac_2d_trivial]:
            x0 = [1.0, 1.0]
            res = least_squares(fun_2d_trivial, x0, jac=jac)
            assert_allclose(res.x, [0.0, 0.0])
            res = least_squares(fun_2d_trivial, x0, jac=jac,
                                bounds=(0.5, [2.0, 2.0]), method=self.method)
            assert_allclose(res.x, [0.5, 0.5])
            res = least_squares(fun_2d_trivial, x0, jac=jac,
                                bounds=([0.3, 0.2], 3.0), method=self.method)
            assert_allclose(res.x, [0.3, 0.2])
            res = least_squares(
                fun_2d_trivial, x0, jac=jac, bounds=([-1, 0.5], [1.0, 3.0]),
                method=self.method)
            assert_allclose(res.x, [0.0, 0.5], atol=1e-5)
    def test_rosenbrock_bounds(self):
        # Several start points / boxes, crossed with all solver options;
        # first-order optimality must be reached in every combination.
        x0_1 = np.array([-2.0, 1.0])
        x0_2 = np.array([2.0, 2.0])
        x0_3 = np.array([-2.0, 2.0])
        x0_4 = np.array([0.0, 2.0])
        x0_5 = np.array([-1.2, 1.0])
        problems = [
            (x0_1, ([-np.inf, -1.5], np.inf)),
            (x0_2, ([-np.inf, 1.5], np.inf)),
            (x0_3, ([-np.inf, 1.5], np.inf)),
            (x0_4, ([-np.inf, 1.5], [1.0, np.inf])),
            (x0_2, ([1.0, 1.5], [3.0, 3.0])),
            (x0_5, ([-50.0, 0.0], [0.5, 100]))
        ]
        for x0, bounds in problems:
            for jac, x_scale, tr_solver in product(
                    ['2-point', '3-point', 'cs', jac_rosenbrock],
                    [1.0, [1.0, 0.5], 'jac'],
                    ['exact', 'lsmr']):
                res = least_squares(fun_rosenbrock, x0, jac, bounds,
                                    x_scale=x_scale, tr_solver=tr_solver,
                                    method=self.method)
                assert_allclose(res.optimality, 0.0, atol=1e-5)
class SparseMixin(object):
    """Sparse-Jacobian support tests; subclasses define ``self.method``."""
    def test_exact_tr_solver(self):
        # tr_solver='exact' is incompatible with sparse Jacobians
        # and with jac_sparsity.
        p = BroydenTridiagonal()
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      tr_solver='exact', method=self.method)
        assert_raises(ValueError, least_squares, p.fun, p.x0,
                      tr_solver='exact', jac_sparsity=p.sparsity,
                      method=self.method)
    def test_equivalence(self):
        # Sparse and dense problems must follow identical iteration paths.
        sparse = BroydenTridiagonal(mode='sparse')
        dense = BroydenTridiagonal(mode='dense')
        res_sparse = least_squares(
            sparse.fun, sparse.x0, jac=sparse.jac,
            method=self.method)
        res_dense = least_squares(
            dense.fun, dense.x0, jac=sparse.jac,
            method=self.method)
        assert_equal(res_sparse.nfev, res_dense.nfev)
        assert_allclose(res_sparse.x, res_dense.x, atol=1e-20)
        assert_allclose(res_sparse.cost, 0, atol=1e-20)
        assert_allclose(res_dense.cost, 0, atol=1e-20)
    def test_tr_options(self):
        # tr_options are forwarded to the lsmr solver.
        p = BroydenTridiagonal()
        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
                            tr_options={'btol': 1e-10})
        assert_allclose(res.cost, 0, atol=1e-20)
    def test_wrong_parameters(self):
        # Unknown tr_solver name / unknown lsmr option must raise.
        p = BroydenTridiagonal()
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      tr_solver='best', method=self.method)
        assert_raises(TypeError, least_squares, p.fun, p.x0, p.jac,
                      tr_solver='lsmr', tr_options={'tol': 1e-10})
    def test_solver_selection(self):
        # The Jacobian type of the result should follow the input type.
        sparse = BroydenTridiagonal(mode='sparse')
        dense = BroydenTridiagonal(mode='dense')
        res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac,
                                   method=self.method)
        res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac,
                                  method=self.method)
        assert_allclose(res_sparse.cost, 0, atol=1e-20)
        assert_allclose(res_dense.cost, 0, atol=1e-20)
        assert_(issparse(res_sparse.jac))
        assert_(isinstance(res_dense.jac, np.ndarray))
    def test_numerical_jac(self):
        # Finite-difference Jacobians with and without a sparsity pattern
        # must produce identical runs.
        p = BroydenTridiagonal()
        for jac in ['2-point', '3-point', 'cs']:
            res_dense = least_squares(p.fun, p.x0, jac, method=self.method)
            res_sparse = least_squares(
                p.fun, p.x0, jac,method=self.method,
                jac_sparsity=p.sparsity)
            assert_equal(res_dense.nfev, res_sparse.nfev)
            assert_allclose(res_dense.x, res_sparse.x, atol=1e-20)
            assert_allclose(res_dense.cost, 0, atol=1e-20)
            assert_allclose(res_sparse.cost, 0, atol=1e-20)
    def test_with_bounds(self):
        # One-sided and two-sided bounds, crossed with all jac choices.
        p = BroydenTridiagonal()
        for jac, jac_sparsity in product(
                [p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]):
            res_1 = least_squares(
                p.fun, p.x0, jac, bounds=(p.lb, np.inf),
                method=self.method,jac_sparsity=jac_sparsity)
            res_2 = least_squares(
                p.fun, p.x0, jac, bounds=(-np.inf, p.ub),
                method=self.method, jac_sparsity=jac_sparsity)
            res_3 = least_squares(
                p.fun, p.x0, jac, bounds=(p.lb, p.ub),
                method=self.method, jac_sparsity=jac_sparsity)
            assert_allclose(res_1.optimality, 0, atol=1e-10)
            assert_allclose(res_2.optimality, 0, atol=1e-10)
            assert_allclose(res_3.optimality, 0, atol=1e-10)
    def test_wrong_jac_sparsity(self):
        # Sparsity pattern with the wrong shape must be rejected.
        p = BroydenTridiagonal()
        sparsity = p.sparsity[:-1]
        assert_raises(ValueError, least_squares, p.fun, p.x0,
                      jac_sparsity=sparsity, method=self.method)
    def test_linear_operator(self):
        # LinearOperator Jacobians work with lsmr but not 'exact'.
        p = BroydenTridiagonal(mode='operator')
        res = least_squares(p.fun, p.x0, p.jac, method=self.method)
        assert_allclose(res.cost, 0.0, atol=1e-20)
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method=self.method, tr_solver='exact')
    def test_x_scale_jac_scale(self):
        # x_scale='jac' works for sparse but not LinearOperator Jacobians.
        p = BroydenTridiagonal()
        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
                            x_scale='jac')
        assert_allclose(res.cost, 0.0, atol=1e-20)
        p = BroydenTridiagonal(mode='operator')
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method=self.method, x_scale='jac')
class LossFunctionMixin(object):
    """Robust loss function tests; subclasses define ``self.method``."""
    def test_options(self):
        # Every registered loss plus the custom callable converges;
        # an unknown loss name is rejected.
        for loss in LOSSES:
            res = least_squares(fun_trivial, 2.0, loss=loss,
                                method=self.method)
            assert_allclose(res.x, 0, atol=1e-15)
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      loss='hinge', method=self.method)
    def test_fun(self):
        # Test that res.fun is actual residuals, and not modified by loss
        # function stuff.
        for loss in LOSSES:
            res = least_squares(fun_trivial, 2.0, loss=loss,
                                method=self.method)
            assert_equal(res.fun, fun_trivial(res.x))
    def test_grad(self):
        # Test that res.grad is true gradient of loss function at the
        # solution. Use max_nfev = 1, to avoid reaching minimum.
        x = np.array([2.0])  # res.x will be this.
        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
                            max_nfev=1, method=self.method)
        assert_equal(res.grad, 2 * x * (x**2 + 5))
        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            max_nfev=1, method=self.method)
        assert_equal(res.grad, 2 * x)
        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad,
                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**0.5)
        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2))
        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**4))
        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad,
                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**(2/3))
    def test_jac(self):
        # Test that res.jac.T.dot(res.jac) gives Gauss-Newton approximation
        # of Hessian. This approximation is computed by doubly differentiating
        # the cost function and dropping the part containing second derivative
        # of f. For a scalar function it is computed as
        # H = (rho' + 2 * rho'' * f**2) * f'**2, if the expression inside the
        # brackets is less than EPS it is replaced by EPS. Here we check
        # against the root of H.
        x = 2.0  # res.x will be this.
        f = x**2 + 5  # res.fun will be this.
        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
                            max_nfev=1, method=self.method)
        assert_equal(res.jac, 2 * x)
        # For `huber` loss the Jacobian correction is identically zero
        # in outlier region, in such cases it is modified to be equal EPS**0.5.
        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            max_nfev=1, method=self.method)
        assert_equal(res.jac, 2 * x * EPS**0.5)
        # Now let's apply `loss_scale` to turn the residual into an inlier.
        # The loss function becomes linear.
        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            f_scale=10, max_nfev=1)
        assert_equal(res.jac, 2 * x)
        # 'soft_l1' always gives a positive scaling.
        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * (1 + f**2)**-0.75)
        # For 'cauchy' the correction term turns out to be negative, and it
        # replaced by EPS**0.5.
        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * EPS**0.5)
        # Now use scaling to turn the residual to inlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            f_scale=10, max_nfev=1, method=self.method)
        fs = f / 10
        assert_allclose(res.jac, 2 * x * (1 - fs**2)**0.5 / (1 + fs**2))
        # 'arctan' gives an outlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * EPS**0.5)
        # Turn to inlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            f_scale=20.0, max_nfev=1, method=self.method)
        fs = f / 20
        assert_allclose(res.jac, 2 * x * (1 - 3 * fs**4)**0.5 / (1 + fs**4))
        # cubic_soft_l1 will give an outlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
                            max_nfev=1)
        assert_allclose(res.jac, 2 * x * EPS**0.5)
        # Turn to inlier.
        res = least_squares(fun_trivial, x, jac_trivial,
                            loss=cubic_soft_l1, f_scale=6, max_nfev=1)
        fs = f / 6
        assert_allclose(res.jac,
                        2 * x * (1 - fs**2 / 3)**0.5 * (1 + fs**2)**(-5/6))
    def test_robustness(self):
        # A robust loss must recover parameters better than plain least
        # squares on data containing outliers.
        for noise in [0.1, 1.0]:
            p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0)
            for jac in ['2-point', '3-point', 'cs', p.jac]:
                res_lsq = least_squares(p.fun, p.p0, jac=jac,
                                        method=self.method)
                assert_allclose(res_lsq.optimality, 0, atol=1e-2)
                for loss in LOSSES:
                    if loss == 'linear':
                        continue
                    res_robust = least_squares(
                        p.fun, p.p0, jac=jac, loss=loss, f_scale=noise,
                        method=self.method)
                    assert_allclose(res_robust.optimality, 0, atol=1e-2)
                    assert_(norm(res_robust.x - p.p_opt) <
                            norm(res_lsq.x - p.p_opt))
class TestDogbox(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
    # Run the full shared suite with the 'dogbox' solver.
    method = 'dogbox'
class TestTRF(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
    # Run the full shared suite with the 'trf' solver.
    method = 'trf'
    def test_lsmr_regularization(self):
        # 'regularize' is a trf-specific lsmr option; both settings converge.
        p = BroydenTridiagonal()
        for regularize in [True, False]:
            res = least_squares(p.fun, p.x0, p.jac, method='trf',
                                tr_options={'regularize': regularize})
            assert_allclose(res.cost, 0, atol=1e-20)
class TestLM(BaseMixin):
    # 'lm' wraps MINPACK and supports only a subset of the features,
    # so it only runs BaseMixin plus these rejection tests.
    method = 'lm'
    def test_bounds_not_supported(self):
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, bounds=(-3.0, 3.0), method='lm')
    def test_m_less_n_not_supported(self):
        x0 = [-2, 1]
        assert_raises(ValueError, least_squares, fun_rosenbrock_cropped, x0,
                      method='lm')
    def test_sparse_not_supported(self):
        p = BroydenTridiagonal()
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method='lm')
    def test_jac_sparsity_not_supported(self):
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      jac_sparsity=[1], method='lm')
    def test_LinearOperator_not_supported(self):
        p = BroydenTridiagonal(mode="operator")
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method='lm')
    def test_loss(self):
        # Only the default 'linear' loss is allowed with 'lm'.
        res = least_squares(fun_trivial, 2.0, loss='linear', method='lm')
        assert_allclose(res.x, 0.0, atol=1e-4)
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      method='lm', loss='huber')
def test_basic():
    # The 'method' argument must be optional and default sensibly.
    result = least_squares(fun_trivial, 2.0)
    assert_allclose(result.x, 0, atol=1e-10)
| bsd-3-clause |
debugger06/MiroX | osx/plat/frontends/widgets/bonjour.py | 3 | 2105 | # Miro - an RSS based video player application
# Copyright (C) 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
import logging
from miro import app
from miro import prefs
from miro.gtcache import gettext as _
from miro.frontends.widgets import dialogs
# We can't really do much here ... this shouldn't happen on the Mac
# but it's better than cold hard crash?
def install_bonjour():
    """Tell the user their OS X install appears to be missing Bonjour.

    We cannot install Bonjour for the user here, so all we do is show a
    dialog pointing them at their Mac OS X installation.
    """
    title = _("Install Bonjour")
    app_name = app.config.get(prefs.SHORT_APP_NAME)
    description = _(
        "%(appname)s has determined that your system may be "
        "missing the Bonjour components, a standard part of "
        "Mac OS X installations. Please review your Mac OS X installation.",
        {"appname": app_name}
    )
    dialogs.show_message(title, description)
    logging.debug('install bonjour clicked')
| gpl-2.0 |
wimberosa/samba | source3/stf/example.py | 82 | 1087 | #! /usr/bin/env python
# Copyright (C) 2003 by Martin Pool <mbp@samba.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""example of using ComfyChair"""
import comfychair
class OnePlusOne(comfychair.TestCase):
    """Trivial always-passing test case."""
    def runtest(self):
        # 1 + 1 is expected to equal 2, so this assertion always holds.
        total = 1 + 1
        self.assert_(total == 2)
class FailTest(comfychair.TestCase):
    """Deliberately failing test case, used to demonstrate failure output."""
    def runtest(self):
        # 1 + 1 never equals 3; this assertion always fails.
        total = 1 + 1
        self.assert_(total == 3)
# Default test suite.
tests = [OnePlusOne]
# Extra tests run only when explicitly requested (this one always fails).
extra_tests = [FailTest]
if __name__ == '__main__':
    comfychair.main(tests, extra_tests=extra_tests)
| gpl-3.0 |
proetman/checkit | speedcam/shaky_motion_detection.py | 1 | 4750 | # Stolen from here:
# https://github.com/slobdell/motion_detection/blob/master/shaky_motion_detection.py
import sys
import cv2
import numpy as np
IMAGE_WIDTH = 800
def resized_frame(frame):
    """Scale a frame to IMAGE_WIDTH pixels wide, preserving aspect ratio."""
    height, width = frame.shape[:2]
    scale = float(IMAGE_WIDTH) / width
    target_size = (int(width * scale), int(height * scale))
    return cv2.resize(frame, target_size)
class ShakyMotionDetector(object):
    """Extract motion from a video while suppressing repeated (shaky) motion.

    Frame-difference motion frames are computed first; then motion that also
    appears in a window of recent frames (dilated by a few pixels) is
    subtracted, leaving only new motion. The result is written to an .avi
    file named after the input.
    """
    # Dilation radii (pixels) applied to accumulated motion before subtraction.
    X_PIXEL_RANGE = 4
    Y_PIXEL_RANGE = 2
    def __init__(self, file_to_read):
        self.file_to_read = file_to_read
        self.capture = cv2.VideoCapture(self.file_to_read)
        self.video_writer = None  # created lazily once a frame size is known
        self.frames_per_sec = 25
        self.codec = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
        self.frame_number = 0
        # Basename without extension; assumes '/'-separated paths.
        video_filename = (file_to_read.split("/")[-1]).split(".")[0]
        self.output_filename = "output_%s.avi" % video_filename
    def _generate_working_frames(self):
        # Yield resized frames until the capture is exhausted.
        while True:
            success, frame_from_video = self.capture.read()
            if not success:
                break
            frame_from_video = resized_frame(frame_from_video)
            yield frame_from_video
    def _generate_motion_detection_frames(self):
        # Motion needs three consecutive frames; the first two iterations
        # only prime the sliding window.
        previous_frame = None
        previous_previous_frame = None
        for frame in self._generate_working_frames():
            motion_detection_frame = None
            if previous_previous_frame is not None:
                motion_detection_frame = self._get_motion_detection_frame(previous_previous_frame, previous_frame, frame)
            previous_previous_frame = previous_frame
            previous_frame = frame
            if motion_detection_frame is not None:
                yield motion_detection_frame
    def _get_motion_detection_frame(self, previous_previous_frame, previous_frame, frame):
        # XOR of two successive absolute differences keeps pixels that
        # changed in exactly one of the two steps.
        d1 = cv2.absdiff(frame, previous_frame)
        d2 = cv2.absdiff(previous_frame, previous_previous_frame)
        motion_detection_frame = cv2.bitwise_xor(d1, d2)
        return motion_detection_frame
    def _remove_shakiness(self, frames):
        """Subtract motion seen in a window of recent frames from each frame."""
        clean_frames = []
        min_previous_frame_count = 6
        max_previous_frame_count = 20
        for index, frame in enumerate(frames):
            # Allow early abort from the preview window with 'q'.
            if cv2.waitKey(10) & 0xFF == ord('q'):
                break
            print("Processing {}".format(index))
            # NOTE(review): for small index these slice bounds go negative
            # and select frames from the END of the list, and the
            # missing_frame_count branch below pads from the tail too --
            # this looks like intentional wrap-around, but confirm.
            previous_frames = frames[:index - min_previous_frame_count]
            previous_frames = previous_frames[index - max_previous_frame_count:]
            missing_frame_count = (max_previous_frame_count - min_previous_frame_count) - len(previous_frames)
            if missing_frame_count > 0:
                previous_frames = previous_frames + frames[-missing_frame_count:]
            # cumulative_motion = np.sum(previous_frames, axis=0)
            cumulative_motion = self._get_max_array(previous_frames)
            # Subtract accumulated (shake) motion and clamp at zero.
            final_frame = frame.astype(int) - cumulative_motion.astype(int)
            final_frame[final_frame < 0] = 0
            clean_frames.append(final_frame.astype(np.uint8))
            print("Final sum: {}".format(np.sum(final_frame)))
            cv2.imshow('step1', final_frame)
            # cv2.imshow('orig', frame)
        return clean_frames
    def _get_max_array(self, array_list):
        # Pixel-wise maximum over the list, then dilated by max-ing with
        # shifted copies of itself (np.roll wraps around at the edges).
        resultant_array = np.zeros(array_list[0].shape)
        for array in array_list:
            resultant_array = np.maximum(resultant_array, array)
        for y_offset in range(-self.Y_PIXEL_RANGE, self.Y_PIXEL_RANGE + 1):
            for x_offset in range(-self.X_PIXEL_RANGE, self.X_PIXEL_RANGE + 1):
                offset_array = np.roll(resultant_array, x_offset, axis=1)
                offset_array = np.roll(offset_array, y_offset, axis=0)
                resultant_array = np.maximum(resultant_array, offset_array)
        return resultant_array
    def create(self):
        """Run the full pipeline and write the cleaned video to disk."""
        all_frames = list(self._generate_motion_detection_frames())
        all_frames = self._remove_shakiness(all_frames)
        for motion_detection_frame in all_frames:
            height, width = motion_detection_frame.shape[0: 2]
            # Lazily create the writer once the output frame size is known.
            self.video_writer = self.video_writer or cv2.VideoWriter(self.output_filename, self.codec, self.frames_per_sec, (width, height))
            self.video_writer.write(motion_detection_frame)
            self.frame_number += 1
            print("Writing {}".format(self.frame_number))
        if self.video_writer is not None:
            self.video_writer.release()
if __name__ == "__main__":
    # Hard-coded input path for local testing; originally read sys.argv[1].
    file_to_read = 'C:/Temp/x.mp4' # sys.argv[1]
    # Constructing only opens the capture; .create() (commented out) would
    # actually process the video and write the output file.
    ShakyMotionDetector(file_to_read) # .create() # no need to create output.
print('the end') | gpl-2.0 |
normanmaurer/autobahntestsuite-maven-plugin | src/main/resources/autobahntestsuite/case/case9_4_2.py | 14 | 1250 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case9_4_1 import Case9_4_1
class Case9_4_2(Case9_4_1):
    """Fragmented 4 MiB binary echo test, sent in 256-byte fragments."""

    DESCRIPTION = """Send fragmented binary message message with message payload of length 4 * 2**20 (4M). Sent out in fragments of 256."""
    EXPECTATION = """Receive echo'ed binary message (with payload as sent)."""

    def init(self):
        # Same payload size as the parent case, but a far smaller fragment
        # size, so many more frames are exchanged.
        self.FRAGSIZE = 256
        self.DATALEN = 4 * 2**20
        self.PAYLOAD = "*" * self.DATALEN
        self.WAITSECS = 100
        self.reportTime = True
| apache-2.0 |
Pajinek/spacewalk | backend/server/test/unit-test/rhnSQL/test_exceptions.py | 12 | 1223 | #!/usr/bin/python
# Copyright (c) 2005--2015 Red Hat, Inc.
#
#
#
import unittest
from spacewalk.common.rhnConfig import CFG, initCFG
from spacewalk.server import rhnSQL
import misc_functions
DB_SETTINGS = misc_functions.db_settings("oracle")
class ExceptionsTest(unittest.TestCase):
    """Exercises rhnSQL connection-failure behaviour."""

    def test_failed_connection(self):
        """initDB must raise SQLConnectError for bogus credentials and
        succeed with the real settings from DB_SETTINGS."""
        # Connect to localhost and look for db on a totally bogus port, this
        # makes the test faster.
        host = "localhost"
        username = "x"
        password = "y"
        database = "z"
        port = 9000
        self.assertRaises(
            rhnSQL.SQLConnectError,
            rhnSQL.initDB,
            "oracle",
            host,
            port,
            database,
            username,
            password
        )
        try:
            rhnSQL.initDB(
                backend="oracle",
                username=DB_SETTINGS["user"],
                password=DB_SETTINGS["password"],
                database=DB_SETTINGS["database"]
            )
        except Exception as e:
            # Was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt and hid the actual error; catch Exception
            # and include the failure reason in the report.
            self.fail(
                "Exception raised while trying to connect to the db using proper settings (%s). That's not expected to happen." % e)
if __name__ == '__main__':
    # Run this test module directly with the stdlib test runner.
    unittest.main()
| gpl-2.0 |
ddd332/presto | presto-docs/target/sphinx/reportlab/lib/randomtext.py | 14 | 18655 | #!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/randomtext.py
__version__=''' $Id: randomtext.py 3342 2008-12-12 15:55:34Z andy $ '''
###############################################################################
# generates so-called 'Greek Text' for use in filling documents.
###############################################################################
__doc__="""Like Lorem Ipsum, but more fun and extensible.
This module exposes a function randomText() which generates paragraphs.
These can be used when testing out document templates and stylesheets.
A number of 'themes' are provided - please contribute more!
We need some real Greek text too.
There are currently six themes provided:
STARTUP (words suitable for a business plan - or not as the case may be),
COMPUTERS (names of programming languages and operating systems etc),
BLAH (variations on the word 'blah'),
BUZZWORD (buzzword bingo),
STARTREK (Star Trek),
PRINTING (print-related terms)
PYTHON (snippets and quotes from Monty Python)
CHOMSKY (random lingusitic nonsense)
EXAMPLE USAGE:
from reportlab.lib import randomtext
print randomtext.randomText(randomtext.PYTHON, 10)
This prints a random number of random sentences (up to a limit
of ten) using the theme 'PYTHON'.
"""
#theme one :-)
STARTUP = ['strategic', 'direction', 'proactive', 'venture capital',
'reengineering', 'forecast', 'resources', 'SWOT analysis',
'forward-thinking', 'profit', 'growth', 'doubletalk', 'B2B', 'B2C',
'venture capital', 'IPO', "NASDAQ meltdown - we're all doomed!"]
#theme two - computery things.
COMPUTERS = ['Python', 'Perl', 'Pascal', 'Java', 'Javascript',
'VB', 'Basic', 'LISP', 'Fortran', 'ADA', 'APL', 'C', 'C++',
'assembler', 'Larry Wall', 'Guido van Rossum', 'XML', 'HTML',
'cgi', 'cgi-bin', 'Amiga', 'Macintosh', 'Dell', 'Microsoft',
'firewall', 'server', 'Linux', 'Unix', 'MacOS', 'BeOS', 'AS/400',
'sendmail', 'TCP/IP', 'SMTP', 'RFC822-compliant', 'dynamic',
'Internet', 'A/UX', 'Amiga OS', 'BIOS', 'boot managers', 'CP/M',
'DOS', 'file system', 'FreeBSD', 'Freeware', 'GEOS', 'GNU',
'Hurd', 'Linux', 'Mach', 'Macintosh OS', 'mailing lists', 'Minix',
'Multics', 'NetWare', 'NextStep', 'OS/2', 'Plan 9', 'Realtime',
'UNIX', 'VMS', 'Windows', 'X Windows', 'Xinu', 'security', 'Intel',
'encryption', 'PGP' , 'software', 'ActiveX', 'AppleScript', 'awk',
'BETA', 'COBOL', 'Delphi', 'Dylan', 'Eiffel', 'extreme programming',
'Forth', 'Fortran', 'functional languages', 'Guile', 'format your hard drive',
'Icon', 'IDL', 'Infer', 'Intercal', 'J', 'Java', 'JavaScript', 'CD-ROM',
'JCL', 'Lisp', '"literate programming"', 'Logo', 'MUMPS', 'C: drive',
'Modula-2', 'Modula-3', 'Oberon', 'Occam', 'OpenGL', 'parallel languages',
'Pascal', 'Perl', 'PL/I', 'PostScript', 'Prolog', 'hardware', 'Blue Screen of Death',
'Rexx', 'RPG', 'Scheme', 'scripting languages', 'Smalltalk', 'crash!', 'disc crash',
'Spanner', 'SQL', 'Tcl/Tk', 'TeX', 'TOM', 'Visual', 'Visual Basic', '4GL',
'VRML', 'Virtual Reality Modeling Language', 'difference engine', '...went into "yo-yo mode"',
'Sun', 'Sun Microsystems', 'Hewlett Packard', 'output device',
'CPU', 'memory', 'registers', 'monitor', 'TFT display', 'plasma screen',
'bug report', '"mis-feature"', '...millions of bugs!', 'pizza',
'"illiterate programming"','...lots of pizza!', 'pepperoni pizza',
'coffee', 'Jolt Cola[TM]', 'beer', 'BEER!']
#theme three - 'blah' - for when you want to be subtle. :-)
BLAH = ['Blah', 'BLAH', 'blahblah', 'blahblahblah', 'blah-blah',
'blah!', '"Blah Blah Blah"', 'blah-de-blah', 'blah?', 'blah!!!',
'blah...', 'Blah.', 'blah;', 'blah, Blah, BLAH!', 'Blah!!!']
#theme four - 'buzzword bingo' time!
BUZZWORD = ['intellectual capital', 'market segment', 'flattening',
'regroup', 'platform', 'client-based', 'long-term', 'proactive',
'quality vector', 'out of the loop', 'implement',
'streamline', 'cost-centered', 'phase', 'synergy',
'synergize', 'interactive', 'facilitate',
'appropriate', 'goal-setting', 'empowering', 'low-risk high-yield',
'peel the onion', 'goal', 'downsize', 'result-driven',
'conceptualize', 'multidisciplinary', 'gap analysis', 'dysfunctional',
'networking', 'knowledge management', 'goal-setting',
'mastery learning', 'communication', 'real-estate', 'quarterly',
'scalable', 'Total Quality Management', 'best of breed',
'nimble', 'monetize', 'benchmark', 'hardball',
'client-centered', 'vision statement', 'empowerment',
'lean & mean', 'credibility', 'synergistic',
'backward-compatible', 'hardball', 'stretch the envelope',
'bleeding edge', 'networking', 'motivation', 'best practice',
'best of breed', 'implementation', 'Total Quality Management',
'undefined', 'disintermediate', 'mindset', 'architect',
'gap analysis', 'morale', 'objective', 'projection',
'contribution', 'proactive', 'go the extra mile', 'dynamic',
'world class', 'real estate', 'quality vector', 'credibility',
'appropriate', 'platform', 'projection', 'mastery learning',
'recognition', 'quality', 'scenario', 'performance based',
'solutioning', 'go the extra mile', 'downsize', 'phase',
'networking', 'experiencing slippage', 'knowledge management',
'high priority', 'process', 'ethical', 'value-added', 'implement',
're-factoring', 're-branding', 'embracing change']
#theme five - Star Trek
STARTREK = ['Starfleet', 'Klingon', 'Romulan', 'Cardassian', 'Vulcan',
'Benzite', 'IKV Pagh', 'emergency transponder', 'United Federation of Planets',
'Bolian', "K'Vort Class Bird-of-Prey", 'USS Enterprise', 'USS Intrepid',
'USS Reliant', 'USS Voyager', 'Starfleet Academy', 'Captain Picard',
'Captain Janeway', 'Tom Paris', 'Harry Kim', 'Counsellor Troi',
'Lieutenant Worf', 'Lieutenant Commander Data', 'Dr. Beverly Crusher',
'Admiral Nakamura', 'Irumodic Syndrome', 'Devron system', 'Admiral Pressman',
'asteroid field', 'sensor readings', 'Binars', 'distress signal', 'shuttlecraft',
'cloaking device', 'shuttle bay 2', 'Dr. Pulaski', 'Lwaxana Troi', 'Pacifica',
'William Riker', "Chief O'Brian", 'Soyuz class science vessel', 'Wolf-359',
'Galaxy class vessel', 'Utopia Planitia yards', 'photon torpedo', 'Archer IV',
'quantum flux', 'spacedock', 'Risa', 'Deep Space Nine', 'blood wine',
'quantum torpedoes', 'holodeck', 'Romulan Warbird', 'Betazoid', 'turbolift', 'battle bridge',
'Memory Alpha', '...with a phaser!', 'Romulan ale', 'Ferrengi', 'Klingon opera',
'Quark', 'wormhole', 'Bajoran', 'cruiser', 'warship', 'battlecruiser', '"Intruder alert!"',
'scout ship', 'science vessel', '"Borg Invasion imminent!" ', '"Abandon ship!"',
'Red Alert!', 'warp-core breech', '"All hands abandon ship! This is not a drill!"']
#theme six - print-related terms
PRINTING = ['points', 'picas', 'leading', 'kerning', 'CMYK', 'offset litho',
'type', 'font family', 'typography', 'type designer',
'baseline', 'white-out type', 'WOB', 'bicameral', 'bitmap',
'blockletter', 'bleed', 'margin', 'body', 'widow', 'orphan',
'cicero', 'cursive', 'letterform', 'sidehead', 'dingbat', 'leader',
'DPI', 'drop-cap', 'paragraph', 'En', 'Em', 'flush left', 'left justified',
'right justified', 'centered', 'italic', 'Latin letterform', 'ligature',
'uppercase', 'lowercase', 'serif', 'sans-serif', 'weight', 'type foundry',
'fleuron', 'folio', 'gutter', 'whitespace', 'humanist letterform', 'caption',
'page', 'frame', 'ragged setting', 'flush-right', 'rule', 'drop shadows',
'prepress', 'spot-colour', 'duotones', 'colour separations', 'four-colour printing',
'Pantone[TM]', 'service bureau', 'imagesetter']
#it had to be done!...
#theme seven - the "full Monty"!
PYTHON = ['Good evening ladies and Bruces','I want to buy some cheese', 'You do have some cheese, do you?',
"Of course sir, it's a cheese shop sir, we've got...",'discipline?... naked? ... With a melon!?',
'The Church Police!!' , "There's a dead bishop on the landing", 'Would you like a twist of lemming sir?',
'"Conquistador Coffee brings a new meaning to the word vomit"','Your lupins please',
'Crelm Toothpaste, with the miracle ingredient Fraudulin',
"Well there's the first result and the Silly Party has held Leicester.",
'Hello, I would like to buy a fish license please', "Look, it's people like you what cause unrest!",
"When we got home, our Dad would thrash us to sleep with his belt!", 'Luxury', "Gumby Brain Specialist",
"My brain hurts!!!", "My brain hurts too.", "How not to be seen",
"In this picture there are 47 people. None of them can be seen",
"Mrs Smegma, will you stand up please?",
"Mr. Nesbitt has learned the first lesson of 'Not Being Seen', not to stand up.",
"My hovercraft is full of eels", "Ah. You have beautiful thighs.", "My nipples explode with delight",
"Drop your panties Sir William, I cannot wait 'til lunchtime",
"I'm a completely self-taught idiot.", "I always wanted to be a lumberjack!!!",
"Told you so!! Oh, coitus!!", "",
"Nudge nudge?", "Know what I mean!", "Nudge nudge, nudge nudge?", "Say no more!!",
"Hello, well it's just after 8 o'clock, and time for the penguin on top of your television set to explode",
"Oh, intercourse the penguin!!", "Funny that penguin being there, isn't it?",
"I wish to register a complaint.", "Now that's what I call a dead parrot", "Pining for the fjords???",
"No, that's not dead, it's ,uhhhh, resting", "This is an ex-parrot!!",
"That parrot is definitely deceased.", "No, no, no - it's spelt Raymond Luxury Yach-t, but it's pronounced 'Throatwobbler Mangrove'.",
"You're a very silly man and I'm not going to interview you.", "No Mungo... never kill a customer."
"And I'd like to conclude by putting my finger up my nose",
"egg and Spam", "egg bacon and Spam", "egg bacon sausage and Spam", "Spam bacon sausage and Spam",
"Spam egg Spam Spam bacon and Spam", "Spam sausage Spam Spam Spam bacon Spam tomato and Spam",
"Spam Spam Spam egg and Spam", "Spam Spam Spam Spam Spam Spam baked beans Spam Spam Spam",
"Spam!!", "I don't like Spam!!!", "You can't have egg, bacon, Spam and sausage without the Spam!",
"I'll have your Spam. I Love it!",
"I'm having Spam Spam Spam Spam Spam Spam Spam baked beans Spam Spam Spam and Spam",
"Have you got anything without Spam?", "There's Spam egg sausage and Spam, that's not got much Spam in it.",
"No one expects the Spanish Inquisition!!", "Our weapon is surprise, surprise and fear!",
"Get the comfy chair!", "Amongst our weaponry are such diverse elements as: fear, surprise, ruthless efficiency, an almost fanatical devotion to the Pope, and nice red uniforms - Oh damn!",
"Nobody expects the... Oh bugger!", "What swims in the sea and gets caught in nets? Henri Bergson?",
"Goats. Underwater goats with snorkels and flippers?", "A buffalo with an aqualung?",
"Dinsdale was a looney, but he was a happy looney.", "Dinsdale!!",
"The 127th Upper-Class Twit of the Year Show", "What a great Twit!",
"thought by many to be this year's outstanding twit",
"...and there's a big crowd here today to see these prize idiots in action.",
"And now for something completely different.", "Stop that, it's silly",
"We interrupt this program to annoy you and make things generally irritating",
"This depraved and degrading spectacle is going to stop right now, do you hear me?",
"Stop right there!", "This is absolutely disgusting and I'm not going to stand for it",
"I object to all this sex on the television. I mean, I keep falling off",
"Right! Stop that, it's silly. Very silly indeed", "Very silly indeed", "Lemon curry?",
"And now for something completely different, a man with 3 buttocks",
"I've heard of unisex, but I've never had it", "That's the end, stop the program! Stop it!"]
leadins=[
"To characterize a linguistic level L,",
"On the other hand,",
"This suggests that",
"It appears that",
"Furthermore,",
"We will bring evidence in favor of the following thesis: ",
"To provide a constituent structure for T(Z,K),",
"From C1, it follows that",
"For any transformation which is sufficiently diversified in application to be of any interest,",
"Analogously,",
"Clearly,",
"Note that",
"Of course,",
"Suppose, for instance, that",
"Thus",
"With this clarification,",
"Conversely,",
"We have already seen that",
"By combining adjunctions and certain deformations,",
"I suggested that these results would follow from the assumption that",
"If the position of the trace in (99c) were only relatively inaccessible to movement,",
"However, this assumption is not correct, since",
"Comparing these examples with their parasitic gap counterparts in (96) and (97), we see that",
"In the discussion of resumptive pronouns following (81),",
"So far,",
"Nevertheless,",
"For one thing,",
"Summarizing, then, we assume that",
"A consequence of the approach just outlined is that",
"Presumably,",
"On our assumptions,",
"It may be, then, that",
"It must be emphasized, once again, that",
"Let us continue to suppose that",
"Notice, incidentally, that",
"A majority of informed linguistic specialists agree that",
]
subjects = [
"the notion of level of grammaticalness",
"a case of semigrammaticalness of a different sort",
"most of the methodological work in modern linguistics",
"a subset of English sentences interesting on quite independent grounds",
"the natural general principle that will subsume this case",
"an important property of these three types of EC",
"any associated supporting element",
"the appearance of parasitic gaps in domains relatively inaccessible to ordinary extraction",
"the speaker-hearer's linguistic intuition",
"the descriptive power of the base component",
"the earlier discussion of deviance",
"this analysis of a formative as a pair of sets of features",
"this selectionally introduced contextual feature",
"a descriptively adequate grammar",
"the fundamental error of regarding functional notions as categorial",
"relational information",
"the systematic use of complex symbols",
"the theory of syntactic features developed earlier",
]
verbs= [
"can be defined in such a way as to impose",
"delimits",
"suffices to account for",
"cannot be arbitrary in",
"is not subject to",
"does not readily tolerate",
"raises serious doubts about",
"is not quite equivalent to",
"does not affect the structure of",
"may remedy and, at the same time, eliminate",
"is not to be considered in determining",
"is to be regarded as",
"is unspecified with respect to",
"is, apparently, determined by",
"is necessary to impose an interpretation on",
"appears to correlate rather closely with",
"is rather different from",
]
objects = [
"problems of phonemic and morphological analysis.",
"a corpus of utterance tokens upon which conformity has been defined by the paired utterance test.",
"the traditional practice of grammarians.",
"the levels of acceptability from fairly high (e.g. (99a)) to virtual gibberish (e.g. (98d)).",
"a stipulation to place the constructions into these various categories.",
"a descriptive fact.",
"a parasitic gap construction.",
"the extended c-command discussed in connection with (34).",
"the ultimate standard that determines the accuracy of any proposed grammar.",
"the system of base rules exclusive of the lexicon.",
"irrelevant intervening contexts in selectional rules.",
"nondistinctness in the sense of distinctive feature theory.",
"a general convention regarding the forms of the grammar.",
"an abstract underlying order.",
"an important distinction in language use.",
"the requirement that branching is not tolerated within the dominance scope of a complex symbol.",
"the strong generative capacity of the theory.",
]
def format_wisdom(text, line_length=72):
    """Word-wrap *text* to *line_length* columns.

    Falls back to returning *text* unchanged if wrapping fails for any
    reason (the historical behaviour, kept for robustness).
    """
    try:
        import textwrap
        return textwrap.fill(text, line_length)
    except Exception:
        # Was a bare "except:", which also trapped SystemExit and
        # KeyboardInterrupt; keep the best-effort fallback for real errors only.
        return text
def chomsky(times=1):
    """Generate *times* sentences of random Chomsky-style linguistic nonsense.

    Each sentence is assembled from one random pick out of each of the
    module-level ``leadins``, ``subjects``, ``verbs`` and ``objects`` lists.
    If *times* is not an int, the module docstring is returned instead.
    """
    if not isinstance(times, int):
        return format_wisdom(__doc__)
    import random
    prevparts = []
    newparts = []
    output = []
    # range instead of xrange: identical behaviour here, and it also runs
    # under Python 3 (xrange is Python-2-only).
    for _ in range(times):
        for partlist in (leadins, subjects, verbs, objects):
            # Re-draw until we pick a part not used in the previous sentence.
            while 1:
                part = random.choice(partlist)
                if part not in prevparts:
                    break
            newparts.append(part)
        output.append(' '.join(newparts))
        prevparts = newparts
        newparts = []
    return format_wisdom(' '.join(output))
from reportlab import rl_config
if rl_config.invariant:
    # In "invariant" mode seed the RNG once with a fixed value so the
    # random-text output is reproducible across runs; the _random flag
    # guards against re-seeding if this module is imported again.
    if not getattr(rl_config,'_random',None):
        rl_config._random = 1
        import random
        random.seed(2342471922L)
        del random
del rl_config
def randomText(theme=STARTUP, sentences=5):
    """Return between 1 and *sentences* random sentences of filler text.

    *theme* is one of this module's word lists, or its name as a string;
    the string 'chomsky' dispatches to the chomsky() generator instead.
    Raises ValueError for an unknown theme name.
    """
    #this may or may not be appropriate in your company
    if isinstance(theme, str):  # isinstance instead of type(...) == type('')
        if theme.lower() == 'chomsky':
            return chomsky(sentences)
        elif theme.upper() in ('STARTUP', 'COMPUTERS', 'BLAH', 'BUZZWORD', 'STARTREK', 'PRINTING', 'PYTHON'):
            theme = globals()[theme]
        else:
            raise ValueError('Unknown theme "%s"' % theme)
    from random import randint, choice
    RANDOMWORDS = theme
    # Collect pieces and join once at the end instead of repeated string
    # concatenation; the sequence of randint/choice calls is unchanged, so
    # seeded (rl_config.invariant) output stays identical.
    pieces = []
    for sentenceno in range(randint(1, sentences)):
        pieces.append('Blah')
        for wordno in range(randint(10, 25)):
            if randint(0, 4) == 0:
                word = choice(RANDOMWORDS)
            else:
                word = 'blah'
            pieces.append(' ' + word)
        pieces.append('. ')
    return ''.join(pieces)
if __name__=='__main__':
    # Demo: print five sentences of Chomsky nonsense (Python 2 print statement).
    print chomsky(5)
| apache-2.0 |
apbard/scipy | scipy/sparse/csc.py | 24 | 7786 | """Compressed Sparse Column matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['csc_matrix', 'isspmatrix_csc']
import numpy as np
from .base import spmatrix
from ._sparsetools import csc_tocsr
from . import _sparsetools
from .sputils import upcast, isintlike, IndexMixin, get_index_dtype
from .compressed import _cs_matrix
class csc_matrix(_cs_matrix, IndexMixin):
    """
    Compressed Sparse Column matrix

    This can be instantiated in several ways:

        csc_matrix(D)
            with a dense matrix or rank-2 ndarray D

        csc_matrix(S)
            with another sparse matrix S (equivalent to S.tocsc())

        csc_matrix((M, N), [dtype])
            to construct an empty matrix with shape (M, N)
            dtype is optional, defaulting to dtype='d'.

        csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
            where ``data``, ``row_ind`` and ``col_ind`` satisfy the
            relationship ``a[row_ind[k], col_ind[k]] = data[k]``.

        csc_matrix((data, indices, indptr), [shape=(M, N)])
            is the standard CSC representation where the row indices for
            column i are stored in ``indices[indptr[i]:indptr[i+1]]``
            and their corresponding values are stored in
            ``data[indptr[i]:indptr[i+1]]``. If the shape parameter is
            not supplied, the matrix dimensions are inferred from
            the index arrays.

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
        Number of nonzero elements
    data
        Data array of the matrix
    indices
        CSC format index array
    indptr
        CSC format index pointer array
    has_sorted_indices
        Whether indices are sorted

    Notes
    -----
    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    Advantages of the CSC format
        - efficient arithmetic operations CSC + CSC, CSC * CSC, etc.
        - efficient column slicing
        - fast matrix vector products (CSR, BSR may be faster)

    Disadvantages of the CSC format
        - slow row slicing operations (consider CSR)
        - changes to the sparsity structure are expensive (consider LIL or DOK)

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> csc_matrix((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> row = np.array([0, 2, 2, 0, 1, 2])
    >>> col = np.array([0, 0, 1, 2, 2, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csc_matrix((data, (row, col)), shape=(3, 3)).toarray()
    array([[1, 0, 4],
           [0, 0, 5],
           [2, 3, 6]])

    >>> indptr = np.array([0, 2, 3, 6])
    >>> indices = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csc_matrix((data, indices, indptr), shape=(3, 3)).toarray()
    array([[1, 0, 4],
           [0, 0, 5],
           [2, 3, 6]])
    """
    format = 'csc'

    def transpose(self, axes=None, copy=False):
        if axes is not None:
            raise ValueError(("Sparse matrices do not support "
                              "an 'axes' parameter because swapping "
                              "dimensions is the only logical permutation."))
        M, N = self.shape
        # The transpose of a CSC matrix is the CSR matrix with the same
        # data/indices/indptr arrays and swapped dimensions (no copy unless
        # requested).
        from .csr import csr_matrix
        return csr_matrix((self.data, self.indices,
                           self.indptr), (N, M), copy=copy)
    transpose.__doc__ = spmatrix.transpose.__doc__

    def __iter__(self):
        # Iterate over rows; conversion to CSR makes row access efficient.
        for r in self.tocsr():
            yield r

    def tocsc(self, copy=False):
        if copy:
            return self.copy()
        else:
            return self
    tocsc.__doc__ = spmatrix.tocsc.__doc__

    def tocsr(self, copy=False):
        # Convert via the C routine csc_tocsr. Output arrays are allocated
        # here; the index dtype is chosen large enough for both nnz and the
        # column count. Note: `copy` is accepted for interface compatibility
        # but a new matrix is always built.
        M,N = self.shape
        idx_dtype = get_index_dtype((self.indptr, self.indices),
                                    maxval=max(self.nnz, N))
        indptr = np.empty(M + 1, dtype=idx_dtype)
        indices = np.empty(self.nnz, dtype=idx_dtype)
        data = np.empty(self.nnz, dtype=upcast(self.dtype))
        csc_tocsr(M, N,
                  self.indptr.astype(idx_dtype),
                  self.indices.astype(idx_dtype),
                  self.data,
                  indptr,
                  indices,
                  data)
        from .csr import csr_matrix
        A = csr_matrix((data, indices, indptr), shape=self.shape, copy=False)
        # csc_tocsr emits sorted column indices per row.
        A.has_sorted_indices = True
        return A
    tocsr.__doc__ = spmatrix.tocsr.__doc__

    def __getitem__(self, key):
        # Use CSR to implement fancy indexing: index the transpose (a CSR
        # matrix) with swapped row/col and transpose back where needed.
        row, col = self._unpack_index(key)
        # Things that return submatrices. row or col is a int or slice.
        if (isinstance(row, slice) or isinstance(col, slice) or
            isintlike(row) or isintlike(col)):
            return self.T[col, row].T
        # Things that return a sequence of values.
        else:
            return self.T[col, row]

    def nonzero(self):
        # CSC can't use _cs_matrix's .nonzero method because it
        # returns the indices sorted for self transposed.
        # Get row and col indices, from _cs_matrix.tocoo
        major_dim, minor_dim = self._swap(self.shape)
        minor_indices = self.indices
        major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
        _sparsetools.expandptr(major_dim, self.indptr, major_indices)
        row, col = self._swap((major_indices, minor_indices))
        # Remove explicit zeros
        nz_mask = self.data != 0
        row = row[nz_mask]
        col = col[nz_mask]
        # Sort them to be in C-style order (stable mergesort keeps columns
        # within each row in their existing ascending order)
        ind = np.argsort(row, kind='mergesort')
        row = row[ind]
        col = col[ind]
        return row, col
    nonzero.__doc__ = _cs_matrix.nonzero.__doc__

    def getrow(self, i):
        """Returns a copy of row i of the matrix, as a (1 x n)
        CSR matrix (row vector).
        """
        # we convert to CSR to maintain compatibility with old impl.
        # in spmatrix.getrow()
        return self._get_submatrix(i, slice(None)).tocsr()

    def getcol(self, i):
        """Returns a copy of column i of the matrix, as a (m x 1)
        CSC matrix (column vector).
        """
        M, N = self.shape
        i = int(i)
        # Support negative column indices, Python-style.
        if i < 0:
            i += N
        if i < 0 or i >= N:
            raise IndexError('index (%d) out of range' % i)
        # Column i's data/indices live in the slice indptr[i]:indptr[i+1].
        idx = slice(*self.indptr[i:i+2])
        data = self.data[idx].copy()
        indices = self.indices[idx].copy()
        indptr = np.array([0, len(indices)], dtype=self.indptr.dtype)
        return csc_matrix((data, indices, indptr), shape=(M, 1),
                          dtype=self.dtype, copy=False)

    # these functions are used by the parent class (_cs_matrix)
    # to remove redundancy between csc_matrix and csr_matrix
    def _swap(self, x):
        """swap the members of x if this is a column-oriented matrix
        """
        return x[1], x[0]
def isspmatrix_csc(x):
    """Check whether *x* is a matrix of csc_matrix type.

    Parameters
    ----------
    x
        object to check for being a csc matrix

    Returns
    -------
    bool
        True if x is a csc matrix, False otherwise

    Examples
    --------
    >>> from scipy.sparse import csc_matrix, isspmatrix_csc
    >>> isspmatrix_csc(csc_matrix([[5]]))
    True
    >>> from scipy.sparse import csc_matrix, csr_matrix, isspmatrix_csc
    >>> isspmatrix_csc(csr_matrix([[5]]))
    False
    """
    return isinstance(x, csc_matrix)
| bsd-3-clause |
petrjasek/superdesk-ntb | server/ntb/macros/set_desk_metadata_macro.py | 2 | 1042 |
from superdesk import get_resource_service
from superdesk.metadata.item import CONTENT_STATE
# Maps item fields to the vocabulary scheme whose values the destination
# desk's template may override on ingest; a scheme of None means values
# without a scheme.
# NOTE(review): presumably anpa_category values carry no 'scheme' key —
# confirm against the content templates.
FIELD_SCHEME_MAP = {
    'subject': 'category',
    'genre': 'genre_custom',
    'anpa_category': None,
}
def callback(item, **kwargs):
    """Copy scheme-specific vocabulary values from the destination desk's
    default content template onto a freshly ingested item.

    Mutates *item* in place; silently returns when the item is not in the
    ingested state, no desk/template is available, or the template is gone.
    """
    if item.get('state') not in (CONTENT_STATE.INGESTED, ):
        return
    try:
        template_id = kwargs["desk"]["default_content_template"]
    except KeyError:
        return
    template = get_resource_service('content_templates').find_one(req=None, _id=template_id)
    if not template:
        return
    template_data = template.get("data") or {}
    for field, scheme in FIELD_SCHEME_MAP.items():
        item.setdefault(field, [])
        # Strip the item's existing values for this scheme, then append the
        # template's values for the same scheme (order preserved).
        item[field] = [value for value in item[field] if value.get('scheme') != scheme]
        item[field].extend(
            value for value in template_data.get(field) or []
            if value.get('scheme') == scheme
        )
# Macro registration metadata read by Superdesk's macro loader.
name = 'Set Desk Metadata'
label = 'Set Desk Metadata'
access_type = 'frontend'
action_type = 'direct'
| agpl-3.0 |
yajiedesign/mxnet | python/mxnet/profiler.py | 6 | 18359 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""Profiler setting methods."""
import ctypes
import contextlib
import contextvars
import warnings
from .base import _LIB, check_call, c_str, ProfileHandle, c_str_array, py_str, KVStoreHandle
profiler_kvstore_handle = KVStoreHandle()
def set_kvstore_handle(handle):
    """Set the module-level KVStore handle that all subsequent profiler
    C-API calls are bound to.

    Parameters
    ----------
    handle : KVStoreHandle
        Handle of the KVStore instance to profile.
    """
    global profiler_kvstore_handle
    profiler_kvstore_handle = handle
def set_config(**kwargs):
    """Set up the configure of profiler (only accepts keyword arguments).

    Parameters
    ----------
    filename : string,
        output file for profile data
    gpu_memory_profile_filename_prefix : string
        filename prefix for the GPU memory profile
    profile_all : boolean,
        all profile types enabled
    profile_symbolic : boolean,
        whether to profile symbolic operators
    profile_imperative : boolean,
        whether to profile imperative operators
    profile_memory : boolean,
        whether to profile memory usage
    profile_api : boolean,
        whether to profile the C API
    continuous_dump : boolean,
        whether to periodically dump profiling data to file
    dump_period : float,
        seconds between profile data dumps
    aggregate_stats : boolean,
        whether to maintain aggregate stats in memory for console
        dump. Has some negative performance impact.
    profile_process : string
        whether to profile kvstore `server` or `worker`.
        server can only be profiled when kvstore is of type dist.
        if this is not passed, defaults to `worker`
    """
    # The C API takes parallel arrays of key/value strings; stringify the
    # values here. (Replaces redundant identity comprehensions over
    # kwargs.keys()/values() with opaque kk/vv names.)
    keys = c_str_array(list(kwargs))
    values = c_str_array([str(val) for val in kwargs.values()])
    check_call(_LIB.MXSetProcessProfilerConfig(len(kwargs), keys, values,
                                               profiler_kvstore_handle))
def profiler_set_config(mode='symbolic', filename='profile.json'):
    """Set up the configure of profiler (Deprecated).

    Parameters
    ----------
    mode : string, optional
        Indicates whether to enable the profiler, can
        be 'symbolic', or 'all'. Defaults to `symbolic`.
    filename : string, optional
        The name of output trace file. Defaults to 'profile.json'.
    """
    warnings.warn('profiler.profiler_set_config() is deprecated. '
                  'Please use profiler.set_config() instead')
    # Build the key/value string arrays directly (replaces redundant
    # identity comprehensions over literal lists).
    keys = c_str_array(["profile_" + mode, "filename"])
    values = c_str_array([str(True), str(filename)])
    assert len(keys) == len(values)
    check_call(_LIB.MXSetProcessProfilerConfig(len(keys), keys, values, profiler_kvstore_handle))
def set_state(state='stop', profile_process='worker'):
    """Set up the profiler state to 'run' or 'stop'.

    Parameters
    ----------
    state : string, optional
        Indicates whether to run the profiler, can
        be 'stop' or 'run'. Default is `stop`.
    profile_process : string
        whether to profile kvstore `server` or `worker`.
        server can only be profiled when kvstore is of type dist.
        if this is not passed, defaults to `worker`
    """
    # Translate the string options into the integer codes the C API expects.
    state_codes = {'stop': 0, 'run': 1}
    process_codes = {'worker': 0, 'server': 1}
    check_call(_LIB.MXSetProcessProfilerState(
        ctypes.c_int(state_codes[state]),
        process_codes[profile_process],
        profiler_kvstore_handle))
def profiler_set_state(state='stop'):
    """Set up the profiler state to 'run' or 'stop' (Deprecated).

    Parameters
    ----------
    state : string, optional
        Indicates whether to run the profiler, can
        be 'stop' or 'run'. Default is `stop`.
    """
    msg = ('profiler.profiler_set_state() is deprecated. '
           'Please use profiler.set_state() instead')
    warnings.warn(msg)
    set_state(state)
def dump(finished=True, profile_process='worker'):
    """Dump profile and stop profiler. Use this to save profile
    in advance in case your program cannot exit normally.

    Parameters
    ----------
    finished : boolean
        Indicates whether to stop statistic output (dumping) after this dump.
        Default is True
    profile_process : string
        whether to profile kvstore `server` or `worker`.
        server can only be profiled when kvstore is of type dist.
        if this is not passed, defaults to `worker`
    """
    # Use truthiness rather than `finished is True`, which silently treated
    # any truthy non-True value (e.g. 1, 'yes') as "not finished".
    fin = 1 if finished else 0
    profile_process2int = {'worker': 0, 'server': 1}
    check_call(_LIB.MXDumpProcessProfile(fin,
                                         profile_process2int[profile_process],
                                         profiler_kvstore_handle))
def dump_profile():
    """Dump profile and stop profiler. Use this to save profile
    in advance in case your program cannot exit normally."""
    msg = ('profiler.dump_profile() is deprecated. '
           'Please use profiler.dump() instead')
    warnings.warn(msg)
    dump(True)
def dumps(reset=False, format='table', sort_by='total', ascending=False):
    """Return a printable string of aggregate profile stats.

    Parameters
    ----------
    reset: boolean
        indicates whether to clean aggeregate statistical data collected up to this point
    format: string
        whether to return the aggregate stats in table of json format
        can take 'table' or 'json'
        defaults to 'table'
    sort_by: string
        can take 'total', 'avg', 'min', 'max', or 'count'
        by which stat to sort the entries in each category
        defaults to 'total'
    ascending: boolean
        whether to sort ascendingly
        defaults to False
    """
    # Out-parameter filled by the C API with the formatted stats string.
    debug_str = ctypes.c_char_p()
    reset_to_int = {False: 0, True: 1}
    format_to_int = {'table': 0, 'json': 1}
    sort_by_to_int = {'total': 0, 'avg': 1, 'min': 2, 'max': 3, 'count': 4}
    asc_to_int = {False: 0, True: 1}
    assert format in format_to_int, \
        "Invalid value provided for format: {0}. Support: 'table', 'json'".format(format)
    # Implicit string concatenation here replaces a backslash continuation
    # *inside* the literal, which used to embed raw source indentation in
    # the error message.
    assert sort_by in sort_by_to_int, \
        ("Invalid value provided for sort_by: {0}. "
         "Support: 'total', 'avg', 'min', 'max', 'count'").format(sort_by)
    assert ascending in asc_to_int, \
        "Invalid value provided for ascending: {0}. Support: False, True".format(ascending)
    assert reset in reset_to_int, \
        "Invalid value provided for reset: {0}. Support: False, True".format(reset)
    check_call(_LIB.MXAggregateProfileStatsPrint(ctypes.byref(debug_str),
                                                 reset_to_int[reset],
                                                 format_to_int[format],
                                                 sort_by_to_int[sort_by],
                                                 asc_to_int[ascending]))
    return py_str(debug_str.value)
def pause(profile_process='worker'):
    """Pause profiling.

    Parameters
    ----------
    profile_process : string
        whether to profile kvstore `server` or `worker`.
        server can only be profiled when kvstore is of type dist.
        if this is not passed, defaults to `worker`
    """
    # First argument 1 == pause (see resume() for the counterpart).
    process_codes = {'worker': 0, 'server': 1}
    check_call(_LIB.MXProcessProfilePause(
        1, process_codes[profile_process], profiler_kvstore_handle))
def resume(profile_process='worker'):
    """Resume paused profiling.

    Parameters
    ----------
    profile_process : string
        whether to profile kvstore `server` or `worker`.
        server can only be profiled when kvstore is of type dist.
        if this is not passed, defaults to `worker`
    """
    # First argument 0 == resume (see pause() for the counterpart).
    process_codes = {'worker': 0, 'server': 1}
    check_call(_LIB.MXProcessProfilePause(
        0, process_codes[profile_process], profiler_kvstore_handle))
class Domain(object):
    """Profiling domain, used to group sub-objects like tasks, counters, etc into categories
    Serves as part of 'categories' for chrome://tracing

    Note: Domain handles are never destroyed.

    Parameters
    ----------
    name : string
        Name of the domain
    """

    def __init__(self, name):
        self.name = name
        # Opaque handle owned by the C backend; intentionally never freed
        # (see class note above).
        self.handle = ProfileHandle()
        check_call(_LIB.MXProfileCreateDomain(c_str(self.name), ctypes.byref(self.handle)))

    def __str__(self):
        return self.name

    def new_task(self, name):
        """Create new Task object owned by this domain

        Parameters
        ----------
        name : string
            Name of the task
        """
        return Task(self, name)

    def new_frame(self, name):
        """Create new Frame object owned by this domain

        Parameters
        ----------
        name : string
            Name of the frame
        """
        return Frame(self, name)

    def new_counter(self, name, value=None):
        """Create new Counter object owned by this domain

        Parameters
        ----------
        name : string
            Name of the counter
        value : integer, optional
            Initial value of the counter
        """
        return Counter(self, name, value)

    def new_marker(self, name):
        """Create new Marker object owned by this domain

        Parameters
        ----------
        name : string
            Name of the marker
        """
        return Marker(self, name)
class Task(object):
    """Profiling Task class.

    A task is a logical unit of work performed by a particular thread.
    Tasks can nest; thus, tasks typically correspond to functions, scopes, or a case block
    in a switch statement.
    You can use the Task API to assign tasks to threads.

    This is different from Frame in that all profiling statistics for passes
    through the task's begin and endpoints are accumulated together into a single statistical
    analysis, rather than a separate analysis for each pass (as with a Frame)

    Parameters
    ----------
    domain : Domain object
        Domain to which this object belongs
    name : string
        Name of the task
    """

    def __init__(self, domain, name):
        self.name = name
        # Backend handle; released in __del__ via MXProfileDestroyHandle.
        self.handle = ProfileHandle()
        check_call(_LIB.MXProfileCreateTask(domain.handle,
                                            c_str(self.name),
                                            ctypes.byref(self.handle)))

    def __del__(self):
        if self.handle is not None:
            check_call(_LIB.MXProfileDestroyHandle(self.handle))

    def start(self):
        """Start timing scope for this object"""
        check_call(_LIB.MXProfileDurationStart(self.handle))

    def stop(self):
        """Stop timing scope for this object"""
        check_call(_LIB.MXProfileDurationStop(self.handle))

    def __str__(self):
        return self.name
class Frame(object):
    """Profiling Frame class.

    Use the frame API to insert calls to the desired places in your code and analyze
    performance per frame, where frame is the time period between frame begin and end points.
    When frames are displayed in Intel VTune Amplifier, they are displayed in a
    separate track, so they provide a way to visually separate this data from normal task data.

    This is different from Task in that each 'Frame' duration will be a discretely-numbered
    event in the VTune output, as well as its rate (frame-rate) shown. This is analogous to
    profiling each frame of some visual output, such as rendering a video game frame.

    Parameters
    ----------
    domain : Domain object
        Domain to which this object belongs
    name : string
        Name of the frame
    """

    def __init__(self, domain, name):
        self.name = name
        # Backend handle; released in __del__ via MXProfileDestroyHandle.
        self.handle = ProfileHandle()
        check_call(_LIB.MXProfileCreateFrame(domain.handle,
                                             c_str(self.name),
                                             ctypes.byref(self.handle)))

    def __del__(self):
        if self.handle is not None:
            check_call(_LIB.MXProfileDestroyHandle(self.handle))

    def start(self):
        """Start timing scope for this object"""
        check_call(_LIB.MXProfileDurationStart(self.handle))

    def stop(self):
        """Stop timing scope for this object"""
        check_call(_LIB.MXProfileDurationStop(self.handle))

    def __str__(self):
        return self.name
class Event(object):
    """Profiling Event class.

    The event API is used to observe when demarcated events occur in your application, or to
    identify how long it takes to execute demarcated regions of code. Set annotations in the
    application to demarcate areas where events of interest occur.
    After running analysis, you can see the events marked in the Timeline pane.
    Event API is a per-thread function that works in resumed state.
    This function does not work in paused state.

    Parameters
    ----------
    name : string
        Name of the event
    """

    def __init__(self, name):
        self.name = name
        # Backend handle; released in __del__ via MXProfileDestroyHandle.
        # Note: unlike Task/Frame, an Event is not attached to a Domain.
        self.handle = ProfileHandle()
        check_call(_LIB.MXProfileCreateEvent(c_str(self.name), ctypes.byref(self.handle)))

    def __del__(self):
        if self.handle is not None:
            check_call(_LIB.MXProfileDestroyHandle(self.handle))

    def start(self):
        """Start timing scope for this object"""
        check_call(_LIB.MXProfileDurationStart(self.handle))

    def stop(self):
        """Stop timing scope for this object"""
        check_call(_LIB.MXProfileDurationStop(self.handle))

    def __str__(self):
        return self.name
class Counter(object):
    """Profiling Counter class.

    The counter event can track a value as it changes over time.

    Parameters
    ----------
    domain : Domain object
        Domain to which this object belongs
    name : string
        Name of the counter
    value: integer, optional
        Initial value of the counter
    """

    def __init__(self, domain, name, value=None):
        self.name = name
        # Backend handle; released in __del__ via MXProfileDestroyHandle.
        self.handle = ProfileHandle()
        check_call(_LIB.MXProfileCreateCounter(domain.handle,
                                               c_str(name),
                                               ctypes.byref(self.handle)))
        if value is not None:
            self.set_value(value)

    def __del__(self):
        if self.handle is not None:
            check_call(_LIB.MXProfileDestroyHandle(self.handle))

    def set_value(self, value):
        """Set counter value.

        Parameters
        ----------
        value : int
            Value for the counter
        """
        check_call(_LIB.MXProfileSetCounter(self.handle, int(value)))

    def increment(self, delta=1):
        """Increment counter value.

        Parameters
        ----------
        delta : int
            Amount by which to add to the counter
        """
        check_call(_LIB.MXProfileAdjustCounter(self.handle, int(delta)))

    def decrement(self, delta=1):
        """Decrement counter value.

        Parameters
        ----------
        delta : int
            Amount by which to subtract from the counter
        """
        check_call(_LIB.MXProfileAdjustCounter(self.handle, -int(delta)))

    def __iadd__(self, delta):
        # Support `counter += n` as sugar for increment().
        self.increment(delta)
        return self

    def __isub__(self, delta):
        # Support `counter -= n` as sugar for decrement().
        self.decrement(delta)
        return self

    def __str__(self):
        return self.name
class Marker(object):
    """Set marker for an instant in time.

    The marker event marks a particular instant in time across some scope boundaries.

    Parameters
    ----------
    domain : Domain object
        Domain to which this object belongs
    name : string
        Name of the marker
    """

    def __init__(self, domain, name):
        # No backend handle is created here; the C call happens in mark().
        self.name = name
        self.domain = domain

    def mark(self, scope='process'):  # pylint: disable=redefined-outer-name
        """Set up the profiler state to record operator.

        Parameters
        ----------
        scope : string, optional
            Indicates what scope the marker should refer to.
            Can be 'global', 'process', 'thread', 'task', and 'marker'
            Default is `process`.
        """
        check_call(_LIB.MXProfileSetMarker(self.domain.handle, c_str(self.name), c_str(scope)))
@contextlib.contextmanager
def scope(name='<unk>:', append_mode=True):
    """Assign the profiler scope for the GPU memory profiler.

    It is implicitly invoked when the Gluon API is used.

    Parameters
    ----------
    name : str
        Name of the profiler scope; a trailing ':' is appended if missing.
    append_mode : bool
        Whether to append the old profiler scope at the front.
    """
    name = name + ":" if not name.endswith(":") else name
    if append_mode and _current_scope.get() != "<unk>:":
        name = _current_scope.get() + name
    token = _current_scope.set(name)
    # Invoke the C API to propagate the profiler scope information to the
    # C++ backend.
    check_call(_LIB.MXSetProfilerScope(c_str(name)))
    try:
        yield name
    finally:
        # Restore the previous scope even if the with-body raised; without
        # this, an exception left the profiler scope permanently changed on
        # both the Python and C++ sides.
        _current_scope.reset(token)
        check_call(_LIB.MXSetProfilerScope(c_str(_current_scope.get())))
# initialize the default profiler scope
# '<unk>:' is the sentinel meaning "no user scope set"; scope() checks it
# before prepending.
_current_scope = contextvars.ContextVar('profilerscope', default='<unk>:')
| apache-2.0 |
Twistbioscience/incubator-airflow | airflow/hooks/__init__.py | 2 | 2929 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
# ------------------------------------------------------------------------
#
# #TODO #FIXME Airflow 2.0
#
# Old import machinary below.
#
# This is deprecated but should be kept until Airflow 2.0
# for compatibility.
#
# ------------------------------------------------------------------------
# Imports the hooks dynamically while keeping the package API clean,
# abstracting the underlying modules
# Map: hook submodule name -> list of class names it exports. Consumed by
# AirflowImporter below to lazily import hooks on attribute access.
_hooks = {
    'base_hook': ['BaseHook'],
    'hive_hooks': [
        'HiveCliHook',
        'HiveMetastoreHook',
        'HiveServer2Hook',
    ],
    'hdfs_hook': ['HDFSHook'],
    'webhdfs_hook': ['WebHDFSHook'],
    'pig_hook': ['PigCliHook'],
    'mysql_hook': ['MySqlHook'],
    'postgres_hook': ['PostgresHook'],
    'presto_hook': ['PrestoHook'],
    'samba_hook': ['SambaHook'],
    'sqlite_hook': ['SqliteHook'],
    'S3_hook': ['S3Hook'],
    'zendesk_hook': ['ZendeskHook'],
    'http_hook': ['HttpHook'],
    'druid_hook': ['DruidHook'],
    'jdbc_hook': ['JdbcHook'],
    'dbapi_hook': ['DbApiHook'],
    'mssql_hook': ['MsSqlHook'],
    'oracle_hook': ['OracleHook'],
}

import os as _os

# Install the lazy importer unless the user opted into the new-style
# explicit imports via the AIRFLOW_USE_NEW_IMPORTS env var.
if not _os.environ.get('AIRFLOW_USE_NEW_IMPORTS', False):
    from airflow.utils.helpers import AirflowImporter
    airflow_importer = AirflowImporter(sys.modules[__name__], _hooks)
def _integrate_plugins():
    """Integrate plugins to the context"""
    from airflow.plugins_manager import hooks_modules
    for hooks_module in hooks_modules:
        # Make the plugin module importable by its dotted name and expose it
        # as an attribute of this package.
        sys.modules[hooks_module.__name__] = hooks_module
        globals()[hooks_module._name] = hooks_module

        ##########################################################
        # TODO FIXME Remove in Airflow 2.0
        if not _os.environ.get('AIRFLOW_USE_NEW_IMPORTS', False):
            from zope.deprecation import deprecated as _deprecated
            # Also re-export each hook class at package level, wrapped with a
            # deprecation warning for the old direct-import style.
            for _hook in hooks_module._objects:
                hook_name = _hook.__name__
                globals()[hook_name] = _hook
                _deprecated(
                    hook_name,
                    "Importing plugin hook '{i}' directly from "
                    "'airflow.hooks' has been deprecated. Please "
                    "import from 'airflow.hooks.[plugin_module]' "
                    "instead. Support for direct imports will be dropped "
                    "entirely in Airflow 2.0.".format(i=hook_name))
| apache-2.0 |
KaranToor/MA450 | google-cloud-sdk/lib/third_party/apitools/gen/message_registry.py | 6 | 21525 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Message registry for apitools."""
import collections
import contextlib
import json
import six
from apitools.base.protorpclite import descriptor
from apitools.base.protorpclite import messages
from apitools.gen import extended_descriptor
from apitools.gen import util
# (type_name, variant) pair describing how a discovery-document type maps
# onto a protorpc field type.
TypeInfo = collections.namedtuple('TypeInfo', ('type_name', 'variant'))
class MessageRegistry(object):

    """Registry for message types.

    This closely mirrors a messages.FileDescriptor, but adds additional
    attributes (such as message and field descriptions) and some extra
    code for validation and cycle detection.
    """

    # Type information from these two maps comes from here:
    # https://developers.google.com/discovery/v1/type-format
    PRIMITIVE_TYPE_INFO_MAP = {
        'string': TypeInfo(type_name='string',
                           variant=messages.StringField.DEFAULT_VARIANT),
        'integer': TypeInfo(type_name='integer',
                            variant=messages.IntegerField.DEFAULT_VARIANT),
        'boolean': TypeInfo(type_name='boolean',
                            variant=messages.BooleanField.DEFAULT_VARIANT),
        'number': TypeInfo(type_name='number',
                           variant=messages.FloatField.DEFAULT_VARIANT),
        'any': TypeInfo(type_name='extra_types.JsonValue',
                        variant=messages.Variant.MESSAGE),
    }

    PRIMITIVE_FORMAT_MAP = {
        'int32': TypeInfo(type_name='integer',
                          variant=messages.Variant.INT32),
        'uint32': TypeInfo(type_name='integer',
                           variant=messages.Variant.UINT32),
        'int64': TypeInfo(type_name='string',
                          variant=messages.Variant.INT64),
        'uint64': TypeInfo(type_name='string',
                           variant=messages.Variant.UINT64),
        'double': TypeInfo(type_name='number',
                           variant=messages.Variant.DOUBLE),
        'float': TypeInfo(type_name='number',
                          variant=messages.Variant.FLOAT),
        'byte': TypeInfo(type_name='byte',
                         variant=messages.BytesField.DEFAULT_VARIANT),
        'date': TypeInfo(type_name='extra_types.DateField',
                         variant=messages.Variant.STRING),
        'date-time': TypeInfo(
            type_name=('apitools.base.protorpclite.message_types.'
                       'DateTimeMessage'),
            variant=messages.Variant.MESSAGE),
    }

    def __init__(self, client_info, names, description, root_package_dir,
                 base_files_package, protorpc_package):
        self.__names = names
        self.__client_info = client_info
        self.__package = client_info.package
        self.__description = util.CleanDescription(description)
        self.__root_package_dir = root_package_dir
        self.__base_files_package = base_files_package
        self.__protorpc_package = protorpc_package
        self.__file_descriptor = extended_descriptor.ExtendedFileDescriptor(
            package=self.__package, description=self.__description)
        # Add required imports
        self.__file_descriptor.additional_imports = [
            'from %s import messages as _messages' % self.__protorpc_package,
        ]
        # Map from scoped names (i.e. Foo.Bar) to MessageDescriptors.
        self.__message_registry = collections.OrderedDict()
        # A set of types that we're currently adding (for cycle detection).
        self.__nascent_types = set()
        # A set of types for which we've seen a reference but no
        # definition; if this set is nonempty, validation fails.
        self.__unknown_types = set()
        # Used for tracking paths during message creation
        self.__current_path = []
        # Where to register created messages
        self.__current_env = self.__file_descriptor

    # TODO(craigcitro): Add a `Finalize` method.
    @property
    def file_descriptor(self):
        """Return the file descriptor, validating the registry first."""
        self.Validate()
        return self.__file_descriptor

    def WriteProtoFile(self, printer):
        """Write the messages file to out as proto."""
        self.Validate()
        extended_descriptor.WriteMessagesFile(
            self.__file_descriptor, self.__package, self.__client_info.version,
            printer)

    def WriteFile(self, printer):
        """Write the messages file to out."""
        self.Validate()
        extended_descriptor.WritePythonFile(
            self.__file_descriptor, self.__package, self.__client_info.version,
            printer)

    def Validate(self):
        """Raise ValueError if any type is unfinished or unresolved."""
        mysteries = self.__nascent_types or self.__unknown_types
        if mysteries:
            raise ValueError('Malformed MessageRegistry: %s' % mysteries)

    def __ComputeFullName(self, name):
        # Scoped name of `name` relative to the message currently being built.
        return '.'.join(map(six.text_type, self.__current_path[:] + [name]))

    def __AddImport(self, new_import):
        # Record an import line for the generated file, deduplicated.
        if new_import not in self.__file_descriptor.additional_imports:
            self.__file_descriptor.additional_imports.append(new_import)

    def __DeclareDescriptor(self, name):
        # Mark `name` as in-progress for cycle detection.
        self.__nascent_types.add(self.__ComputeFullName(name))

    def __RegisterDescriptor(self, new_descriptor):
        """Register the given descriptor in this registry."""
        if not isinstance(new_descriptor, (
                extended_descriptor.ExtendedMessageDescriptor,
                extended_descriptor.ExtendedEnumDescriptor)):
            raise ValueError('Cannot add descriptor of type %s' % (
                type(new_descriptor),))
        full_name = self.__ComputeFullName(new_descriptor.name)
        if full_name in self.__message_registry:
            raise ValueError(
                'Attempt to re-register descriptor %s' % full_name)
        if full_name not in self.__nascent_types:
            raise ValueError('Directly adding types is not supported')
        new_descriptor.full_name = full_name
        self.__message_registry[full_name] = new_descriptor
        if isinstance(new_descriptor,
                      extended_descriptor.ExtendedMessageDescriptor):
            self.__current_env.message_types.append(new_descriptor)
        elif isinstance(new_descriptor,
                        extended_descriptor.ExtendedEnumDescriptor):
            self.__current_env.enum_types.append(new_descriptor)
        self.__unknown_types.discard(full_name)
        self.__nascent_types.remove(full_name)

    def LookupDescriptor(self, name):
        """Return the descriptor registered under `name`, or None."""
        return self.__GetDescriptorByName(name)

    def LookupDescriptorOrDie(self, name):
        """Return the descriptor for `name`, raising ValueError if absent."""
        message_descriptor = self.LookupDescriptor(name)
        if message_descriptor is None:
            # Bug fix: the original passed `name` as a second positional
            # argument to ValueError instead of interpolating it, so the
            # "%s" placeholder was never filled in.
            raise ValueError('No message descriptor named "%s"' % name)
        return message_descriptor

    def __GetDescriptor(self, name):
        return self.__GetDescriptorByName(self.__ComputeFullName(name))

    def __GetDescriptorByName(self, name):
        if name in self.__message_registry:
            return self.__message_registry[name]
        if name in self.__nascent_types:
            raise ValueError(
                'Cannot retrieve type currently being created: %s' % name)
        return None

    @contextlib.contextmanager
    def __DescriptorEnv(self, message_descriptor):
        """Push `message_descriptor` as the current registration target."""
        # TODO(craigcitro): Typecheck?
        previous_env = self.__current_env
        self.__current_path.append(message_descriptor.name)
        self.__current_env = message_descriptor
        yield
        self.__current_path.pop()
        self.__current_env = previous_env

    def AddEnumDescriptor(self, name, description,
                          enum_values, enum_descriptions):
        """Add a new EnumDescriptor named name with the given enum values."""
        message = extended_descriptor.ExtendedEnumDescriptor()
        message.name = self.__names.ClassName(name)
        message.description = util.CleanDescription(description)
        self.__DeclareDescriptor(message.name)
        for index, (enum_name, enum_description) in enumerate(
                zip(enum_values, enum_descriptions)):
            enum_value = extended_descriptor.ExtendedEnumValueDescriptor()
            enum_value.name = self.__names.NormalizeEnumName(enum_name)
            if enum_value.name != enum_name:
                # Remember the original JSON name so encoding can round-trip.
                message.enum_mappings.append(
                    extended_descriptor.ExtendedEnumDescriptor.JsonEnumMapping(
                        python_name=enum_value.name, json_name=enum_name))
                self.__AddImport('from %s import encoding' %
                                 self.__base_files_package)
            enum_value.number = index
            enum_value.description = util.CleanDescription(
                enum_description or '<no description>')
            message.values.append(enum_value)
        self.__RegisterDescriptor(message)

    def __DeclareMessageAlias(self, schema, alias_for):
        """Declare schema as an alias for alias_for."""
        # TODO(craigcitro): This is a hack. Remove it.
        message = extended_descriptor.ExtendedMessageDescriptor()
        message.name = self.__names.ClassName(schema['id'])
        message.alias_for = alias_for
        self.__DeclareDescriptor(message.name)
        self.__AddImport('from %s import extra_types' %
                         self.__base_files_package)
        self.__RegisterDescriptor(message)

    def __AddAdditionalProperties(self, message, schema, properties):
        """Add an additionalProperties field to message."""
        additional_properties_info = schema['additionalProperties']
        entries_type_name = self.__AddAdditionalPropertyType(
            message.name, additional_properties_info)
        description = util.CleanDescription(
            additional_properties_info.get('description'))
        if description is None:
            description = 'Additional properties of type %s' % message.name
        attrs = {
            'items': {
                '$ref': entries_type_name,
            },
            'description': description,
            'type': 'array',
        }
        field_name = 'additionalProperties'
        message.fields.append(self.__FieldDescriptorFromProperties(
            field_name, len(properties) + 1, attrs))
        self.__AddImport('from %s import encoding' % self.__base_files_package)
        message.decorators.append(
            'encoding.MapUnrecognizedFields(%r)' % field_name)

    def AddDescriptorFromSchema(self, schema_name, schema):
        """Add a new MessageDescriptor named schema_name based on schema."""
        # TODO(craigcitro): Is schema_name redundant?
        if self.__GetDescriptor(schema_name):
            return
        if schema.get('enum'):
            self.__DeclareEnum(schema_name, schema)
            return
        if schema.get('type') == 'any':
            self.__DeclareMessageAlias(schema, 'extra_types.JsonValue')
            return
        if schema.get('type') != 'object':
            raise ValueError('Cannot create message descriptors for type %s',
                             schema.get('type'))
        message = extended_descriptor.ExtendedMessageDescriptor()
        message.name = self.__names.ClassName(schema['id'])
        message.description = util.CleanDescription(schema.get(
            'description', 'A %s object.' % message.name))
        self.__DeclareDescriptor(message.name)
        with self.__DescriptorEnv(message):
            properties = schema.get('properties', {})
            for index, (name, attrs) in enumerate(sorted(properties.items())):
                field = self.__FieldDescriptorFromProperties(
                    name, index + 1, attrs)
                message.fields.append(field)
                if field.name != name:
                    # Remember the original JSON name for encoding.
                    message.field_mappings.append(
                        type(message).JsonFieldMapping(
                            python_name=field.name, json_name=name))
                    self.__AddImport(
                        'from %s import encoding' % self.__base_files_package)
            if 'additionalProperties' in schema:
                self.__AddAdditionalProperties(message, schema, properties)
        self.__RegisterDescriptor(message)

    def __AddAdditionalPropertyType(self, name, property_schema):
        """Add a new nested AdditionalProperty message."""
        new_type_name = 'AdditionalProperty'
        property_schema = dict(property_schema)
        # We drop the description here on purpose, so the resulting
        # messages are less repetitive.
        property_schema.pop('description', None)
        description = 'An additional property for a %s object.' % name
        schema = {
            'id': new_type_name,
            'type': 'object',
            'description': description,
            'properties': {
                'key': {
                    'type': 'string',
                    'description': 'Name of the additional property.',
                },
                'value': property_schema,
            },
        }
        self.AddDescriptorFromSchema(new_type_name, schema)
        return new_type_name

    def __AddEntryType(self, entry_type_name, entry_schema, parent_name):
        """Add a type for a list entry."""
        entry_schema.pop('description', None)
        description = 'Single entry in a %s.' % parent_name
        schema = {
            'id': entry_type_name,
            'type': 'object',
            'description': description,
            'properties': {
                'entry': {
                    'type': 'array',
                    'items': entry_schema,
                },
            },
        }
        self.AddDescriptorFromSchema(entry_type_name, schema)
        return entry_type_name

    def __FieldDescriptorFromProperties(self, name, index, attrs):
        """Create a field descriptor for these attrs."""
        field = descriptor.FieldDescriptor()
        field.name = self.__names.CleanName(name)
        field.number = index
        field.label = self.__ComputeLabel(attrs)
        new_type_name_hint = self.__names.ClassName(
            '%sValue' % self.__names.ClassName(name))
        type_info = self.__GetTypeInfo(attrs, new_type_name_hint)
        field.type_name = type_info.type_name
        field.variant = type_info.variant
        if 'default' in attrs:
            # TODO(craigcitro): Correctly handle non-primitive default values.
            default = attrs['default']
            if not (field.type_name == 'string' or
                    field.variant == messages.Variant.ENUM):
                default = str(json.loads(default))
            if field.variant == messages.Variant.ENUM:
                default = self.__names.NormalizeEnumName(default)
            field.default_value = default
        extended_field = extended_descriptor.ExtendedFieldDescriptor()
        extended_field.name = field.name
        extended_field.description = util.CleanDescription(
            attrs.get('description', 'A %s attribute.' % field.type_name))
        extended_field.field_descriptor = field
        return extended_field

    @staticmethod
    def __ComputeLabel(attrs):
        # Map discovery-doc attributes to a protorpc field label.
        if attrs.get('required', False):
            return descriptor.FieldDescriptor.Label.REQUIRED
        elif attrs.get('type') == 'array':
            return descriptor.FieldDescriptor.Label.REPEATED
        elif attrs.get('repeated'):
            return descriptor.FieldDescriptor.Label.REPEATED
        return descriptor.FieldDescriptor.Label.OPTIONAL

    def __DeclareEnum(self, enum_name, attrs):
        description = util.CleanDescription(attrs.get('description', ''))
        enum_values = attrs['enum']
        enum_descriptions = attrs.get(
            'enumDescriptions', [''] * len(enum_values))
        self.AddEnumDescriptor(enum_name, description,
                               enum_values, enum_descriptions)
        self.__AddIfUnknown(enum_name)
        return TypeInfo(type_name=enum_name, variant=messages.Variant.ENUM)

    def __AddIfUnknown(self, type_name):
        # Track a referenced type until its definition shows up.
        type_name = self.__names.ClassName(type_name)
        full_type_name = self.__ComputeFullName(type_name)
        if (full_type_name not in self.__message_registry.keys() and
                type_name not in self.__message_registry.keys()):
            self.__unknown_types.add(type_name)

    def __GetTypeInfo(self, attrs, name_hint):
        """Return a TypeInfo object for attrs, creating one if needed."""
        type_ref = self.__names.ClassName(attrs.get('$ref'))
        type_name = attrs.get('type')
        if not (type_ref or type_name):
            raise ValueError('No type found for %s' % attrs)
        if type_ref:
            self.__AddIfUnknown(type_ref)
            # We don't actually know this is a message -- it might be an
            # enum. However, we can't check that until we've created all the
            # types, so we come back and fix this up later.
            return TypeInfo(
                type_name=type_ref, variant=messages.Variant.MESSAGE)
        if 'enum' in attrs:
            enum_name = '%sValuesEnum' % name_hint
            return self.__DeclareEnum(enum_name, attrs)
        if 'format' in attrs:
            type_info = self.PRIMITIVE_FORMAT_MAP.get(attrs['format'])
            if type_info is None:
                # If we don't recognize the format, the spec says we fall back
                # to just using the type name.
                if type_name in self.PRIMITIVE_TYPE_INFO_MAP:
                    return self.PRIMITIVE_TYPE_INFO_MAP[type_name]
                raise ValueError('Unknown type/format "%s"/"%s"' % (
                    attrs['format'], type_name))
            if type_info.type_name.startswith((
                    'apitools.base.protorpclite.message_types.',
                    'message_types.')):
                self.__AddImport(
                    'from %s import message_types as _message_types' %
                    self.__protorpc_package)
            if type_info.type_name.startswith('extra_types.'):
                self.__AddImport(
                    'from %s import extra_types' % self.__base_files_package)
            return type_info
        if type_name in self.PRIMITIVE_TYPE_INFO_MAP:
            type_info = self.PRIMITIVE_TYPE_INFO_MAP[type_name]
            if type_info.type_name.startswith('extra_types.'):
                self.__AddImport(
                    'from %s import extra_types' % self.__base_files_package)
            return type_info
        if type_name == 'array':
            items = attrs.get('items')
            if not items:
                raise ValueError('Array type with no item type: %s' % attrs)
            entry_name_hint = self.__names.ClassName(
                items.get('title') or '%sListEntry' % name_hint)
            entry_label = self.__ComputeLabel(items)
            if entry_label == descriptor.FieldDescriptor.Label.REPEATED:
                # Array-of-array: wrap the inner array in a synthetic
                # single-field message, since proto has no nested repeats.
                parent_name = self.__names.ClassName(
                    items.get('title') or name_hint)
                entry_type_name = self.__AddEntryType(
                    entry_name_hint, items.get('items'), parent_name)
                return TypeInfo(type_name=entry_type_name,
                                variant=messages.Variant.MESSAGE)
            else:
                return self.__GetTypeInfo(items, entry_name_hint)
        elif type_name == 'any':
            self.__AddImport('from %s import extra_types' %
                             self.__base_files_package)
            return self.PRIMITIVE_TYPE_INFO_MAP['any']
        elif type_name == 'object':
            # TODO(craigcitro): Think of a better way to come up with names.
            if not name_hint:
                raise ValueError(
                    'Cannot create subtype without some name hint')
            schema = dict(attrs)
            schema['id'] = name_hint
            self.AddDescriptorFromSchema(name_hint, schema)
            self.__AddIfUnknown(name_hint)
            return TypeInfo(
                type_name=name_hint, variant=messages.Variant.MESSAGE)
        raise ValueError('Unknown type: %s' % type_name)

    def FixupMessageFields(self):
        """Correct MESSAGE-variant fields that actually reference enums."""
        for message_type in self.file_descriptor.message_types:
            self._FixupMessage(message_type)

    def _FixupMessage(self, message_type):
        with self.__DescriptorEnv(message_type):
            for field in message_type.fields:
                if field.field_descriptor.variant == messages.Variant.MESSAGE:
                    field_type_name = field.field_descriptor.type_name
                    field_type = self.LookupDescriptor(field_type_name)
                    if isinstance(field_type,
                                  extended_descriptor.ExtendedEnumDescriptor):
                        field.field_descriptor.variant = messages.Variant.ENUM
            for submessage_type in message_type.message_types:
                self._FixupMessage(submessage_type)
| apache-2.0 |
gonboy/sl4a | python/src/Lib/dummy_threading.py | 321 | 2804 | """Faux ``threading`` version using ``dummy_thread`` instead of ``thread``.
The module ``_dummy_threading`` is added to ``sys.modules`` in order
to not have ``threading`` considered imported. Had ``threading`` been
directly imported it would have made all subsequent imports succeed
regardless of whether ``thread`` was available which is not desired.
"""
from sys import modules as sys_modules
import dummy_thread
# Declaring now so as to not have to nest ``try``s to get proper clean-up.
holding_thread = False
holding_threading = False
holding__threading_local = False
try:
    # Could have checked if ``thread`` was not in sys.modules and gone
    # a different route, but decided to mirror technique used with
    # ``threading`` below.
    if 'thread' in sys_modules:
        held_thread = sys_modules['thread']
        holding_thread = True
    # Must have some module named ``thread`` that implements its API
    # in order to initially import ``threading``.
    sys_modules['thread'] = sys_modules['dummy_thread']
    if 'threading' in sys_modules:
        # If ``threading`` is already imported, might as well prevent
        # trying to import it more than needed by saving it if it is
        # already imported before deleting it.
        held_threading = sys_modules['threading']
        holding_threading = True
        del sys_modules['threading']
    if '_threading_local' in sys_modules:
        # If ``_threading_local`` is already imported, might as well prevent
        # trying to import it more than needed by saving it if it is
        # already imported before deleting it.
        held__threading_local = sys_modules['_threading_local']
        holding__threading_local = True
        del sys_modules['_threading_local']
    # This import runs with ``dummy_thread`` masquerading as ``thread``,
    # so the resulting module is backed by the dummy implementation.
    import threading
    # Need a copy of the code kept somewhere...
    sys_modules['_dummy_threading'] = sys_modules['threading']
    del sys_modules['threading']
    sys_modules['_dummy__threading_local'] = sys_modules['_threading_local']
    del sys_modules['_threading_local']
    # Re-export the dummy-backed module's public API from this module.
    from _dummy_threading import *
    from _dummy_threading import __all__
finally:
    # Put back ``threading`` if we overwrote earlier
    if holding_threading:
        sys_modules['threading'] = held_threading
        del held_threading
        del holding_threading
    # Put back ``_threading_local`` if we overwrote earlier
    if holding__threading_local:
        sys_modules['_threading_local'] = held__threading_local
        del held__threading_local
        del holding__threading_local
    # Put back ``thread`` if we overwrote, else del the entry we made
    if holding_thread:
        sys_modules['thread'] = held_thread
        del held_thread
    else:
        del sys_modules['thread']
    del holding_thread
    del dummy_thread
    del sys_modules
| apache-2.0 |
npdoty/pywikibot | tests/api_tests.py | 5 | 41039 | # -*- coding: utf-8 -*-
"""API test module."""
#
# (C) Pywikibot team, 2007-2016
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import datetime
import types
import pywikibot.data.api as api
import pywikibot.family
import pywikibot.login
import pywikibot.page
import pywikibot.site
from pywikibot.throttle import Throttle
from pywikibot.tools import (
MediaWikiVersion,
PY2,
UnicodeType,
)
from tests.aspects import (
unittest,
TestCase,
DefaultSiteTestCase,
DefaultDrySiteTestCase,
)
from tests.utils import allowed_failure, FakeLoginManager, PatchedHttp
if not PY2:
from urllib.parse import unquote_to_bytes
else:
from urllib import unquote_plus as unquote_to_bytes
class TestAPIMWException(DefaultSiteTestCase):
    """Test raising an APIMWException."""
    # Fake API error payload returned by the patched http layer; submitting
    # a request that receives this must raise api.APIMWException.
    data = {'error': {'code': 'internal_api_error_fake',
                      'info': 'Fake error message'},
            'servedby': 'unittest',
            }
    def _dummy_request(self, **kwargs):
        """Replacement http request handler.

        Decode the request parameters from the uri or body, and return
        False (meaning "perform the real request") unless the request
        carries the 'fake' parameter. For fake requests, verify that
        every entry of self.assert_parameters is present, then return
        self.data so the caller raises APIMWException.
        """
        self.assertIn('body', kwargs)
        self.assertIn('uri', kwargs)
        self.assertIn('site', kwargs)
        if kwargs['body'] is None:
            # use uri and remove script path
            parameters = kwargs['uri']
            prefix = kwargs['site'].scriptpath() + '/api.php?'
            self.assertEqual(prefix, parameters[:len(prefix)])
            parameters = parameters[len(prefix):]
        else:
            parameters = kwargs['body']
        parameters = parameters.encode('ascii')  # it should be bytes anyway
        # Extract parameter data from the body, it's ugly but allows us
        # to verify that we actually test the right request
        parameters = [p.split(b'=', 1) for p in parameters.split(b'&')]
        keys = [p[0].decode('ascii') for p in parameters]
        values = [unquote_to_bytes(p[1]) for p in parameters]
        values = [v.decode(kwargs['site'].encoding()) for v in values]
        # '+' encodes a space in application/x-www-form-urlencoded data.
        values = [v.replace('+', ' ') for v in values]
        # Multi-valued parameters use '|' as separator; compare as sets.
        values = [set(v.split('|')) for v in values]
        parameters = dict(zip(keys, values))
        if 'fake' not in parameters:
            return False  # do an actual request
        if self.assert_parameters:
            for param, value in self.assert_parameters.items():
                self.assertIn(param, parameters)
                if value is not None:
                    if isinstance(value, UnicodeType):
                        value = value.split('|')
                    self.assertLessEqual(set(value), parameters[param])
        return self.data
    def test_API_error(self):
        """Test a static request."""
        req = api.Request(site=self.site, parameters={'action': 'query',
                                                      'fake': True})
        with PatchedHttp(api, self.data):
            self.assertRaises(api.APIMWException, req.submit)
    def test_API_error_encoding_ASCII(self):
        """Test a Page instance as parameter using ASCII chars."""
        page = pywikibot.page.Page(self.site, 'ASCII')
        req = api.Request(site=self.site, parameters={'action': 'query',
                                                      'fake': True,
                                                      'titles': page})
        self.assert_parameters = {'fake': ''}
        with PatchedHttp(api, self._dummy_request):
            self.assertRaises(api.APIMWException, req.submit)
    def test_API_error_encoding_Unicode(self):
        """Test a Page instance as parameter using non-ASCII chars."""
        page = pywikibot.page.Page(self.site, 'Ümlä üt')
        req = api.Request(site=self.site, parameters={'action': 'query',
                                                      'fake': True,
                                                      'titles': page})
        self.assert_parameters = {'fake': ''}
        with PatchedHttp(api, self._dummy_request):
            self.assertRaises(api.APIMWException, req.submit)
class TestApiFunctions(DefaultSiteTestCase):

    """API Request object test class."""

    def testObjectCreation(self):
        """Test api.Request() constructor with implicit site creation."""
        # Omitting site= makes Request fall back to the default site.
        request = api.Request(parameters={'action': 'test', 'foo': '',
                                          'bar': 'test'})
        self.assertTrue(request)
        self.assertEqual(request.site, self.get_site())
class TestDryApiFunctions(DefaultDrySiteTestCase):

    """API Request object test class."""

    def testObjectCreation(self):
        """Test api.Request() constructor."""
        site = self.get_site()
        req = api.Request(site=site, parameters={'action': 'test', 'foo': '',
                                                 'bar': 'test'})
        self.assertTrue(req)
        self.assertEqual(req.site, site)
        self.assertIn('foo', req._params)
        self.assertEqual(req['bar'], ['test'])
        # Item assignment stores values as lists.
        req['one'] = '1'
        self.assertEqual(req._params['one'], ['1'])
        # Dict interface compliance: keys() holds "action", "foo",
        # "bar" and "one"; items() yields (key, value) pairs.
        self.assertEqual(len(req.keys()), 4)
        self.assertIn('test', req._encoded_items().values())
        for pair in req.items():
            self.assertEqual(len(pair), 2, pair)

    def test_mixed_mode(self):
        """Test if parameters is used with kwargs."""
        kwargs_req = api.Request(site=self.site, action='test',
                                 parameters='foo')
        self.assertIn('parameters', kwargs_req._params)
        dict_req = api.Request(site=self.site, parameters={'action': 'test',
                                                           'parameters': 'foo'})
        self.assertEqual(dict_req['parameters'], ['foo'])
        # Both construction styles must produce the same parameter set.
        self.assertEqual(kwargs_req._params, dict_req._params)
class TestParamInfo(DefaultSiteTestCase):
    """Test ParamInfo."""
    def test_init(self):
        """Test common initialization."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        # ParamInfo is lazy: nothing is fetched before _init().
        self.assertEqual(len(pi), 0)
        pi._init()
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
            self.assertEqual(len(pi),
                             len(pi.preloaded_modules))
        self.assertIn('info', pi.query_modules)
        self.assertIn('login', pi._action_modules)
    def test_init_query_first(self):
        """Test init where it first adds query and then main."""
        def patched_generate_submodules(modules):
            # Change the query such that query is handled before main
            modules = set(modules)
            if 'main' in modules:
                assert 'query' in modules
                modules.discard('main')
                modules = list(modules) + ['main']
            else:
                assert 'query' not in modules
            original_generate_submodules(modules)
        pi = api.ParamInfo(self.site, set(['query', 'main']))
        self.assertEqual(len(pi), 0)
        # The closure above resolves original_generate_submodules lazily,
        # so assigning it here (after the def) is safe.
        original_generate_submodules = pi._generate_submodules
        pi._generate_submodules = patched_generate_submodules
        pi._init()
        self.assertIn('main', pi._paraminfo)
        self.assertIn('query', pi._paraminfo)
    def test_init_pageset(self):
        """Test initializing with only the pageset."""
        site = self.get_site()
        self.assertNotIn('query', api.ParamInfo.init_modules)
        pi = api.ParamInfo(site, set(['pageset']))
        # Constructing with extra modules must not mutate the class-level
        # init_modules set.
        self.assertNotIn('query', api.ParamInfo.init_modules)
        self.assertEqual(len(pi), 0)
        pi._init()
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        self.assertIn('pageset', pi._paraminfo)
        if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
            return
        if 'query' in pi.preloaded_modules:
            self.assertIn('query', pi._paraminfo)
            self.assertEqual(len(pi), 4)
        else:
            self.assertNotIn('query', pi._paraminfo)
            self.assertEqual(len(pi), 3)
        self.assertEqual(len(pi),
                         len(pi.preloaded_modules))
        if MediaWikiVersion(site.version()) >= MediaWikiVersion("1.21"):
            # 'generator' was added to 'pageset' in 1.21
            generators_param = pi.parameter('pageset', 'generator')
            self.assertGreater(len(generators_param['type']), 1)
    def test_generators(self):
        """Test requesting the generator parameter."""
        site = self.get_site()
        pi = api.ParamInfo(site, set(['pageset', 'query']))
        self.assertEqual(len(pi), 0)
        pi._init()
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        self.assertIn('pageset', pi._paraminfo)
        self.assertIn('query', pi._paraminfo)
        if MediaWikiVersion(site.version()) >= MediaWikiVersion("1.21"):
            # 'generator' was added to 'pageset' in 1.21
            pageset_generators_param = pi.parameter('pageset', 'generator')
            query_generators_param = pi.parameter('query', 'generator')
            self.assertEqual(pageset_generators_param, query_generators_param)
    def test_with_module_info(self):
        """Test requesting the module info."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        self.assertEqual(len(pi), 0)
        pi.fetch(['info'])
        self.assertIn('query+info', pi._paraminfo)
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
            self.assertEqual(len(pi),
                             1 + len(pi.preloaded_modules))
        self.assertEqual(pi['info']['prefix'], 'in')
        param = pi.parameter('info', 'prop')
        self.assertIsInstance(param, dict)
        self.assertEqual(param['name'], 'prop')
        self.assertNotIn('deprecated', param)
        self.assertIsInstance(param['type'], list)
        if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
            return
        self.assertIn('protection', param['type'])
    def test_with_module_revisions(self):
        """Test requesting the module revisions."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        self.assertEqual(len(pi), 0)
        pi.fetch(['revisions'])
        self.assertIn('query+revisions', pi._paraminfo)
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
            self.assertEqual(len(pi),
                             1 + len(pi.preloaded_modules))
        self.assertEqual(pi['revisions']['prefix'], 'rv')
        param = pi.parameter('revisions', 'prop')
        self.assertIsInstance(param, dict)
        self.assertEqual(param['name'], 'prop')
        self.assertNotIn('deprecated', param)
        self.assertIsInstance(param['type'], list)
        if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
            return
        self.assertIn('user', param['type'])
    def test_multiple_modules(self):
        """Test requesting multiple modules in one fetch."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        self.assertEqual(len(pi), 0)
        pi.fetch(['info', 'revisions'])
        self.assertIn('query+info', pi._paraminfo)
        self.assertIn('query+revisions', pi._paraminfo)
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
            return
        self.assertEqual(len(pi),
                         2 + len(pi.preloaded_modules))
    def test_with_invalid_module(self):
        """Test requesting different kind of invalid modules."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        self.assertEqual(len(pi), 0)
        # Fetching an unknown module must not add it, and lookup of it
        # (with or without a path prefix) must raise KeyError.
        pi.fetch('foobar')
        self.assertNotIn('foobar', pi._paraminfo)
        self.assertRaises(KeyError, pi.__getitem__, 'foobar')
        self.assertRaises(KeyError, pi.__getitem__, 'foobar+foobar')
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        if MediaWikiVersion(self.site.version()) < MediaWikiVersion("1.12"):
            return
        self.assertEqual(len(pi),
                         len(pi.preloaded_modules))
    def test_submodules(self):
        """Test another module apart from query having submodules."""
        pi = api.ParamInfo(self.site)
        self.assertFalse(pi._modules)
        pi.fetch(['query'])
        self.assertIn('query', pi._modules)
        self.assertIsInstance(pi._modules['query'], frozenset)
        self.assertIn('revisions', pi._modules['query'])
        self.assertEqual(pi.submodules('query'), pi.query_modules)
        for mod in pi.submodules('query', True):
            # Prefixed submodule names have the form 'query+<name>' and
            # match the module's own 'name'/'path' metadata.
            self.assertEqual(mod[:6], 'query+')
            self.assertEqual(mod[6:], pi[mod]['name'])
            self.assertEqual(mod, pi[mod]['path'])
        self.assertRaises(KeyError, pi.__getitem__, 'query+foobar')
        self.assertRaises(KeyError, pi.submodules, 'edit')
    def test_query_modules_with_limits(self):
        """Test query_modules_with_limits property."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        self.assertIn('revisions', pi.query_modules_with_limits)
        self.assertNotIn('info', pi.query_modules_with_limits)
    def test_modules(self):
        """Test v1.8 modules exist."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        self.assertIn('revisions', pi.modules)
        self.assertIn('help', pi.modules)
        self.assertIn('allpages', pi.modules)
        for mod in pi.modules:
            # modules contains plain names, never '+'-joined paths.
            self.assertNotIn('+', mod)
    def test_module_paths(self):
        """Test module paths use the complete paths."""
        pi = api.ParamInfo(self.site)
        self.assertIn('help', pi.module_paths)
        self.assertNotIn('revisions', pi.module_paths)
        self.assertIn('query+revisions', pi.module_paths)
        self.assertNotIn('allpages', pi.module_paths)
        self.assertIn('query+allpages', pi.module_paths)
    def test_prefixes(self):
        """Test v1.8 module prefixes exist."""
        site = self.get_site()
        pi = api.ParamInfo(site)
        self.assertIn('revisions', pi.prefixes)
        self.assertIn('login', pi.prefixes)
        self.assertIn('allpages', pi.prefixes)
    def test_prefix_map(self):
        """Test module prefixes use the path."""
        pi = api.ParamInfo(self.site)
        self.assertIn('query+revisions', pi.prefix_map)
        self.assertIn('login', pi.prefix_map)
        self.assertIn('query+allpages', pi.prefix_map)
        for mod in pi.prefix_map:
            self.assertEqual(mod, pi[mod]['path'])
    def test_attributes(self):
        """Test attributes method."""
        pi = api.ParamInfo(self.site)
        attributes = pi.attributes('mustbeposted')
        self.assertIn('edit', attributes)
        for mod, value in attributes.items():
            self.assertEqual(mod, pi[mod]['path'])
            self.assertEqual(value, '')
    def test_old_mode(self):
        """Test the old mode explicitly."""
        site = self.get_site()
        pi = api.ParamInfo(site, modules_only_mode=False)
        pi.fetch(['info'])
        self.assertIn('query+info', pi._paraminfo)
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        if MediaWikiVersion(self.site.version()) >= MediaWikiVersion("1.12"):
            self.assertEqual(len(pi),
                             1 + len(pi.preloaded_modules))
        self.assertIn('revisions', pi.prefixes)
    def test_new_mode(self):
        """Test the new modules-only mode explicitly."""
        site = self.get_site()
        if MediaWikiVersion(site.version()) < MediaWikiVersion('1.25wmf4'):
            raise unittest.SkipTest(
                "version %s doesn't support the new paraminfo api"
                % site.version())
        pi = api.ParamInfo(site, modules_only_mode=True)
        pi.fetch(['info'])
        self.assertIn('query+info', pi._paraminfo)
        self.assertIn('main', pi._paraminfo)
        self.assertIn('paraminfo', pi._paraminfo)
        self.assertEqual(len(pi),
                         1 + len(pi.preloaded_modules))
        self.assertIn('revisions', pi.prefixes)
class TestOtherSubmodule(TestCase):
    """Test handling multiple different modules having submodules."""
    family = 'mediawiki'
    code = 'mediawiki'
    def test_other_submodule(self):
        """Test another module apart from query having submodules."""
        pi = api.ParamInfo(self.site)
        self.assertFalse(pi._modules)
        pi.fetch(['query'])
        self.assertNotIn('flow', pi._modules)
        pi.fetch(['flow'])
        self.assertIn('flow', pi._modules)
        # Collect every known submodule, then remove the action and query
        # submodules, leaving only those belonging to other modules.
        other_modules = set()
        for modules in pi._modules.values():
            self.assertIsInstance(modules, frozenset)
            other_modules |= modules
        other_modules -= pi.action_modules
        other_modules -= pi.query_modules
        # NOTE(review): (X & S) <= S holds for any sets, so this assertion
        # can never fail. The intent was probably
        # assertLessEqual(other_modules, pi.submodules('flow')) — confirm
        # before changing, as that strengthens the test.
        self.assertLessEqual(other_modules & pi.submodules('flow'),
                             pi.submodules('flow'))
        self.assertFalse(other_modules & pi.modules)
class TestParaminfoModules(DefaultSiteTestCase):

    """Test loading all paraminfo modules."""

    def test_action_modules(self):
        """Test loading all action modules."""
        paraminfo = self.site._paraminfo
        paraminfo.fetch(paraminfo.action_modules)

    def test_query_modules(self):
        """Test loading all query modules."""
        paraminfo = self.site._paraminfo
        paraminfo.fetch(paraminfo.query_modules)
class TestOptionSet(TestCase):

    """OptionSet class test class."""

    family = 'wikipedia'
    code = 'en'

    def test_non_lazy_load(self):
        """Test OptionSet with initialised site."""
        opts = api.OptionSet(self.get_site(), 'recentchanges', 'show')
        # Unknown option names and non-boolean values are rejected.
        self.assertRaises(KeyError, opts.__setitem__, 'invalid_name', True)
        self.assertRaises(ValueError, opts.__setitem__, 'anon', 'invalid_value')
        opts['anon'] = True
        self.assertCountEqual(['anon'], opts._enabled)
        self.assertEqual(opts._disabled, set())
        self.assertEqual(len(opts), 1)
        self.assertEqual(list(opts), ['anon'])
        self.assertEqual(list(opts.api_iter()), ['anon'])
        opts['bot'] = False
        self.assertCountEqual(['anon'], opts._enabled)
        self.assertCountEqual(['bot'], opts._disabled)
        self.assertEqual(len(opts), 2)
        self.assertEqual(list(opts), ['anon', 'bot'])
        # Disabled options are emitted with a '!' prefix for the API.
        self.assertEqual(list(opts.api_iter()), ['anon', '!bot'])
        opts.clear()
        self.assertEqual(opts._enabled, set())
        self.assertEqual(opts._disabled, set())
        self.assertEqual(len(opts), 0)
        self.assertEqual(list(opts), [])
        self.assertEqual(list(opts.api_iter()), [])

    def test_lazy_load(self):
        """Test OptionSet with delayed site initialisation."""
        opts = api.OptionSet()
        opts['invalid_name'] = True
        opts['anon'] = True
        self.assertIn('invalid_name', opts._enabled)
        self.assertEqual(len(opts), 2)
        # Binding to a site rejects the unknown name unless told to drop it.
        self.assertRaises(KeyError, opts._set_site, self.get_site(),
                          'recentchanges', 'show')
        self.assertEqual(len(opts), 2)
        opts._set_site(self.get_site(), 'recentchanges', 'show', True)
        self.assertEqual(len(opts), 1)
        # A second binding attempt fails.
        self.assertRaises(TypeError, opts._set_site, self.get_site(),
                          'recentchanges', 'show')
class TestDryOptionSet(DefaultDrySiteTestCase):

    """OptionSet class test class."""

    def test_mutable_mapping(self):
        """Test keys, values and items from MutableMapping."""
        opts = api.OptionSet()
        opts['a'] = True
        opts['b'] = False
        opts['c'] = None
        # Only names set to True/False appear; 'c' (None) is excluded,
        # as the assertions below demonstrate.
        self.assertCountEqual(list(opts.keys()), ['a', 'b'])
        self.assertCountEqual(list(opts.values()), [True, False])
        self.assertEqual(set(opts.values()) - set([True, False]), set())
        self.assertCountEqual(list(opts.items()),
                              [('a', True), ('b', False)])
class TestDryPageGenerator(TestCase):
    """Dry API PageGenerator object test class."""
    family = 'wikipedia'
    code = 'en'
    dry = True
    # api.py sorts 'pages' using the string key, which is not a
    # numeric comparison.
    titles = ("Broadcaster (definition)", "Wiktionary", "Broadcaster.com",
              "Wikipedia:Disambiguation")
    def setUp(self):
        """Set up test case."""
        super(TestDryPageGenerator, self).setUp()
        mysite = self.get_site()
        self.gen = api.PageGenerator(site=mysite,
                                     generator="links",
                                     titles="User:R'n'B")
        # following test data is copied from an actual api.php response,
        # but that query no longer matches this dataset.
        # http://en.wikipedia.org/w/api.php?action=query&generator=links&titles=User:R%27n%27B
        # Replace the request's submit with a canned response so no
        # network access happens on this dry site.
        self.gen.request.submit = types.MethodType(lambda self: {
            "query": {"pages": {"296589": {"pageid": 296589,
                                           "ns": 0,
                                           "title": "Broadcaster.com"
                                           },
                                "13918157": {"pageid": 13918157,
                                             "ns": 0,
                                             "title": "Broadcaster (definition)"
                                             },
                                "156658": {"pageid": 156658,
                                           "ns": 0,
                                           "title": "Wiktionary"
                                           },
                                "47757": {"pageid": 47757,
                                          "ns": 4,
                                          "title": "Wikipedia:Disambiguation"
                                          }
                                }
                      }
        }, self.gen.request)
        # On a dry site, the namespace objects only have canonical names.
        # Add custom_name for this site namespace, to match the live site.
        if 'Wikipedia' not in self.site.namespaces:
            self.site.namespaces[4].custom_name = 'Wikipedia'
            self.site.namespaces._namespace_names['wikipedia'] = self.site.namespaces[4]
    def test_results(self):
        """Test that PageGenerator yields pages with expected attributes."""
        self.assertPagelistTitles(self.gen, self.titles)
    def test_initial_limit(self):
        """Test the default limit."""
        self.assertEqual(self.gen.limit, None)  # limit is initially None
    def test_set_limit_as_number(self):
        """Test setting the limit using an int."""
        for i in range(-2, 4):
            self.gen.set_maximum_items(i)
            self.assertEqual(self.gen.limit, i)
    def test_set_limit_as_string(self):
        """Test setting the limit using an int cast into a string."""
        for i in range(-2, 4):
            self.gen.set_maximum_items(str(i))
            self.assertEqual(self.gen.limit, i)
    def test_set_limit_not_number(self):
        """Test setting the limit to not a number."""
        with self.assertRaisesRegex(
                ValueError,
                r"invalid literal for int\(\) with base 10: 'test'"):
            self.gen.set_maximum_items('test')
    def test_limit_equal_total(self):
        """Test that PageGenerator yields the requested amount of pages."""
        self.gen.set_maximum_items(4)
        self.assertPagelistTitles(self.gen, self.titles)
    def test_limit_one(self):
        """Test that PageGenerator yields the requested amount of pages."""
        self.gen.set_maximum_items(1)
        self.assertPagelistTitles(self.gen, self.titles[0:1])
    def test_limit_zero(self):
        """Test that a limit of zero is the same as limit None."""
        self.gen.set_maximum_items(0)
        self.assertPagelistTitles(self.gen, self.titles)
    def test_limit_omit(self):
        """Test that limit omitted is the same as limit None."""
        self.gen.set_maximum_items(-1)
        self.assertPagelistTitles(self.gen, self.titles)
    def test_namespace(self):
        """Test PageGenerator set_namespace."""
        # Every set_namespace call on this generator is expected to
        # raise AssertionError, regardless of the argument.
        self.assertRaises(AssertionError, self.gen.set_namespace, 0)
        self.assertRaises(AssertionError, self.gen.set_namespace, 1)
        self.assertRaises(AssertionError, self.gen.set_namespace, None)
class TestPropertyGenerator(TestCase):
    """API PropertyGenerator object test class."""
    family = 'wikipedia'
    code = 'en'
    def test_info(self):
        """Test PropertyGenerator with prop 'info'."""
        mainpage = self.get_mainpage()
        links = list(self.site.pagelinks(mainpage, total=10))
        titles = [l.title(withSection=False)
                  for l in links]
        gen = api.PropertyGenerator(site=self.site,
                                    prop="info",
                                    titles='|'.join(titles))
        count = 0
        for pagedata in gen:
            self.assertIsInstance(pagedata, dict)
            self.assertIn('pageid', pagedata)
            self.assertIn('lastrevid', pagedata)
            count += 1
        # One pagedata dict must be yielded per requested title.
        self.assertEqual(len(links), count)
    def test_one_continuation(self):
        """Test PropertyGenerator with prop 'revisions'."""
        mainpage = self.get_mainpage()
        links = list(self.site.pagelinks(mainpage, total=10))
        titles = [l.title(withSection=False)
                  for l in links]
        gen = api.PropertyGenerator(site=self.site,
                                    prop="revisions",
                                    titles='|'.join(titles))
        gen.set_maximum_items(-1)  # suppress use of "rvlimit" parameter
        count = 0
        for pagedata in gen:
            self.assertIsInstance(pagedata, dict)
            self.assertIn('pageid', pagedata)
            self.assertIn('revisions', pagedata)
            self.assertIn('revid', pagedata['revisions'][0])
            count += 1
        self.assertEqual(len(links), count)
    def test_two_continuations(self):
        """Test PropertyGenerator with prop 'revisions' and 'coordinates'."""
        mainpage = self.get_mainpage()
        links = list(self.site.pagelinks(mainpage, total=10))
        titles = [l.title(withSection=False)
                  for l in links]
        gen = api.PropertyGenerator(site=self.site,
                                    prop="revisions|coordinates",
                                    titles='|'.join(titles))
        gen.set_maximum_items(-1)  # suppress use of "rvlimit" parameter
        count = 0
        for pagedata in gen:
            self.assertIsInstance(pagedata, dict)
            self.assertIn('pageid', pagedata)
            self.assertIn('revisions', pagedata)
            self.assertIn('revid', pagedata['revisions'][0])
            count += 1
        self.assertEqual(len(links), count)
    @allowed_failure
    def test_many_continuations_limited(self):
        """Test PropertyGenerator with many limited props."""
        mainpage = self.get_mainpage()
        links = list(self.site.pagelinks(mainpage, total=30))
        titles = [l.title(withSection=False)
                  for l in links]
        gen = api.PropertyGenerator(site=self.site,
                                    prop="revisions|info|categoryinfo|langlinks|templates",
                                    rvprop="ids|flags|timestamp|user|comment|content",
                                    titles='|'.join(titles))
        # An APIError is raised if set_maximum_items is not called.
        gen.set_maximum_items(-1)  # suppress use of "rvlimit" parameter
        # Force the generator into continuation mode
        gen.set_query_increment(5)
        count = 0
        for pagedata in gen:
            self.assertIsInstance(pagedata, dict)
            self.assertIn('pageid', pagedata)
            count += 1
        self.assertEqual(len(links), count)
        # FIXME: AssertionError: 30 != 6150
    @allowed_failure
    def test_two_continuations_limited(self):
        """Test PropertyGenerator with many limited props and continuations."""
        # FIXME: test fails
        mainpage = self.get_mainpage()
        links = list(self.site.pagelinks(mainpage, total=30))
        titles = [l.title(withSection=False)
                  for l in links]
        gen = api.PropertyGenerator(site=self.site,
                                    prop="info|categoryinfo|langlinks|templates",
                                    titles='|'.join(titles))
        # Force the generator into continuation mode
        gen.set_query_increment(5)
        count = 0
        for pagedata in gen:
            self.assertIsInstance(pagedata, dict)
            self.assertIn('pageid', pagedata)
            count += 1
        self.assertEqual(len(links), count)
        # FIXME: AssertionError: 30 != 11550
    # FIXME: test disabled as it takes longer than 10 minutes
    def _test_two_continuations_limited_long_test(self):
        """Long duration test, with total & step that are a real scenario."""
        mainpage = self.get_mainpage()
        links = list(mainpage.backlinks(total=300))
        titles = [l.title(withSection=False)
                  for l in links]
        gen = api.PropertyGenerator(site=self.site,
                                    prop="info|categoryinfo|langlinks|templates",
                                    titles='|'.join(titles))
        # Force the generator into continuation mode
        gen.set_query_increment(50)
        count = 0
        for pagedata in gen:
            self.assertIsInstance(pagedata, dict)
            self.assertIn('pageid', pagedata)
            count += 1
        self.assertEqual(len(links), count)
class TestDryListGenerator(TestCase):

    """Test ListGenerator."""

    family = 'wikipedia'
    code = 'en'
    dry = True

    def setUp(self):
        """Set up test case."""
        super(TestDryListGenerator, self).setUp()
        site = self.get_site()
        # Provide the minimal paraminfo the generator needs so it can be
        # constructed without network access on a dry site.
        site._paraminfo['query+allpages'] = {
            'prefix': 'ap',
            'limit': {'max': 10},
            'namespace': {'multi': True}
        }
        site._paraminfo.query_modules_with_limits = set(['allpages'])
        self.gen = api.ListGenerator(listaction="allpages", site=site)

    def test_namespace_none(self):
        """Test ListGenerator set_namespace with None."""
        with self.assertRaises(TypeError):
            self.gen.set_namespace(None)

    def test_namespace_zero(self):
        """Test ListGenerator set_namespace with 0."""
        self.gen.set_namespace(0)
class TestCachedRequest(DefaultSiteTestCase):
    """Test API Request caching.
    This test class does not use the forced test caching.
    """
    cached = False
    def test_normal_use(self):
        """Test the caching of CachedRequest with an ordinary request."""
        mysite = self.get_site()
        mainpage = self.get_mainpage()
        # Run the cached query three times to ensure the
        # data returned is equal, and the last two have
        # the same cache time.
        params = {'action': 'query',
                  'prop': 'info',
                  'titles': mainpage.title(),
                  }
        req1 = api.CachedRequest(datetime.timedelta(minutes=10),
                                 site=mysite, parameters=params)
        data1 = req1.submit()
        req2 = api.CachedRequest(datetime.timedelta(minutes=10),
                                 site=mysite, parameters=params)
        data2 = req2.submit()
        req3 = api.CachedRequest(datetime.timedelta(minutes=10),
                                 site=mysite, parameters=params)
        data3 = req3.submit()
        self.assertEqual(data1, data2)
        self.assertEqual(data2, data3)
        # The last two requests must have been served from cache and
        # therefore share a cache timestamp.
        self.assertIsNotNone(req2._cachetime)
        self.assertIsNotNone(req3._cachetime)
        self.assertEqual(req2._cachetime, req3._cachetime)
    def test_internals(self):
        """Test the caching of CachedRequest by faking a unique request."""
        mysite = self.get_site()
        # Run tests on a missing page unique to this test run so it can
        # not be cached the first request, but will be cached after.
        now = datetime.datetime.now()
        params = {'action': 'query',
                  'prop': 'info',
                  'titles': 'TestCachedRequest_test_internals ' + str(now),
                  }
        req = api.CachedRequest(datetime.timedelta(minutes=10),
                                site=mysite, parameters=params)
        # Before submit: nothing cached, no data, no cache timestamp.
        rv = req._load_cache()
        self.assertFalse(rv)
        self.assertIsNone(req._data)
        self.assertIsNone(req._cachetime)
        data = req.submit()
        self.assertIsNotNone(req._data)
        self.assertIsNone(req._cachetime)
        # After submit: the entry can be loaded from cache and carries a
        # timestamp newer than the start of the test.
        rv = req._load_cache()
        self.assertTrue(rv)
        self.assertIsNotNone(req._data)
        self.assertIsNotNone(req._cachetime)
        self.assertGreater(req._cachetime, now)
        self.assertEqual(req._data, data)
class TestLazyLoginBase(TestCase):

    """
    Test that it tries to login when read API access is denied.

    Because there is no such family configured it creates an AutoFamily and
    BaseSite on its own. It's testing against steward.wikimedia.org.

    These tests are split into two subclasses as only the first failed login
    behaves as expected. All subsequent logins will raise an APIError, making
    it impossible to test two scenarios with the same APISite object.
    """

    hostname = 'steward.wikimedia.org'

    @classmethod
    def setUpClass(cls):
        """Set up steward Family."""
        super(TestLazyLoginBase, cls).setUpClass()
        family = pywikibot.family.AutoFamily(
            'steward', 'https://steward.wikimedia.org/w/api.php')
        cls.site = pywikibot.site.APISite('steward', family)
class TestLazyLoginNotExistUsername(TestLazyLoginBase):

    """Test missing username."""

    # FIXME: due to limitations of LoginManager, it will ask the user
    # for a password even if the username does not exist, and even if
    # pywikibot is not connected to a tty. T100964
    def setUp(self):
        """Patch the LoginManager to avoid UI interaction."""
        super(TestLazyLoginNotExistUsername, self).setUp()
        self._saved_login_manager = pywikibot.data.api.LoginManager
        pywikibot.data.api.LoginManager = FakeLoginManager

    def tearDown(self):
        """Restore the original LoginManager."""
        pywikibot.data.api.LoginManager = self._saved_login_manager
        super(TestLazyLoginNotExistUsername, self).tearDown()

    def test_access_denied_notexist_username(self):
        """Test the query with a username which does not exist."""
        self.site._username = ['Not registered username', None]
        req = api.Request(site=self.site, parameters={'action': 'query'})
        with self.assertRaises(pywikibot.NoUsername):
            req.submit()
        # FIXME: T100965
        with self.assertRaises(api.APIError):
            req.submit()
class TestLazyLoginNoUsername(TestLazyLoginBase):

    """Test no username."""

    def test_access_denied_no_username(self):
        """Test the query without a username."""
        self.site._username = [None, None]
        # FIXME: The following prevents LoginManager
        # from loading the username from the config when the site
        # username is None. i.e. site.login(user=None) means load
        # username from the configuration.
        pywikibot.config.usernames.pop('steward', None)
        req = api.Request(site=self.site, parameters={'action': 'query'})
        with self.assertRaises(pywikibot.NoUsername):
            req.submit()
        # FIXME: T100965
        with self.assertRaises(api.APIError):
            req.submit()
class TestBadTokenRecovery(TestCase):

    """Test that the request recovers from bad tokens."""

    family = 'wikipedia'
    code = 'test'
    write = True

    def test_bad_token(self):
        """Test the bad token recovery by corrupting the cache."""
        site = self.get_site()
        # Poison the cached edit token; the save below must still succeed
        # by fetching a fresh token after the badtoken error.
        user_tokens = site.tokens._tokens.setdefault(site.user(), {})
        user_tokens['edit'] = 'INVALID'
        page = pywikibot.Page(site, 'Pywikibot bad token test')
        page.text = ('This page is testing whether pywikibot-core rerequests '
                     'a token when a badtoken error was received.')
        page.save(summary='Bad token test')
class TestUrlEncoding(TestCase):

    """Test encode_url() function."""

    net = False

    def test_url_encoding_from_list(self):
        """Test moving 'token' parameters from a list to the end."""
        query = [('action', 'edit'), ('token', 'a'), ('supertoken', 'b'),
                 ('text', 'text')]
        encoded = api.encode_url(query)
        self.assertEqual(encoded, 'action=edit&text=text&token=a&supertoken=b')
        self.assertIsInstance(encoded, str)

    def test_url_encoding_from_dict(self):
        """Test moving 'token' parameters from a dict to the end."""
        # do not add other keys because dictionary is not deterministic
        query = {'supertoken': 'b', 'text': 'text'}
        encoded = api.encode_url(query)
        self.assertEqual(encoded, 'text=text&supertoken=b')
        self.assertIsInstance(encoded, str)

    def test_url_encoding_from_unicode(self):
        """Test encoding unicode values."""
        query = {'token': 'токен'}
        encoded = api.encode_url(query)
        self.assertEqual(encoded, 'token=%D1%82%D0%BE%D0%BA%D0%B5%D0%BD')
        self.assertIsInstance(encoded, str)

    def test_url_encoding_from_basestring(self):
        """Test encoding basestring values."""
        if PY2:
            query = {'token': str('test\xe2\x80\x94test'.encode('utf-8'))}
        else:
            query = {'token': 'test\xe2\x80\x94test'}
        encoded = api.encode_url(query)
        self.assertEqual(encoded, str('token=test%C3%A2%C2%80%C2%94test'))
        self.assertIsInstance(encoded, str)

    def test_moving_special_tokens(self):
        """Test moving wpEditToken to the very end."""
        query = {'wpEditToken': 'c', 'token': 'b', 'text': 'a'}
        encoded = api.encode_url(query)
        self.assertEqual(encoded, 'text=a&token=b&wpEditToken=c')
        self.assertIsInstance(encoded, str)
class DummyThrottle(Throttle):
    """Throttle subclass that records the lag and aborts instead of waiting."""
    def lag(self, lag):
        """Override lag method, save the lag value and exit the api loop."""
        self._lagvalue = lag  # save the lag value for inspection by tests
        raise SystemExit  # exit the api loop
class TestLagpattern(DefaultSiteTestCase):
    """Test the lag pattern."""
    cached = False
    def test_valid_lagpattern(self):
        """Test whether api.lagpattern is valid."""
        mysite = self.get_site()
        # dbrepllag reports -1 when the wiki runs on a single, unreplicated
        # database server; the maxlag mechanism is meaningless there.
        if mysite.siteinfo['dbrepllag'][0]['lag'] == -1:
            raise unittest.SkipTest(
                '{0} is not running on a replicated database cluster.'
                .format(mysite)
            )
        # Install a throttle whose lag() raises SystemExit, so the request
        # loop terminates after the first maxlag response.
        mythrottle = DummyThrottle(mysite)
        mysite._throttle = mythrottle
        # maxlag=-1 forces the server to report any positive lag as an error.
        params = {'action': 'query',
                  'titles': self.get_mainpage().title(),
                  'maxlag': -1}
        req = api.Request(site=mysite, parameters=params)
        try:
            req.submit()
        except SystemExit:
            pass  # expected exception from DummyThrottle instance
        except api.APIError as e:
            pywikibot.warning(
                'Wrong api.lagpattern regex, cannot retrieve lag value')
            raise e
        # DummyThrottle.lag() stored the lag value parsed by api.lagpattern.
        value = mysite.throttle._lagvalue
        self.assertIsInstance(value, int)
        self.assertGreaterEqual(value, 0)
    def test_individual_patterns(self):
        """Test api.lagpattern with example patterns."""
        # Maps a server "lagged" message to the expected parsed lag value;
        # the pattern captures only the integer part of the lag.
        patterns = {
            'Waiting for 10.64.32.115: 0.14024019241333 seconds lagged': 0,
            'Waiting for hostname: 5 seconds lagged': 5,
            'Waiting for 127.0.0.1: 1.7 seconds lagged': 1
        }
        for info, time in patterns.items():
            lag = api.lagpattern.search(info)
            self.assertIsNotNone(lag)
            self.assertEqual(int(lag.group("lag")), time)
if __name__ == '__main__':  # pragma: no cover
    try:
        unittest.main()
    except SystemExit:
        pass  # unittest.main() always raises SystemExit; exit quietly
| mit |
fangxingli/hue | desktop/core/ext-py/elementtree/elementtree/SimpleXMLWriter.py | 103 | 8616 | #
# SimpleXMLWriter
# $Id: SimpleXMLWriter.py 2312 2005-03-02 18:13:39Z fredrik $
#
# a simple XML writer
#
# history:
# 2001-12-28 fl created
# 2002-11-25 fl fixed attribute encoding
# 2002-12-02 fl minor fixes for 1.5.2
# 2004-06-17 fl added pythondoc markup
# 2004-07-23 fl added flush method (from Jay Graves)
# 2004-10-03 fl added declaration method
#
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The SimpleXMLWriter module is
#
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
##
# Tools to write XML files, without having to deal with encoding
# issues, well-formedness, etc.
# <p>
# The current version does not provide built-in support for
# namespaces. To create files using namespaces, you have to provide
# "xmlns" attributes and explicitly add prefixes to tags and
# attributes.
#
# <h3>Patterns</h3>
#
# The following example generates a small XHTML document.
# <pre>
#
# from elementtree.SimpleXMLWriter import XMLWriter
# import sys
#
# w = XMLWriter(sys.stdout)
#
# html = w.start("html")
#
# w.start("head")
# w.element("title", "my document")
# w.element("meta", name="generator", value="my application 1.0")
# w.end()
#
# w.start("body")
# w.element("h1", "this is a heading")
# w.element("p", "this is a paragraph")
#
# w.start("p")
# w.data("this is ")
# w.element("b", "bold")
# w.data(" and ")
# w.element("i", "italic")
# w.data(".")
# w.end("p")
#
# w.close(html)
# </pre>
##
import re, sys, string
# Feature-detect unicode support.  On Python 1.5.2 (no unicode() builtin,
# NameError branch) strings are passed through unmodified and the escape
# pattern is byte-based; on later 2.x (else branch) strings are really
# encoded and the pattern covers the full unicode range.
try:
    unicode("")
except NameError:
    def encode(s, encoding):
        # 1.5.2: application must use the right encoding
        return s
    _escape = re.compile(r"[&<>\"\x80-\xff]+")  # 1.5.2
else:
    def encode(s, encoding):
        return s.encode(encoding)
    # eval keeps the u"" literal out of 1.5.2's parser, which could not
    # compile it directly.
    _escape = re.compile(eval(r'u"[&<>\"\u0080-\uffff]+"'))
def encode_entity(text, pattern=_escape):
    """Return *text* with reserved/non-ASCII characters as numeric entities."""
    # map reserved and non-ascii characters to numerical entities
    def escape_entities(match):
        refs = ["&#%d;" % ord(ch) for ch in match.group()]
        return string.join(refs, "")
    return encode(pattern.sub(escape_entities, text), "ascii")
# The pattern is captured as a default argument above, so the module-level
# name can be dropped to keep the namespace clean.
del _escape
#
# the following functions assume an ascii-compatible encoding
# (or "utf-16")
def escape_cdata(s, encoding=None, replace=string.replace):
    """Escape '&', '<' and '>' in character data.

    If *encoding* is given the result is encoded with it; characters the
    encoding cannot represent fall back to numeric character references.
    """
    # NOTE(review): the replacement strings had been corrupted to no-ops
    # (e.g. '"&" -> "&"') by entity stripping; restored to the standard
    # XML predefined entities.
    s = replace(s, "&", "&amp;")
    s = replace(s, "<", "&lt;")
    s = replace(s, ">", "&gt;")
    if encoding:
        try:
            return encode(s, encoding)
        except UnicodeError:
            return encode_entity(s)
    return s
def escape_attrib(s, encoding=None, replace=string.replace):
    """Escape '&', quotes, '<' and '>' in an attribute value.

    If *encoding* is given the result is encoded with it; characters the
    encoding cannot represent fall back to numeric character references.
    """
    # NOTE(review): the replacement strings had been corrupted by entity
    # stripping (the quote replacement even broke the syntax); restored
    # to the standard XML predefined entities.
    s = replace(s, "&", "&amp;")
    s = replace(s, "'", "&apos;")
    s = replace(s, "\"", "&quot;")
    s = replace(s, "<", "&lt;")
    s = replace(s, ">", "&gt;")
    if encoding:
        try:
            return encode(s, encoding)
        except UnicodeError:
            return encode_entity(s)
    return s
##
# XML writer class.
#
# @param file A file or file-like object. This object must implement
# a <b>write</b> method that takes an 8-bit string.
# @param encoding Optional encoding.
class XMLWriter:
    """Incrementally write well-formed XML to a stream, escaping as needed."""
    def __init__(self, file, encoding="us-ascii"):
        # Accept a filename as well as any object with a write method.
        if not hasattr(file, "write"):
            file = open(file, "w")
        self.__write = file.write
        # Replace the no-op flush placeholder (defined at the bottom of
        # this class) with the stream's own flush, if it has one.
        if hasattr(file, "flush"):
            self.flush = file.flush
        self.__open = 0 # true if start tag is open
        self.__tags = [] # stack of currently open element tags
        self.__data = [] # buffered character data, joined lazily
        self.__encoding = encoding
    def __flush(self):
        # flush internal buffers
        if self.__open:
            # Close the pending start tag before emitting content.
            self.__write(">")
            self.__open = 0
        if self.__data:
            data = string.join(self.__data, "")
            self.__write(escape_cdata(data, self.__encoding))
            self.__data = []
    ##
    # Writes an XML declaration.
    def declaration(self):
        encoding = self.__encoding
        if encoding == "us-ascii" or encoding == "utf-8":
            # The default encodings need no explicit encoding attribute.
            self.__write("<?xml version='1.0'?>\n")
        else:
            self.__write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
    ##
    # Opens a new element.  Attributes can be given as keyword
    # arguments, or as a string/string dictionary. You can pass in
    # 8-bit strings or Unicode strings; the former are assumed to use
    # the encoding passed to the constructor.  The method returns an
    # opaque identifier that can be passed to the <b>close</b> method,
    # to close all open elements up to and including this one.
    #
    # @param tag Element tag.
    # @param attrib Attribute dictionary.  Alternatively, attributes
    #    can be given as keyword arguments.
    # @return An element identifier.
    def start(self, tag, attrib={}, **extra):
        self.__flush()
        tag = escape_cdata(tag, self.__encoding)
        self.__data = []
        self.__tags.append(tag)
        self.__write("<%s" % tag)
        if attrib or extra:
            # Merge dict and keyword attributes; sort for stable output.
            attrib = attrib.copy()
            attrib.update(extra)
            attrib = attrib.items()
            attrib.sort()
            for k, v in attrib:
                k = escape_cdata(k, self.__encoding)
                v = escape_attrib(v, self.__encoding)
                self.__write(" %s=\"%s\"" % (k, v))
        self.__open = 1
        # The identifier is this element's depth on the tag stack.
        return len(self.__tags)-1
    ##
    # Adds a comment to the output stream.
    #
    # @param comment Comment text, as an 8-bit string or Unicode string.
    def comment(self, comment):
        self.__flush()
        self.__write("<!-- %s -->\n" % escape_cdata(comment, self.__encoding))
    ##
    # Adds character data to the output stream.
    #
    # @param text Character data, as an 8-bit string or Unicode string.
    def data(self, text):
        # Buffered only; written (escaped) on the next structural event.
        self.__data.append(text)
    ##
    # Closes the current element (opened by the most recent call to
    # <b>start</b>).
    #
    # @param tag Element tag.  If given, the tag must match the start
    #    tag.  If omitted, the current element is closed.
    def end(self, tag=None):
        if tag:
            assert self.__tags, "unbalanced end(%s)" % tag
            assert escape_cdata(tag, self.__encoding) == self.__tags[-1],\
                   "expected end(%s), got %s" % (self.__tags[-1], tag)
        else:
            assert self.__tags, "unbalanced end()"
        tag = self.__tags.pop()
        if self.__data:
            self.__flush()
        elif self.__open:
            # Nothing written since start(): emit a self-closing tag.
            self.__open = 0
            self.__write(" />")
            return
        self.__write("</%s>" % tag)
    ##
    # Closes open elements, up to (and including) the element identified
    # by the given identifier.
    #
    # @param id Element identifier, as returned by the <b>start</b> method.
    def close(self, id):
        while len(self.__tags) > id:
            self.end()
    ##
    # Adds an entire element.  This is the same as calling <b>start</b>,
    # <b>data</b>, and <b>end</b> in sequence. The <b>text</b> argument
    # can be omitted.
    def element(self, tag, text=None, attrib={}, **extra):
        apply(self.start, (tag, attrib), extra)
        if text:
            self.data(text)
        self.end()
    ##
    # Flushes the output stream.
    def flush(self):
        pass # replaced by the constructor when the stream supports flush
| apache-2.0 |
40023255/2015cd_0505 | static/Brython3.1.1-20150328-091302/Lib/_threading_local.py | 923 | 7410 | """Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = sorted(mydata.__dict__.items())
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
from weakref import ref
from contextlib import contextmanager
__all__ = ["local"]
# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module. This creates potential problems
# with circular imports. For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems). Note that all platforms on CPython do have support
# for locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest.
class _localimpl:
    """A class managing thread-local dicts"""
    __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__'
    def __init__(self):
        # The key used in the Thread objects' attribute dicts.
        # We keep it a string for speed but make it unlikely to clash with
        # a "real" attribute.
        self.key = '_threading_local._localimpl.' + str(id(self))
        # { id(Thread) -> (ref(Thread), thread-local dict) }
        self.dicts = {}
    def get_dict(self):
        """Return the dict for the current thread. Raises KeyError if none
        defined."""
        thread = current_thread()
        return self.dicts[id(thread)][1]
    def create_dict(self):
        """Create a new dict for the current thread, and return it."""
        localdict = {}
        key = self.key
        thread = current_thread()
        idt = id(thread)
        # Two weak references with callbacks tie the lifetimes together:
        # whichever of the local object or the thread dies first cleans up
        # the state held by the other.
        def local_deleted(_, key=key):
            # When the localimpl is deleted, remove the thread attribute.
            thread = wrthread()
            if thread is not None:
                del thread.__dict__[key]
        def thread_deleted(_, idt=idt):
            # When the thread is deleted, remove the local dict.
            # Note that this is suboptimal if the thread object gets
            # caught in a reference loop. We would like to be called
            # as soon as the OS-level thread ends instead.
            local = wrlocal()
            if local is not None:
                dct = local.dicts.pop(idt)
        wrlocal = ref(self, local_deleted)
        wrthread = ref(thread, thread_deleted)
        thread.__dict__[key] = wrlocal
        self.dicts[idt] = wrthread, localdict
        return localdict
@contextmanager
def _patch(self):
    """Temporarily install the calling thread's dict as self.__dict__."""
    impl = object.__getattribute__(self, '_local__impl')
    try:
        dct = impl.get_dict()
    except KeyError:
        # First access from this thread: create its dict and re-run
        # __init__ with the arguments saved by local.__new__().
        dct = impl.create_dict()
        args, kw = impl.localargs
        self.__init__(*args, **kw)
    # Hold the lock while __dict__ is swapped so concurrent attribute
    # access on the same instance stays consistent.
    with impl.locallock:
        object.__setattr__(self, '__dict__', dct)
        yield
class local:
    """Pure-Python thread-local namespace object."""
    # '_local__impl' is the name-mangled form of '__impl' as seen from
    # inside this class; storing it pre-mangled keeps the slot name stable.
    __slots__ = '_local__impl', '__dict__'
    def __new__(cls, *args, **kw):
        if (args or kw) and (cls.__init__ is object.__init__):
            # Constructor arguments are pointless unless a subclass
            # defines an __init__ that consumes them.
            raise TypeError("Initialization arguments are not supported")
        self = object.__new__(cls)
        impl = _localimpl()
        impl.localargs = (args, kw)
        impl.locallock = RLock()
        object.__setattr__(self, '_local__impl', impl)
        # We need to create the thread dict in anticipation of
        # __init__ being called, to make sure we don't call it
        # again ourselves.
        impl.create_dict()
        return self
    def __getattribute__(self, name):
        with _patch(self):
            return object.__getattribute__(self, name)
    def __setattr__(self, name, value):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        with _patch(self):
            return object.__setattr__(self, name, value)
    def __delattr__(self, name):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        with _patch(self):
            return object.__delattr__(self, name)
from threading import current_thread, RLock
| agpl-3.0 |
t794104/ansible | lib/ansible/modules/network/nxos/nxos_ntp_options.py | 80 | 4744 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_ntp_options
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages NTP options.
description:
- Manages NTP options, e.g. authoritative server and logging.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- When C(state=absent), master and logging will be set to False and
stratum will be removed as well
options:
master:
description:
- Sets whether the device is an authoritative NTP server.
type: bool
stratum:
description:
- If C(master=true), an optional stratum can be supplied (1-15).
The device default is 8.
logging:
description:
- Sets whether NTP logging is enabled on the device.
type: bool
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Basic NTP options configuration
- nxos_ntp_options:
master: true
stratum: 12
logging: false
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
updates:
description: command sent to the device
returned: always
type: list
sample: ["no ntp logging", "ntp master 12"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def get_current(module):
    """Return the device's current NTP option state.

    Parses ``show running-config | inc ntp`` output and returns a dict
    with keys ``master`` (bool), ``stratum`` (str or None) and
    ``logging`` (bool).
    """
    cmd = ('show running-config | inc ntp')
    master = False
    logging = False
    stratum = None
    output = run_commands(module, ({'command': cmd, 'output': 'text'}))[0]
    if output:
        # 'ntp master' is valid without an explicit stratum (the device
        # default is 8, per DOCUMENTATION), so the numeric group must be
        # optional; previously the regex required it and such configs were
        # misreported as master=False.
        match = re.search(r"^ntp master(?: (\d+))?", output, re.M)
        if match:
            master = True
            stratum = match.group(1)
        logging = 'ntp logging' in output.lower()
    return {'master': master, 'stratum': stratum, 'logging': logging}
def main():
    """Entry point: converge the device's NTP options to the requested state."""
    argument_spec = dict(
        master=dict(required=False, type='bool'),
        stratum=dict(required=False, type='str'),
        logging=dict(required=False, type='bool'),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    master = module.params['master']
    stratum = module.params['stratum']
    logging = module.params['logging']
    state = module.params['state']
    if stratum and master is False:
        # stratum is declared type='str'; the old check compared it to the
        # int 8, which is never equal, so supplying the default stratum 8
        # with master=False failed as well.  Compare against the string.
        if stratum != '8':
            module.fail_json(msg='master MUST be True when stratum is changed')
    current = get_current(module)
    result = {'changed': False}
    commands = list()
    if state == 'absent':
        # Remove whatever is currently configured.
        if current['master']:
            commands.append('no ntp master')
        if current['logging']:
            commands.append('no ntp logging')
    elif state == 'present':
        if master and not current['master']:
            commands.append('ntp master')
        elif master is False and current['master']:
            commands.append('no ntp master')
        # 'ntp master <n>' both enables master mode and sets the stratum.
        if stratum and stratum != current['stratum']:
            commands.append('ntp master %s' % stratum)
        if logging and not current['logging']:
            commands.append('ntp logging')
        elif logging is False and current['logging']:
            commands.append('no ntp logging')
    result['commands'] = commands
    result['updates'] = commands
    if commands:
        if not module.check_mode:
            load_config(module, commands)
        result['changed'] = True
    result['warnings'] = warnings
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| gpl-3.0 |
837278709/metro-openerp | third_modules/sale_payment_method/__openerp__.py | 3 | 1710 | # -*- coding: utf-8 -*-
##############################################################################
#
# sale_payment_method for OpenERP
# Copyright (C) 2011 Akretion Sébastien BEAU <sebastien.beau@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Sale Payment Method',
    'version': '0.2',
    'category': 'Generic Modules/Others',
    'license': 'AGPL-3',
    'description': """
Sale Payment Method
===================
This module adds low-level features used for instance by modules:
- Sale Automatic Workflow
- Sale Quick Payment
It adds a payment method on the sales orders and allow to register
payments entries on sales orders.
""",
    'author': 'Akretion',
    'website': 'http://www.akretion.com/',
    # Modules that must be installed before this one.
    'depends': ['sale',
                ],
    # Views and security files loaded at install/update time.
    'data': ['sale_view.xml',
             'payment_method_view.xml',
             'security/ir.model.access.csv',
             'security/rules.xml',
             ],
    'demo': [],
    'installable': True,
}
| agpl-3.0 |
espadrine/opera | chromium/src/third_party/python_26/Lib/test/sortperf.py | 232 | 4746 | """Sort performance test.
See main() for command line syntax.
See tabulate() for output format.
"""
import sys
import time
import random
import marshal
import tempfile
import os
# Directory used to cache generated random-float files between runs.
td = tempfile.gettempdir()
def randfloats(n):
    """Return a list of n random floats in [0, 1)."""
    # Generating floats is expensive, so this writes them out to a file in
    # a temp directory. If the file already exists, it just reads them
    # back in and shuffles them a bit.
    fn = os.path.join(td, "rr%06d" % n)
    try:
        fp = open(fn, "rb")
    except IOError:
        # Cache miss: generate the floats and try to save them for reuse.
        r = random.random
        result = [r() for i in xrange(n)]
        try:
            try:
                fp = open(fn, "wb")
                marshal.dump(result, fp)
                fp.close()
                fp = None
            finally:
                # Remove a partially written cache file on failure.
                if fp:
                    try:
                        os.unlink(fn)
                    except os.error:
                        pass
        except IOError, msg:
            print "can't write", fn, ":", msg
    else:
        result = marshal.load(fp)
        fp.close()
        # Shuffle it a bit...
        for i in range(10):
            i = random.randrange(n)
            temp = result[:i]
            del result[:i]
            temp.reverse()
            result.extend(temp)
            del temp
    assert len(result) == n
    return result
def flush():
    # Force partial timing output to appear immediately.
    sys.stdout.flush()
def doit(L):
    # Sort L in place and print the elapsed CPU time (no newline, so the
    # results for one row line up in columns).
    t0 = time.clock()
    L.sort()
    t1 = time.clock()
    print "%6.2f" % (t1-t0),
    flush()
def tabulate(r):
    """Tabulate sort speed for lists of various sizes.
    The sizes are 2**i for i in r (the argument, a list).
    The output displays i, 2**i, and the time to sort arrays of 2**i
    floating point numbers with the following properties:
    *sort: random data
    \sort: descending data
    /sort: ascending data
    3sort: ascending, then 3 random exchanges
    +sort: ascending, then 10 random at the end
    %sort: ascending, then randomly replace 1% of the elements w/ random values
    ~sort: many duplicates
    =sort: all equal
    !sort: worst case scenario
    """
    cases = tuple([ch + "sort" for ch in r"*\/3+%~=!"])
    fmt = ("%2s %7s" + " %6s"*len(cases))
    print fmt % (("i", "2**i") + cases)
    for i in r:
        n = 1 << i
        L = randfloats(n)
        print "%2d %7d" % (i, n),
        flush()
        doit(L) # *sort
        L.reverse()
        doit(L) # \sort
        # The list is ascending again after the \sort pass sorted it.
        doit(L) # /sort
        # Do 3 random exchanges.
        for dummy in range(3):
            i1 = random.randrange(n)
            i2 = random.randrange(n)
            L[i1], L[i2] = L[i2], L[i1]
        doit(L) # 3sort
        # Replace the last 10 with random floats.
        if n >= 10:
            L[-10:] = [random.random() for dummy in range(10)]
        doit(L) # +sort
        # Replace 1% of the elements at random.
        for dummy in xrange(n // 100):
            L[random.randrange(n)] = random.random()
        doit(L) # %sort
        # Arrange for lots of duplicates.
        if n > 4:
            del L[4:]
            L = L * (n // 4)
            # Force the elements to be distinct objects, else timings can be
            # artificially low.
            L = map(lambda x: --x, L)
        doit(L) # ~sort
        del L
        # All equal.  Again, force the elements to be distinct objects.
        L = map(abs, [-0.5] * n)
        doit(L) # =sort
        del L
        # This one looks like [3, 2, 1, 0, 0, 1, 2, 3].  It was a bad case
        # for an older implementation of quicksort, which used the median
        # of the first, last and middle elements as the pivot.
        half = n // 2
        L = range(half - 1, -1, -1)
        L.extend(range(half))
        # Force to float, so that the timings are comparable.  This is
        # significantly faster if we leave tham as ints.
        L = map(float, L)
        doit(L) # !sort
        print
def main():
    """Main program when invoked as a script.
    One argument: tabulate a single row.
    Two arguments: tabulate a range (inclusive).
    Extra arguments are used to seed the random generator.
    """
    # default range (inclusive)
    k1 = 15
    k2 = 20
    if sys.argv[1:]:
        # one argument: single point
        k1 = k2 = int(sys.argv[1])
        if sys.argv[2:]:
            # two arguments: specify range
            k2 = int(sys.argv[2])
            if sys.argv[3:]:
                # derive random seed from remaining arguments
                x = 1
                for a in sys.argv[3:]:
                    x = 69069 * x + hash(a)
                random.seed(x)
    r = range(k1, k2+1) # include the end point
    tabulate(r)
if __name__ == '__main__':
    main()
| bsd-3-clause |
gauribhoite/personfinder | env/site-packages/Crypto/SelfTest/Cipher/__init__.py | 117 | 2401 | # -*- coding: utf-8 -*-
#
# SelfTest/Cipher/__init__.py: Self-test for cipher modules
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test for cipher modules"""
__revision__ = "$Id$"
def get_tests(config={}):
    """Collect and return the self-test suites of all cipher modules."""
    from Crypto.SelfTest.Cipher import (
        test_AES, test_ARC2, test_ARC4, test_Blowfish, test_CAST,
        test_DES3, test_DES, test_XOR, test_pkcs1_15, test_pkcs1_oaep)
    tests = []
    # Accumulate in the same order the modules were imported above.
    for module in (test_AES, test_ARC2, test_ARC4, test_Blowfish, test_CAST,
                   test_DES3, test_DES, test_XOR, test_pkcs1_15,
                   test_pkcs1_oaep):
        tests += module.get_tests(config=config)
    return tests
if __name__ == '__main__':
    import unittest
    # 'suite' is looked up by name through defaultTest, so the lambda must
    # be bound in this module's namespace.
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| apache-2.0 |
4doemaster/enigma2 | lib/python/Plugins/Extensions/Modem/plugin.py | 28 | 8329 | def getDefaultGateway():
f = open("/proc/net/route", "r")
if f:
for line in f.readlines():
tokens = line.split('\t')
if tokens[1] == '00000000': #dest 0.0.0.0
return int(tokens[2], 16)
return None
def getTelephone():
    """Return the phone number from the 'connect' line of /etc/ppp/options.

    The connect line is assumed to look like: connect "<dialer> <number>";
    this returns the text after the second space up to the closing quote,
    or '' when no connect line exists.
    """
    # 'with' closes the file; the original leaked the handle.
    with open("/etc/ppp/options", "r") as f:
        for line in f.readlines():
            if line.find('connect') == 0:
                line = line[line.find(' ')+1:]
                line = line[line.find(' ')+1:]
                return line[:line.find('"')]
    return ""
def setOptions(tel, user):
    """Rewrite /etc/ppp/options with a new phone number and user name.

    The 'connect' line keeps everything up to and including its second
    space and gets the new number (plus closing quote) appended; the
    'user' line is replaced; all other lines are copied unchanged.
    """
    with open("/etc/ppp/options", "r+") as f:
        lines = f.readlines()
        f.seek(0)
        for line in lines:
            if line.find('connect') == 0:
                p = line.find(' ')
                p = line.find(' ', p+1)
                f.write(line[:p+1]+tel+'"\n')
            elif line.find('user') == 0:
                f.write('user '+user+'\n')
            else:
                f.write(line)
        # Drop leftover bytes when the new content is shorter than the old
        # file; without truncate(), stale text remained at the end.
        f.truncate()
def getSecretString():
    """Return the first usable credential entry from /etc/ppp/pap-secrets.

    Comment lines and lines without a '*' (server wildcard) are skipped;
    spaces, tabs, newlines and quotes are stripped, leaving a string of
    the form 'user*password'.  Returns None when no entry exists.
    """
    # 'with' closes the file; the original leaked the handle.
    with open("/etc/ppp/pap-secrets", "r") as f:
        for line in f.readlines():
            if line[0] == '#' or line.find('*') == -1:
                continue
            for ch in (' ', '\t', '\n', '"'):
                line = line.replace(ch, '')
            return line
    return None
def setSecretString(secret):
    """Replace the wildcard credential line in /etc/ppp/pap-secrets.

    *secret* is expected in the 'user*password' form produced by
    getSecretString(); comment lines and lines without a '*' wildcard are
    preserved unchanged.
    """
    with open("/etc/ppp/pap-secrets", 'r+') as f:
        lines = f.readlines()
        f.seek(0)
        for line in lines:
            if line[0] == '#' or line.find('*') == -1:
                f.write(line)
                continue
            f.write(secret+'\n')
        # Remove stale trailing bytes if the rewritten file is shorter.
        f.truncate()
from Screens.Screen import Screen
from Plugins.Plugin import PluginDescriptor
from enigma import eConsoleAppContainer, eTimer
from Components.Label import Label
from Components.Button import Button
from Components.ConfigList import ConfigList
from Components.config import ConfigText, ConfigPassword, KEY_LEFT, KEY_RIGHT, KEY_0, KEY_DELETE, KEY_BACKSPACE
from Components.ActionMap import NumberActionMap, ActionMap
from os import system
# Connection state constants used by the modem setup screen's buttons.
NONE = 0
CONNECT = 1
ABORT = 2
DISCONNECT = 3
# Default gateway (raw routing-table int) saved before pppd replaces the
# default route; restored when the connection closes.
gateway = None
def pppdClosed(ret):
    # Called when the pppd console process exits; restores the previously
    # saved default route so normal networking works again.
    global gateway
    print "modem disconnected", ret
    if gateway:
        #FIXMEEE... hardcoded for little endian!!
        system("route add default gw %d.%d.%d.%d" %(gateway&0xFF, (gateway>>8)&0xFF, (gateway>>16)&0xFF, (gateway>>24)&0xFF))
connected = False  # module-wide flag: True while a PPP session is up
conn = eConsoleAppContainer()  # runs pppd; shared across ModemSetup screens
conn.appClosed.append(pppdClosed)
class ModemSetup(Screen):
    """Setup and dial screen for the builtin modem.

    Lets the user edit the PPP username, password and phone number,
    starts/stops pppd through the shared eConsoleAppContainer, and mirrors
    pppd's console output in the "state" label.  The green/red softkey
    meanings are tracked in self.green_function / self.red_function.
    """
    skin = """
        <screen position="180,100" size="320,300" title="Modem" >
            <ePixmap pixmap="skin_default/buttons/green.png" position="10,10" size="140,40" alphatest="on" />
            <ePixmap pixmap="skin_default/buttons/red.png" position="160,10" size="140,40" alphatest="on" />
            <widget name="key_green" position="10,10" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
            <widget name="key_red" position="160,10" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
            <widget name="list" position="10,60" size="300,120" />
            <widget name="state" position="10,210" size="300,80" font="Regular;20" />
        </screen>"""

    def nothing(self):
        # No-op target for the disabled list navigation actions below.
        print "nothing!"

    def __init__(self, session, args = None):
        global connected
        global conn
        self.skin = ModemSetup.skin
        # Pre-fill the form from /etc/ppp; the PAP secret line has the
        # form "user * password" (split on the first '*').
        secret = getSecretString()
        user = secret[:secret.find('*')]
        password = secret[secret.find('*')+1:]
        self.username = ConfigText(user, fixed_size=False)
        self.password = ConfigPassword(password, fixed_size=False)
        self.phone = ConfigText(getTelephone(), fixed_size=False)
        self.phone.setUseableChars(u"0123456789")
        lst = [ (_("Username"), self.username),
                (_("Password"), self.password),
                (_("Phone number"), self.phone) ]
        self["list"] = ConfigList(lst)
        self["key_green"] = Button("")
        self["key_red"] = Button("")
        self["state"] = Label("")
        self["actions"] = NumberActionMap(["ModemActions"],
        {
            "cancel": self.close,
            "left": self.keyLeft,
            "right": self.keyRight,
            "connect": self.connect,
            "disconnect": self.disconnect,
            "deleteForward": self.deleteForward,
            "deleteBackward": self.deleteBackward,
            "0": self.keyNumber,
            "1": self.keyNumber,
            "2": self.keyNumber,
            "3": self.keyNumber,
            "4": self.keyNumber,
            "5": self.keyNumber,
            "6": self.keyNumber,
            "7": self.keyNumber,
            "8": self.keyNumber,
            "9": self.keyNumber
        }, -1)
        # List navigation is routed to nothing() and toggled via
        # setEnabled() in updateGui() while a connection is active.
        self["ListActions"] = ActionMap(["ListboxDisableActions"],
        {
            "moveUp": self.nothing,
            "moveDown": self.nothing,
            "moveTop": self.nothing,
            "moveEnd": self.nothing,
            "pageUp": self.nothing,
            "pageDown": self.nothing
        }, -1)
        # Timer appends a '.' to the state label once a second while dialing.
        self.stateTimer = eTimer()
        self.stateTimer.callback.append(self.stateLoop)
        conn.appClosed.append(self.pppdClosed)
        conn.dataAvail.append(self.dataAvail)
        Screen.__init__(self, session)
        self.onClose.append(self.__closed)
        self.onLayoutFinish.append(self.__layoutFinished)

    def __layoutFinished(self):
        # Reflect an already-running pppd (screen reopened while connected).
        global conn
        if conn.running():
            self["state"].setText(_("Connected!"));
            self.green_function = NONE
            self.red_function = DISCONNECT
        else:
            self.green_function = CONNECT
            self.red_function = NONE
        self.updateGui()

    def __closed(self):
        # Screen teardown: detach our callbacks from the shared container,
        # abort a half-finished dial attempt and persist the form values.
        global connected
        conn.appClosed.remove(self.pppdClosed)
        conn.dataAvail.remove(self.dataAvail)
        if not connected:
            conn.sendCtrlC()
        setOptions(self.phone.getText(), self.username.getText())
        setSecretString(self.username.getText() + ' * ' + self.password.getText())

    def stateLoop(self):
        # Dialing "animation": append one dot per timer tick.
        txt = self["state"].getText()
        txt += '.'
        self["state"].setText(txt)

    def connect(self):
        if self.green_function == CONNECT:
            global gateway
            # Remember the current default route so it can be restored on
            # disconnect, then hand routing over to pppd.
            gateway = getDefaultGateway()
            self["state"].setText(_("Dialing:"))
            system("route del default")
            system("modprobe ppp_async");
            self.stateTimer.start(1000,False)
            # Persist the current form values before pppd reads them.
            setOptions(self.phone.getText(), self.username.getText())
            setSecretString(self.username.getText() + ' * ' + self.password.getText())
            ret = conn.execute("pppd", "pppd", "-d", "-detach")
            if ret:
                print "execute pppd failed!"
                self.pppdClosed(ret)
                pppdClosed(ret)
            self.green_function = NONE
            self.red_function = ABORT
            self.updateGui()

    def disconnect(self):
        # Both "Abort" and "Disconnect" simply interrupt pppd.
        conn.sendCtrlC()
        self.red_function = NONE
        self.updateGui()

    def keyLeft(self):
        # Editing is only allowed while not connected/connecting.
        if self.green_function == CONNECT:
            self["list"].handleKey(KEY_LEFT)

    def keyRight(self):
        if self.green_function == CONNECT:
            self["list"].handleKey(KEY_RIGHT)

    def keyNumber(self, number):
        if self.green_function == CONNECT:
            self["list"].handleKey(KEY_0 + number)

    def deleteForward(self):
        if self.green_function == CONNECT:
            self["list"].handleKey(KEY_DELETE)

    def deleteBackward(self):
        if self.green_function == CONNECT:
            self["list"].handleKey(KEY_BACKSPACE)

    def pppdClosed(self, retval):
        # Instance-level pppd exit hook: reset the screen to its idle state.
        global connected
        self.stateTimer.stop()
        self.red_function = NONE
        self.green_function = CONNECT
        self["state"].setText("")
        self.updateGui()
        connected = False

    def dataAvail(self, text):
        # Parse pppd console output and advance the on-screen progress log.
        if text.find("Serial connection established") != -1:
            tmp = self["state"].getText()
            tmp += "OK\nLogin:"
            self["state"].setText(tmp)
        if text.find("PAP authentication succeeded") != -1:
            tmp = self["state"].getText()
            tmp += "OK\n";
            self["state"].setText(tmp)
            self.stateTimer.stop()
        if text.find("ip-up finished") != -1:
            global connected
            tmp = self["state"].getText()
            tmp += "Connected :)\n"
            self["state"].setText(tmp)
            self.red_function = DISCONNECT
            connected=True
        if text.find("Connect script failed") != -1:
            tmp = self["state"].getText()
            tmp += "FAILED\n"
            self["state"].setText(tmp)
            self.stateTimer.stop()
            self.red_function = NONE
            self.green_function = CONNECT
        self.updateGui()

    def updateGui(self):
        # Map the green/red function codes onto button labels and toggle
        # list editability (only editable while idle, i.e. green==CONNECT).
        if self.red_function == NONE:
            self["key_red"].setText("")
        elif self.red_function == DISCONNECT:
            self["key_red"].setText(_("Disconnect"))
        elif self.red_function == ABORT:
            self["key_red"].setText(_("Abort"))
        if self.green_function == NONE:
            self["key_green"].setText("")
        elif self.green_function == CONNECT:
            self["key_green"].setText(_("Connect"))
        focus_enabled = self.green_function == CONNECT
        self["list"].instance.setSelectionEnable(focus_enabled)
        self["ListActions"].setEnabled(not focus_enabled)
def main(session, **kwargs):
    # Plugin entry point: open the modem setup screen.
    session.open(ModemSetup)
def Plugins(**kwargs):
    # Register this plugin in the enigma2 plugin menu.
    return PluginDescriptor(name="Modem", description="plugin to connect to internet via builtin modem", where = PluginDescriptor.WHERE_PLUGINMENU, needsRestart = False, fnc=main)
| gpl-2.0 |
metno/diana-extras | CAP_to_KML/cap2kml.py | 1 | 10261 | #!/usr/bin/env python
# Copyright (C) 2015 MET Norway
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Converts Common Alerting Protocol (CAP) files into KML files suitable for
use with Diana (http://diana.met.no).
Usage: cap2kml.py <CAP file> [<KML file for Diana> [<input file for bdiana>]]
Parses and validates the given CAP file. If an output KML file is specified,
the KML text is written to the file; otherwise it is written to stdout.
If a bdiana input file is specified, this is created to contain the necessary
plot commands used to generate image files for each of the times used in the
original CAP file."""
import math, os, sys
import datetime, dateutil.parser
from lxml.etree import Element, ElementTree, SubElement
from lxml import etree
bdiana_template = """
buffersize=600x800
colour=COLOUR
filename=%(image file)s
output=PNG
setupfile=/etc/diana/setup/diana.setup-COMMON
settime=%(warning time)s
PLOT
MAP backcolour=white map=Gshhs-Auto contour=on cont.colour=black cont.linewidth=1 cont.linetype=solid cont.zorder=1 land=on land.colour=200:200:200 land.zorder=0 lon=off lat=off frame=off
AREA name=Norge
DRAWING file=%(kml file)s
LABEL data font=BITMAPFONT fontsize=8
LABEL text="$day $date $auto UTC" tcolour=red bcolour=black fcolour=white:200 polystyle=both halign=left valign=top font=BITMAPFONT fontsize=8
ENDPLOT
"""
# Define some common style properties.
style_properties = {'type': 'Dangerous weather warning'}
def find_properties(element, names, nsmap):
    """Collect the text of selected CAP subelements of *element*.

    Each entry in *names* is looked up as a cap:-prefixed descendant
    (using *nsmap* to resolve the prefix) and, when present, its text is
    stored under that name in the returned dictionary.  Missing elements
    are simply omitted.
    """
    found = {}
    for wanted in names:
        node = element.find('.//cap:%s' % wanted, nsmap)
        if node is None:
            continue
        found[wanted] = node.text
    return found
def write_extended_data_values(properties, extdata, prefix):
    """Writes the contents of the properties dictionary to the XML element,
    extdata, containing the extended data values, giving each piece of data
    the specified prefix string."""
    for key, value in properties.items():
        # Nested dictionaries are flattened recursively by extending the
        # prefix, so {'a': {'b': 1}} becomes a Data element named
        # prefix + "a:b".
        if type(value) == dict:
            write_extended_data_values(value, extdata, prefix + key + ":")
        else:
            data = SubElement(extdata, 'Data')
            data.set('name', prefix + key)
            # Python 2: coerce arbitrary values to unicode text for lxml.
            SubElement(data, 'value').text = unicode(value)
if __name__ == "__main__":
    # Command line: cap2kml.py <CAP file> [<KML file> [<bdiana input file>]]
    if not 2 <= len(sys.argv) <= 4:
        sys.stderr.write("Usage: %s <CAP file> [<KML file for Diana> [<input file for bdiana>]]\n" % sys.argv[0])
        sys.exit(1)
    cap_file = sys.argv[1]
    if len(sys.argv) >= 3:
        kml_file = sys.argv[2]
    else:
        kml_file = None
    if len(sys.argv) == 4:
        input_file = sys.argv[3]
    else:
        input_file = None

    # Collect the starting times used in the CAP file.
    times = set()

    # Load the CAP schema.
    # NOTE(review): the schema path is relative to the current working
    # directory, not the script location — confirm this is intended.
    schema_doc = etree.parse(os.path.join("schemas", "CAP-v1.2.xsd"))
    schema = etree.XMLSchema(schema_doc)

    # Parse and validate the CAP file.
    root = etree.parse(cap_file)
    nsmap = {'cap': 'urn:oasis:names:tc:emergency:cap:1.2'}
    if not schema.validate(root):
        sys.stderr.write("Error: CAP file '%s' is not valid.\n" % cap_file)
        sys.exit(1)

    # Obtain basic information about the alert.
    basic_info = find_properties(root, ['identifier', 'sender', 'sent',
                                        'status', 'msgType', 'scope'], nsmap)

    kml = Element('kml')
    kml.set('xmlns', "http://www.opengis.net/kml/2.2")
    doc = SubElement(kml, 'Document')

    # Obtain each info element in the file.
    for info in root.findall('.//cap:info', nsmap):

        # Create a folder for each info element in the KML file.
        folder = SubElement(doc, 'Folder')
        name = SubElement(folder, 'name')
        name.text = info.find('.//cap:event', nsmap).text
        optional_info = find_properties(info, ['headline', 'description'], nsmap)

        # Each info element may have effective and expires elements, but they
        # are optional.
        effective = info.find('.//cap:effective', nsmap)
        expires = info.find('.//cap:expires', nsmap)

        # We need either effective and expires properties or the time the
        # message was sent and the expires property.
        if expires is not None:
            if effective is not None:
                fromtime = dateutil.parser.parse(effective.text).strftime('%Y-%m-%dT%H:%M:%SZ')
            else:
                fromtime = dateutil.parser.parse(basic_info['sent']).strftime('%Y-%m-%dT%H:%M:%SZ')
            # Record the starting time for later use.
            times.add(fromtime)
            timespan = SubElement(folder, 'TimeSpan')
            begin = SubElement(timespan, 'begin')
            begin.text = fromtime
            end = SubElement(timespan, 'end')
            end.text = dateutil.parser.parse(expires.text).strftime('%Y-%m-%dT%H:%M:%SZ')

        # Compile a dictionary of properties for attributes in the info
        # element for inclusion in each Placemark.
        properties = find_properties(info, ['category', 'severity', 'urgency', 'certainty'], nsmap)

        # Examine each area element in the info element.
        for area in info.findall('.//cap:area', nsmap):
            areaDesc = area.find('.//cap:areaDesc', nsmap)
            placemark = SubElement(folder, 'Placemark')
            SubElement(placemark, 'name').text = optional_info.get('headline', '')
            SubElement(placemark, 'description').text = areaDesc.text
            extdata = SubElement(placemark, 'ExtendedData')
            data = SubElement(extdata, 'Data')
            data.set('name', u'met:objectType')
            SubElement(data, 'value').text = 'PolyLine'

            # Add area-specific properties to the ones common to the info element.
            area_properties = find_properties(area, ['altitude', 'ceiling'], nsmap)
            geocode = area.find('.//cap:geocode', nsmap)
            if geocode is not None:
                area_properties['geocode:name'] = geocode.find('.//cap:valueName', nsmap).text
                area_properties['geocode:value'] = geocode.find('.//cap:value', nsmap).text
            area_properties.update(properties)

            # Write the info properties as extended data values.
            write_extended_data_values(area_properties, extdata, "met:info:cap:")
            # Write the common style properties.
            write_extended_data_values(style_properties, extdata, "met:style:")

            # If the area contains polygons then transfer their coordinates
            # to the KML file.
            for polygon in area.findall('.//cap:polygon', nsmap):
                kml_polygon = SubElement(placemark, 'Polygon')
                SubElement(kml_polygon, 'tessellate').text = '1'
                boundary = SubElement(kml_polygon, 'outerBoundaryIs')
                ring = SubElement(boundary, 'LinearRing')
                coordinates = SubElement(ring, 'coordinates')
                coordinates.text = ''
                # Coordinates are specified as latitude,longitude in CAP files
                # so we need to transpose them for KML. The first and last
                # points should already be the same.
                for coord in polygon.text.split():
                    if not coord:
                        continue
                    lat, lon = coord.split(',')
                    coordinates.text += lon + ',' + lat + '\n'

            # If the area contains circles then transfer their coordinates
            # to the KML file as a polygon.
            for circle in area.findall('.//cap:circle', nsmap):
                kml_polygon = SubElement(placemark, 'Polygon')
                SubElement(kml_polygon, 'tessellate').text = '1'
                boundary = SubElement(kml_polygon, 'outerBoundaryIs')
                ring = SubElement(boundary, 'LinearRing')
                coordinates = SubElement(ring, 'coordinates')
                coordinates.text = ''
                # Convert the circle with the given centre and radius to a
                # polygon with 20 points plus the first point again.
                # NOTE(review): (i/20.0) * (math.pi/180) only sweeps a tiny
                # arc (well under one radian in total); a full circle would
                # need the angle to reach 2*pi — confirm intended output.
                centre, radius = circle.text.strip().split()
                clat, clon = map(float, centre.split(','))
                radius = float(radius)
                i = 0
                while i <= 20:
                    lat = clat + (radius * math.cos((i/20.0) * (math.pi/180)))
                    lon = clon + (radius * math.sin((i/20.0) * (math.pi/180)))
                    coordinates.text += '%f,%f\n' % (lon, lat)
                    i += 1

    if not kml_file:
        f = sys.stdout
    else:
        f = open(kml_file, 'wb')

    # Write the KML file.
    # NOTE(review): when no KML file is given, f is sys.stdout and the
    # close() below closes the process's stdout — confirm acceptable.
    ElementTree(kml).write(f, encoding='UTF-8', xml_declaration=True, pretty_print=True)
    f.close()

    if input_file:
        stem = os.path.splitext(kml_file)[0]
        f = open(input_file, 'w')
        f.write("# Created by cap2kml.py at %s.\n" % datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'))
        # Create an input specification for bdiana and write it to a file,
        # one plot section per distinct warning start time.
        times = list(times)
        times.sort()
        i = 0
        for time in times:
            f.write(bdiana_template % {'image file': '%s-%i.png' % (stem, i),
                                       'warning time': time,
                                       'kml file': kml_file})
            i += 1
        f.close()

    sys.exit()
| gpl-2.0 |
Azure/azure-sdk-for-python | sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_assembly_clr_py3.py | 1 | 1741 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .catalog_item_py3 import CatalogItem
class USqlAssemblyClr(CatalogItem):
    """Catalog item describing a U-SQL assembly CLR in a Data Lake
    Analytics database.

    :param compute_account_name: the name of the Data Lake Analytics account.
    :type compute_account_name: str
    :param version: the version of the catalog item.
    :type version: str
    :param database_name: the name of the database holding the assembly.
    :type database_name: str
    :param name: the name of the assembly.
    :type name: str
    :param clr_name: the name of the CLR.
    :type clr_name: str
    """

    # Maps Python attribute names onto the wire-format keys/types used by
    # the serialization machinery.
    _attribute_map = {
        'compute_account_name': {'key': 'computeAccountName', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'},
        'database_name': {'key': 'databaseName', 'type': 'str'},
        'name': {'key': 'assemblyClrName', 'type': 'str'},
        'clr_name': {'key': 'clrName', 'type': 'str'},
    }

    def __init__(self, *, compute_account_name: str=None, version: str=None, database_name: str=None, name: str=None, clr_name: str=None, **kwargs) -> None:
        # Account name and version are handled by the CatalogItem base
        # class; the remaining fields are specific to this item type.
        super().__init__(compute_account_name=compute_account_name, version=version, **kwargs)
        self.database_name = database_name
        self.name = name
        self.clr_name = clr_name
| mit |
fedora-infra/anitya | anitya/tests/lib/backends/test_gnu.py | 1 | 5108 | # -*- coding: utf-8 -*-
#
# Copyright © 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
"""
anitya tests for the custom backend.
"""
import unittest
import mock
import anitya.lib.backends.gnu as backend
from anitya.db import models
from anitya.lib.exceptions import AnityaPluginException
from anitya.tests.base import DatabaseTestCase, create_distro
# Backend name as registered by the GNU project backend plugin.
BACKEND = "GNU project"
class GnuBackendtests(DatabaseTestCase):
    """Tests for the GNU project backend."""

    def setUp(self):
        """Set up the environment; run before every test."""
        super(GnuBackendtests, self).setUp()
        create_distro(self.session)
        self.create_project()

    def create_project(self):
        """Create some basic projects to work with.

        Project ids are assigned in insertion order: 1=gnash (valid GNU
        project), 2=fake (no version_url), 3=subsurface (non-GNU URL).
        """
        project = models.Project(
            name="gnash",
            homepage="https://www.gnu.org/software/gnash/",
            version_url="https://ftp.gnu.org/pub/gnu/gnash/",
            backend=BACKEND,
        )
        self.session.add(project)
        self.session.commit()

        project = models.Project(
            name="fake",
            homepage="https://pypi.python.org/pypi/repo_manager_fake",
            backend=BACKEND,
        )
        self.session.add(project)
        self.session.commit()

        project = models.Project(
            name="subsurface",
            homepage="https://subsurface-divelog.org/",
            version_url="https://subsurface-divelog.org/downloads/",
            backend=BACKEND,
        )
        self.session.add(project)
        self.session.commit()

    def test_custom_get_version(self):
        """Test the get_version function of the GNU backend."""
        pid = 1
        project = models.Project.get(self.session, pid)
        exp = "0.8.10"
        obs = backend.GnuBackend.get_version(project)
        self.assertEqual(obs, exp)

        # Projects 2 and 3 cannot be resolved and must raise.
        pid = 2
        project = models.Project.get(self.session, pid)
        self.assertRaises(
            AnityaPluginException, backend.GnuBackend.get_version, project
        )

        pid = 3
        project = models.Project.get(self.session, pid)
        self.assertRaises(
            AnityaPluginException, backend.GnuBackend.get_version, project
        )

    def test_get_version_url(self):
        """Assert that the correct GNU FTP url is derived from the name."""
        project = models.Project(
            name="test", homepage="http://example.org", backend=BACKEND
        )
        exp = "https://ftp.gnu.org/gnu/test/"
        obs = backend.GnuBackend.get_version_url(project)
        self.assertEqual(obs, exp)

    def test_custom_get_versions(self):
        """Test the get_versions function of the GNU backend."""
        pid = 1
        project = models.Project.get(self.session, pid)
        exp = [
            "0.7.1",
            "0.7.2",
            "0.8.0",
            "0.8.1",
            "0.8.2",
            "0.8.3",
            "0.8.4",
            "0.8.5",
            "0.8.6",
            "0.8.7",
            "0.8.8",
            "0.8.9",
            "0.8.10",
        ]
        obs = backend.GnuBackend.get_ordered_versions(project)
        self.assertEqual(obs, exp)

        pid = 2
        project = models.Project.get(self.session, pid)
        self.assertRaises(
            AnityaPluginException, backend.GnuBackend.get_version, project
        )

        pid = 3
        project = models.Project.get(self.session, pid)
        self.assertRaises(
            AnityaPluginException, backend.GnuBackend.get_version, project
        )

    def test_get_versions_not_modified(self):
        """Assert that a HTTP 304 (not modified) response yields no versions."""
        pid = 1
        project = models.Project.get(self.session, pid)
        exp_url = "https://ftp.gnu.org/gnu/gnash/"

        with mock.patch("anitya.lib.backends.BaseBackend.call_url") as m_call:
            m_call.return_value = mock.Mock(status_code=304)
            versions = backend.GnuBackend.get_versions(project)

            m_call.assert_called_with(exp_url, last_change=None)
            self.assertEqual(versions, [])
if __name__ == "__main__":
    # Allow running this test module directly.
    SUITE = unittest.TestLoader().loadTestsFromTestCase(GnuBackendtests)
    unittest.TextTestRunner(verbosity=2).run(SUITE)
| gpl-2.0 |
gdimitris/ChessPuzzlerBackend | Virtual_Environment/lib/python2.7/site-packages/migrate/versioning/shell.py | 77 | 6505 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The migrate command-line tool."""
import sys
import inspect
import logging
from optparse import OptionParser, BadOptionError
from migrate import exceptions
from migrate.versioning import api
from migrate.versioning.config import *
from migrate.versioning.util import asbool
import six
# Short command aliases mapped onto the corresponding api functions.
alias = dict(
    s=api.script,
    vc=api.version_control,
    dbv=api.db_version,
    v=api.version,
)

def alias_setup():
    # Expose each alias as an attribute of the api module so main()'s
    # getattr(api, command) lookup finds it by name.
    global alias
    for key, val in six.iteritems(alias):
        setattr(api, key, val)

alias_setup()
class PassiveOptionParser(OptionParser):
    """OptionParser variant that passes unknown --long options through as
    positional arguments instead of raising an error."""

    def _process_args(self, largs, rargs, values):
        """little hack to support all --some_option=value parameters"""

        while rargs:
            arg = rargs[0]
            if arg == "--":
                # Conventional end-of-options marker.
                del rargs[0]
                return
            elif arg[0:2] == "--":
                # if parser does not know about the option
                # pass it along (make it anonymous)
                try:
                    opt = arg.split('=', 1)[0]
                    self._match_long_opt(opt)
                except BadOptionError:
                    largs.append(arg)
                    del rargs[0]
                else:
                    self._process_long_opt(rargs, values)
            elif arg[:1] == "-" and len(arg) > 1:
                self._process_short_opts(rargs, values)
            elif self.allow_interspersed_args:
                largs.append(arg)
                del rargs[0]
def main(argv=None, **kwargs):
    """Shell interface to :mod:`migrate.versioning.api`.

    kwargs are default options that can be overriden with passing
    --some_option as command line option

    :param disable_logging: Let migrate configure logging
    :type disable_logging: bool
    """
    if argv is not None:
        argv = argv
    else:
        argv = list(sys.argv[1:])
    commands = list(api.__all__)
    commands.sort()

    usage = """%%prog COMMAND ...

    Available commands:
        %s

    Enter "%%prog help COMMAND" for information on a particular command.
    """ % '\n\t'.join(["%s - %s" % (command.ljust(28), api.command_desc.get(command)) for command in commands])

    parser = PassiveOptionParser(usage=usage)
    parser.add_option("-d", "--debug",
                      action="store_true",
                      dest="debug",
                      default=False,
                      help="Shortcut to turn on DEBUG mode for logging")
    parser.add_option("-q", "--disable_logging",
                      action="store_true",
                      dest="disable_logging",
                      default=False,
                      help="Use this option to disable logging configuration")
    help_commands = ['help', '-h', '--help']
    HELP = False

    try:
        command = argv.pop(0)
        if command in help_commands:
            HELP = True
            command = argv.pop(0)
    except IndexError:
        # No command given at all: show global help.
        parser.print_help()
        return

    command_func = getattr(api, command, None)
    if command_func is None or command.startswith('_'):
        parser.error("Invalid command %s" % command)

    parser.set_usage(inspect.getdoc(command_func))
    # Register one anonymous --<arg> option per parameter of the command
    # function so they can be supplied by name on the command line.
    f_args, f_varargs, f_kwargs, f_defaults = inspect.getargspec(command_func)
    for arg in f_args:
        parser.add_option(
            "--%s" % arg,
            dest=arg,
            action='store',
            type="string")

    # display help of the current command
    if HELP:
        parser.print_help()
        return

    options, args = parser.parse_args(argv)

    # override kwargs with anonymous parameters
    override_kwargs = dict()
    for arg in list(args):
        if arg.startswith('--'):
            args.remove(arg)
            if '=' in arg:
                opt, value = arg[2:].split('=', 1)
            else:
                # A bare --flag becomes a boolean True.
                opt = arg[2:]
                value = True
            override_kwargs[opt] = value

    # override kwargs with options if user is overwriting
    for key, value in six.iteritems(options.__dict__):
        if value is not None:
            override_kwargs[key] = value

    # arguments that function accepts without passed kwargs
    f_required = list(f_args)
    candidates = dict(kwargs)
    candidates.update(override_kwargs)
    for key, value in six.iteritems(candidates):
        if key in f_args:
            f_required.remove(key)

    # map function arguments to parsed arguments
    for arg in args:
        try:
            kw = f_required.pop(0)
        except IndexError:
            parser.error("Too many arguments for command %s: %s" % (command,
                arg))
        kwargs[kw] = arg

    # apply overrides
    kwargs.update(override_kwargs)

    # configure options
    for key, value in six.iteritems(options.__dict__):
        kwargs.setdefault(key, value)

    # configure logging
    if not asbool(kwargs.pop('disable_logging', False)):
        # filter to log =< INFO into stdout and rest to stderr
        class SingleLevelFilter(logging.Filter):
            def __init__(self, min=None, max=None):
                self.min = min or 0
                self.max = max or 100

            def filter(self, record):
                return self.min <= record.levelno <= self.max

        logger = logging.getLogger()
        h1 = logging.StreamHandler(sys.stdout)
        f1 = SingleLevelFilter(max=logging.INFO)
        h1.addFilter(f1)
        h2 = logging.StreamHandler(sys.stderr)
        f2 = SingleLevelFilter(min=logging.WARN)
        h2.addFilter(f2)
        logger.addHandler(h1)
        logger.addHandler(h2)

        if options.debug:
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.INFO)

    log = logging.getLogger(__name__)

    # check if all args are given
    try:
        num_defaults = len(f_defaults)
    except TypeError:
        # f_defaults is None when the function declares no defaults.
        num_defaults = 0
    f_args_default = f_args[len(f_args) - num_defaults:]
    required = list(set(f_required) - set(f_args_default))
    required.sort()
    if required:
        parser.error("Not enough arguments for command %s: %s not specified" \
            % (command, ', '.join(required)))

    # handle command
    try:
        ret = command_func(**kwargs)
        if ret is not None:
            log.info(ret)
    except (exceptions.UsageError, exceptions.KnownError) as e:
        parser.error(e.args[0])
if __name__ == "__main__":
    # Script entry point.
    main()
| mit |
philipn/sycamore | Sycamore/support/pytz/tests/test_tzinfo.py | 2 | 16863 | # -*- coding: ascii -*-
import sys, os, os.path
import unittest, doctest
import cPickle as pickle
from datetime import datetime, tzinfo, timedelta
if __name__ == '__main__':
# Only munge path if invoked as a script. Testrunners should have setup
# the paths already
sys.path.insert(0, os.path.abspath(os.path.join(os.pardir, os.pardir)))
import pytz
from pytz import reference
# I test for expected version to ensure the correct version of pytz is
# actually being installed.
EXPECTED_VERSION='2007d'

# strftime format used for round-trip comparisons throughout these tests.
fmt = '%Y-%m-%d %H:%M:%S %Z%z'

NOTIME = timedelta(0)  # zero offset, for UTC/GMT assertions

# GMT is a tzinfo.StaticTzInfo--the class we primarily want to test--while
# UTC is reference implementation. They both have the same timezone meaning.
UTC = pytz.timezone('UTC')
GMT = pytz.timezone('GMT')
class BasicTest(unittest.TestCase):
    """Sanity checks: correct pytz version and basic UTC/GMT behaviour."""

    def testVersion(self):
        # Ensuring the correct version of pytz has been loaded
        self.failUnlessEqual(EXPECTED_VERSION, pytz.__version__,
            'Incorrect pytz version loaded. Import path is stuffed '
            'or this test needs updating. (Wanted %s, got %s)'
            % (EXPECTED_VERSION, pytz.__version__)
            )

    def testGMT(self):
        # GMT must behave like UTC: zero offset, no DST, and equal to the
        # same instant labelled UTC.
        now = datetime.now(tz=GMT)
        self.failUnless(now.utcoffset() == NOTIME)
        self.failUnless(now.dst() == NOTIME)
        self.failUnless(now.timetuple() == now.utctimetuple())
        self.failUnless(now==now.replace(tzinfo=UTC))

    def testReferenceUTC(self):
        now = datetime.now(tz=UTC)
        self.failUnless(now.utcoffset() == NOTIME)
        self.failUnless(now.dst() == NOTIME)
        self.failUnless(now.timetuple() == now.utctimetuple())
class PicklingTest(unittest.TestCase):
    """Pickle round-trip tests: unpickling must return the identical
    singleton tzinfo instance, even across database updates."""

    def _roundtrip_tzinfo(self, tz):
        p = pickle.dumps(tz)
        unpickled_tz = pickle.loads(p)
        self.failUnless(tz is unpickled_tz, '%s did not roundtrip' % tz.zone)

    def _roundtrip_datetime(self, dt):
        # Ensure that the tzinfo attached to a datetime instance
        # is identical to the one returned. This is important for
        # DST timezones, as some state is stored in the tzinfo.
        tz = dt.tzinfo
        p = pickle.dumps(dt)
        unpickled_dt = pickle.loads(p)
        unpickled_tz = unpickled_dt.tzinfo
        self.failUnless(tz is unpickled_tz, '%s did not roundtrip' % tz.zone)

    def testDst(self):
        # Every per-transition tzinfo of a DST zone must roundtrip.
        tz = pytz.timezone('Europe/Amsterdam')
        dt = datetime(2004, 2, 1, 0, 0, 0)

        for localized_tz in tz._tzinfos.values():
            self._roundtrip_tzinfo(localized_tz)
            self._roundtrip_datetime(dt.replace(tzinfo=localized_tz))

    def testRoundtrip(self):
        dt = datetime(2004, 2, 1, 0, 0, 0)
        for zone in pytz.all_timezones:
            tz = pytz.timezone(zone)
            self._roundtrip_tzinfo(tz)

    def testDatabaseFixes(self):
        # Hack the pickle to make it refer to a timezone abbreviation
        # that does not match anything. The unpickler should be able
        # to repair this case
        tz = pytz.timezone('Australia/Melbourne')
        p = pickle.dumps(tz)
        tzname = tz._tzname
        hacked_p = p.replace(tzname, '???')
        self.failIfEqual(p, hacked_p)
        unpickled_tz = pickle.loads(hacked_p)
        self.failUnless(tz is unpickled_tz)

        # Simulate a database correction. In this case, the incorrect
        # data will continue to be used.
        p = pickle.dumps(tz)
        new_utcoffset = tz._utcoffset.seconds + 42
        hacked_p = p.replace(str(tz._utcoffset.seconds), str(new_utcoffset))
        self.failIfEqual(p, hacked_p)
        unpickled_tz = pickle.loads(hacked_p)
        self.failUnlessEqual(unpickled_tz._utcoffset.seconds, new_utcoffset)
        self.failUnless(tz is not unpickled_tz)

    def testOldPickles(self):
        # Ensure that applications serializing pytz instances as pickles
        # have no troubles upgrading to a new pytz release. These pickles
        # where created with pytz2006j
        east1 = pickle.loads(
            "cpytz\n_p\np1\n(S'US/Eastern'\np2\nI-18000\n"
            "I0\nS'EST'\np3\ntRp4\n."
            )
        east2 = pytz.timezone('US/Eastern')
        self.failUnless(east1 is east2)

        # Confirm changes in name munging between 2006j and 2007c cause
        # no problems.
        pap1 = pickle.loads(
            "cpytz\n_p\np1\n(S'America/Port_minus_au_minus_Prince'"
            "\np2\nI-17340\nI0\nS'PPMT'\np3\ntRp4\n."
            )
        pap2 = pytz.timezone('America/Port-au-Prince')
        self.failUnless(pap1 is pap2)

        gmt1 = pickle.loads("cpytz\n_p\np1\n(S'Etc/GMT_plus_10'\np2\ntRp3\n.")
        gmt2 = pytz.timezone('Etc/GMT+10')
        self.failUnless(gmt1 is gmt2)
class USEasternDSTStartTestCase(unittest.TestCase):
    """Base transition test: checks tzname/utcoffset/dst on both sides of
    a timezone transition.  Subclasses override the zone, the transition
    instant and the expected before/after properties."""

    tzinfo = pytz.timezone('US/Eastern')

    # The instant of the changeover itself (testTransition below asserts
    # the 'after' properties apply exactly at this time).
    transition_time = datetime(2002, 4, 7, 7, 0, 0, tzinfo=UTC)

    # Increase for 'flexible' DST transitions due to 1 minute granularity
    # of Python's datetime library
    instant = timedelta(seconds=1)

    # before transition
    before = {
        'tzname': 'EST',
        'utcoffset': timedelta(hours = -5),
        'dst': timedelta(hours = 0),
        }

    # after transition
    after = {
        'tzname': 'EDT',
        'utcoffset': timedelta(hours = -4),
        'dst': timedelta(hours = 1),
        }

    def _test_tzname(self, utc_dt, wanted):
        tzname = wanted['tzname']
        dt = utc_dt.astimezone(self.tzinfo)
        self.failUnlessEqual(dt.tzname(), tzname,
            'Expected %s as tzname for %s. Got %s' % (
                tzname, str(utc_dt), dt.tzname()
                )
            )

    def _test_utcoffset(self, utc_dt, wanted):
        utcoffset = wanted['utcoffset']
        dt = utc_dt.astimezone(self.tzinfo)
        self.failUnlessEqual(
                dt.utcoffset(), wanted['utcoffset'],
                'Expected %s as utcoffset for %s. Got %s' % (
                    utcoffset, utc_dt, dt.utcoffset()
                    )
                )

    def _test_dst(self, utc_dt, wanted):
        dst = wanted['dst']
        dt = utc_dt.astimezone(self.tzinfo)
        self.failUnlessEqual(dt.dst(),dst,
            'Expected %s as dst for %s. Got %s' % (
                dst, utc_dt, dt.dst()
                )
            )

    def test_arithmetic(self):
        utc_dt = self.transition_time

        for days in range(-420, 720, 20):
            delta = timedelta(days=days)

            # Make sure we can get back where we started
            dt = utc_dt.astimezone(self.tzinfo)
            dt2 = dt + delta
            dt2 = dt2 - delta
            self.failUnlessEqual(dt, dt2)

            # Make sure arithmetic crossing DST boundaries ends
            # up in the correct timezone after normalization
            self.failUnlessEqual(
                    (utc_dt + delta).astimezone(self.tzinfo).strftime(fmt),
                    self.tzinfo.normalize(dt + delta).strftime(fmt),
                    'Incorrect result for delta==%d days.  Wanted %r. Got %r'%(
                        days,
                        (utc_dt + delta).astimezone(self.tzinfo).strftime(fmt),
                        self.tzinfo.normalize(dt + delta).strftime(fmt),
                        )
                    )

    def _test_all(self, utc_dt, wanted):
        self._test_utcoffset(utc_dt, wanted)
        self._test_tzname(utc_dt, wanted)
        self._test_dst(utc_dt, wanted)

    def testDayBefore(self):
        self._test_all(
                self.transition_time - timedelta(days=1), self.before
                )

    def testTwoHoursBefore(self):
        self._test_all(
                self.transition_time - timedelta(hours=2), self.before
                )

    def testHourBefore(self):
        self._test_all(
                self.transition_time - timedelta(hours=1), self.before
                )

    def testInstantBefore(self):
        self._test_all(
                self.transition_time - self.instant, self.before
                )

    def testTransition(self):
        self._test_all(
                self.transition_time, self.after
                )

    def testInstantAfter(self):
        self._test_all(
                self.transition_time + self.instant, self.after
                )

    def testHourAfter(self):
        self._test_all(
                self.transition_time + timedelta(hours=1), self.after
                )

    def testTwoHoursAfter(self):
        # NOTE(review): this adds only one hour, same as testHourAfter —
        # probably intended to be timedelta(hours=2); confirm upstream.
        self._test_all(
                self.transition_time + timedelta(hours=1), self.after
                )

    def testDayAfter(self):
        self._test_all(
                self.transition_time + timedelta(days=1), self.after
                )
class USEasternDSTEndTestCase(USEasternDSTStartTestCase):
    """End of DST in US/Eastern, autumn 2002 (EDT -> EST)."""
    tzinfo = pytz.timezone('US/Eastern')
    transition_time = datetime(2002, 10, 27, 6, 0, 0, tzinfo=UTC)
    before = {
        'tzname': 'EDT',
        'utcoffset': timedelta(hours = -4),
        'dst': timedelta(hours = 1),
        }
    after = {
        'tzname': 'EST',
        'utcoffset': timedelta(hours = -5),
        'dst': timedelta(hours = 0),
        }
class USEasternEPTStartTestCase(USEasternDSTStartTestCase):
    """War Time (EWT) -> Peace Time (EPT) switch, August 1945."""
    transition_time = datetime(1945, 8, 14, 23, 0, 0, tzinfo=UTC)
    # Offsets are identical on both sides; only the abbreviation changes.
    before = dict(
        tzname='EWT',
        utcoffset=timedelta(hours=-4),
        dst=timedelta(hours=1),
    )
    after = dict(
        tzname='EPT',
        utcoffset=timedelta(hours=-4),
        dst=timedelta(hours=1),
    )
class USEasternEPTEndTestCase(USEasternDSTStartTestCase):
    """End of Peace Time (EPT) -> EST, September 1945."""
    transition_time = datetime(1945, 9, 30, 6, 0, 0, tzinfo=UTC)
    before = dict(
        tzname='EPT',
        utcoffset=timedelta(hours=-4),
        dst=timedelta(hours=1),
    )
    after = dict(
        tzname='EST',
        utcoffset=timedelta(hours=-5),
        dst=timedelta(hours=0),
    )
class WarsawWMTEndTestCase(USEasternDSTStartTestCase):
    """Warsaw Mean Time -> Central European Time, 1915.

    In 1915, Warsaw changed from Warsaw to Central European time.
    This involved the clocks being set backwards, causing a end-of-DST
    like situation without DST being involved.
    """
    tzinfo = pytz.timezone('Europe/Warsaw')
    transition_time = datetime(1915, 8, 4, 22, 36, 0, tzinfo=UTC)
    before = dict(
        tzname='WMT',
        utcoffset=timedelta(hours=1, minutes=24),
        dst=timedelta(0),
    )
    after = dict(
        tzname='CET',
        utcoffset=timedelta(hours=1),
        dst=timedelta(0),
    )
class VilniusWMTEndTestCase(USEasternDSTStartTestCase):
    """Vilnius WMT -> KMT, end of 1916.

    At the end of 1916, Vilnius changed timezones putting its clock
    forward by 11 minutes 35 seconds. Neither timezone was in DST mode.
    """
    tzinfo = pytz.timezone('Europe/Vilnius')
    # A one-second instant would land inside the 11m35s gap, so use a
    # larger instant that clears it.
    instant = timedelta(seconds=31)
    transition_time = datetime(1916, 12, 31, 22, 36, 00, tzinfo=UTC)
    before = dict(
        tzname='WMT',
        utcoffset=timedelta(hours=1, minutes=24),
        dst=timedelta(0),
    )
    after = dict(
        tzname='KMT',
        utcoffset=timedelta(hours=1, minutes=36),  # Really 1:35:36
        dst=timedelta(0),
    )
class ReferenceUSEasternDSTStartTestCase(USEasternDSTStartTestCase):
    """Same checks run against Python's reference tzinfo implementation."""
    tzinfo = reference.Eastern

    def test_arithmetic(self):
        """Skipped: the reference implementation cannot handle this."""
class ReferenceUSEasternDSTEndTestCase(USEasternDSTEndTestCase):
    """DST end against the reference implementation, with its known quirk."""
    tzinfo = reference.Eastern

    def testHourBefore(self):
        # Python's datetime library has a bug, where the hour before
        # a daylight savings transition is one hour out. For example,
        # at the end of US/Eastern daylight savings time, 01:00 EST
        # occurs twice (once at 05:00 UTC and once at 06:00 UTC),
        # whereas the first should actually be 01:00 EDT.
        # Note that this bug is by design - by accepting this ambiguity
        # for one hour one hour per year, an is_dst flag on datetime.time
        # became unnecessary.
        when = self.transition_time - timedelta(hours=1)
        self._test_all(when, self.after)

    def testInstantBefore(self):
        # Same quirk as testHourBefore, one second before the switch.
        when = self.transition_time - timedelta(seconds=1)
        self._test_all(when, self.after)

    def test_arithmetic(self):
        """Skipped: the reference implementation cannot handle this."""
class LocalTestCase(unittest.TestCase):
    """Localization and normalization behaviour of pytz timezones."""

    def testLocalize(self):
        """localize() must pick the correct historical zone abbreviation."""
        loc_tz = pytz.timezone('Europe/Amsterdam')
        loc_time = loc_tz.localize(datetime(1930, 5, 10, 0, 0, 0))
        # Actually +00:19:32, but Python datetime rounds this
        self.failUnlessEqual(loc_time.strftime('%Z%z'), 'AMT+0020')
        loc_time = loc_tz.localize(datetime(1930, 5, 20, 0, 0, 0))
        # Actually +00:19:32, but Python datetime rounds this
        self.failUnlessEqual(loc_time.strftime('%Z%z'), 'NST+0120')
        loc_time = loc_tz.localize(datetime(1940, 5, 10, 0, 0, 0))
        self.failUnlessEqual(loc_time.strftime('%Z%z'), 'NET+0020')
        loc_time = loc_tz.localize(datetime(1940, 5, 20, 0, 0, 0))
        self.failUnlessEqual(loc_time.strftime('%Z%z'), 'CEST+0200')
        loc_time = loc_tz.localize(datetime(2004, 2, 1, 0, 0, 0))
        self.failUnlessEqual(loc_time.strftime('%Z%z'), 'CET+0100')
        loc_time = loc_tz.localize(datetime(2004, 4, 1, 0, 0, 0))
        self.failUnlessEqual(loc_time.strftime('%Z%z'), 'CEST+0200')
        # Cleanup: removed an unused duplicate
        # "tz = pytz.timezone('Europe/Amsterdam')" assignment here.
        loc_time = loc_tz.localize(datetime(1943, 3, 29, 1, 59, 59))
        self.failUnlessEqual(loc_time.strftime('%Z%z'), 'CET+0100')
        # Switch to US
        loc_tz = pytz.timezone('US/Eastern')
        # End of DST ambiguity check
        loc_time = loc_tz.localize(datetime(1918, 10, 27, 1, 59, 59), is_dst=1)
        self.failUnlessEqual(loc_time.strftime('%Z%z'), 'EDT-0400')
        loc_time = loc_tz.localize(datetime(1918, 10, 27, 1, 59, 59), is_dst=0)
        self.failUnlessEqual(loc_time.strftime('%Z%z'), 'EST-0500')
        self.failUnlessRaises(pytz.AmbiguousTimeError,
            loc_tz.localize, datetime(1918, 10, 27, 1, 59, 59), is_dst=None
        )
        # Weird changes - war time and peace time both is_dst==True
        loc_time = loc_tz.localize(datetime(1942, 2, 9, 3, 0, 0))
        self.failUnlessEqual(loc_time.strftime('%Z%z'), 'EWT-0400')
        loc_time = loc_tz.localize(datetime(1945, 8, 14, 19, 0, 0))
        self.failUnlessEqual(loc_time.strftime('%Z%z'), 'EPT-0400')
        loc_time = loc_tz.localize(datetime(1945, 9, 30, 1, 0, 0), is_dst=1)
        self.failUnlessEqual(loc_time.strftime('%Z%z'), 'EPT-0400')
        loc_time = loc_tz.localize(datetime(1945, 9, 30, 1, 0, 0), is_dst=0)
        self.failUnlessEqual(loc_time.strftime('%Z%z'), 'EST-0500')

    def testNormalize(self):
        """normalize() must repair datetimes that drifted across DST."""
        tz = pytz.timezone('US/Eastern')
        dt = datetime(2004, 4, 4, 7, 0, 0, tzinfo=UTC).astimezone(tz)
        dt2 = dt - timedelta(minutes=10)
        self.failUnlessEqual(
            dt2.strftime('%Y-%m-%d %H:%M:%S %Z%z'),
            '2004-04-04 02:50:00 EDT-0400'
        )
        dt2 = tz.normalize(dt2)
        self.failUnlessEqual(
            dt2.strftime('%Y-%m-%d %H:%M:%S %Z%z'),
            '2004-04-04 01:50:00 EST-0500'
        )

    def testPartialMinuteOffsets(self):
        # utcoffset in Amsterdam was not a whole minute until 1937
        # However, we fudge this by rounding them, as the Python
        # datetime library
        tz = pytz.timezone('Europe/Amsterdam')
        utc_dt = datetime(1914, 1, 1, 13, 40, 28, tzinfo=UTC) # correct
        utc_dt = utc_dt.replace(second=0) # But we need to fudge it
        loc_dt = utc_dt.astimezone(tz)
        self.failUnlessEqual(
            loc_dt.strftime('%Y-%m-%d %H:%M:%S %Z%z'),
            '1914-01-01 14:00:00 AMT+0020'
        )
        # And get back...
        utc_dt = loc_dt.astimezone(UTC)
        self.failUnlessEqual(
            utc_dt.strftime('%Y-%m-%d %H:%M:%S %Z%z'),
            '1914-01-01 13:40:00 UTC+0000'
        )

    def no_testCreateLocaltime(self):
        # It would be nice if this worked, but it doesn't.
        tz = pytz.timezone('Europe/Amsterdam')
        dt = datetime(2004, 10, 31, 2, 0, 0, tzinfo=tz)
        self.failUnlessEqual(
            dt.strftime(fmt),
            '2004-10-31 02:00:00 CET+0100'
        )
def test_suite():
    """Build the suite: pytz doctests plus every TestCase in this module."""
    import test_tzinfo
    suite = unittest.TestSuite()
    for doctest_module in ('pytz', 'pytz.tzinfo'):
        suite.addTest(doctest.DocTestSuite(doctest_module))
    suite.addTest(
        unittest.defaultTestLoader.loadTestsFromModule(test_tzinfo))
    return suite
if __name__ == '__main__':
    # Run the module's full test suite when executed as a script.
    unittest.main(defaultTest='test_suite')
| gpl-2.0 |
wootencl/D3_Jupyter_Data_Visualization | xlrd-0.9.4/xlrd/examples/xlrdnameAPIdemo.py | 34 | 7138 | # -*- coding: cp1252 -*-
##
# Module/script example of the xlrd API for extracting information
# about named references, named constants, etc.
#
# <p>Copyright © 2006 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p>
##
from __future__ import print_function
import xlrd
from xlrd.timemachine import REPR
import sys
import glob
def scope_as_string(book, scope):
    """Return a human-readable description of a defined name's scope."""
    # -1 and -2 are the special scope markers; anything in the sheet
    # index range names a specific sheet.
    if scope == -1:
        return "Global"
    if scope == -2:
        return "Macro/VBA"
    if 0 <= scope < book.nsheets:
        return "sheet #%d (%r)" % (scope, REPR(book.sheet_names()[scope]))
    return "Unknown scope value (%r)" % REPR(scope)
def do_scope_query(book, scope_strg, show_contents=0, f=sys.stdout):
    """Show every defined name visible in the requested scope.

    scope_strg -- a scope number, a sheet name, or "*" for all scopes.
    show_contents -- 0: Don't; 1: Non-empty cells only; 2: All cells
    f -- Open output file handle.
    """
    try:
        qscope = int(scope_strg)
    except ValueError:
        if scope_strg == "*":
            qscope = None # means "all"
        else:
            # so assume it's a sheet name ...
            qscope = book.sheet_names().index(scope_strg)
    # Bug fix: this used "%d" unconditionally, which raised TypeError
    # for the documented "*" case (qscope is None); %r prints ints
    # identically and handles None.
    print("%r => %r" % (scope_strg, qscope), file=f)
    for nobj in book.name_obj_list:
        if qscope is None or nobj.scope == qscope:
            show_name_object(book, nobj, show_contents, f)
def show_name_details(book, name, show_contents=0, f=sys.stdout):
    """
    book -- Book object obtained from xlrd.open_workbook().
    name -- The name that's being investigated.
    show_contents -- 0: Don't; 1: Non-empty cells only; 2: All cells
    f -- Open output file handle.
    """
    # Excel names are case-insensitive, and a single name may be defined
    # several times (once per scope), so the map yields a list.
    matches = book.name_map.get(name.lower())
    if not matches:
        print("%r: unknown name" % name, file=f)
        return
    for name_obj in matches:
        show_name_object(book, name_obj, show_contents, f)
def show_name_details_in_scope(
    book, name, scope_strg, show_contents=0, f=sys.stdout,
    ):
    """Look up *name* in a specific scope, falling back to global scope.

    scope_strg -- a scope number or a sheet name.
    show_contents -- 0: Don't; 1: Non-empty cells only; 2: All cells
    f -- Open output file handle.
    """
    try:
        scope = int(scope_strg)
    except ValueError:
        # so assume it's a sheet name ...
        scope = book.sheet_names().index(scope_strg)
    print("%r => %d" % (scope_strg, scope), file=f)
    name_lcase = name.lower() # Excel names are case-insensitive.
    # This loop runs at most twice: once for the requested scope and,
    # if nothing was found there, once more for the global scope (-1).
    while 1:
        nobj = book.name_and_scope_map.get((name_lcase, scope))
        if nobj:
            break
        print("Name %s not found in scope %d" % (REPR(name), scope), file=f)
        if scope == -1:
            return
        scope = -1 # Try again with global scope
    print("Name %s found in scope %d" % (REPR(name), scope), file=f)
    show_name_object(book, nobj, show_contents, f)
def showable_cell_value(celltype, cellvalue, datemode):
    """Convert a raw cell value into something safely printable."""
    if celltype == xlrd.XL_CELL_DATE:
        # Dates are stored as serial floats; show them as date tuples
        # when valid, or the conversion error otherwise.
        try:
            return xlrd.xldate_as_tuple(cellvalue, datemode)
        except xlrd.XLDateError as e:
            return "%s:%s" % (type(e).__name__, e)
    if celltype == xlrd.XL_CELL_ERROR:
        # Map Excel error codes to their text form.
        return xlrd.error_text_from_code.get(
            cellvalue, '<Unknown error code 0x%02x>' % cellvalue)
    return cellvalue
def show_name_object(book, nobj, show_contents=0, f=sys.stdout):
    """Print one Name object: its scope, formula result and, optionally,
    the contents of the cells it refers to.

    show_contents -- 0: Don't; 1: Non-empty cells only; 2: All cells
    f -- Open output file handle.
    """
    print("\nName: %s, scope: %s (%s)" \
        % (REPR(nobj.name), REPR(nobj.scope), scope_as_string(book, nobj.scope)), file=f)
    res = nobj.result
    print("Formula eval result: %s" % REPR(res), file=f)
    if res is None:
        return
    # result should be an instance of the Operand class
    kind = res.kind
    value = res.value
    if kind >= 0:
        # A scalar, or unknown ... you've seen all there is to see.
        pass
    elif kind == xlrd.oREL:
        # A list of Ref3D objects representing *relative* ranges
        for i in range(len(value)):
            ref3d = value[i]
            print("Range %d: %s ==> %s"% (i, REPR(ref3d.coords), REPR(xlrd.rangename3drel(book, ref3d))), file=f)
    elif kind == xlrd.oREF:
        # A list of Ref3D objects
        for i in range(len(value)):
            ref3d = value[i]
            print("Range %d: %s ==> %s"% (i, REPR(ref3d.coords), REPR(xlrd.rangename3d(book, ref3d))), file=f)
            if not show_contents:
                continue
            datemode = book.datemode
            # Walk every sheet/row/column covered by the 3D range,
            # clipping the range to the sheet's actual dimensions.
            for shx in range(ref3d.shtxlo, ref3d.shtxhi):
                sh = book.sheet_by_index(shx)
                print("   Sheet #%d (%s)" % (shx, sh.name), file=f)
                rowlim = min(ref3d.rowxhi, sh.nrows)
                collim = min(ref3d.colxhi, sh.ncols)
                for rowx in range(ref3d.rowxlo, rowlim):
                    for colx in range(ref3d.colxlo, collim):
                        cty = sh.cell_type(rowx, colx)
                        if cty == xlrd.XL_CELL_EMPTY and show_contents == 1:
                            continue
                        cval = sh.cell_value(rowx, colx)
                        sval = showable_cell_value(cty, cval, datemode)
                        print("      (%3d,%3d) %-5s: %s"
                            % (rowx, colx, xlrd.cellname(rowx, colx), REPR(sval)), file=f)
if __name__ == "__main__":
    def usage():
        # Print the command-line help text.
        text = """
usage: xlrdnameAIPdemo.py glob_pattern name scope show_contents
where:
    "glob_pattern" designates a set of files
    "name" is a name or '*' (all names)
    "scope" is -1 (global) or a sheet number
        or a sheet name or * (all scopes)
    "show_contents" is one of 0 (no show),
        1 (only non-empty cells), or 2 (all cells)
Examples (script name and glob_pattern arg omitted for brevity)
    [Searching through book.name_obj_list]
    * * 0 lists all names
    * * 1 lists all names, showing referenced non-empty cells
    * 1 0 lists all names local to the 2nd sheet
    * Northern 0 lists all names local to the 'Northern' sheet
    * -1 0 lists all names with global scope
    [Initial direct access through book.name_map]
    Sales * 0 lists all occurrences of "Sales" in any scope
    [Direct access through book.name_and_scope_map]
    Revenue -1 0 checks if "Revenue" exists in global scope
"""
        sys.stdout.write(text)
    # Exactly four arguments are required; anything else shows the help.
    if len(sys.argv) != 5:
        usage()
        sys.exit(0)
    arg_pattern = sys.argv[1] # glob pattern e.g. "foo*.xls"
    arg_name = sys.argv[2] # see below
    arg_scope = sys.argv[3] # see below
    arg_show_contents = int(sys.argv[4]) # 0: no show, 1: only non-empty cells,
                                         # 2: all cells
    for fname in glob.glob(arg_pattern):
        book = xlrd.open_workbook(fname)
        if arg_name == "*":
            # Examine book.name_obj_list to find all names
            # in a given scope ("*" => all scopes)
            do_scope_query(book, arg_scope, arg_show_contents)
        elif arg_scope == "*":
            # Using book.name_map to find all usage of a name.
            show_name_details(book, arg_name, arg_show_contents)
        else:
            # Using book.name_and_scope_map to find which if any instances
            # of a name are visible in the given scope, which can be supplied
            # as -1 (global) or a sheet number or a sheet name.
            show_name_details_in_scope(book, arg_name, arg_scope, arg_show_contents)
| mit |
khosrow/metpx | pxStats/bin/tools/fileRenamer.py | 1 | 22632 | #! /usr/bin/env python
"""
#############################################################################################
#
#
# @Name : fileRenamer
#
# @author: Nicholas Lemay
#
# @since: 2007-05-24
#
# @licence : MetPX Copyright (C) 2004-2006 Environment Canada
# MetPX comes with ABSOLUTELY NO WARRANTY; For details type see the file
# named COPYING in the root of the source directory tree.
#
# @summary: This program is to be used to rename all the files wich are named after a
# certain machine name into another machine's name.
#
# Usage: This program can be called from a crontab or from command-line.
#
# For informations about command-line: fileRenamer -h | --help
#
#
##############################################################################################
"""
import commands, os, sys
sys.path.insert(1,os.path.dirname( os.path.abspath(__file__) ) + '/../../../')
from optparse import OptionParser
from fnmatch import fnmatch
from pxStats.lib.StatsPaths import StatsPaths
from pxStats.lib.LanguageTools import LanguageTools
LOCAL_MACHINE = os.uname()[1]
CURRENT_MODULE_ABS_PATH = os.path.abspath(__file__).replace( ".pyc", ".py" )
class Parameters:
    """Value object holding the command-line options of this tool."""

    def __init__(self, clientNames, groupNames, machineNames,
                 overrideConfirmation, newValue, oldValue):
        """
        @param clientNames: Whether the change affects the client names or not.
        @param groupNames: Whether the change affects the group names or not.
        @param machineNames : Whether the change affects the machine names or not.
        @param overrideConfirmation: Whether to skip the confirmation prompt.
        @param newValue: New value that will replace the old value.
        @param oldValue: Old value that needs to be changed.
        """
        self.clientNames = clientNames
        self.groupNames = groupNames
        self.machineNames = machineNames
        self.overrideConfirmation = overrideConfirmation
        self.newValue = newValue
        self.oldValue = oldValue
def getOptionsFromParser( parser ):
    """
    @summary : This method parses the argv
               received when the program
               was called and returns the
               parameters.
    @return : Parameters instance containing
              the parsed values.
    """
    ( options, args ) = parser.parse_args()
    clientNames = options.clientNames
    groupNames = options.groupNames
    # Bug fixes: machineNames was never read, the old/new values were read
    # from attributes that do not exist (addOptions stores them under
    # dest="oldMachineName"/"newMachineName"), and Parameters() was called
    # with five arguments in the wrong order instead of six.
    machineNames = options.machineNames
    overrideConfirmation = options.overrideConfirmation
    oldValue = options.oldMachineName.replace( " ", "" )
    newValue = options.newMachineName.replace( " ", "" )
    return Parameters( clientNames, groupNames, machineNames,
                       overrideConfirmation, newValue, oldValue )
def createParser( ):
    """
    Builds and returns the parser
    """
    # The usage text is localized; the _ translator is installed by
    # setGlobalLanguageParameters() before this function is called.
    usage = _( """
%prog [options]
********************************************
* See doc.txt for more details. *
********************************************
Defaults :
- Default oldMachineName is None.
- Default newMachineName is None.
- Default overrideConfirmation value is False.
Options:
- With -h|--help you can get help on this program.
- With -n|--newValue you can specify the name of the new machine.
- With -o|--oldValue you can specify the name of the old machine.
- With --overrideConfirmation you can specify that you want to override the confirmation request.
Ex1: %prog -h --> Help will be displayed.
Ex2: %prog -o 'machine1' -n 'machine2' --> Convert machine1 to machine2.
Ex3: %prog -o 'm1' -n 'm2' --overrideConfirmation --> M1 to m2, no confirmations asked.
********************************************
* See /doc.txt for more details. *
********************************************""" )
    parser = OptionParser( usage )
    addOptions( parser )
    return parser
def addOptions( parser ):
    """
    @summary: This method is used to add all available options to the option parser.
    @param parser: parser to wich the options need to be added.
    """
    # NOTE(review): -o/-n are stored under dest="oldMachineName" /
    # "newMachineName" -- verify that the code reading the parsed options
    # uses these attribute names and not oldValue/newValue.
    parser.add_option( "-c", "--clientNames", action="store_true", dest = "clientNames", default=False, help= _( "Use if you want to rename files based on a change of client names." ) )
    parser.add_option( "-g", "--groupNames", action="store_true", dest = "groupNames", default=False, help= _( "Use if you want to rename files based on a change of group names." ) )
    parser.add_option( "-m", "--machineNames", action="store_true", dest = "machineNames", default=False, help= _( "Use if you want to rename files based on a change of machine names.") )
    parser.add_option( "-o", "--oldValue", action="store", type="string", dest="oldMachineName", default="",
                       help=_( "Name of the old machine." ) )
    parser.add_option( "-n", "--newValue", action="store", type="string", dest="newMachineName", default="", help=_( "Name of the new machine name.") )
    parser.add_option( "--overrideConfirmation", action="store_true", dest = "overrideConfirmation", default=False, help=_( "Whether or not to override the confirmation request." ) )
def validateParameters( parameters ):
"""
@summary : Validates the content of the Parameters() instance.
@param parameters: Parameters() instance containing
the values chosen by the user.
@note : If illegal use of parameters is found,
application will be terminated.
"""
parameters = Parameters()
if parameters.clientNames == False and parameters.groupNames == False and parameters.machineNames == False :
print _( "Error. You need to choose what kind of change needs to be made." )
print _( "Please select between clientNames, groupNames and machineName. Use -h for further help." )
print _( "Program terminated." )
sys.exit()
elif (parameters.clientNames ^ parameters.groupNames ^ parameters.machineNames ) == False :
print _( "Error. You can only select a single kind of change to be made." )
print _( "Please select between clientNames, groupNames and machineName. Use -h for further help." )
print _( "Program terminated." )
sys.exit()
elif parameters.newValue == "" or parameters.oldValue == "":
print _( "Error. You need to specify both a newValue and an oldValue." )
print _( "Please use the --newValue and --oldValue options. Use -h for further help." )
print _( "Program terminated." )
sys.exit()
elif parameters.newValue == parameters.oldValue :
print _( "Error. The new value needs to be different from the old value." )
print _( "Please make sure values specified with the --newValue and --oldValue options are different. Use -h for further help." )
print _( "Program terminated." )
sys.exit()
def filterentriesStartingWithDots(x):
    """Return True when *x* does not start with a dot.

    Designed for the builtin filter(): keeps regular directory entries
    and drops hidden ones (names beginning with ".").
    """
    return not x.startswith(".")
def renameCurrentDatabasesTimesOfUpdates( oldMachineName, newMachineName ):
    """
    @summary: Renames all the databases updates sporting a certain machine name's( oldMachineName )
              so that they now sport the name of another machine(newMachineName).
    @param oldMachineName: Name of the old machine wich needs to be renamed
    @param newMachineName: Name of the new machine into wich the pickles will be renamed.
    """
    if os.path.isdir( StatsPaths.STATSCURRENTDBUPDATES ):
        fileTypeDirs = os.listdir( StatsPaths.STATSCURRENTDBUPDATES )
        # Drop hidden entries (names starting with a dot).
        fileTypeDirs = filter( filterentriesStartingWithDots ,fileTypeDirs)
        for fileTypeDir in fileTypeDirs:
            path = StatsPaths.STATSCURRENTDBUPDATES + fileTypeDir + '/'
            if os.path.isdir(path) :
                files = os.listdir(path )
                for file in files:
                    # Update files are named "<name>_<machineName>";
                    # only the machine-name suffix is replaced.
                    if fnmatch(file, '*_' + oldMachineName ) :
                        source = path + file
                        splitName = file.split('_')
                        newFile = splitName[0] + '_' + splitName[1].replace(oldMachineName, newMachineName)
                        destination = path + newFile
                        #print "mv %s %s " %( source, destination )
                        status, output = commands.getstatusoutput( "mv %s %s" %(source,destination) )
def renameCurrentDatabases( oldMachineName, newMachineName ):
    """
    @summary: Renames all the databases sporting a certain machine name's( oldMachineName )
              so that they now sport the name of another machine(newMachineName).
    @param oldMachineName: Name of the old machine wich needs to be renamed
    @param newMachineName: Name of the new machine into wich the pickles will be renamed.
    """
    if os.path.isdir( StatsPaths.STATSCURRENTDB ) :
        dataTypeDirs = os.listdir( StatsPaths.STATSCURRENTDB )
        # Drop hidden entries (names starting with a dot).
        dataTypeDirs = filter( filterentriesStartingWithDots, dataTypeDirs )
        for dataTypeDir in dataTypeDirs:
            path = StatsPaths.STATSCURRENTDB + dataTypeDir + '/'
            if os.path.isdir(path):
                files = os.listdir( path )
                for file in files:
                    # Database files are named "<name>_<machineName>";
                    # only the machine-name suffix is replaced.
                    if fnmatch(file, '*_' + oldMachineName ) :
                        source = path + file
                        splitName = file.split('_')
                        newFile = splitName[0] + '_' + splitName[1].replace(oldMachineName, newMachineName)
                        destination = path + newFile
                        #print "mv %s %s " %( source, destination )
                        status, output = commands.getstatusoutput( "mv %s %s" %(source,destination) )
def renameDatabaseBackups(oldMachineName, newMachineName ):
    """
    @summary: Renames all the database backups sporting a certain machine name's( oldMachineName )
              so that they now sport the name of another machine(newMachineName).
    @param oldMachineName: Name of the old machine wich needs to be renamed
    @param newMachineName: Name of the new machine into wich the pickles will be renamed.
    """
    if os.path.isdir(StatsPaths.STATSDBBACKUPS) :
        backupDatesDirs = os.listdir( StatsPaths.STATSDBBACKUPS )
        # Drop hidden entries (names starting with a dot).
        backupDatesDirs = filter( filterentriesStartingWithDots, backupDatesDirs )
        for backupDatesDir in backupDatesDirs:
            path = StatsPaths.STATSDBBACKUPS + backupDatesDir + '/'
            if os.path.isdir(path):
                dataTypeDirs = os.listdir( path )
                for dataTypeDir in dataTypeDirs:
                    # Note: "path" is rebound here to the inner directory.
                    path = StatsPaths.STATSDBBACKUPS + backupDatesDir+ '/' + dataTypeDir + '/'
                    files = os.listdir( path )
                    for file in files:
                        # Backup files are named "<name>_<machineName>".
                        if fnmatch(file, '*_' + oldMachineName ) :
                            source = path + file
                            splitName = file.split('_')
                            newFile = splitName[0] + '_' + splitName[1].replace(oldMachineName, newMachineName)
                            destination = path + newFile
                            #print "mv %s %s " %( source, destination )
                            status, output = commands.getstatusoutput( "mv %s %s" %(source,destination) )
def renamesDatabaseBackupsTimesOfUpdates( oldMachineName, newMachineName ):
    """
    @summary: Renames all the database time of updates backups sporting a certain machine name's( oldMachineName )
              so that they now sport the name of another machine(newMachineName).
    @param oldMachineName: Name of the old machine wich needs to be renamed
    @param newMachineName: Name of the new machine into wich the pickles will be renamed.
    """
    if os.path.isdir(StatsPaths.STATSDBUPDATESBACKUPS):
        backupDatesDirs = os.listdir( StatsPaths.STATSDBUPDATESBACKUPS )
        # Drop hidden entries (names starting with a dot).
        backupDatesDirs = filter( filterentriesStartingWithDots, backupDatesDirs )
        for backupDatesDir in backupDatesDirs:
            path = StatsPaths.STATSDBUPDATESBACKUPS + backupDatesDir + "/"
            if os.path.isdir(path) :
                fileTypeDirs = os.listdir( StatsPaths.STATSDBUPDATESBACKUPS + backupDatesDir )
                for fileTypeDir in fileTypeDirs:
                    # Note: "path" is rebound here to the inner directory.
                    path = StatsPaths.STATSDBUPDATESBACKUPS + backupDatesDir+ '/' + fileTypeDir + '/'
                    files = os.listdir( path )
                    for file in files:
                        # Backup files are named "<name>_<machineName>".
                        if fnmatch(file, '*_' + oldMachineName ) :
                            source = path + file
                            splitName = file.split('_')
                            newFile = splitName[0] + '_' + splitName[1].replace(oldMachineName, newMachineName)
                            destination = path + newFile
                            #print "mv %s %s " %( source, destination )
                            status, output = commands.getstatusoutput( "mv %s %s" %(source,destination) )
def renameDatabases( oldMachineName, newMachineName ):
    """
    @summary: Renames every stats database file named after oldMachineName
              so that it is named after newMachineName instead: the current
              databases, their times of update, and both kinds of backups.
    @param oldMachineName: Name of the old machine wich needs to be renamed
    @param newMachineName: Name of the new machine into wich the pickles will be renamed.
    """
    for renamer in ( renameCurrentDatabases,
                     renameCurrentDatabasesTimesOfUpdates,
                     renameDatabaseBackups,
                     renamesDatabaseBackupsTimesOfUpdates ):
        renamer( oldMachineName, newMachineName )
def renamePickles( oldMachineName, newMachineName ):
    """
    @summary: Renames all the pickles sporting a certain machine name's( oldMachineName )
              so that they now sport the name of another machine(newMachineName).
    @param oldMachineName: Name of the old machine wich needs to be renamed
    @param newMachineName: Name of the new machine into wich the pickles will be renamed.
    """
    # Layout walked here: STATSPICKLES/<client>/<date>/<fileType>/<files>.
    if os.path.isdir( StatsPaths.STATSPICKLES ) :
        clientdirs = os.listdir( StatsPaths.STATSPICKLES )
        # Drop hidden entries (names starting with a dot).
        clientdirs = filter( filterentriesStartingWithDots, clientdirs )
        for clientDir in clientdirs:
            if os.path.isdir( StatsPaths.STATSPICKLES + clientDir ):
                dateDirs = os.listdir( StatsPaths.STATSPICKLES + clientDir )
                for dateDir in dateDirs :
                    if os.path.isdir(StatsPaths.STATSPICKLES + clientDir + '/' + dateDir) :
                        fileTypes = os.listdir( StatsPaths.STATSPICKLES + clientDir + '/' + dateDir )
                        for fileType in fileTypes:
                            path = StatsPaths.STATSPICKLES + clientDir + '/' + dateDir + '/' + fileType + "/"
                            if os.path.isdir(path) :
                                files = os.listdir( path )
                                for file in files:
                                    # Pickles are *prefixed* with the machine
                                    # name; only the first occurrence is replaced.
                                    if fnmatch(file, oldMachineName + '*' ) :
                                        source = path + file
                                        newFile = file.replace(oldMachineName, newMachineName, 1)
                                        destination = path + newFile
                                        #print "mv %s %s " %( source, destination )
                                        status, output = commands.getstatusoutput( "mv %s %s" %(source,destination) )
def renameFileVersions( oldMachineName, newMachineName ):
    """
    @summary: Renames all the file version files sporting a certain machine name's( oldMachineName )
              so that they now sport the name of another machine(newMachineName).
    @param oldMachineName: Name of the old machine wich needs to be renamed
    @param newMachineName: Name of the new machine into wich the pickles will be renamed.
    """
    if os.path.isdir(StatsPaths.STATSFILEVERSIONS) :
        files = os.listdir( StatsPaths.STATSFILEVERSIONS )
        # Drop hidden entries (names starting with a dot).
        files = filter( filterentriesStartingWithDots, files )
        # Bug fix: removed a leftover debug "print file" statement that
        # dumped every directory entry to stdout.
        for file in files:
            # Version files are named "<machineName>_..."; only the
            # first occurrence of the machine name is replaced.
            if fnmatch(file, oldMachineName + '_*' ) :
                source = StatsPaths.STATSFILEVERSIONS + file
                newFile = file.replace(oldMachineName, newMachineName,1)
                destination = StatsPaths.STATSFILEVERSIONS + newFile
                #print "mv %s %s " %( source, destination )
                status, output = commands.getstatusoutput( "mv %s %s" %(source,destination) )
def renameGroupInConfigFile( oldName, newName ):
    """
    @summary: Rewrites the stats config file, replacing the group named
              oldName by newName on the first line that defines it.
    @param oldName: Group name to be replaced.
    @param newName: Replacement group name.
    """
    configFile = StatsPaths.STATSETC + 'config'
    # Bug fix: the previous code called open( open( ... ), "r" ) (and the
    # same for writing), passing a file object where a file name was
    # expected, which raised a TypeError before anything was rewritten.
    fileHandle = open( configFile, "r" )
    linesFromConfigFile = fileHandle.readlines()
    fileHandle.close()
    for i in range( len( linesFromConfigFile ) ):
        # Config lines look like "name = value"; compare the name part
        # with all spaces stripped.
        name = ( linesFromConfigFile[i].split( "=" )[0] ).replace( " ", "" )
        if name == oldName:
            linesFromConfigFile[i] = linesFromConfigFile[i].replace( oldName, newName )
            break
    fileHandle = open( configFile, "w" )
    fileHandle.writelines( linesFromConfigFile )
    fileHandle.close()
def getConfirmation( oldMachineName, newMachineName ):
"""
@summary: asks user if he is sure he wants to rename
all of the pxStats files found on his machine.
@param oldMachineName: Name of the old machine wich needs to be renamed
@param newMachineName: Name of the new machine into wich the pickles will be renamed.
@return: Returns true if confirmation was made, false if it wasn't.
"""
confirmation = False
os.system( 'clear' )
print _( """
###########################################################
# pickleRenamer.py #
# MetPX Copyright (C) 2004-2006 Environment Canada #
###########################################################
""" )
question = _( "Are you sure you want to rename all %s file to %s files (y or n) ? ") %(oldMachineName, newMachineName)
answer = raw_input( question ).replace(' ','').replace( '\n','')
answer = answer.lower()
while( answer != _('y') and answer != _('n') ):
print _("Error. You must either enter y or n.")
answer = raw_input( question )
if answer == _('y'):
confirmation = True
return confirmation
def doRenamingForClients( parameters ):
    """
    @summary : Renames files based on client names.
    @param parameters: Parameters with whom this program was called.
    @return : None
    @note : Stub -- not implemented yet. The commented-out calls below
            appear to sketch the intended behaviour.
    """
    #renameFileVersions()
    #renamePickles()
    #renameDatabases()
    x =2  # placeholder statement so the body is non-empty; TODO: implement.
def doRenamingForGroups( parameters ):
    """
    @summary : Renames files based on group names.
    @param parameters: Parameters with whom this program was called.
    @return : None
    @note : Stub -- not implemented yet.
    """
    x = 2  # placeholder statement so the body is non-empty; TODO: implement.
def doRenamingForMachines( parameters ):
    """
    @summary : Renames files based on machine names.
    @param parameters: Parameters with whom this program was called.
    @return : None
    @note : Stub -- not implemented yet.
    """
    x =2  # placeholder statement so the body is non-empty; TODO: implement.
def setGlobalLanguageParameters():
    """
    @summary : Sets up all the needed global language
               tranlator so that it can be used
               everywhere in this program.
    @Note : The scope of the global _ function
            is restrained to this module only and
            does not cover the entire project.
    @return: None
    """
    global _
    # Install the gettext-style translator for this module's strings.
    _ = LanguageTools.getTranslatorForModule( CURRENT_MODULE_ABS_PATH )
def main():
"""
@summary: renames all the files
wich are named after a
certain machine name to
another machine name.
"""
setGlobalLanguageParameters()
manualConfirmation = False
overrideConfirmation = False
parser = createParser( ) #will be used to parse options
parameters = getOptionsFromParser( parser )
validateParameters( parameters )
if overrideConfirmation == False:
manualConfirmation = getConfirmation( parameters )
if overrideConfirmation == True or manualConfirmation == True:
if parameters.clientNames == True :
doRenamingForClients()
elif parameters.groupNames == True :
doRenamingForGroups()
else :
doRenamingForMachines()
else:
print _("Program terminated.")
if __name__ == "__main__":
    # Entry point when called from the command line or a crontab.
    main()
brownian/frescobaldi | frescobaldi_app/qpageview/view.py | 2 | 15220 | # This file is part of the qpageview package.
#
# Copyright (c) 2016 - 2016 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
The View, deriving from QAbstractScrollArea.
"""
import contextlib
from PyQt5.QtCore import pyqtSignal, QPoint, QSize, Qt
from PyQt5.QtGui import QPainter, QPalette
from PyQt5.QtWidgets import QStyle
from . import layout
from . import scrollarea
from .constants import (
# rotation:
Rotate_0,
Rotate_90,
Rotate_180,
Rotate_270,
# viewModes:
FixedScale,
FitWidth,
FitHeight,
FitBoth,
)
class View(scrollarea.ScrollArea):
MIN_ZOOM = 0.05
MAX_ZOOM = 8.0
viewModeChanged = pyqtSignal(int)
rotationChanged = pyqtSignal(int)
zoomFactorChanged = pyqtSignal(float)
scrollupdatespersec = 50
    def __init__(self, parent=None, **kwds):
        """Create the View.

        parent -- optional parent widget; remaining keyword arguments are
        forwarded to the ScrollArea base class.
        """
        super().__init__(parent, **kwds)
        # presumably the set of pages painted during the previous paint
        # event -- TODO confirm against the paint code (not in view here)
        self._prev_pages_to_paint = set()
        self._viewMode = FixedScale
        self._pageLayout = layout.PageLayout()
        self._magnifier = None
        self._rubberband = None
        self.viewport().setBackgroundRole(QPalette.Dark)
        self.verticalScrollBar().setSingleStep(20)
        self.horizontalScrollBar().setSingleStep(20)
        # receive mouse move events even with no button pressed (used by
        # the magnifier/rubberband event filters installed later)
        self.setMouseTracking(True)
    def loadPdf(self, filename):
        """Convenience method to load the specified PDF file.

        Replaces the current layout's pages with one page per PDF page,
        all sharing a single poppler Renderer, then updates the layout.
        """
        import popplerqt5
        from . import poppler
        doc = popplerqt5.Poppler.Document.load(filename)
        renderer = poppler.Renderer()
        self.pageLayout()[:] = poppler.PopplerPage.createPages(doc, renderer)
        self.updatePageLayout()
    def loadSvgs(self, filenames):
        """Convenience method to load the specified list of SVG files.
        Each SVG file is loaded in one Page.

        All pages share a single svg Renderer; the layout is updated
        afterwards.
        """
        from . import svg
        renderer = svg.Renderer()
        self.pageLayout()[:] = (svg.SvgPage(f, renderer) for f in filenames)
        self.updatePageLayout()
    def setPageLayout(self, layout):
        """Set our current PageLayout instance.

        Note: does not call updatePageLayout(); the caller is responsible
        for triggering a layout update when needed.
        """
        self._pageLayout = layout
    def pageLayout(self):
        """Return our current PageLayout instance."""
        return self._pageLayout
def updatePageLayout(self):
"""Update layout and adjust scrollbars."""
self._pageLayout.update()
self._updateScrollBars()
self.viewport().update()
def clear(self):
"""Convenience method to clear the current layout."""
self._pageLayout.clear()
self.updatePageLayout()
def setViewMode(self, mode):
"""Sets the current ViewMode."""
if mode == self._viewMode:
return
self._viewMode = mode
if mode:
self._fitLayout()
self.viewModeChanged.emit(mode)
def viewMode(self):
"""Returns the current ViewMode."""
return self._viewMode
def setRotation(self, rotation):
"""Set the current rotation."""
layout = self._pageLayout
if rotation != layout.rotation:
with self._keepCentered():
layout.rotation = rotation
if self._viewMode:
self._fitLayout()
self.rotationChanged.emit(rotation)
def rotation(self):
"""Return the current rotation."""
return self._pageLayout.rotation
def rotateLeft(self):
"""Rotate the pages 270 degrees."""
self.setRotation((self.rotation() - 1) & 3)
def rotateRight(self):
"""Rotate the pages 90 degrees."""
self.setRotation((self.rotation() + 1) & 3)
def setMagnifier(self, magnifier):
"""Sets the Magnifier to use (or None to disable the magnifier).
The viewport takes ownership of the Magnifier.
"""
if self._magnifier:
self.removeEventFilter(self._magnifier)
self._magnifier.setParent(None)
self._magnifier = magnifier
if magnifier:
magnifier.setParent(self.viewport())
self.installEventFilter(magnifier)
def magnifier(self):
"""Returns the currently set magnifier."""
return self._magnifier
def setRubberband(self, rubberband):
"""Sets the Rubberband to use for selections (or None to not use one)."""
if self._rubberband:
self.removeEventFilter(self._rubberband)
self.zoomFactorChanged.disconnect(self._rubberband.hide)
self._rubberband.setParent(None)
self._rubberband = rubberband
if rubberband:
rubberband.setParent(self.viewport())
self.installEventFilter(rubberband)
self.zoomFactorChanged.connect(rubberband.hide)
def scrollContentsBy(self, dx, dy):
"""Reimplemented to move the rubberband as well."""
if self._rubberband:
self._rubberband.scrollBy(QPoint(dx, dy))
self.viewport().update()
def _fitLayout(self):
"""(Internal). Fits the layout according to the view mode.
Prevents scrollbar/resize loops by precalculating which scrollbars will appear.
"""
mode = self.viewMode()
if mode == FixedScale:
return
maxsize = self.maximumViewportSize()
# can vertical or horizontal scrollbars appear?
vcan = self.verticalScrollBarPolicy() == Qt.ScrollBarAsNeeded
hcan = self.horizontalScrollBarPolicy() == Qt.ScrollBarAsNeeded
# width a scrollbar takes off the viewport size
framewidth = 0
if self.style().styleHint(QStyle.SH_ScrollView_FrameOnlyAroundContents, None, self):
framewidth = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth) * 2
scrollbarextent = self.style().pixelMetric(QStyle.PM_ScrollBarExtent, None, self) + framewidth
# remember old factor
zoom_factor = self.zoomFactor()
# first try to fit full size
layout = self._pageLayout
layout.fit(maxsize, mode)
layout.update()
# minimal values
minwidth = maxsize.width()
minheight = maxsize.height()
if vcan:
minwidth -= scrollbarextent
if hcan:
minheight -= scrollbarextent
# do width and/or height fit?
fitw = layout.width <= maxsize.width()
fith = layout.height <= maxsize.height()
if not fitw and not fith:
if vcan or hcan:
layout.fit(QSize(minwidth, minheight), mode)
elif mode & FitWidth and fitw and not fith and vcan:
# a vertical scrollbar will appear
w = minwidth
layout.fit(QSize(w, maxsize.height()), mode)
layout.update()
if layout.height <= maxsize.height():
# now the vert. scrollbar would disappear!
# enlarge it as long as the vertical scrollbar would not be needed
while True:
w += 1
layout.fit(QSize(w, maxsize.height()), mode)
layout.update()
if layout.height > maxsize.height():
layout.fit(QSize(w - 1, maxsize.height()), mode)
break
elif mode & FitHeight and fith and not fitw and hcan:
# a horizontal scrollbar will appear
h = minheight
layout.fit(QSize(maxsize.width(), h), mode)
layout.update()
if layout.width <= maxsize.width():
# now the horizontal scrollbar would disappear!
# enlarge it as long as the horizontal scrollbar would not be needed
while True:
h += 1
layout.fit(QSize(maxsize.width(), h), mode)
layout.update()
if layout.width > maxsize.width():
layout.fit(QSize(maxsize.width(), h - 1), mode)
break
self.updatePageLayout()
if zoom_factor != self.zoomFactor():
self.zoomFactorChanged.emit(self.zoomFactor())
@contextlib.contextmanager
def _keepCentered(self, pos=None, on_page=False):
"""Context manager to keep the same spot centered while changing the layout.
If pos is not given, the viewport's center is used. If on_page is True,
a position on a page is maintained if found. Otherwise, just the
position on the layout is kept.
"""
if pos is None:
pos = self.viewport().rect().center()
# find the spot on the page
layout = self._pageLayout
layout_pos = self.layoutPosition()
pos_on_layout = pos - layout_pos
page = layout.pageAt(pos_on_layout) if on_page else None
if page:
pos_on_page = pos_on_layout - page.pos()
x = pos_on_page.x() / page.width
y = pos_on_page.y() / page.height
else:
x = pos_on_layout.x() / layout.width
y = pos_on_layout.y() / layout.height
yield
self.updatePageLayout()
if page:
new_pos_on_page = QPoint(round(x * page.width), round(y * page.height))
new_pos_on_layout = page.pos() + new_pos_on_page
else:
new_pos_on_layout = QPoint(round(x * layout.width), round(y * layout.height))
diff = new_pos_on_layout - pos
self.verticalScrollBar().setValue(diff.y())
self.horizontalScrollBar().setValue(diff.x())
def setZoomFactor(self, factor, pos=None):
"""Set the zoom factor (1.0 by default).
If pos is given, that position (in viewport coordinates) is kept in the
center if possible. If None, zooming centers around the viewport center.
"""
factor = max(self.MIN_ZOOM, min(self.MAX_ZOOM, factor))
if factor != self._pageLayout.zoomFactor:
with self._keepCentered(pos, True):
self._pageLayout.zoomFactor = factor
self.setViewMode(FixedScale)
self.zoomFactorChanged.emit(factor)
def zoomFactor(self):
"""Return the page layout's zoom factor."""
return self._pageLayout.zoomFactor
def zoomIn(self, pos=None, factor=1.1):
"""Zoom in.
If pos is given, it is the position in the viewport to keep centered.
Otherwise zooming centers around the viewport center.
"""
self.setZoomFactor(self.zoomFactor() * factor, pos)
def zoomOut(self, pos=None, factor=1.1):
"""Zoom out.
If pos is given, it is the position in the viewport to keep centered.
Otherwise zooming centers around the viewport center.
"""
self.setZoomFactor(self.zoomFactor() / factor, pos)
def _updateScrollBars(self):
"""Adjust the range of the scrollbars to the layout."""
layout = self._pageLayout
maxsize = self.maximumViewportSize()
vbar = self.verticalScrollBar()
hbar = self.horizontalScrollBar()
if layout.width <= maxsize.width() and layout.height <= maxsize.height():
vbar.setRange(0, 0)
hbar.setRange(0, 0)
else:
viewport = self.viewport()
vbar.setRange(0, layout.height - viewport.height())
vbar.setPageStep(viewport.height() * .9)
hbar.setRange(0, layout.width - viewport.width())
hbar.setPageStep(viewport.width() * .9)
def layoutPosition(self):
"""Return the position of the PageLayout relative to the viewport.
This is the top-left position of the layout, relative to the
top-left position of the viewport.
If the layout is smaller than the viewport it is centered.
"""
lw = self._pageLayout.width
vw = self.viewport().width()
left = -self.horizontalScrollBar().value() if lw > vw else (vw - lw) // 2
lh = self._pageLayout.height
vh = self.viewport().height()
top = -self.verticalScrollBar().value() if lh > vh else (vh - lh) // 2
return QPoint(left, top)
def visibleRect(self):
"""Return the QRect of the page layout that is currently visible in the viewport."""
return self.viewport().rect().translated(-self.layoutPosition())
def visiblePages(self):
"""Yield the Page instances that are currently visible."""
return self._pageLayout.pagesAt(self.visibleRect())
def resizeEvent(self, ev):
"""Reimplemented to update the scrollbars."""
if self._viewMode and not self._pageLayout.empty():
# sensible repositioning
vbar = self.verticalScrollBar()
hbar = self.horizontalScrollBar()
x, xm = hbar.value(), hbar.maximum()
y, ym = vbar.value(), vbar.maximum()
self._fitLayout()
if xm: hbar.setValue(round(x * hbar.maximum() / xm))
if ym: vbar.setValue(round(y * vbar.maximum() / ym))
else:
self._updateScrollBars()
def repaintPage(self, page):
"""Call this when you want to redraw the specified page."""
rect = page.rect().translated(self.layoutPosition())
self.viewport().update(rect)
def paintEvent(self, ev):
layout_pos = self.layoutPosition()
painter = QPainter(self.viewport())
# pages to paint
ev_rect = ev.rect().translated(-layout_pos)
pages_to_paint = set(self._pageLayout.pagesAt(ev_rect))
# paint the pages
for p in pages_to_paint:
rect = (p.rect() & ev_rect).translated(-p.pos())
painter.save()
painter.translate(p.pos() + layout_pos)
p.paint(painter, rect, self.repaintPage)
painter.restore()
# TODO paint highlighting
# remove pending render jobs for pages that were visible, but are not
# visible now
margin = 50
rect = ev_rect.adjusted(-margin, -margin, margin, margin)
for page in self._prev_pages_to_paint - pages_to_paint:
if page.renderer and not rect.intersects(page.rect()):
page.renderer.unschedule(page, self.repaintPage)
self._prev_pages_to_paint = pages_to_paint
def wheelEvent(self, ev):
# TEMP
if ev.modifiers() & Qt.CTRL:
factor = 1.1 ** (ev.angleDelta().y() / 120)
if ev.angleDelta().y():
self.setZoomFactor(self.zoomFactor() * factor, ev.pos())
else:
super().wheelEvent(ev)
| gpl-2.0 |
rupran/ansible | lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_facts.py | 68 | 6200 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: azure_rm_publicipaddress_facts
version_added: "2.1"
short_description: Get public IP facts.
description:
- Get facts for a specific public IP or all public IPs within a resource group.
options:
name:
description:
- Only show results for a specific Public IP.
required: false
default: null
resource_group:
description:
- Limit results by resource group. Required when using name parameter.
required: false
default: null
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
required: false
default: null
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Get facts for one Public IP
azure_rm_publicip_facts:
resource_group: Testing
name: publicip001
- name: Get facts for all Public IPs within a resource groups
azure_rm_publicip_facts:
resource_group: Testing
'''
RETURN = '''
azure_publicipaddresses:
description: List of public IP address dicts.
returned: always
type: list
example: [{
"etag": 'W/"a31a6d7d-cb18-40a5-b16d-9f4a36c1b18a"',
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/publicIPAddresses/pip2001",
"location": "eastus2",
"name": "pip2001",
"properties": {
"idleTimeoutInMinutes": 4,
"provisioningState": "Succeeded",
"publicIPAllocationMethod": "Dynamic",
"resourceGuid": "29de82f4-a7da-440e-bd3d-9cabb79af95a"
},
"type": "Microsoft.Network/publicIPAddresses"
}]
'''
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureMissingResourceHttpError, AzureHttpError
except:
# This is handled in azure_rm_common
pass
AZURE_OBJECT_CLASS = 'PublicIp'
class AzureRMPublicIPFacts(AzureRMModuleBase):
    """Gather facts for one, some, or all Azure public IP addresses.

    Dispatches on the supplied parameters:
      - name (+ resource_group): a single public IP,
      - resource_group only: all public IPs in that group,
      - neither: all public IPs in the subscription.
    Results are filtered by tags (if given) and returned as serialized dicts.
    """

    def __init__(self):

        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            tags=dict(type='list')
        )

        self.results = dict(
            changed=False,
            ansible_facts=dict(azure_publicipaddresses=[])
        )

        self.name = None
        self.resource_group = None
        self.tags = None

        super(AzureRMPublicIPFacts, self).__init__(self.module_arg_spec,
                                                   supports_tags=False,
                                                   facts_module=True)

    def exec_module(self, **kwargs):
        """Entry point: validate parameters and populate the facts.

        :returns: the results dict with 'azure_publicipaddresses' filled in.
        """
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")

        if self.name:
            self.results['ansible_facts']['azure_publicipaddresses'] = self.get_item()
        elif self.resource_group:
            self.results['ansible_facts']['azure_publicipaddresses'] = self.list_resource_group()
        else:
            self.results['ansible_facts']['azure_publicipaddresses'] = self.list_all()

        return self.results

    def get_item(self):
        """Return facts for a single named public IP (empty list if absent)."""
        self.log('Get properties for {0}'.format(self.name))
        item = None
        try:
            item = self.network_client.public_ip_addresses.get(self.resource_group, self.name)
        except CloudError:
            # Not found: an empty result, not a failure.
            pass
        if item and self.has_tags(item.tags, self.tags):
            return [self._serialize_pip(item)]
        return []

    def list_resource_group(self):
        """Return facts for all public IPs in self.resource_group."""
        self.log('List items in resource groups')
        try:
            response = self.network_client.public_ip_addresses.list(self.resource_group)
        except AzureHttpError as exc:
            self.fail("Error listing items in resource groups {0} - {1}".format(self.resource_group, str(exc)))
        return self._filter_and_serialize(response)

    def list_all(self):
        """Return facts for all public IPs in the subscription."""
        self.log('List all items')
        try:
            response = self.network_client.public_ip_addresses.list_all()
        except AzureHttpError as exc:
            self.fail("Error listing all items - {0}".format(str(exc)))
        return self._filter_and_serialize(response)

    def _filter_and_serialize(self, response):
        # Shared post-processing for both list variants: keep items matching
        # the requested tags and serialize each one.
        return [self._serialize_pip(item) for item in response
                if self.has_tags(item.tags, self.tags)]

    def _serialize_pip(self, item):
        # serialize_obj drops 'name' and 'type', so re-attach them explicitly.
        pip = self.serialize_obj(item, AZURE_OBJECT_CLASS)
        pip['name'] = item.name
        pip['type'] = item.type
        return pip
def main():
    """Module entry point; instantiating the facts class runs the module."""
    AzureRMPublicIPFacts()
if __name__ == '__main__':
    main()
| gpl-3.0 |
mopidy/mopidy-local-whoosh | mopidy_local_whoosh/library.py | 1 | 7705 | from __future__ import unicode_literals
import logging
import os
import re
import shutil
import sys
from mopidy import local
from mopidy.local import translator
from mopidy.models import Ref, SearchResult
from mopidy.utils import path
import whoosh
import whoosh.fields
import whoosh.index
import whoosh.query
logger = logging.getLogger(__name__)
# Whoosh index schema: one document per track or per directory.  The ID
# fields are stored verbatim for browsing; the TEXT fields are tokenized
# for searching; 'track' stores the full Mopidy Track model for lookup().
schema = whoosh.fields.Schema(
    uri=whoosh.fields.ID(stored=True, unique=True),
    parent=whoosh.fields.ID(stored=True),
    pathname=whoosh.fields.ID(stored=True),
    type=whoosh.fields.ID(stored=True),
    name=whoosh.fields.TEXT(),
    artists=whoosh.fields.TEXT(),
    album=whoosh.fields.TEXT(),
    content=whoosh.fields.TEXT(),
    track=whoosh.fields.STORED())
# Maps Mopidy search-query field names to Whoosh schema field names.
MAPPING = {'uri': 'uri',
           'track_name': 'name',
           'album': 'album',
           'artist': 'artists',
           'any': 'content'}
# Query fields whose values are tokenized before matching (see search()).
TOKENIZE = ('track_name', 'album', 'artist', 'any')
def _track_to_refs(track):
    # Build the chain of Ref objects leading from the local root directory
    # down to the track: [root dir, dir1, dir2, ..., track ref].
    track_path = translator.local_track_uri_to_path(track.uri, b'/')
    # Python 2 code: the path is bytes; decode with the filesystem encoding,
    # replacing undecodable bytes rather than failing.
    track_path = track_path.decode(sys.getfilesystemencoding(), 'replace')
    parts = re.findall(r'([^/]+)', track_path)
    # The last path component is the track itself.
    track_ref = Ref.track(uri=track.uri, name=parts.pop())
    refs = [Ref.directory(uri='local:directory')]
    for i in range(len(parts)):
        # Each prefix of the path becomes one directory Ref.
        directory = '/'.join(parts[:i+1])
        uri = translator.path_to_local_directory_uri(directory)
        refs.append(Ref.directory(uri=unicode(uri), name=parts[i]))
    return refs + [track_ref]
class WhooshLibrary(local.Library):
    """Mopidy local library provider backed by a Whoosh full-text index.

    Writes happen inside a begin()/add()/remove()/flush()/close()
    transaction; self._counts tracks the number of direct children per
    directory URI so empty directories can be pruned on remove().
    """
    name = 'whoosh'
    def __init__(self, config):
        self._data_dir = os.path.join(config['local']['data_dir'], b'whoosh')
        # Write-transaction state; only valid between begin() and close().
        self._writer = None
        # uri -> number of direct children, for directory bookkeeping.
        self._counts = None
        self._index = None
    def load(self):
        # Open (or create) the on-disk index, then return the track count.
        if not self._index:
            if not os.path.exists(self._data_dir):
                path.get_or_create_dir(self._data_dir)
                self._index = whoosh.index.create_in(self._data_dir, schema)
            else:
                # TODO: this can fail on bad index versions
                self._index = whoosh.index.open_dir(self._data_dir)
        self._index.refresh()
        with self._index.searcher() as searcher:
            return searcher.doc_frequency('type', 'track')
    def lookup(self, uri):
        # Return the stored Track model for the URI, or None if unknown.
        assert self._index, 'load() must have been called at least once'
        with self._index.searcher() as searcher:
            result = searcher.document(uri=uri, type='track')
            if result:
                return result['track']
        return None
    def browse(self, uri):
        # List the direct children (directories first, then tracks, each
        # group sorted by name) of the given directory URI.
        assert self._index, 'load() must have been called at least once'
        result = []
        with self._index.searcher() as searcher:
            query = whoosh.query.Term('parent', uri)
            for doc in searcher.search(query, limit=None):
                if doc['type'] == 'track':
                    ref = Ref.track(uri=doc['uri'], name=doc['pathname'])
                else:
                    ref = Ref.directory(uri=doc['uri'], name=doc['pathname'])
                result.append(ref)
        result.sort(key=lambda ref: (ref.type, ref.name))
        return result
    # TODO: add limit and offset, and total to results
    def search(self, query=None, limit=100, offset=0, uris=None, exact=False):
        # Translate the Mopidy query dict into a Whoosh query:
        # AND over fields, OR over the values given for each field.
        assert self._index, 'load() must have been called at least once'
        parts = []
        for name, values in query.items():
            if name not in MAPPING:
                logger.debug('Skipping field: %s', name)
                continue
            terms = []
            field_name = MAPPING[name]
            field = schema[field_name]
            for value in values:
                tokens = field.process_text(value, mode="query")
                if name not in TOKENIZE:
                    # Untokenized fields (e.g. uri) match the raw value.
                    term = whoosh.query.Term(field_name, value)
                elif exact:
                    term = whoosh.query.Phrase(field_name, list(tokens))
                else:
                    term = whoosh.query.And([
                        whoosh.query.FuzzyTerm(field_name, t) for t in tokens])
                terms.append(term)
            parts.append(whoosh.query.Or(terms))
        if not parts:
            logger.debug('Aborting search due to empty query.')
            return SearchResult(tracks=[])
        # Only track documents, never directories, appear in results.
        parts.append(whoosh.query.Term('type', 'track'))
        whoosh_query = whoosh.query.And(parts)
        logger.debug('Performing search: %s', whoosh_query)
        with self._index.searcher() as searcher:
            results = searcher.search(whoosh_query, limit=limit)
            tracks = [result['track'] for result in results]
        return SearchResult(tracks=tracks)
    def begin(self):
        # Start a write transaction, rebuild the child-count bookkeeping
        # from the existing index, and yield every stored track.
        assert self._index, 'load() must have been called at least once'
        self._writer = self._index.writer()
        self._counts = {}
        with self._index.reader() as reader:
            # We don't use iter_docs as it does the same as this, but breaks
            # backwards compatibility pre 2.5
            for docnum in reader.all_doc_ids():
                doc = reader.stored_fields(docnum)
                self._counts.setdefault(doc['parent'], 0)
                self._counts[doc['parent']] += 1
                if doc['type'] == 'directory':
                    self._counts.setdefault(doc['uri'], 0)
                elif doc['type'] == 'track':
                    yield doc['track']
    def add(self, track):
        assert self._writer, 'begin() must have been called'
        content = [track.name, track.album.name]
        content.extend(a.name for a in track.artists)
        refs = _track_to_refs(track)
        # Add track to search index:
        self._writer.update_document(
            uri=unicode(track.uri), type='track',
            parent=refs[-2].uri, pathname=refs[-1].name,
            name=track.name, album=track.album.name,
            artists=u' '.join(a.name for a in track.artists),
            content=u' '.join([c for c in content if c]), track=track)
        # Add any missing directories to search index:
        for i in reversed(range(1, len(refs)-1)):
            uri = unicode(refs[i].uri)
            name = refs[i].name
            parent = unicode(refs[i-1].uri)
            self._counts.setdefault(uri, 0)
            self._counts[uri] += 1
            if self._counts[uri] > 1:
                # Directory (and hence its ancestors) already indexed.
                break
            self._writer.update_document(
                uri=uri, type='directory', parent=parent, pathname=name)
    def remove(self, uri):
        assert self._writer, 'begin() must have been called'
        # Traverse up tree as long as dir is empty, also handles initial track
        while self._counts.get(uri, 0) < 1:
            # Lookup the uri to get its parent.
            with self._index.searcher() as searcher:
                result = searcher.document(uri=uri)
            # Delete the uri and remove its count if it had one.
            self._writer.delete_by_term('uri', uri)
            self._counts.pop(uri, None)
            if not result:
                break
            # Move up to the parent and reduce its count by one.
            uri = result['parent']
            self._counts[uri] -= 1
    def flush(self):
        # Commit pending changes but keep the transaction open by
        # immediately opening a fresh writer.
        assert self._writer, 'begin() must have been called'
        self._writer.commit(merge=False)
        self._writer = self._index.writer()
        return True
    def close(self):
        # Final commit; optimize merges all segments into one.
        assert self._writer, 'begin() must have been called'
        self._writer.commit(optimize=True)
    def clear(self):
        # Remove the whole on-disk index; returns False on failure.
        try:
            shutil.rmtree(self._data_dir)
            return True
        except OSError:
            return False
| apache-2.0 |
dendisuhubdy/tensorflow | tensorflow/examples/learn/iris_custom_decay_dnn.py | 43 | 3572 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with exponential decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
  """Model function: a DNN with three hidden layers (10, 20, 10 units)."""
  # Stack three ReLU-activated dense layers on top of the input feature.
  hidden = features[X_FEATURE]
  for width in (10, 20, 10):
    hidden = tf.layers.dense(hidden, units=width, activation=tf.nn.relu)

  # One logit per class (3 iris species); no output activation.
  logits = tf.layers.dense(hidden, 3, activation=None)

  # The predicted class is the argmax over the logits.
  predicted_classes = tf.argmax(logits, 1)
  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(
        mode,
        predictions={
            'class': predicted_classes,
            'prob': tf.nn.softmax(logits)
        })

  # Cross-entropy loss against the integer labels.
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

  if mode == tf.estimator.ModeKeys.TRAIN:
    # Adagrad with an exponentially decaying learning rate.
    step = tf.train.get_global_step()
    decayed_lr = tf.train.exponential_decay(
        learning_rate=0.1, global_step=step,
        decay_steps=100, decay_rate=0.001)
    train_op = tf.train.AdagradOptimizer(
        learning_rate=decayed_lr).minimize(loss, global_step=step)
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

  # EVAL mode: report accuracy.
  metric_ops = {
      'accuracy': tf.metrics.accuracy(
          labels=labels, predictions=predicted_classes)
  }
  return tf.estimator.EstimatorSpec(
      mode, loss=loss, eval_metric_ops=metric_ops)
def main(unused_argv):
  """Train, predict and score the custom-decay DNN on the Iris data set."""
  iris = datasets.load_iris()
  split = model_selection.train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)
  features_train, features_test, labels_train, labels_test = split

  estimator = tf.estimator.Estimator(model_fn=my_model)

  # Fit the model on the training split.
  fit_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: features_train}, y=labels_train,
      num_epochs=None, shuffle=True)
  estimator.train(input_fn=fit_input_fn, steps=1000)

  # Run inference over the held-out split.
  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={X_FEATURE: features_test}, y=labels_test,
      num_epochs=1, shuffle=False)
  predicted = np.array(
      [p['class'] for p in estimator.predict(input_fn=eval_input_fn)])
  predicted = predicted.reshape(np.array(labels_test).shape)

  # Accuracy as computed by scikit-learn.
  score = metrics.accuracy_score(labels_test, predicted)
  print('Accuracy (sklearn): {0:f}'.format(score))

  # Accuracy as computed by the estimator's evaluate() loop.
  eval_metrics = estimator.evaluate(input_fn=eval_input_fn)
  print('Accuracy (tensorflow): {0:f}'.format(eval_metrics['accuracy']))


if __name__ == '__main__':
  tf.app.run()
| apache-2.0 |
angelman/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/newstringio_unittest.py | 124 | 1811 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for newstringio module."""
import unittest2 as unittest
import newstringio
class NewStringIOTest(unittest.TestCase):
    """Checks that newstringio.StringIO works as a context manager."""

    def test_with(self):
        # Reading inside a with-block should yield the original contents.
        with newstringio.StringIO("foo") as stream:
            contents = stream.read()
        self.assertEqual(contents, "foo")
| bsd-3-clause |
svohara/pyvision | src/pyvision/edge/canny.py | 1 | 4157 | # PyVision License
#
# Copyright (c) 2006-2008 David S. Bolme
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import os.path
import cv2.cv as cv
import pyvision as pv
#import numpy as np
#from scipy.ndimage import convolve
#from scipy.ndimage import maximum_filter
def canny(im,threshold1=40.0,threshold2=100.0,aperture_size=3,sigma=None):
    '''
    Compute a Canny edge map for an image using OpenCV's legacy ``cv`` API.

    @param im: source pv.Image; converted to OpenCV grayscale internally.
    @param threshold1: first hysteresis threshold.
    @param threshold2: second hysteresis threshold.
    @param aperture_size: Sobel aperture size passed to cv.Canny.
    @param sigma: if given, Gaussian-smooth the image with this sigma first.
    @return: a pv.Image wrapping the 8-bit single-channel edge image.

    Wraps: void cvCanny( const CvArr* image, CvArr* edges, double threshold1, double threshold2, int aperture_size=3 );
    '''
    gray = im.asOpenCVBW()
    edges = cv.CreateImage( cv.GetSize(gray), 8, 1 );
    if sigma!=None:
        # Kernel size is derived from sigma; the formula always yields an
        # odd size, as cv.Smooth requires.
        cv.Smooth(gray,gray,cv.CV_GAUSSIAN,int(sigma+1)*4+1,int(sigma+1)*4+1,sigma,sigma)
    if threshold1 < threshold2:
        # NOTE(review): this swap deliberately passes the LARGER threshold
        # first to cv.Canny (with the defaults, 100 then 40) -- confirm the
        # intended argument order against the OpenCV docs.
        threshold1, threshold2 = threshold2,threshold1
    cv.Canny(gray,edges,threshold1,threshold2 ,aperture_size)
    return pv.Image(edges)
class _TestCanny(unittest.TestCase):
    ''' Unit tests for the canny detector'''

    def setUp(self):
        # When True, each test also displays the resulting edge image.
        self.show_results = False

    def _run_canny(self, basename):
        '''Run canny() on the named image from pyvision's nonface data dir.

        Shared by all tests below; the original five tests were identical
        except for the filename.
        '''
        filename = os.path.join(pv.__path__[0], 'data', 'nonface', basename)
        img = pv.Image(filename)
        out = canny(img)
        if self.show_results:
            out.show()
        return out

    def test_canny1(self):
        '''
        This will run the code, but what is a good test for canny?
        '''
        self._run_canny('NONFACE_46.jpg')

    def test_canny2(self):
        '''
        This will run the code, but what is a good test for canny?
        '''
        self._run_canny('NONFACE_10.jpg')

    def test_canny3(self):
        '''
        This will run the code, but what is a good test for canny?
        '''
        self._run_canny('NONFACE_22.jpg')

    def test_canny4(self):
        '''
        This will run the code, but what is a good test for canny?
        '''
        self._run_canny('NONFACE_44.jpg')

    def test_canny5(self):
        '''
        This will run the code, but what is a good test for canny?
        '''
        self._run_canny('NONFACE_37.jpg')
| bsd-3-clause |
sfriesel/libcloud | libcloud/compute/drivers/cloudsigma.py | 23 | 67813 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Drivers for CloudSigma API v1.0 and v2.0.
"""
import re
import time
import copy
import base64
try:
import simplejson as json
except:
import json
from libcloud.utils.py3 import b
from libcloud.utils.py3 import httplib
from libcloud.utils.misc import str2dicts, str2list, dict2str
from libcloud.common.base import ConnectionUserAndKey, JsonResponse, Response
from libcloud.common.types import InvalidCredsError, ProviderError
from libcloud.common.cloudsigma import INSTANCE_TYPES
from libcloud.common.cloudsigma import API_ENDPOINTS_1_0
from libcloud.common.cloudsigma import API_ENDPOINTS_2_0
from libcloud.common.cloudsigma import DEFAULT_API_VERSION, DEFAULT_REGION
from libcloud.compute.types import NodeState, Provider
from libcloud.compute.base import NodeDriver, NodeSize, Node
from libcloud.compute.base import NodeImage
from libcloud.compute.base import is_private_subnet
from libcloud.utils.iso8601 import parse_date
from libcloud.utils.misc import get_secure_random_string
__all__ = [
'CloudSigmaNodeDriver',
'CloudSigma_1_0_NodeDriver',
'CloudSigma_2_0_NodeDriver',
'CloudSigmaError',
'CloudSigmaNodeSize',
'CloudSigmaDrive',
'CloudSigmaTag',
'CloudSigmaSubscription',
'CloudSigmaFirewallPolicy',
'CloudSigmaFirewallPolicyRule'
]
class CloudSigmaNodeDriver(NodeDriver):
    """Facade driver that instantiates the API-version-specific subclass."""
    name = 'CloudSigma'
    website = 'http://www.cloudsigma.com/'

    def __new__(cls, key, secret=None, secure=True, host=None, port=None,
                api_version=DEFAULT_API_VERSION, **kwargs):
        # When the facade itself is instantiated, substitute the concrete
        # driver class for the requested API version.
        if cls is CloudSigmaNodeDriver:
            drivers = {
                '1.0': CloudSigma_1_0_NodeDriver,
                '2.0': CloudSigma_2_0_NodeDriver,
            }
            try:
                cls = drivers[api_version]
            except KeyError:
                raise NotImplementedError('Unsupported API version: %s' %
                                          (api_version))
        return super(CloudSigmaNodeDriver, cls).__new__(cls)
class CloudSigmaException(Exception):
    """Generic error raised by the CloudSigma v1.0 driver."""

    def __str__(self):
        # The first positional argument carries the error description.
        return self.args[0]

    def __repr__(self):
        message = self.args[0]
        return "<CloudSigmaException '%s'>" % (message)
class CloudSigmaInsufficientFundsException(Exception):
    """Raised when an operation fails because the account lacks funds."""

    def __repr__(self):
        message = self.args[0]
        return "<CloudSigmaInsufficientFundsException '%s'>" % (message)
class CloudSigmaNodeSize(NodeSize):
    """NodeSize variant which additionally carries a ``cpu`` value."""

    def __init__(self, id, name, cpu, ram, disk, bandwidth, price, driver):
        self.id = id
        self.name = name
        self.cpu = cpu
        self.ram = ram
        self.disk = disk
        self.bandwidth = bandwidth
        self.price = price
        self.driver = driver

    def __repr__(self):
        attributes = (self.id, self.name, self.cpu, self.ram, self.disk,
                      self.bandwidth, self.price, self.driver.name)
        return (('<NodeSize: id=%s, name=%s, cpu=%s, ram=%s disk=%s '
                 'bandwidth=%s price=%s driver=%s ...>') % attributes)
class CloudSigma_1_0_Response(Response):
    """Response class for the CloudSigma API v1.0."""

    def success(self):
        # 401 means the supplied credentials were rejected.
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError()

        return 200 <= self.status <= 299

    def parse_body(self):
        # v1.0 endpoints answer in a custom key/value text format which
        # str2dicts converts into a list of dictionaries.
        if not self.body:
            return self.body

        return str2dicts(self.body)

    def parse_error(self):
        return 'Error: %s' % (self.body.replace('errors:', '').strip())
class CloudSigma_1_0_Connection(ConnectionUserAndKey):
    """Connection class for the CloudSigma API v1.0 (HTTP Basic auth)."""
    host = API_ENDPOINTS_1_0[DEFAULT_REGION]['host']
    responseCls = CloudSigma_1_0_Response

    def add_default_headers(self, headers):
        credentials = b('%s:%s' % (self.user_id, self.key))
        encoded = base64.b64encode(credentials).decode('utf-8')

        headers['Accept'] = 'application/json'
        headers['Content-Type'] = 'application/json'
        headers['Authorization'] = 'Basic %s' % (encoded)
        return headers
class CloudSigma_1_0_NodeDriver(CloudSigmaNodeDriver):
    """
    Driver for the CloudSigma API v1.0.
    """
    type = Provider.CLOUDSIGMA
    name = 'CloudSigma (API v1.0)'
    website = 'http://www.cloudsigma.com/'
    connectionCls = CloudSigma_1_0_Connection

    IMAGING_TIMEOUT = 20 * 60  # Default timeout (in seconds) for the drive
    # imaging process

    NODE_STATE_MAP = {
        'active': NodeState.RUNNING,
        'stopped': NodeState.TERMINATED,
        'dead': NodeState.TERMINATED,
        'dumped': NodeState.TERMINATED,
    }

    def __init__(self, key, secret=None, secure=True, host=None, port=None,
                 region=DEFAULT_REGION, **kwargs):
        """
        Instantiate the driver for the provided region.

        :param region: Region to use (must be a key of API_ENDPOINTS_1_0).
        :type region: ``str``
        """
        if region not in API_ENDPOINTS_1_0:
            raise ValueError('Invalid region: %s' % (region))

        # Remember whether the user explicitly provided a host so that
        # _ex_connection_class_kwargs doesn't override it later.
        self._host_argument_set = host is not None
        self.api_name = 'cloudsigma_%s' % (region)
        super(CloudSigma_1_0_NodeDriver, self).__init__(key=key, secret=secret,
                                                        secure=secure,
                                                        host=host,
                                                        port=port,
                                                        region=region,
                                                        **kwargs)

    def reboot_node(self, node):
        """
        Reboot a node.

        Because Cloudsigma API does not provide native reboot call,
        it's emulated using stop and start.

        @inherits: :class:`NodeDriver.reboot_node`
        """
        node = self._get_node(node.id)
        state = node.state

        if state == NodeState.RUNNING:
            stopped = self.ex_stop_node(node)
        else:
            stopped = True

        if not stopped:
            raise CloudSigmaException(
                'Could not stop node with id %s' % (node.id))

        success = self.ex_start_node(node)

        return success

    def destroy_node(self, node):
        """
        Destroy a node (all the drives associated with it are NOT destroyed).

        If a node is still running, it's stopped before it's destroyed.

        @inherits: :class:`NodeDriver.destroy_node`
        """
        node = self._get_node(node.id)
        state = node.state

        # Node cannot be destroyed while running so it must be stopped first
        if state == NodeState.RUNNING:
            stopped = self.ex_stop_node(node)
        else:
            stopped = True

        if not stopped:
            raise CloudSigmaException(
                'Could not stop node with id %s' % (node.id))

        response = self.connection.request(
            action='/servers/%s/destroy' % (node.id),
            method='POST')
        return response.status == 204

    def list_images(self, location=None):
        """
        Return a list of available standard images (this call might take up
        to 15 seconds to return).

        @inherits: :class:`NodeDriver.list_images`
        """
        response = self.connection.request(
            action='/drives/standard/info').object

        images = []
        for value in response:
            # Only "disk" type standard drives are usable as images.
            if value.get('type'):
                if value['type'] == 'disk':
                    image = NodeImage(id=value['drive'], name=value['name'],
                                      driver=self.connection.driver,
                                      extra={'size': value['size']})
                    images.append(image)

        return images

    def list_sizes(self, location=None):
        """
        Return a list of available (statically defined) instance sizes.
        """
        sizes = []
        for value in INSTANCE_TYPES:
            key = value['id']
            size = CloudSigmaNodeSize(id=value['id'], name=value['name'],
                                      cpu=value['cpu'], ram=value['memory'],
                                      disk=value['disk'],
                                      bandwidth=value['bandwidth'],
                                      price=self._get_size_price(size_id=key),
                                      driver=self.connection.driver)
            sizes.append(size)

        return sizes

    def list_nodes(self):
        """
        Return a list of all the available nodes (servers).
        """
        response = self.connection.request(action='/servers/info').object

        nodes = []
        for data in response:
            node = self._to_node(data)
            if node:
                nodes.append(node)

        return nodes

    def create_node(self, **kwargs):
        """
        Creates a CloudSigma instance

        @inherits: :class:`NodeDriver.create_node`

        :keyword    name: String with a name for this new node (required)
        :type       name: ``str``

        :keyword    smp: Number of virtual processors or None to calculate
                         based on the cpu speed
        :type       smp: ``int``

        :keyword    nic_model: e1000, rtl8139 or virtio (is not specified,
                               e1000 is used)
        :type       nic_model: ``str``

        :keyword    vnc_password: If not set, VNC access is disabled.
        :type       vnc_password: ``bool``

        :keyword    drive_type: Drive type (ssd|hdd). Defaults to hdd.
        :type       drive_type: ``str``
        """
        size = kwargs['size']
        image = kwargs['image']
        smp = kwargs.get('smp', 'auto')
        nic_model = kwargs.get('nic_model', 'e1000')
        vnc_password = kwargs.get('vnc_password', None)
        drive_type = kwargs.get('drive_type', 'hdd')

        if nic_model not in ['e1000', 'rtl8139', 'virtio']:
            raise CloudSigmaException('Invalid NIC model specified')

        if drive_type not in ['hdd', 'ssd']:
            raise CloudSigmaException('Invalid drive type "%s". Valid types'
                                      ' are: hdd, ssd' % (drive_type))

        # 1. Clone the image drive so the server gets its own copy
        drive_data = {}
        drive_data.update({'name': kwargs['name'],
                           'size': '%sG' % (kwargs['size'].disk),
                           'driveType': drive_type})

        response = self.connection.request(
            action='/drives/%s/clone' % image.id,
            data=dict2str(drive_data),
            method='POST').object

        if not response:
            raise CloudSigmaException('Drive creation failed')

        drive_uuid = response[0]['drive']

        # 2. Poll until the drive imaging process finishes (or times out)
        response = self.connection.request(
            action='/drives/%s/info' % (drive_uuid)).object
        imaging_start = time.time()

        while 'imaging' in response[0]:
            response = self.connection.request(
                action='/drives/%s/info' % (drive_uuid)).object

            elapsed_time = time.time() - imaging_start
            timed_out = elapsed_time >= self.IMAGING_TIMEOUT
            if 'imaging' in response[0] and timed_out:
                raise CloudSigmaException('Drive imaging timed out')

            time.sleep(1)

        # 3. Create the server with the cloned drive attached
        node_data = {}
        node_data.update(
            {'name': kwargs['name'], 'cpu': size.cpu, 'mem': size.ram,
             'ide:0:0': drive_uuid, 'boot': 'ide:0:0', 'smp': smp})
        node_data.update({'nic:0:model': nic_model, 'nic:0:dhcp': 'auto'})

        if vnc_password:
            node_data.update({'vnc:ip': 'auto', 'vnc:password': vnc_password})

        response = self.connection.request(action='/servers/create',
                                           data=dict2str(node_data),
                                           method='POST').object

        if not isinstance(response, list):
            response = [response]

        node = self._to_node(response[0])
        if node is None:
            # Insufficient funds, destroy created drive
            self.ex_drive_destroy(drive_uuid)
            raise CloudSigmaInsufficientFundsException(
                'Insufficient funds, node creation failed')

        # Start the node after it has been created
        started = self.ex_start_node(node)

        if started:
            node.state = NodeState.RUNNING

        return node

    def ex_destroy_node_and_drives(self, node):
        """
        Destroy a node and all the drives associated with it.

        :param      node: Node which should be used
        :type       node: :class:`libcloud.compute.base.Node`

        :rtype: ``bool``
        """
        node = self._get_node_info(node)

        drive_uuids = []
        for key, value in node.items():
            # Collect attached drive uuids (ide/scsi/block keys) while
            # skipping the per-drive statistic entries.
            if (key.startswith('ide:') or key.startswith(
                'scsi') or key.startswith('block')) and\
                not (key.endswith(':bytes') or
                     key.endswith(':requests') or key.endswith('media')):
                drive_uuids.append(value)

        node_destroyed = self.destroy_node(self._to_node(node))

        if not node_destroyed:
            return False

        for drive_uuid in drive_uuids:
            self.ex_drive_destroy(drive_uuid)

        return True

    def ex_static_ip_list(self):
        """
        Return a list of available static IP addresses.

        :rtype: ``list`` of ``str``
        """
        response = self.connection.request(action='/resources/ip/list',
                                           method='GET')

        if response.status != 200:
            raise CloudSigmaException('Could not retrieve IP list')

        ips = str2list(response.body)
        return ips

    def ex_drives_list(self):
        """
        Return a list of all the available drives.

        :rtype: ``list`` of ``dict``
        """
        response = self.connection.request(action='/drives/info', method='GET')

        result = str2dicts(response.body)
        return result

    def ex_static_ip_create(self):
        """
        Create a new static IP address.

        :rtype: ``list`` of ``dict``
        """
        response = self.connection.request(action='/resources/ip/create',
                                           method='GET')

        result = str2dicts(response.body)
        return result

    def ex_static_ip_destroy(self, ip_address):
        """
        Destroy a static IP address.

        :param      ip_address: IP address which should be used
        :type       ip_address: ``str``

        :rtype: ``bool``
        """
        response = self.connection.request(
            action='/resources/ip/%s/destroy' % (ip_address), method='GET')

        return response.status == 204

    def ex_drive_destroy(self, drive_uuid):
        """
        Destroy a drive with a specified uuid.
        If the drive is currently mounted an exception is thrown.

        :param      drive_uuid: Drive uuid which should be used
        :type       drive_uuid: ``str``

        :rtype: ``bool``
        """
        response = self.connection.request(
            action='/drives/%s/destroy' % (drive_uuid), method='POST')

        return response.status == 204

    def ex_set_node_configuration(self, node, **kwargs):
        """
        Update a node configuration.
        Changing most of the parameters requires node to be stopped.

        :param      node: Node which should be used
        :type       node: :class:`libcloud.compute.base.Node`

        :param      kwargs: keyword arguments
        :type       kwargs: ``dict``

        :rtype: ``bool``
        """
        valid_keys = ('^name$', '^parent$', '^cpu$', '^smp$', '^mem$',
                      '^boot$', '^nic:0:model$', '^nic:0:dhcp',
                      '^nic:1:model$', '^nic:1:vlan$', '^nic:1:mac$',
                      '^vnc:ip$', '^vnc:password$', '^vnc:tls',
                      '^ide:[0-1]:[0-1](:media)?$', '^scsi:0:[0-7](:media)?$',
                      '^block:[0-7](:media)?$')

        invalid_keys = []
        keys = list(kwargs.keys())
        for key in keys:
            matches = False
            for regex in valid_keys:
                if re.match(regex, key):
                    matches = True
                    break
            if not matches:
                invalid_keys.append(key)

        if invalid_keys:
            raise CloudSigmaException(
                'Invalid configuration key specified: %s' %
                (','.join(invalid_keys)))

        response = self.connection.request(
            action='/servers/%s/set' % (node.id),
            data=dict2str(kwargs),
            method='POST')

        return (response.status == 200 and response.body != '')

    def ex_start_node(self, node):
        """
        Start a node.

        :param      node: Node which should be used
        :type       node: :class:`libcloud.compute.base.Node`

        :rtype: ``bool``
        """
        response = self.connection.request(
            action='/servers/%s/start' % (node.id),
            method='POST')

        return response.status == 200

    def ex_stop_node(self, node):
        """
        Stop (shutdown) a node.

        :param      node: Node which should be used
        :type       node: :class:`libcloud.compute.base.Node`

        :rtype: ``bool``
        """
        response = self.connection.request(
            action='/servers/%s/stop' % (node.id),
            method='POST')
        return response.status == 204

    def ex_shutdown_node(self, node):
        """
        Stop (shutdown) a node.

        @inherits: :class:`CloudSigmaBaseNodeDriver.ex_stop_node`
        """
        return self.ex_stop_node(node)

    def ex_destroy_drive(self, drive_uuid):
        """
        Destroy a drive.

        :param      drive_uuid: Drive uuid which should be used
        :type       drive_uuid: ``str``

        :rtype: ``bool``
        """
        response = self.connection.request(
            action='/drives/%s/destroy' % (drive_uuid),
            method='POST')
        return response.status == 204

    def _ex_connection_class_kwargs(self):
        """
        Return the host value based on the user supplied region.
        """
        kwargs = {}
        if not self._host_argument_set:
            kwargs['host'] = API_ENDPOINTS_1_0[self.region]['host']

        return kwargs

    def _to_node(self, data):
        """
        Convert a raw server dictionary into a Node object.

        Returns ``None`` when the response does not contain a server UUID
        (which happens when server creation failed because of insufficient
        funds).
        """
        if data:
            try:
                state = self.NODE_STATE_MAP[data['status']]
            except KeyError:
                state = NodeState.UNKNOWN

            if 'server' not in data:
                # Response does not contain server UUID if the server
                # creation failed because of insufficient funds.
                return None

            public_ips = []
            if 'nic:0:dhcp' in data:
                if isinstance(data['nic:0:dhcp'], list):
                    public_ips = data['nic:0:dhcp']
                else:
                    public_ips = [data['nic:0:dhcp']]

            extra = {}
            extra_keys = [('cpu', 'int'), ('smp', 'auto'), ('mem', 'int'),
                          ('status', 'str')]
            for key, value_type in extra_keys:
                if key in data:
                    value = data[key]

                    if value_type == 'int':
                        value = int(value)
                    elif value_type == 'auto':
                        try:
                            value = int(value)
                        except ValueError:
                            pass

                    extra.update({key: value})

            if 'vnc:ip' in data and 'vnc:password' in data:
                extra.update({'vnc_ip': data['vnc:ip'],
                              'vnc_password': data['vnc:password']})

            node = Node(id=data['server'], name=data['name'], state=state,
                        public_ips=public_ips, private_ips=None,
                        driver=self.connection.driver,
                        extra=extra)

            return node
        return None

    def _get_node(self, node_id):
        """
        Retrieve a single node with the provided id.

        :raises CloudSigmaException: If no node with the provided id exists.
        """
        nodes = self.list_nodes()
        # NOTE: Fixed a bug where the comparison read "node.id == node.id"
        # (always true) and therefore returned the first node in the list
        # regardless of the requested id.
        matches = [n for n in nodes if n.id == node_id]

        if not matches:
            raise CloudSigmaException(
                'Node with id %s does not exist' % (node_id))

        return matches[0]

    def _get_node_info(self, node):
        """
        Return the raw info dictionary for the provided node.
        """
        response = self.connection.request(
            action='/servers/%s/info' % (node.id))

        result = str2dicts(response.body)
        return result[0]
class CloudSigmaZrhConnection(CloudSigma_1_0_Connection):
    """
    Connection class for the CloudSigma driver for the Zurich end-point
    """
    # Override the default (region-based) host with the Zurich endpoint.
    host = API_ENDPOINTS_1_0['zrh']['host']
class CloudSigmaZrhNodeDriver(CloudSigma_1_0_NodeDriver):
    """
    CloudSigma node driver for the Zurich end-point
    """
    # Pin the connection and pricing lookups to the Zurich region.
    connectionCls = CloudSigmaZrhConnection
    api_name = 'cloudsigma_zrh'
class CloudSigmaLvsConnection(CloudSigma_1_0_Connection):
    """
    Connection class for the CloudSigma driver for the Las Vegas end-point
    """
    # Override the default (region-based) host with the Las Vegas endpoint.
    host = API_ENDPOINTS_1_0['lvs']['host']
class CloudSigmaLvsNodeDriver(CloudSigma_1_0_NodeDriver):
    """
    CloudSigma node driver for the Las Vegas end-point
    """
    # Pin the connection and pricing lookups to the Las Vegas region.
    connectionCls = CloudSigmaLvsConnection
    api_name = 'cloudsigma_lvs'
class CloudSigmaError(ProviderError):
    """
    Represents CloudSigma API error.
    """

    def __init__(self, http_code, error_type, error_msg, error_point, driver):
        """
        :param http_code: HTTP status code.
        :type http_code: ``int``

        :param error_type: Type of error (validation / notexist / backend /
                           permissions database / concurrency / billing /
                           payment)
        :type error_type: ``str``

        :param error_msg: A description of the error that occurred.
        :type error_msg: ``str``

        :param error_point: Point at which the error occurred. Can be None.
        :type error_point: ``str`` or ``None``
        """
        # The base class stores error_msg as "value" and keeps http_code.
        super(CloudSigmaError, self).__init__(http_code=http_code,
                                              value=error_msg, driver=driver)
        self.error_type = error_type
        self.error_msg = error_msg
        self.error_point = error_point
class CloudSigmaSubscription(object):
    """
    Represents CloudSigma subscription.
    """

    def __init__(self, id, resource, amount, period, status, price, start_time,
                 end_time, auto_renew, subscribed_object=None):
        """
        :param id: Subscription ID.
        :type id: ``str``

        :param resource: Resource (e.g vlan, ip, etc.).
        :type resource: ``str``

        :param amount: Subscription amount.
        :type amount: ``str``

        :param period: Subscription period.
        :type period: ``str``

        :param status: Subscription status (active / inactive).
        :type status: ``str``

        :param price: Subscription price.
        :type price: ``str``

        :param start_time: Start time for this subscription.
        :type start_time: ``datetime.datetime``

        :param end_time: End time for this subscription.
        :type end_time: ``datetime.datetime``

        :param auto_renew: True if the subscription is auto renewed.
        :type auto_renew: ``bool``

        :param subscribed_object: Optional UUID of the subscribed object.
        :type subscribed_object: ``str``
        """
        self.id = id
        self.resource = resource
        self.amount = amount
        self.period = period
        self.status = status
        self.price = price
        self.start_time = start_time
        self.end_time = end_time
        self.auto_renew = auto_renew
        self.subscribed_object = subscribed_object

    def __str__(self):
        return repr(self)

    def __repr__(self):
        values = (self.id, self.resource, self.amount, self.period,
                  self.subscribed_object)
        return ('<CloudSigmaSubscription id=%s, resource=%s, amount=%s, '
                'period=%s, object_uuid=%s>' % values)
class CloudSigmaTag(object):
    """
    Represents a CloudSigma tag object.
    """

    def __init__(self, id, name, resources=None):
        """
        :param id: Tag ID.
        :type id: ``str``

        :param name: Tag name.
        :type name: ``str``

        :param resources: IDs of resources which are associated with this tag.
        :type resources: ``list`` of ``str``
        """
        self.id = id
        self.name = name
        # Normalize a missing / empty value to a fresh list.
        self.resources = resources or []

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return ('<CloudSigmaTag id=%s, name=%s, resources=%s>' %
                (self.id, self.name, repr(self.resources)))
class CloudSigmaDrive(NodeImage):
    """
    Represents a CloudSigma drive.
    """

    def __init__(self, id, name, size, media, status, driver, extra=None):
        """
        :param id: Drive ID.
        :type id: ``str``

        :param name: Drive name.
        :type name: ``str``

        :param size: Drive size (in bytes).
        :type size: ``int``

        :param media: Drive media (cdrom / disk).
        :type media: ``str``

        :param status: Drive status (unmounted / mounted).
        :type status: ``str``
        """
        super(CloudSigmaDrive, self).__init__(id=id, name=name, driver=driver,
                                              extra=extra)
        self.size = size
        self.media = media
        self.status = status

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # NOTE: Fixed the repr label which previously read "CloudSigmaSize"
        # even though this class represents a drive.
        return (('<CloudSigmaDrive id=%s, name=%s size=%s, media=%s, '
                 'status=%s>') %
                (self.id, self.name, self.size, self.media, self.status))
class CloudSigmaFirewallPolicy(object):
    """
    Represents a CloudSigma firewall policy.
    """

    def __init__(self, id, name, rules):
        """
        :param id: Policy ID.
        :type id: ``str``

        :param name: Policy name.
        :type name: ``str``

        :param rules: Rules associated with this policy.
        :type rules: ``list`` of :class:`.CloudSigmaFirewallPolicyRule` objects
        """
        self.id = id
        self.name = name
        # Normalize a missing / empty value to a fresh list.
        self.rules = rules or []

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return (('<CloudSigmaFirewallPolicy id=%s, name=%s rules=%s>') %
                (self.id, self.name, repr(self.rules)))
class CloudSigmaFirewallPolicyRule(object):
    """
    Represents a CloudSigma firewall policy rule.
    """

    def __init__(self, action, direction, ip_proto=None, src_ip=None,
                 src_port=None, dst_ip=None, dst_port=None, comment=None):
        """
        :param action: Action (drop / accept).
        :type action: ``str``

        :param direction: Rule direction (in / out / both)>
        :type direction: ``str``

        :param ip_proto: IP protocol (tcp / udp).
        :type ip_proto: ``str``.

        :param src_ip: Source IP in CIDR notation.
        :type src_ip: ``str``

        :param src_port: Source port or a port range.
        :type src_port: ``str``

        :param dst_ip: Destination IP in CIDR notation.
        :type dst_ip: ``str``

        :param dst_port: Destination port or a port range.
        :type dst_port: ``str``

        :param comment: Comment associated with the policy.
        :type comment: ``str``
        """
        self.action = action
        self.direction = direction
        self.ip_proto = ip_proto
        self.src_ip = src_ip
        self.src_port = src_port
        self.dst_ip = dst_ip
        self.dst_port = dst_port
        self.comment = comment

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return (('<CloudSigmaFirewallPolicyRule action=%s, direction=%s>') %
                (self.action, self.direction))
class CloudSigma_2_0_Response(JsonResponse):
    """Response class for the CloudSigma API v2.0."""

    success_status_codes = [
        httplib.OK,
        httplib.ACCEPTED,
        httplib.NO_CONTENT,
        httplib.CREATED
    ]

    def success(self):
        return self.status in self.success_status_codes

    def parse_error(self):
        # 401 means the supplied credentials were rejected.
        if int(self.status) == httplib.UNAUTHORIZED:
            raise InvalidCredsError('Invalid credentials')

        body = self.parse_body()
        errors = self._parse_errors_from_body(body=body)

        if errors:
            # Throw first error
            raise errors[0]

        return body

    def _parse_errors_from_body(self, body):
        """
        Parse errors from the response body.

        :return: List of error objects.
        :rtype: ``list`` of :class:`.CloudSigmaError` objects
        """
        if not isinstance(body, list):
            return None

        errors = []
        for item in body:
            if 'error_type' not in item:
                # Unrecognized error
                continue

            error = CloudSigmaError(http_code=self.status,
                                    error_type=item['error_type'],
                                    error_msg=item['error_message'],
                                    error_point=item['error_point'],
                                    driver=self.connection.driver)
            errors.append(error)

        return errors
class CloudSigma_2_0_Connection(ConnectionUserAndKey):
    """Connection class for the CloudSigma API v2.0 (HTTP Basic auth)."""
    host = API_ENDPOINTS_2_0[DEFAULT_REGION]['host']
    responseCls = CloudSigma_2_0_Response
    api_prefix = '/api/2.0'

    def add_default_headers(self, headers):
        credentials = b('%s:%s' % (self.user_id, self.key))
        encoded = base64.b64encode(credentials).decode('utf-8')

        headers['Accept'] = 'application/json'
        headers['Content-Type'] = 'application/json'
        headers['Authorization'] = 'Basic %s' % (encoded)
        return headers

    def encode_data(self, data):
        # Request payloads are JSON-encoded.
        return json.dumps(data)

    def request(self, action, params=None, data=None, headers=None,
                method='GET', raw=False):
        params = params or {}

        # Every action lives under the versioned API prefix.
        action = self.api_prefix + action

        if method == 'GET':
            params['limit'] = 0  # we want all the items back

        return super(CloudSigma_2_0_Connection, self).request(action=action,
                                                              params=params,
                                                              data=data,
                                                              headers=headers,
                                                              method=method,
                                                              raw=raw)
class CloudSigma_2_0_NodeDriver(CloudSigmaNodeDriver):
"""
Driver for CloudSigma API v2.0.
"""
name = 'CloudSigma (API v2.0)'
api_name = 'cloudsigma_zrh'
website = 'http://www.cloudsigma.com/'
connectionCls = CloudSigma_2_0_Connection
# Default drive transition timeout in seconds
DRIVE_TRANSITION_TIMEOUT = 500
# How long to sleep between different polling periods while waiting for
# drive transition
DRIVE_TRANSITION_SLEEP_INTERVAL = 5
NODE_STATE_MAP = {
'starting': NodeState.PENDING,
'stopping': NodeState.PENDING,
'unavailable': NodeState.ERROR,
'running': NodeState.RUNNING,
'stopped': NodeState.STOPPED,
'paused': NodeState.PAUSED
}
def __init__(self, key, secret, secure=True, host=None, port=None,
region=DEFAULT_REGION, **kwargs):
if region not in API_ENDPOINTS_2_0:
raise ValueError('Invalid region: %s' % (region))
if not secure:
# CloudSigma drive uses Basic Auth authentication and we don't want
# to allow user to accidentally send credentials over the wire in
# plain-text
raise ValueError('CloudSigma driver only supports a '
'secure connection')
self._host_argument_set = host is not None
super(CloudSigma_2_0_NodeDriver, self).__init__(key=key, secret=secret,
secure=secure,
host=host, port=port,
region=region,
**kwargs)
def list_nodes(self, ex_tag=None):
"""
List available nodes.
:param ex_tag: If specified, only return servers tagged with the
provided tag.
:type ex_tag: :class:`CloudSigmaTag`
"""
if ex_tag:
action = '/tags/%s/servers/detail/' % (ex_tag.id)
else:
action = '/servers/detail/'
response = self.connection.request(action=action, method='GET').object
nodes = [self._to_node(data=item) for item in response['objects']]
return nodes
def list_sizes(self):
"""
List available sizes.
"""
sizes = []
for value in INSTANCE_TYPES:
key = value['id']
size = CloudSigmaNodeSize(id=value['id'], name=value['name'],
cpu=value['cpu'], ram=value['memory'],
disk=value['disk'],
bandwidth=value['bandwidth'],
price=self._get_size_price(size_id=key),
driver=self.connection.driver)
sizes.append(size)
return sizes
def list_images(self):
"""
Return a list of available pre-installed library drives.
Note: If you want to list all the available library drives (both
pre-installed and installation CDs), use :meth:`ex_list_library_drives`
method.
"""
response = self.connection.request(action='/libdrives/').object
images = [self._to_image(data=item) for item in response['objects']]
# We filter out non pre-installed library drives by default because
# they can't be used directly following a default Libcloud server
# creation flow.
images = [image for image in images if
image.extra['image_type'] == 'preinst']
return images
def create_node(self, name, size, image, ex_metadata=None,
ex_vnc_password=None, ex_avoid=None, ex_vlan=None):
"""
Create a new server.
Server creation consists multiple steps depending on the type of the
image used.
1. Installation CD:
1. Create a server and attach installation cd
2. Start a server
2. Pre-installed image:
1. Clone provided library drive so we can use it
2. Resize cloned drive to the desired size
3. Create a server and attach cloned drive
4. Start a server
:param ex_metadata: Key / value pairs to associate with the
created node. (optional)
:type ex_metadata: ``dict``
:param ex_vnc_password: Password to use for VNC access. If not
provided, random password is generated.
:type ex_vnc_password: ``str``
:param ex_avoid: A list of server UUIDs to avoid when starting this
node. (optional)
:type ex_avoid: ``list``
:param ex_vlan: Optional UUID of a VLAN network to use. If specified,
server will have two nics assigned - 1 with a public ip
and 1 with the provided VLAN.
:type ex_vlan: ``str``
"""
is_installation_cd = self._is_installation_cd(image=image)
if ex_vnc_password:
vnc_password = ex_vnc_password
else:
# VNC password is not provided, generate a random one.
vnc_password = get_secure_random_string(size=12)
drive_name = '%s-drive' % (name)
# size is specified in GB
drive_size = (size.disk * 1024 * 1024 * 1024)
if not is_installation_cd:
# 1. Clone library drive so we can use it
drive = self.ex_clone_drive(drive=image, name=drive_name)
# Wait for drive clone to finish
drive = self._wait_for_drive_state_transition(drive=drive,
state='unmounted')
# 2. Resize drive to the desired disk size if the desired disk size
# is larger than the cloned drive size.
if drive_size > drive.size:
drive = self.ex_resize_drive(drive=drive, size=drive_size)
# Wait for drive resize to finish
drive = self._wait_for_drive_state_transition(drive=drive,
state='unmounted')
else:
# No need to clone installation CDs
drive = image
# 3. Create server and attach cloned drive
# ide 0:0
data = {}
data['name'] = name
data['cpu'] = size.cpu
data['mem'] = (size.ram * 1024 * 1024)
data['vnc_password'] = vnc_password
if ex_metadata:
data['meta'] = ex_metadata
# Assign 1 public interface (DHCP) to the node
nic = {
'boot_order': None,
'ip_v4_conf': {
'conf': 'dhcp',
},
'ip_v6_conf': None
}
nics = [nic]
if ex_vlan:
# Assign another interface for VLAN
nic = {
'boot_order': None,
'ip_v4_conf': None,
'ip_v6_conf': None,
'vlan': ex_vlan
}
nics.append(nic)
# Need to use IDE for installation CDs
if is_installation_cd:
device_type = 'ide'
else:
device_type = 'virtio'
drive = {
'boot_order': 1,
'dev_channel': '0:0',
'device': device_type,
'drive': drive.id
}
drives = [drive]
data['nics'] = nics
data['drives'] = drives
action = '/servers/'
response = self.connection.request(action=action, method='POST',
data=data)
node = self._to_node(response.object['objects'][0])
# 4. Start server
self.ex_start_node(node=node, ex_avoid=ex_avoid)
return node
def destroy_node(self, node):
"""
Destroy the node and all the associated drives.
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
action = '/servers/%s/' % (node.id)
params = {'recurse': 'all_drives'}
response = self.connection.request(action=action, method='DELETE',
params=params)
return response.status == httplib.NO_CONTENT
# Server extension methods
def ex_edit_node(self, node, params):
"""
Edit a node.
:param node: Node to edit.
:type node: :class:`libcloud.compute.base.Node`
:param params: Node parameters to update.
:type params: ``dict``
:return Edited node.
:rtype: :class:`libcloud.compute.base.Node`
"""
data = {}
# name, cpu, mem and vnc_password attributes must always be present so
# we just copy them from the to-be-edited node
data['name'] = node.name
data['cpu'] = node.extra['cpu']
data['mem'] = node.extra['mem']
data['vnc_password'] = node.extra['vnc_password']
nics = copy.deepcopy(node.extra.get('nics', []))
data['nics'] = nics
data.update(params)
action = '/servers/%s/' % (node.id)
response = self.connection.request(action=action, method='PUT',
data=data).object
node = self._to_node(data=response)
return node
def ex_start_node(self, node, ex_avoid=None):
"""
Start a node.
:param node: Node to start.
:type node: :class:`libcloud.compute.base.Node`
:param ex_avoid: A list of other server uuids to avoid when
starting this node. If provided, node will
attempt to be started on a different
physical infrastructure from other servers
specified using this argument. (optional)
:type ex_avoid: ``list``
"""
params = {}
if ex_avoid:
params['avoid'] = ','.join(ex_avoid)
path = '/servers/%s/action/' % (node.id)
response = self._perform_action(path=path, action='start',
params=params,
method='POST')
return response.status == httplib.ACCEPTED
def ex_stop_node(self, node):
"""
Stop a node.
"""
path = '/servers/%s/action/' % (node.id)
response = self._perform_action(path=path, action='stop',
method='POST')
return response.status == httplib.ACCEPTED
def ex_clone_node(self, node, name=None, random_vnc_password=None):
"""
Clone the provided node.
:param name: Optional name for the cloned node.
:type name: ``str``
:param random_vnc_password: If True, a new random VNC password will be
generated for the cloned node. Otherwise
password from the cloned node will be
reused.
:type random_vnc_password: ``bool``
:return: Cloned node.
:rtype: :class:`libcloud.compute.base.Node`
"""
data = {}
data['name'] = name
data['random_vnc_password'] = random_vnc_password
path = '/servers/%s/action/' % (node.id)
response = self._perform_action(path=path, action='clone',
method='POST', data=data).object
node = self._to_node(data=response)
return node
def ex_open_vnc_tunnel(self, node):
"""
Open a VNC tunnel to the provided node and return the VNC url.
:param node: Node to open the VNC tunnel to.
:type node: :class:`libcloud.compute.base.Node`
:return: URL of the opened VNC tunnel.
:rtype: ``str``
"""
path = '/servers/%s/action/' % (node.id)
response = self._perform_action(path=path, action='open_vnc',
method='POST').object
vnc_url = response['vnc_url']
return vnc_url
def ex_close_vnc_tunnel(self, node):
"""
Close a VNC server to the provided node.
:param node: Node to close the VNC tunnel to.
:type node: :class:`libcloud.compute.base.Node`
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
path = '/servers/%s/action/' % (node.id)
response = self._perform_action(path=path, action='close_vnc',
method='POST')
return response.status == httplib.ACCEPTED
# Drive extension methods
def ex_list_library_drives(self):
"""
Return a list of all the available library drives (pre-installed and
installation CDs).
:rtype: ``list`` of :class:`.CloudSigmaDrive` objects
"""
response = self.connection.request(action='/libdrives/').object
drives = [self._to_drive(data=item) for item in response['objects']]
return drives
def ex_list_user_drives(self):
"""
Return a list of all the available user's drives.
:rtype: ``list`` of :class:`.CloudSigmaDrive` objects
"""
response = self.connection.request(action='/drives/detail/').object
drives = [self._to_drive(data=item) for item in response['objects']]
return drives
def ex_create_drive(self, name, size, media='disk', ex_avoid=None):
"""
Create a new drive.
:param name: Drive name.
:type name: ``str``
:param size: Drive size in bytes.
:type size: ``int``
:param media: Drive media type (cdrom, disk).
:type media: ``str``
:param ex_avoid: A list of other drive uuids to avoid when
creating this drive. If provided, drive will
attempt to be created on a different
physical infrastructure from other drives
specified using this argument. (optional)
:type ex_avoid: ``list``
:return: Created drive object.
:rtype: :class:`.CloudSigmaDrive`
"""
params = {}
data = {
'name': name,
'size': size,
'media': media
}
if ex_avoid:
params['avoid'] = ','.join(ex_avoid)
action = '/drives/'
response = self.connection.request(action=action, method='POST',
params=params, data=data).object
drive = self._to_drive(data=response['objects'][0])
return drive
def ex_clone_drive(self, drive, name=None, ex_avoid=None):
    """
    Clone a library or a standard drive.

    :param drive: Drive to clone.
    :type drive: :class:`libcloud.compute.base.NodeImage` or
                 :class:`.CloudSigmaDrive`

    :param name: Optional name for the cloned drive.
    :type name: ``str``

    :param ex_avoid: A list of other drive uuids to avoid when
                     creating this drive. If provided, drive will
                     attempt to be created on a different
                     physical infrastructure from other drives
                     specified using this argument. (optional)
    :type ex_avoid: ``list``

    :return: New cloned drive.
    :rtype: :class:`.CloudSigmaDrive`
    """
    params = {}
    data = {}

    if ex_avoid:
        params['avoid'] = ','.join(ex_avoid)

    if name:
        data['name'] = name

    response = self._perform_action(
        path='/drives/%s/action/' % (drive.id), action='clone',
        params=params, data=data, method='POST')
    return self._to_drive(data=response.object['objects'][0])
def ex_resize_drive(self, drive, size):
    """
    Resize a drive.

    :param drive: Drive to resize.

    :param size: New drive size in bytes.
    :type size: ``int``

    :return: Drive object which is being resized.
    :rtype: :class:`.CloudSigmaDrive`
    """
    # The API requires name and media to be re-sent alongside the new size.
    payload = {'name': drive.name, 'size': size, 'media': 'disk'}

    response = self._perform_action(
        path='/drives/%s/action/' % (drive.id), action='resize',
        method='POST', data=payload)
    return self._to_drive(data=response.object['objects'][0])
def ex_attach_drive(self, node):
    """
    Attach a drive to the provided node.

    .. note:: Not implemented yet; calling it is currently a no-op.

    :param node: Node to attach a drive to.
    :type node: :class:`libcloud.compute.base.Node`
    """
    # TODO
    pass
def ex_get_drive(self, drive_id):
    """
    Retrieve information about a single drive.

    :param drive_id: ID of the drive to retrieve.
    :type drive_id: ``str``

    :return: Drive object.
    :rtype: :class:`.CloudSigmaDrive`
    """
    response = self.connection.request(action='/drives/%s/' % (drive_id))
    return self._to_drive(data=response.object)
# Firewall policies extension methods
def ex_list_firewall_policies(self):
    """
    List firewall policies.

    :rtype: ``list`` of :class:`.CloudSigmaFirewallPolicy`
    """
    response = self.connection.request(action='/fwpolicies/detail/',
                                       method='GET').object
    return [self._to_firewall_policy(data=item)
            for item in response['objects']]
def ex_create_firewall_policy(self, name, rules=None):
    """
    Create a firewall policy.

    :param name: Policy name.
    :type name: ``str``

    :param rules: List of firewall policy rules to associate with this
                  policy. (optional)
    :type rules: ``list`` of ``dict``

    :return: Created firewall policy object.
    :rtype: :class:`.CloudSigmaFirewallPolicy`
    """
    policy_obj = {'name': name}

    if rules:
        policy_obj['rules'] = rules

    # API expects a list of policy objects under the "objects" key
    data = {'objects': [policy_obj]}

    response = self.connection.request(action='/fwpolicies/',
                                       method='POST', data=data).object
    return self._to_firewall_policy(data=response['objects'][0])
def ex_attach_firewall_policy(self, policy, node, nic_mac=None):
    """
    Attach firewall policy to a public NIC interface on the server.

    :param policy: Firewall policy to attach.
    :type policy: :class:`.CloudSigmaFirewallPolicy`

    :param node: Node to attach policy to.
    :type node: :class:`libcloud.compute.base.Node`

    :param nic_mac: Optional MAC address of the NIC to add the policy to.
                    If not specified, first public interface is used
                    instead.
    :type nic_mac: ``str``

    :return: Node object to which the policy was attached to.
    :rtype: :class:`libcloud.compute.base.Node`
    """
    # Work on a copy so the caller's node object is not mutated.
    nics = copy.deepcopy(node.extra.get('nics', []))

    if nic_mac:
        candidates = [n for n in nics if n['mac'] == nic_mac]
    else:
        candidates = nics

    if len(candidates) == 0:
        raise ValueError('Cannot find the NIC interface to attach '
                         'a policy to')

    # "candidates" holds references into "nics", so updating the first
    # match is reflected in the full NIC list sent to the API.
    candidates[0]['firewall_policy'] = policy.id

    return self.ex_edit_node(node=node, params={'nics': nics})
def ex_delete_firewall_policy(self, policy):
    """
    Delete a firewall policy.

    :param policy: Policy to delete.
    :type policy: :class:`.CloudSigmaFirewallPolicy`

    :return: ``True`` on success, ``False`` otherwise.
    :rtype: ``bool``
    """
    response = self.connection.request(
        action='/fwpolicies/%s/' % (policy.id), method='DELETE')
    return response.status == httplib.NO_CONTENT
# Availability groups extension methods
def ex_list_servers_availability_groups(self):
    """
    Return which running servers share the same physical compute host.

    :return: A list of server UUIDs which share the same physical compute
             host. Servers which share the same host will be stored under
             the same list index.
    :rtype: ``list`` of ``list``
    """
    return self.connection.request(
        action='/servers/availability_groups/', method='GET').object
def ex_list_drives_availability_groups(self):
    """
    Return which drives share the same physical storage host.

    :return: A list of drive UUIDs which share the same physical storage
             host. Drives which share the same host will be stored under
             the same list index.
    :rtype: ``list`` of ``list``
    """
    return self.connection.request(
        action='/drives/availability_groups/', method='GET').object
# Tag extension methods
def ex_list_tags(self):
    """
    List all the available tags.

    :rtype: ``list`` of :class:`.CloudSigmaTag` objects
    """
    response = self.connection.request(action='/tags/detail/',
                                       method='GET').object
    return [self._to_tag(data=item) for item in response['objects']]
def ex_get_tag(self, tag_id):
    """
    Retrieve a single tag.

    :param tag_id: ID of the tag to retrieve.
    :type tag_id: ``str``

    :return: Tag object.
    :rtype: :class:`.CloudSigmaTag`
    """
    action = '/tags/%s/' % (tag_id)
    response = self.connection.request(action=action, method='GET').object
    tag = self._to_tag(data=response)
    return tag
def ex_create_tag(self, name, resource_uuids=None):
    """
    Create a tag.

    :param name: Tag name.
    :type name: ``str``

    :param resource_uuids: Optional list of resource UUIDs to assign this
                           tag to.
    :type resource_uuids: ``list`` of ``str``

    :return: Created tag object.
    :rtype: :class:`.CloudSigmaTag`
    """
    data = {'objects': [{'name': name}]}

    if resource_uuids:
        data['resources'] = resource_uuids

    response = self.connection.request(action='/tags/', method='POST',
                                       data=data).object
    return self._to_tag(data=response['objects'][0])
def ex_tag_resource(self, resource, tag):
    """
    Associate tag with the provided resource.

    :param resource: Resource to associate a tag with.
    :type resource: :class:`libcloud.compute.base.Node` or
                    :class:`.CloudSigmaDrive`

    :param tag: Tag to associate with the resources.
    :type tag: :class:`.CloudSigmaTag`

    :return: Updated tag object.
    :rtype: :class:`.CloudSigmaTag`
    """
    # Only objects which expose an "id" attribute (Node, Drive, ...)
    # can be tagged.
    if not hasattr(resource, 'id'):
        raise ValueError('Resource doesn\'t have id attribute')

    return self.ex_tag_resources(resources=[resource], tag=tag)
def ex_tag_resources(self, resources, tag):
    """
    Associate tag with the provided resources.

    :param resources: Resources to associate a tag with.
    :type resources: ``list`` of :class:`libcloud.compute.base.Node` or
                     :class:`.CloudSigmaDrive`

    :param tag: Tag to associate with the resources.
    :type tag: :class:`.CloudSigmaTag`

    :return: Updated tag object.
    :rtype: :class:`.CloudSigmaTag`
    """
    # Start with the UUIDs already associated with the tag so existing
    # associations are preserved. Note: the previous implementation
    # re-bound the "resources" argument to this list, so the resources
    # passed in by the caller were never tagged and the hasattr check ran
    # against plain UUID strings instead of the caller's objects.
    resource_uuids = tag.resources[:]

    for resource in resources:
        if not hasattr(resource, 'id'):
            raise ValueError('Resource doesn\'t have id attribute')

        resource_uuids.append(resource.id)

    # De-duplicate in case a resource is already tagged
    resource_uuids = list(set(resource_uuids))

    data = {
        'name': tag.name,
        'resources': resource_uuids
    }

    action = '/tags/%s/' % (tag.id)
    response = self.connection.request(action=action, method='PUT',
                                       data=data).object
    tag = self._to_tag(data=response)
    return tag
def ex_delete_tag(self, tag):
    """
    Delete a tag.

    :param tag: Tag to delete.
    :type tag: :class:`.CloudSigmaTag`

    :return: ``True`` on success, ``False`` otherwise.
    :rtype: ``bool``
    """
    response = self.connection.request(action='/tags/%s/' % (tag.id),
                                       method='DELETE')
    return response.status == httplib.NO_CONTENT
# Account extension methods
def ex_get_balance(self):
    """
    Retrieve account balance information.

    :return: Dictionary with two items ("balance" and "currency").
    :rtype: ``dict``
    """
    action = '/balance/'
    response = self.connection.request(action=action, method='GET')
    return response.object
def ex_get_pricing(self):
    """
    Retrieve pricing information that is applicable to the cloud.

    :return: Dictionary with pricing information.
    :rtype: ``dict``
    """
    action = '/pricing/'
    response = self.connection.request(action=action, method='GET')
    return response.object
def ex_get_usage(self):
    """
    Retrieve account current usage information.

    :return: Dictionary with two items ("balance" and "usage").
    :rtype: ``dict``
    """
    return self.connection.request(action='/currentusage/',
                                   method='GET').object
def ex_list_subscriptions(self, status='all', resources=None):
    """
    List subscriptions for this account.

    :param status: Only return subscriptions with the provided status
                   (optional).
    :type status: ``str``

    :param resources: Only return subscriptions for the provided resources
                      (optional).
    :type resources: ``list``

    :rtype: ``list``
    """
    params = {}

    if status:
        params['status'] = status

    if resources:
        # API expects a comma-separated list of resource names
        params['resource'] = ','.join(resources)

    response = self.connection.request(action='/subscriptions/',
                                       params=params).object
    return self._to_subscriptions(data=response)
def ex_toggle_subscription_auto_renew(self, subscription):
    """
    Toggle subscription auto renew status.

    :param subscription: Subscription to toggle the auto renew flag for.
    :type subscription: :class:`.CloudSigmaSubscription`

    :return: ``True`` on success, ``False`` otherwise.
    :rtype: ``bool``
    """
    response = self._perform_action(
        path='/subscriptions/%s/action/' % (subscription.id),
        action='auto_renew', method='POST')
    return response.status == httplib.OK
def ex_create_subscription(self, amount, period, resource,
                           auto_renew=False):
    """
    Create a new subscription.

    :param amount: Subscription amount. For example, in dssd case this
                   would be disk size in gigabytes.
    :type amount: ``int``

    :param period: Subscription period. For example: 30 days, 1 week, 1
                   month, ...
    :type period: ``str``

    :param resource: Resource to purchase the subscription for.
    :type resource: ``str``

    :param auto_renew: True to automatically renew the subscription.
    :type auto_renew: ``bool``
    """
    item = {
        'amount': amount,
        'period': period,
        'auto_renew': auto_renew,
        'resource': resource
    }

    # API expects a list of subscription objects
    response = self.connection.request(action='/subscriptions/',
                                       data=[item], method='POST')
    return self._to_subscription(data=response.object['objects'][0])
# Misc extension methods
def ex_list_capabilities(self):
    """
    Retrieve all the basic and sensible limits of the API.

    :rtype: ``dict``
    """
    response = self.connection.request(action='/capabilities/',
                                       method='GET')
    return response.object
def _parse_ips_from_nic(self, nic):
    """
    Parse private and public IP addresses from the provided network
    interface object.

    :param nic: NIC object.
    :type nic: ``dict``

    :return: (public_ips, private_ips) tuple.
    :rtype: ``tuple``
    """
    public_ips, private_ips = [], []

    ipv4_conf = nic['ip_v4_conf']
    ipv6_conf = nic['ip_v6_conf']
    ip_v4 = ipv4_conf['ip'] if ipv4_conf else None
    ip_v6 = ipv6_conf['ip'] if ipv6_conf else None

    ipv4 = ip_v4['uuid'] if ip_v4 else None
    # Bug fix: the IPv6 address was previously read from the IPv4 object
    # (ip_v4['uuid']), which returned the wrong address and raised a
    # TypeError when only IPv6 was configured.
    ipv6 = ip_v6['uuid'] if ip_v6 else None

    ips = []

    if ipv4:
        ips.append(ipv4)

    if ipv6:
        ips.append(ipv6)

    # Runtime information carries the addresses which are actually
    # assigned to a running server.
    runtime = nic['runtime']
    ip_v4 = runtime['ip_v4'] if nic['runtime'] else None
    ip_v6 = runtime['ip_v6'] if nic['runtime'] else None
    ipv4 = ip_v4['uuid'] if ip_v4 else None
    # Same bug fix as above for the runtime IPv6 address.
    ipv6 = ip_v6['uuid'] if ip_v6 else None

    if ipv4:
        ips.append(ipv4)

    if ipv6:
        ips.append(ipv6)

    # De-duplicate (configured and runtime addresses usually overlap)
    ips = set(ips)

    for ip in ips:
        if is_private_subnet(ip):
            private_ips.append(ip)
        else:
            public_ips.append(ip)

    return public_ips, private_ips
def _to_node(self, data):
    # Convert an API server object to a libcloud Node.
    extra_keys = ['cpu', 'mem', 'nics', 'vnc_password', 'meta']

    state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN)
    extra = self._extract_values(obj=data, keys=extra_keys)

    public_ips, private_ips = [], []
    for nic in data['nics']:
        nic_public_ips, nic_private_ips = self._parse_ips_from_nic(nic=nic)
        public_ips.extend(nic_public_ips)
        private_ips.extend(nic_private_ips)

    return Node(id=data['uuid'], name=data['name'], state=state,
                public_ips=public_ips, private_ips=private_ips,
                driver=self, extra=extra)
def _to_image(self, data):
    # Convert an API drive object to a libcloud NodeImage.
    extra_keys = ['description', 'arch', 'image_type', 'os', 'licenses',
                  'media', 'meta']
    extra = self._extract_values(obj=data, keys=extra_keys)
    return NodeImage(id=data['uuid'], name=data['name'], driver=self,
                     extra=extra)
def _to_drive(self, data):
    # Convert an API drive object to a CloudSigmaDrive.
    return CloudSigmaDrive(id=data['uuid'], name=data['name'],
                           size=data['size'], media=data['media'],
                           status=data['status'], driver=self, extra={})
def _to_tag(self, data):
    # Tags reference resources as objects; keep only their UUIDs.
    resource_uuids = [resource['uuid'] for resource in data['resources']]
    return CloudSigmaTag(id=data['uuid'], name=data['name'],
                         resources=resource_uuids)
def _to_subscriptions(self, data):
    # Convert a paginated API response to a list of subscription objects.
    return [self._to_subscription(data=item) for item in data['objects']]
def _to_subscription(self, data):
    # Convert an API subscription object to a CloudSigmaSubscription.
    return CloudSigmaSubscription(
        id=data['id'],
        resource=data['resource'],
        amount=int(data['amount']),
        period=data['period'],
        status=data['status'],
        price=data['price'],
        start_time=parse_date(data['start_time']),
        end_time=parse_date(data['end_time']),
        auto_renew=data['auto_renew'],
        subscribed_object=data['subscribed_object'])
def _to_firewall_policy(self, data):
    # Convert an API firewall policy object (and its rules) to driver types.
    rules = []
    for item in data.get('rules', []):
        rules.append(CloudSigmaFirewallPolicyRule(
            action=item['action'],
            direction=item['direction'],
            ip_proto=item['ip_proto'],
            src_ip=item['src_ip'],
            src_port=item['src_port'],
            dst_ip=item['dst_ip'],
            dst_port=item['dst_port'],
            comment=item['comment']))

    return CloudSigmaFirewallPolicy(id=data['uuid'], name=data['name'],
                                    rules=rules)
def _perform_action(self, path, action, method='POST', params=None,
                    data=None):
    """
    Perform API action and return response object.
    """
    # Copy so the caller's params dict is not mutated.
    request_params = dict(params) if params else {}

    # The "do" query parameter selects which action to perform.
    request_params['do'] = action

    return self.connection.request(action=path, method=method,
                                   params=request_params, data=data)
def _is_installation_cd(self, image):
    """
    Detect if the provided image is an installation CD.

    :rtype: ``bool``
    """
    return bool(isinstance(image, CloudSigmaDrive) and
                image.media == 'cdrom')
def _extract_values(self, obj, keys):
"""
Extract values from a dictionary and return a new dictionary with
extracted values.
:param obj: Dictionary to extract values from.
:type obj: ``dict``
:param keys: Keys to extract.
:type keys: ``list``
:return: Dictionary with extracted values.
:rtype: ``dict``
"""
result = {}
for key in keys:
result[key] = obj[key]
return result
def _wait_for_drive_state_transition(self, drive, state,
                                     timeout=DRIVE_TRANSITION_TIMEOUT):
    """
    Wait for a drive to transition to the provided state.

    Note: This function blocks and periodically calls "GET drive" endpoint
    to check if the drive has already transitioned to the desired state.

    :param drive: Drive to wait for.
    :type drive: :class:`.CloudSigmaDrive`

    :param state: Desired drive state.
    :type state: ``str``

    :param timeout: How long to wait for the transition (in seconds)
                    before timing out.
    :type timeout: ``int``

    :return: Drive object.
    :rtype: :class:`.CloudSigmaDrive`
    """
    deadline = time.time() + timeout

    while drive.status != state:
        drive = self.ex_get_drive(drive_id=drive.id)

        if drive.status == state:
            break

        if time.time() >= deadline:
            msg = ('Timed out while waiting for drive transition '
                   '(timeout=%s seconds)' % (timeout))
            raise Exception(msg)

        time.sleep(self.DRIVE_TRANSITION_SLEEP_INTERVAL)

    return drive
def _ex_connection_class_kwargs(self):
    """
    Return the host value based on the user supplied region.
    """
    kwargs = {}

    # Only resolve the host from the region map when the user did not
    # supply an explicit "host" argument to the driver constructor.
    if not self._host_argument_set:
        kwargs['host'] = API_ENDPOINTS_2_0[self.region]['host']

    return kwargs
| apache-2.0 |
Freso/botbot-web | botbot/settings/base.py | 2 | 10121 | import ast
import os
import urlparse
# Import global settings to make it easier to extend settings.
from django.conf.global_settings import * # pylint: disable=W0614,W0401
import dj_database_url
#==============================================================================
# Generic Django project settings
#==============================================================================
DEBUG = ast.literal_eval(os.environ.get('DEBUG', 'True'))
TEMPLATE_DEBUG = DEBUG
SITE_ID = 1
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'UTC'
USE_TZ = True
USE_I18N = True
USE_L10N = True
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en', 'English'),
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ['WEB_SECRET_KEY']
AUTH_USER_MODEL = 'accounts.User'
INSTALLED_APPS = (
'botbot.apps.accounts',
'botbot.apps.bots',
'botbot.apps.logs',
'botbot.apps.plugins',
'botbot.apps.kudos',
'botbot.core',
'launchpad',
'pipeline',
'django_statsd',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.github',
'allauth.socialaccount.providers.google',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.sitemaps',
'bootstrap_toolkit',
)
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
#==============================================================================
# Calculation of directories relative to the project module location
#==============================================================================
import os
import sys
import botbot as project_module
PROJECT_DIR = os.path.dirname(os.path.realpath(project_module.__file__))
PYTHON_BIN = os.path.dirname(sys.executable)
ve_path = os.path.dirname(os.path.dirname(os.path.dirname(PROJECT_DIR)))
if "VAR_ROOT" in os.environ:
VAR_ROOT = os.environ.get("VAR_ROOT")
# Assume that the presence of 'activate_this.py' in the python bin/
# directory means that we're running in a virtual environment.
elif os.path.exists(os.path.join(PYTHON_BIN, 'activate_this.py')):
# We're running with a virtualenv python executable.
VAR_ROOT = os.path.join(os.path.dirname(PYTHON_BIN), 'var')
elif ve_path and os.path.exists(os.path.join(ve_path, 'bin',
'activate_this.py')):
# We're running in [virtualenv_root]/src/[project_name].
VAR_ROOT = os.path.join(ve_path, 'var')
else:
# Set the variable root to the local configuration location (which is
# ignored by the repository).
VAR_ROOT = os.path.join(PROJECT_DIR, 'conf', 'local')
if not os.path.exists(VAR_ROOT):
os.mkdir(VAR_ROOT)
#==============================================================================
# Project URLS and media settings
#==============================================================================
ROOT_URLCONF = 'botbot.urls'
LOGIN_URL = '/settings/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/settings/'
INCLUDE_DJANGO_ADMIN = ast.literal_eval(os.environ.get(
'INCLUDE_DJANGO_ADMIN', 'True'))
STATIC_URL = '/static/'
MEDIA_URL = '/uploads/'
STATIC_ROOT = os.environ.get('STATIC_ROOT', os.path.join(VAR_ROOT, 'static'))
MEDIA_ROOT = os.environ.get('MEDIA_ROOT', os.path.join(VAR_ROOT, 'uploads'))
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, 'static'),
)
# Defines PIPELINE settings and bundles
from ._asset_pipeline import *
DATABASES = {'default': dj_database_url.config(env='STORAGE_URL')}
# Reuse database connections
DATABASES['default'].update({
'CONN_MAX_AGE': None,
'ATOMIC_REQUESTS': True,
'OPTIONS': {"application_name": "django"},
})
GEOIP_CITY_DB_PATH = os.environ.get('GEOIP_CITY_DB_PATH',
os.path.join(VAR_ROOT, 'GeoLite2-City.mmdb'))
#==============================================================================
# Templates
#==============================================================================
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
os.path.join(PROJECT_DIR, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
'django.core.context_processors.tz',
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
)
#==============================================================================
# Middleware
#==============================================================================
MIDDLEWARE_CLASSES = (
'django_statsd.middleware.GraphiteRequestTimingMiddleware',
'django_statsd.middleware.GraphiteMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
) + MIDDLEWARE_CLASSES + (
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'botbot.core.middleware.TimezoneMiddleware',
)
#==============================================================================
# Auth / security
#============================================================================
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', 'localhost').split(',')
AUTHENTICATION_BACKENDS += (
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
#==============================================================================
# Logger project settings
#==============================================================================
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            # Use the stdlib NullHandler (available since Python 2.7).
            # django.utils.log.NullHandler was only a shim for it and was
            # removed in Django 1.9, so referencing it breaks on upgrade.
            'class': 'logging.NullHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
            'filters': []
        }
    },
    'loggers': {
        # Django's own log output is discarded but still propagates to the
        # root logger for any handlers configured there.
        'django': {
            'handlers': ['null'],
            'propagate': True,
            'level': 'INFO',
        },
        'botbot.plugin_runner': {
            'handlers': ['console'],
            'level': 'INFO',
        }
    }
}
#=============================================================================
# Cache
#=============================================================================
if 'MEMCACHE_URL' in os.environ:
DEFAULT_CACHE = {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': os.environ['MEMCACHE_URL'],
}
else:
DEFAULT_CACHE = {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'botbot',
}
CACHES = {
'default': DEFAULT_CACHE
}
CACHE_MIDDLEWARE_SECONDS = 600 # Unit is second
#=============================================================================
# Email
#=============================================================================
ADMINS = (
('LL', 'info@lincolnloop.com'),
)
EMAIL_SUBJECT_PREFIX = "[BBME] "
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
#==============================================================================
# Miscellaneous project settings
#==============================================================================
# Above this many users is considered a big channel, display is different
BIG_CHANNEL = 25
# Nicks requested to be excluded from logging. The env variable is a
# comma-separated list; drop empty entries so that an unset/empty variable
# (or stray commas such as "a,,b") cannot yield '' entries. The previous
# "== ['']" check only handled the fully-empty case.
EXCLUDE_NICKS = [nick for nick in
                 os.environ.get('EXCLUDE_NICKS', '').split(',') if nick]
REDIS_PLUGIN_QUEUE_URL = os.environ.get('REDIS_PLUGIN_QUEUE_URL')
REDIS_PLUGIN_STORAGE_URL = os.environ.get('REDIS_PLUGIN_STORAGE_URL')
PUSH_STREAM_URL = os.environ.get('PUSH_STREAM_URL', None)
# ==============================================================================
# Third party app settings
# ==============================================================================
# SOUTH_DATABASE_ADAPTERS = {'default': 'south.db.postgresql_psycopg2'}
SOCIAL_AUTH_USER_MODEL = AUTH_USER_MODEL
SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email']
SOCIAL_AUTH_DEFAULT_USERNAME = 'user'
SOCIAL_AUTH_ASSOCIATE_BY_MAIL = True
SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/accounts/manage/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/accounts/login/?error'
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
#'social.pipeline.user.get_username',
#'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_by_email',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details'
)
# Allauth: log the user out on a plain GET to the logout URL (skips the
# confirmation page). Redundant parentheses around the literal removed.
ACCOUNT_LOGOUT_ON_GET = True
# Statsd
STATSD_CLIENT = 'django_statsd.clients.request_aggregate'
STATSD_PATCHES = [
'django_statsd.patches.db',
'django_statsd.patches.cache',
]
STATSD_PREFIX = os.environ.get('STATSD_PREFIX', 'bbme')
DJANGO_HSTORE_ADAPTER_REGISTRATION = 'connection'
SOUTH_TESTS_MIGRATE = False
| mit |
konstruktoid/ansible-upstream | lib/ansible/module_utils/facts/network/aix.py | 143 | 5982 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.module_utils.facts.network.base import NetworkCollector
from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
class AIXNetwork(GenericBsdIfconfigNetwork):
    """
    This is the AIX Network Class.
    It uses the GenericBsdIfconfigNetwork unchanged.
    """
    platform = 'AIX'

    def get_default_interfaces(self, route_path):
        # Parse 'netstat -nr' output to find the default IPv4/IPv6 routes;
        # returns a (v4, v6) tuple of {'gateway': ..., 'interface': ...}
        # dicts (empty when no default route of that family exists).
        netstat_path = self.module.get_bin_path('netstat')

        rc, out, err = self.module.run_command([netstat_path, '-nr'])

        interface = dict(v4={}, v6={})

        lines = out.splitlines()
        for line in lines:
            words = line.split()
            if len(words) > 1 and words[0] == 'default':
                # Crude address-family detection: a dotted quad means IPv4,
                # a colon-separated address means IPv6.
                if '.' in words[1]:
                    interface['v4']['gateway'] = words[1]
                    interface['v4']['interface'] = words[5]
                elif ':' in words[1]:
                    interface['v6']['gateway'] = words[1]
                    interface['v6']['interface'] = words[5]

        return interface['v4'], interface['v6']

    # AIX 'ifconfig -a' does not have three words in the interface line
    def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
        # Build {device: facts} and the aggregated IP lists by dispatching
        # each 'ifconfig -a' output line to the parse_* helpers inherited
        # from GenericBsdIfconfigNetwork.
        interfaces = {}
        current_if = {}
        ips = dict(
            all_ipv4_addresses=[],
            all_ipv6_addresses=[],
        )

        uname_rc = None
        uname_out = None
        uname_err = None
        uname_path = self.module.get_bin_path('uname')
        if uname_path:
            # 'uname -W' reports the WPAR id; '0' means we are in the
            # global environment (not inside a workload partition).
            uname_rc, uname_out, uname_err = self.module.run_command([uname_path, '-W'])

        rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])

        for line in out.splitlines():

            if line:
                words = line.split()

                # only this condition differs from GenericBsdIfconfigNetwork
                if re.match(r'^\w*\d*:', line):
                    # Interface header line, e.g. 'en0: ...' - start a new
                    # current_if record.
                    current_if = self.parse_interface_line(words)
                    interfaces[current_if['device']] = current_if
                elif words[0].startswith('options='):
                    self.parse_options_line(words, current_if, ips)
                elif words[0] == 'nd6':
                    self.parse_nd6_line(words, current_if, ips)
                elif words[0] == 'ether':
                    self.parse_ether_line(words, current_if, ips)
                elif words[0] == 'media:':
                    self.parse_media_line(words, current_if, ips)
                elif words[0] == 'status:':
                    self.parse_status_line(words, current_if, ips)
                elif words[0] == 'lladdr':
                    self.parse_lladdr_line(words, current_if, ips)
                elif words[0] == 'inet':
                    self.parse_inet_line(words, current_if, ips)
                elif words[0] == 'inet6':
                    self.parse_inet6_line(words, current_if, ips)
                else:
                    self.parse_unknown_line(words, current_if, ips)

            # don't bother with wpars it does not work
            # zero means not in wpar
            if not uname_rc and uname_out.split()[0] == '0':

                if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']):
                    # ifconfig did not report a MAC for this Ethernet
                    # adapter - fall back to 'entstat <device>'.
                    entstat_path = self.module.get_bin_path('entstat')
                    if entstat_path:
                        rc, out, err = self.module.run_command([entstat_path, current_if['device']])
                        if rc != 0:
                            # NOTE(review): this break aborts scanning of
                            # all remaining ifconfig lines, not just this
                            # device - preserved as-is.
                            break
                        for line in out.splitlines():
                            if not line:
                                # no-op: blank lines simply fail the
                                # regexes below
                                pass

                            buff = re.match('^Hardware Address: (.*)', line)
                            if buff:
                                current_if['macaddress'] = buff.group(1)

                            buff = re.match('^Device Type:', line)
                            if buff and re.match('.*Ethernet', line):
                                current_if['type'] = 'ether'

                # device must have mtu attribute in ODM
                if 'mtu' not in current_if:
                    # ifconfig on AIX does not report the MTU; read it from
                    # the ODM via 'lsattr -El <device>'.
                    lsattr_path = self.module.get_bin_path('lsattr')
                    if lsattr_path:
                        rc, out, err = self.module.run_command([lsattr_path, '-El', current_if['device']])
                        if rc != 0:
                            break
                        for line in out.splitlines():
                            if line:
                                words = line.split()
                                if words[0] == 'mtu':
                                    current_if['mtu'] = words[1]

        return interfaces, ips

    # AIX 'ifconfig -a' does not inform about MTU, so remove current_if['mtu'] here
    def parse_interface_line(self, words):
        # words[0] is the device name with a trailing colon, e.g. 'en0:'.
        device = words[0][0:-1]
        current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
        current_if['flags'] = self.get_options(words[1])
        current_if['macaddress'] = 'unknown'    # will be overwritten later
        return current_if
class AIXNetworkCollector(NetworkCollector):
    # Collector plumbing: tells the fact-gathering framework to use
    # AIXNetwork when the detected platform is AIX.
    _fact_class = AIXNetwork
    _platform = 'AIX'
| gpl-3.0 |
jeremander/AttrVN | embed.py | 1 | 5687 | """This script reads in graph and attribute data, constructs similarity matrices for each text attribute of interest, then embeds each of these into Euclidean space.
Usage: python3 embed.py [path]
The directory [path] must include a file params.py containing all necessary parameters."""
import os
import sys
import imp
from attr_vn import *
def _keep_top_eigenvectors(eigvals, features, which_elbow, verbose):
    """Sort eigenpairs by decreasing |eigenvalue| and truncate `features`.

    When `which_elbow` > 0, k is chosen from the scree-plot elbows
    (via get_elbows); otherwise all eigenvectors are kept.
    Returns (k, truncated feature matrix).
    """
    pairs = sorted(enumerate(np.abs(eigvals)), key = lambda pair : pair[1], reverse = True)
    indices, abs_eigvals = map(np.array, zip(*pairs))
    if (which_elbow > 0):
        elbows = get_elbows(abs_eigvals, n = which_elbow, thresh = 0.0)
        k = elbows[min(len(elbows), which_elbow) - 1]
    else:
        k = len(eigvals)
    if verbose:
        print("\nKeeping first k = %d eigenvectors..." % k)
    return (k, features[:, indices[:k]])

def main():
    """Read graph and attribute data, embed the graph context and each text
    attribute's similarity matrix into Euclidean space, caching results as
    pickles under the directory given by sys.argv[1].

    Returns (context_features, text_attr_features_by_type).
    """
    path = sys.argv[1].strip('/')
    # Parameters live in <path>/params.py, loaded as a module.
    pm = imp.load_source('params', path + '/params.py')
    edges_filename = path + '/' + pm.edges_filename
    attr_filename = path + '/' + pm.attr_filename
    filenames = os.listdir(path)
    # load/perform context embedding
    if pm.use_context:
        embedding_filename_prefix = '*context*_embedding'
        valid_filenames = [filename for filename in filenames if filename.startswith(embedding_filename_prefix) and filename.endswith('.pickle')]
        if (pm.load_embeddings and (len(valid_filenames) > 0)):  # just use the first valid filename
            context_features = timeit(load_object, pm.verbose)(path, valid_filenames[0][:-7], 'pickle', verbose = pm.verbose)
        else:
            A = edgelist_to_sparse_adjacency_operator(edges_filename, verbose = pm.verbose)
            (eigvals, context_features) = timeit(embed_symmetric_operator, pm.verbose)(A, embedding = pm.embedding, k = pm.max_eig, tol = None, verbose = pm.verbose)
            # Shared truncation logic (was duplicated inline for context and
            # attribute embeddings).
            (k, context_features) = _keep_top_eigenvectors(eigvals, context_features, pm.which_elbow, pm.verbose)
            obj_name = '*context*_embedding_%s_k=%d' % (pm.embedding, k)
            timeit(save_object, pm.verbose)(context_features, path, obj_name, 'pickle', verbose = pm.verbose)
            if pm.save_info:
                np.savetxt(path + '/*context*_eigvals.csv', eigvals, fmt = '%f')
                scree_plot(eigvals, k, show = False, filename = path + '/' + '*context*_scree.png')
        n = context_features.shape[0]
    else:
        context_features = None
        A = edgelist_to_sparse_adjacency_operator(edges_filename, verbose = pm.verbose)
        n = A.shape[0]
    # load/perform attribute embeddings
    text_attr_types = [attr_type for (attr_type, dtype) in pm.predictor_attr_types.items() if dtype is str]
    embedding_filename_prefixes = ['%s_embedding_sim=%s_delta=%s_%s' % (attr_type, pm.sim, str(pm.delta), pm.embedding) for attr_type in text_attr_types]
    text_attr_features_by_type = dict()
    if pm.load_embeddings:  # first see what can be loaded from files
        for (attr_type, embedding_filename_prefix) in zip(text_attr_types, embedding_filename_prefixes):
            valid_filenames = [filename for filename in filenames if filename.startswith(embedding_filename_prefix) and filename.endswith('.pickle')]
            if (len(valid_filenames) > 0):  # just use the first valid filename
                text_attr_features_by_type[attr_type] = timeit(load_object, pm.verbose)(path, valid_filenames[0][:-7], 'pickle', verbose = pm.verbose)
    if (len(text_attr_features_by_type) < len(text_attr_types)):  # need to construct AttributeAnalyzer to get remaining attribute embeddings
        if pm.verbose:
            print("\nCreating AttributeAnalyzer...")
        a = timeit(AttributeAnalyzer, pm.verbose)(attr_filename, n, text_attr_types)
        if pm.save_info:
            a.rank_plot(rank_thresh = pm.rank_thresh, show = False, filename = path + '/' + 'attr_rank_plot.png')
            with open(path + '/attr_report.txt', 'w') as f:
                f.write(a.attr_report(rank_thresh = pm.rank_thresh))
        attr_types_to_embed = [attr_type for attr_type in text_attr_types if (attr_type not in text_attr_features_by_type)]
        for attr_type in attr_types_to_embed:  # make attribute embedding for each text attribute type
            pfa = timeit(a.make_pairwise_freq_analyzer, pm.verbose)(attr_type, edges_filename, verbose = pm.verbose)
            sim_op = timeit(a.make_uncollapsed_operator, pm.verbose)(pfa, attr_type, sim = pm.sim, delta = pm.delta, verbose = pm.verbose)
            (eigvals, attr_features) = timeit(embed_symmetric_operator, pm.verbose)(sim_op, embedding = pm.embedding, k = pm.max_eig, tol = None, verbose = pm.verbose)
            (k, attr_features) = _keep_top_eigenvectors(eigvals, attr_features, pm.which_elbow, pm.verbose)
            text_attr_features_by_type[attr_type] = attr_features
            obj_name = '%s_embedding_sim=%s_delta=%s_%s_k=%d' % (attr_type, pm.sim, str(pm.delta), pm.embedding, k)
            timeit(save_object, pm.verbose)(attr_features, path, obj_name, 'pickle', verbose = pm.verbose)
            if pm.save_info:
                np.savetxt(path + '/%s_eigvals.csv' % attr_type, eigvals, fmt = '%f')
                scree_plot(eigvals, k, show = False, filename = path + '/%s_scree.png' % attr_type)
    return (context_features, text_attr_features_by_type)

if __name__ == "__main__":
    main()
| apache-2.0 |
davidak/mitro | browser-ext/third_party/firefox-addon-sdk/python-lib/cuddlefish/tests/test_property_parser.py | 37 | 3063 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from cuddlefish.property_parser import parse, MalformedLocaleFileError
class TestParser(unittest.TestCase):
    """Tests for cuddlefish.property_parser.parse (.properties locale files)."""

    def test_parse(self):
        """Valid input: comments, whitespace, multiline values and plurals."""
        lines = [
            # Comments are striped only if `#` is the first non-space character
            "sharp=#can be in value",
            "# comment",
            "#key=value",
            " # comment2",
            "keyWithNoValue=",
            "valueWithSpaces= ",
            "valueWithMultilineSpaces= \\",
            " \\",
            " ",
            # All spaces before/after are striped
            " key = value ",
            "key2=value2",
            # Keys can contain '%'
            "%s key=%s value",
            # Accept empty lines
            "",
            " ",
            # Multiline string must use backslash at end of lines
            "multi=line\\", "value",
            # With multiline string, left spaces are stripped ...
            "some= spaces\\", " are\\ ", " stripped ",
            # ... but not right spaces, except the last line!
            "but=not \\", "all of \\", " them ",
            # Explicit [other] plural definition
            "explicitPlural[one] = one",
            "explicitPlural[other] = other",
            # Implicit [other] plural definition
            "implicitPlural[one] = one",
            "implicitPlural = other",  # This key is the [other] one
        ]
        # Ensure that all lines end with a `\n`
        # And that strings are unicode ones (parser code relies on it)
        lines = [unicode(l + "\n") for l in lines]
        pairs = parse(lines)
        expected = {
            "sharp": "#can be in value",
            "key": "value",
            "key2": "value2",
            "%s key": "%s value",
            "keyWithNoValue": "",
            "valueWithSpaces": "",
            "valueWithMultilineSpaces": "",
            "multi": "linevalue",
            "some": "spacesarestripped",
            "but": "not all of them",
            "implicitPlural": {
                "one": "one",
                "other": "other"
            },
            "explicitPlural": {
                "one": "one",
                "other": "other"
            },
        }
        self.assertEqual(pairs, expected)

    def test_exceptions(self):
        """Malformed input must raise MalformedLocaleFileError."""
        # Modernized: assertRaises replaces the deprecated failUnlessRaises
        # alias (deprecated since Python 2.7).
        self.assertRaises(MalformedLocaleFileError, parse,
                          ["invalid line with no key value"])
        self.assertRaises(MalformedLocaleFileError, parse,
                          ["plural[one]=plural with no [other] value"])
        self.assertRaises(MalformedLocaleFileError, parse,
                          ["multiline with no last empty line=\\"])
        self.assertRaises(MalformedLocaleFileError, parse,
                          ["=no key"])
        self.assertRaises(MalformedLocaleFileError, parse,
                          [" =only spaces in key"])

if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
c0fec0de/anytree | anytree/node/nodemixin.py | 1 | 15749 | # -*- coding: utf-8 -*-
import warnings
from anytree.iterators import PreOrderIter
from .exceptions import LoopError
from .exceptions import TreeError
class NodeMixin(object):
    # FIX: the class description below was previously a stray string
    # expression placed *after* `separator = "/"`, so it never became the
    # class __doc__; it is now a real docstring.  The two `children=[...]`
    # doctests were also missing their closing parenthesis.
    u"""
    The :any:`NodeMixin` class extends any Python class to a tree node.

    The only tree relevant information is the `parent` attribute.
    If `None` the :any:`NodeMixin` is root node.
    If set to another node, the :any:`NodeMixin` becomes the child of it.

    The `children` attribute can be used likewise.
    If `None` the :any:`NodeMixin` has no children.
    The `children` attribute can be set to any iterable of :any:`NodeMixin` instances.
    These instances become children of the node.

    >>> from anytree import NodeMixin, RenderTree
    >>> class MyBaseClass(object):  # Just an example of a base class
    ...     foo = 4
    >>> class MyClass(MyBaseClass, NodeMixin):  # Add Node feature
    ...     def __init__(self, name, length, width, parent=None, children=None):
    ...         super(MyClass, self).__init__()
    ...         self.name = name
    ...         self.length = length
    ...         self.width = width
    ...         self.parent = parent
    ...         if children:
    ...             self.children = children

    Construction via `parent`:

    >>> my0 = MyClass('my0', 0, 0)
    >>> my1 = MyClass('my1', 1, 0, parent=my0)
    >>> my2 = MyClass('my2', 0, 2, parent=my0)

    >>> for pre, _, node in RenderTree(my0):
    ...     treestr = u"%s%s" % (pre, node.name)
    ...     print(treestr.ljust(8), node.length, node.width)
    my0      0 0
    ├── my1  1 0
    └── my2  0 2

    Construction via `children`:

    >>> my0 = MyClass('my0', 0, 0, children=[
    ...     MyClass('my1', 1, 0),
    ...     MyClass('my2', 0, 2),
    ... ])

    >>> for pre, _, node in RenderTree(my0):
    ...     treestr = u"%s%s" % (pre, node.name)
    ...     print(treestr.ljust(8), node.length, node.width)
    my0      0 0
    ├── my1  1 0
    └── my2  0 2

    Both approaches can be mixed:

    >>> my0 = MyClass('my0', 0, 0, children=[
    ...     MyClass('my1', 1, 0),
    ... ])
    >>> my2 = MyClass('my2', 0, 2, parent=my0)

    >>> for pre, _, node in RenderTree(my0):
    ...     treestr = u"%s%s" % (pre, node.name)
    ...     print(treestr.ljust(8), node.length, node.width)
    my0      0 0
    ├── my1  1 0
    └── my2  0 2
    """

    separator = "/"

    @property
    def parent(self):
        u"""
        Parent Node.

        On set, the node is detached from any previous parent node and attached
        to the new node.

        >>> from anytree import Node, RenderTree
        >>> udo = Node("Udo")
        >>> marc = Node("Marc")
        >>> lian = Node("Lian", parent=marc)
        >>> print(RenderTree(udo))
        Node('/Udo')
        >>> print(RenderTree(marc))
        Node('/Marc')
        └── Node('/Marc/Lian')

        **Attach**

        >>> marc.parent = udo
        >>> print(RenderTree(udo))
        Node('/Udo')
        └── Node('/Udo/Marc')
            └── Node('/Udo/Marc/Lian')

        **Detach**

        To make a node to a root node, just set this attribute to `None`.

        >>> marc.is_root
        False
        >>> marc.parent = None
        >>> marc.is_root
        True
        """
        try:
            return self.__parent
        except AttributeError:
            # Never attached: lazily behave as a root node.
            return None

    @parent.setter
    def parent(self, value):
        if value is not None and not isinstance(value, NodeMixin):
            msg = "Parent node %r is not of type 'NodeMixin'." % (value, )
            raise TreeError(msg)
        try:
            parent = self.__parent
        except AttributeError:
            parent = None
        if parent is not value:
            # Reject cycles before mutating anything, then move the node.
            self.__check_loop(value)
            self.__detach(parent)
            self.__attach(value)

    def __check_loop(self, node):
        # Raise LoopError if attaching to `node` would create a cycle.
        if node is not None:
            if node is self:
                msg = "Cannot set parent. %r cannot be parent of itself."
                raise LoopError(msg % (self, ))
            if any(child is self for child in node.iter_path_reverse()):
                msg = "Cannot set parent. %r is parent of %r."
                raise LoopError(msg % (self, node))

    def __detach(self, parent):
        # Remove `self` from `parent`'s child list (with detach hooks).
        if parent is not None:
            self._pre_detach(parent)
            parentchildren = parent.__children_or_empty
            assert any(child is self for child in parentchildren), "Tree is corrupt."  # pragma: no cover
            # ATOMIC START
            parent.__children = [child for child in parentchildren if child is not self]
            self.__parent = None
            # ATOMIC END
            self._post_detach(parent)

    def __attach(self, parent):
        # Append `self` to `parent`'s child list (with attach hooks).
        if parent is not None:
            self._pre_attach(parent)
            parentchildren = parent.__children_or_empty
            assert not any(child is self for child in parentchildren), "Tree is corrupt."  # pragma: no cover
            # ATOMIC START
            parentchildren.append(self)
            self.__parent = parent
            # ATOMIC END
            self._post_attach(parent)

    @property
    def __children_or_empty(self):
        # Lazily create the child list on first access.
        try:
            return self.__children
        except AttributeError:
            self.__children = []
            return self.__children

    @property
    def children(self):
        """
        All child nodes.

        >>> from anytree import Node
        >>> n = Node("n")
        >>> a = Node("a", parent=n)
        >>> b = Node("b", parent=n)
        >>> c = Node("c", parent=n)
        >>> n.children
        (Node('/n/a'), Node('/n/b'), Node('/n/c'))

        Modifying the children attribute modifies the tree.

        **Detach**

        The children attribute can be updated by setting to an iterable.

        >>> n.children = [a, b]
        >>> n.children
        (Node('/n/a'), Node('/n/b'))

        Node `c` is removed from the tree.
        In case of an existing reference, the node `c` does not vanish and is the root of its own tree.

        >>> c
        Node('/c')

        **Attach**

        >>> d = Node("d")
        >>> d
        Node('/d')
        >>> n.children = [a, b, d]
        >>> n.children
        (Node('/n/a'), Node('/n/b'), Node('/n/d'))
        >>> d
        Node('/n/d')

        **Duplicate**

        A node can just be the children once. Duplicates cause a :any:`TreeError`:

        >>> n.children = [a, b, d, a]
        Traceback (most recent call last):
            ...
        anytree.node.exceptions.TreeError: Cannot add node Node('/n/a') multiple times as child.
        """
        return tuple(self.__children_or_empty)

    @staticmethod
    def __check_children(children):
        # Validate type and uniqueness (by identity) of the new child set.
        seen = set()
        for child in children:
            if not isinstance(child, NodeMixin):
                msg = "Cannot add non-node object %r. It is not a subclass of 'NodeMixin'." % (child, )
                raise TreeError(msg)
            childid = id(child)
            if childid not in seen:
                seen.add(childid)
            else:
                msg = "Cannot add node %r multiple times as child." % (child, )
                raise TreeError(msg)

    @children.setter
    def children(self, children):
        # convert iterable to tuple
        children = tuple(children)
        NodeMixin.__check_children(children)
        # ATOMIC start
        old_children = self.children
        del self.children
        try:
            self._pre_attach_children(children)
            for child in children:
                child.parent = self
            self._post_attach_children(children)
            assert len(self.children) == len(children)
        except Exception:
            # Restore the previous children on any failure.
            self.children = old_children
            raise
        # ATOMIC end

    @children.deleter
    def children(self):
        children = self.children
        self._pre_detach_children(children)
        for child in self.children:
            child.parent = None
        assert len(self.children) == 0
        self._post_detach_children(children)

    def _pre_detach_children(self, children):
        """Method call before detaching `children`."""
        pass

    def _post_detach_children(self, children):
        """Method call after detaching `children`."""
        pass

    def _pre_attach_children(self, children):
        """Method call before attaching `children`."""
        pass

    def _post_attach_children(self, children):
        """Method call after attaching `children`."""
        pass

    @property
    def path(self):
        """
        Path of this `Node`.

        >>> from anytree import Node
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> udo.path
        (Node('/Udo'),)
        >>> marc.path
        (Node('/Udo'), Node('/Udo/Marc'))
        >>> lian.path
        (Node('/Udo'), Node('/Udo/Marc'), Node('/Udo/Marc/Lian'))
        """
        return self._path

    def iter_path_reverse(self):
        """
        Iterate up the tree from the current node.

        >>> from anytree import Node
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> for node in udo.iter_path_reverse():
        ...     print(node)
        Node('/Udo')
        >>> for node in marc.iter_path_reverse():
        ...     print(node)
        Node('/Udo/Marc')
        Node('/Udo')
        >>> for node in lian.iter_path_reverse():
        ...     print(node)
        Node('/Udo/Marc/Lian')
        Node('/Udo/Marc')
        Node('/Udo')
        """
        node = self
        while node is not None:
            yield node
            node = node.parent

    @property
    def _path(self):
        return tuple(reversed(list(self.iter_path_reverse())))

    @property
    def ancestors(self):
        """
        All parent nodes and their parent nodes.

        >>> from anytree import Node
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> udo.ancestors
        ()
        >>> marc.ancestors
        (Node('/Udo'),)
        >>> lian.ancestors
        (Node('/Udo'), Node('/Udo/Marc'))
        """
        if self.parent is None:
            return tuple()
        return self.parent.path

    @property
    def anchestors(self):
        """
        All parent nodes and their parent nodes - see :any:`ancestors`.

        The attribute `anchestors` is just a typo of `ancestors`. Please use `ancestors`.
        This attribute will be removed in the 3.0.0 release.
        """
        warnings.warn(".anchestors was a typo and will be removed in version 3.0.0", DeprecationWarning)
        return self.ancestors

    @property
    def descendants(self):
        """
        All child nodes and all their child nodes.

        >>> from anytree import Node
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> loui = Node("Loui", parent=marc)
        >>> soe = Node("Soe", parent=lian)
        >>> udo.descendants
        (Node('/Udo/Marc'), Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Lian/Soe'), Node('/Udo/Marc/Loui'))
        >>> marc.descendants
        (Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Lian/Soe'), Node('/Udo/Marc/Loui'))
        >>> lian.descendants
        (Node('/Udo/Marc/Lian/Soe'),)
        """
        # Pre-order traversal includes self at index 0; strip it.
        return tuple(PreOrderIter(self))[1:]

    @property
    def root(self):
        """
        Tree Root Node.

        >>> from anytree import Node
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> udo.root
        Node('/Udo')
        >>> marc.root
        Node('/Udo')
        >>> lian.root
        Node('/Udo')
        """
        node = self
        while node.parent is not None:
            node = node.parent
        return node

    @property
    def siblings(self):
        """
        Tuple of nodes with the same parent.

        >>> from anytree import Node
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> loui = Node("Loui", parent=marc)
        >>> lazy = Node("Lazy", parent=marc)
        >>> udo.siblings
        ()
        >>> marc.siblings
        ()
        >>> lian.siblings
        (Node('/Udo/Marc/Loui'), Node('/Udo/Marc/Lazy'))
        >>> loui.siblings
        (Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Lazy'))
        """
        parent = self.parent
        if parent is None:
            return tuple()
        else:
            return tuple(node for node in parent.children if node is not self)

    @property
    def leaves(self):
        """
        Tuple of all leaf nodes.

        >>> from anytree import Node
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> loui = Node("Loui", parent=marc)
        >>> lazy = Node("Lazy", parent=marc)
        >>> udo.leaves
        (Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Loui'), Node('/Udo/Marc/Lazy'))
        >>> marc.leaves
        (Node('/Udo/Marc/Lian'), Node('/Udo/Marc/Loui'), Node('/Udo/Marc/Lazy'))
        """
        return tuple(PreOrderIter(self, filter_=lambda node: node.is_leaf))

    @property
    def is_leaf(self):
        """
        `Node` has no children (External Node).

        >>> from anytree import Node
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> udo.is_leaf
        False
        >>> marc.is_leaf
        False
        >>> lian.is_leaf
        True
        """
        return len(self.__children_or_empty) == 0

    @property
    def is_root(self):
        """
        `Node` is tree root.

        >>> from anytree import Node
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> udo.is_root
        True
        >>> marc.is_root
        False
        >>> lian.is_root
        False
        """
        return self.parent is None

    @property
    def height(self):
        """
        Number of edges on the longest path to a leaf `Node`.

        >>> from anytree import Node
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> udo.height
        2
        >>> marc.height
        1
        >>> lian.height
        0
        """
        children = self.__children_or_empty
        if children:
            return max(child.height for child in children) + 1
        else:
            return 0

    @property
    def depth(self):
        """
        Number of edges to the root `Node`.

        >>> from anytree import Node
        >>> udo = Node("Udo")
        >>> marc = Node("Marc", parent=udo)
        >>> lian = Node("Lian", parent=marc)
        >>> udo.depth
        0
        >>> marc.depth
        1
        >>> lian.depth
        2
        """
        # count without storing the entire path
        for i, _ in enumerate(self.iter_path_reverse()):
            continue
        return i

    def _pre_detach(self, parent):
        """Method call before detaching from `parent`."""
        pass

    def _post_detach(self, parent):
        """Method call after detaching from `parent`."""
        pass

    def _pre_attach(self, parent):
        """Method call before attaching to `parent`."""
        pass

    def _post_attach(self, parent):
        """Method call after attaching to `parent`."""
        pass
jwren/intellij-community | python/testData/inspections/GoogleDocstringParametersInspection/test.py | 40 | 1256 | """ test docstring inspection"""
def foo1(a, b):
"""
Parameters:
a: foo
b: bar
"""
pass
def foo(a, <weak_warning descr="Missing parameter b in docstring">b</weak_warning>, <weak_warning descr="Missing parameter n in docstring">n</weak_warning>):
"""
Parameters:
a: foo
"""
pass
def foo():
"""
Parameters:
<weak_warning descr="Unexpected parameter a in docstring">a</weak_warning>: foo
"""
pass
def compare(a, b, *, key=None):
"""
Parameters:
a:
b:
key:
"""
pass
def foo(a, <weak_warning descr="Missing parameter c in docstring">c</weak_warning>):
"""
Parameters:
a:
<weak_warning descr="Unexpected parameter b in docstring">b</weak_warning>:
"""
pass
def varagrs_defined_without_stars(x, *args, y, **kwargs):
"""
Args:
x:
args:
y:
kwargs:
"""
def varagrs_dont_exist():
"""
Args:
*<weak_warning descr="Unexpected parameter args in docstring">args</weak_warning>:
**<weak_warning descr="Unexpected parameter kwargs in docstring">kwargs</weak_warning>:
"""
def varagrs_undefined(x, *args, y, **kwargs):
"""
Args:
x:
y:
"""
def no_parameters_declared(x, y):
"""
"""
| apache-2.0 |
repos-python/xhtml2pdf | testrender/testrender.py | 147 | 10887 | #!/usr/bin/env python
import datetime
import os
import shutil
import sys
import glob
from optparse import OptionParser
from subprocess import Popen, PIPE
from xhtml2pdf import pisa
def render_pdf(filename, output_dir, options):
if options.debug:
print 'Rendering %s' % filename
basename = os.path.basename(filename)
outname = '%s.pdf' % os.path.splitext(basename)[0]
outfile = os.path.join(output_dir, outname)
input = open(filename, 'rb')
output = open(outfile, 'wb')
result = pisa.pisaDocument(input, output, path=filename)
input.close()
output.close()
if result.err:
print 'Error rendering %s: %s' % (filename, result.err)
sys.exit(1)
return outfile
def convert_to_png(infile, output_dir, options):
if options.debug:
print 'Converting %s to PNG' % infile
basename = os.path.basename(infile)
filename = os.path.splitext(basename)[0]
outname = '%s.page%%0d.png' % filename
globname = '%s.page*.png' % filename
outfile = os.path.join(output_dir, outname)
exec_cmd(options, options.convert_cmd, '-density', '150', infile, outfile)
outfiles = glob.glob(os.path.join(output_dir, globname))
outfiles.sort()
return outfiles
def create_diff_image(srcfile1, srcfile2, output_dir, options):
    """Compare two PNGs with ImageMagick and write a difference image.

    Returns (outfile, diff_value) where diff_value is the 'ae' metric
    (count of differing pixels) reported by `compare`.
    """
    if options.debug:
        print 'Creating difference image for %s and %s' % (srcfile1, srcfile2)
    outname = '%s.diff%s' % os.path.splitext(srcfile1)
    outfile = os.path.join(output_dir, outname)
    # `compare -metric ae` prints the pixel difference count on stderr,
    # which exec_cmd returns as the second element.
    _, result = exec_cmd(options, options.compare_cmd, '-metric', 'ae', srcfile1, srcfile2, '-lowlight-color', 'white', outfile)
    diff_value = int(result.strip())
    if diff_value > 0:
        if not options.quiet:
            print 'Image %s differs from reference, value is %i' % (srcfile1, diff_value)
    return outfile, diff_value
def copy_ref_image(srcname, output_dir, options):
if options.debug:
print 'Copying reference image %s ' % srcname
dstname = os.path.basename(srcname)
dstfile = os.path.join(output_dir, '%s.ref%s' % os.path.splitext(dstname))
shutil.copyfile(srcname, dstfile)
return dstfile
def create_thumbnail(filename, options):
    """Create a 20%-scaled thumbnail next to `filename` and return its path."""
    thumbfile = '%s.thumb%s' % os.path.splitext(filename)
    if options.debug:
        print 'Creating thumbnail of %s' % filename
    exec_cmd(options, options.convert_cmd, '-resize', '20%', filename, thumbfile)
    return thumbfile
def render_file(filename, output_dir, ref_dir, options):
    """Render one HTML file to PDF and PNGs and diff each page against its reference.

    Returns (pdf_path, pages, diff_count); `pages` is a list of dicts with
    image paths per page, or None in reference-creation mode.
    """
    if not options.quiet:
        print 'Rendering %s' % filename
    pdf = render_pdf(filename, output_dir, options)
    pngs = convert_to_png(pdf, output_dir, options)
    if options.create_reference:
        # Reference mode: the PNGs themselves become the new reference set.
        return None, None, 0
    thumbs = [create_thumbnail(png, options) for png in pngs]
    pages = [{'png': p, 'png_thumb': thumbs[i]}
             for i, p in enumerate(pngs)]
    diff_count = 0
    if not options.no_compare:
        for page in pages:
            refsrc = os.path.join(ref_dir, os.path.basename(page['png']))
            if not os.path.isfile(refsrc):
                # Missing reference: report and keep the page without a diff.
                print 'Reference image for %s not found!' % page['png']
                continue
            page['ref'] = copy_ref_image(refsrc, output_dir, options)
            page['ref_thumb'] = create_thumbnail(page['ref'], options)
            page['diff'], page['diff_value'] = \
                create_diff_image(page['png'], page['ref'],
                                  output_dir, options)
            page['diff_thumb'] = create_thumbnail(page['diff'], options)
            if page['diff_value']:
                diff_count += 1
    return pdf, pages, diff_count
def exec_cmd(options, *args):
    """Run an external command and return its (stdout, stderr) pair.

    Exits the whole process with status 1 when the command returns a
    non-zero exit code.
    """
    if options.debug:
        print 'Executing %s' % ' '.join(args)
    proc = Popen(args, stdout=PIPE, stderr=PIPE)
    result = proc.communicate()
    if options.debug:
        print result[0], result[1]
    if proc.returncode:
        print 'exec error (%i): %s' % (proc.returncode, result[1])
        sys.exit(1)
    return result[0], result[1]
def create_html_file(results, template_file, output_dir, options):
    """Build the index.html overview page from the per-file render results.

    Fills the %%TITLE%% and %%RESULTS%% placeholders of `template_file`
    and returns the path of the written HTML file.
    """
    html = []
    for pdf, pages, diff_count in results:
        if options.only_errors and not diff_count:
            continue
        pdfname = os.path.basename(pdf)
        html.append('<div class="result">\n'
                    '<h2><a href="%(pdf)s" class="pdf-file">%(pdf)s</a></h2>\n'
                    % {'pdf': pdfname})
        for i, page in enumerate(pages):
            # Link targets are siblings of index.html, so strip directories.
            vars = dict(((k, os.path.basename(v)) for k, v in page.items()
                         if k != 'diff_value'))
            vars['page'] = i + 1
            if 'diff' in page:
                vars['diff_value'] = page['diff_value']
                if vars['diff_value']:
                    vars['class'] = 'result-page-diff error'
                else:
                    if options.only_errors:
                        continue
                    vars['class'] = 'result-page-diff'
                html.append('<div class="%(class)s">\n'
                            '<h3>Page %(page)i</h3>\n'
                            '<div class="result-img">\n'
                            '<div class="result-type">Difference '
                            '(Score %(diff_value)i)</div>\n'
                            '<a href="%(diff)s" class="diff-file">'
                            '<img src="%(diff_thumb)s"/></a>\n'
                            '</div>\n'
                            '<div class="result-img">\n'
                            '<div class="result-type">Rendered</div>\n'
                            '<a href="%(png)s" class="png-file">'
                            '<img src="%(png_thumb)s"/></a>\n'
                            '</div>\n'
                            '<div class="result-img">\n'
                            '<div class="result-type">Reference</div>\n'
                            '<a href="%(ref)s" class="ref-file">'
                            '<img src="%(ref_thumb)s"/></a>\n'
                            '</div>\n'
                            '</div>\n' % vars)
            else:
                # No reference/diff available: show the rendered page only.
                html.append('<div class="result-page">\n'
                            '<h3>Page %(page)i</h3>\n'
                            '<div class="result-img">\n'
                            '<a href="%(png)s" class="png-file">'
                            '<img src="%(png_thumb)s"/></a>\n'
                            '</div>\n'
                            '</div>\n' % vars)
        html.append('</div>\n\n')
    now = datetime.datetime.now()
    title = 'xhtml2pdf Test Rendering Results, %s' % now.strftime('%c')
    template = open(template_file, 'rb').read()
    template = template.replace('%%TITLE%%', title)
    template = template.replace('%%RESULTS%%', '\n'.join(html))
    htmlfile = os.path.join(output_dir, 'index.html')
    outfile = open(htmlfile, 'wb')
    outfile.write(template)
    outfile.close()
    return htmlfile
def main():
    """Render all (or the explicitly listed) source files and report diffs.

    Exits with status 1 when any rendered page differs from its reference.
    """
    options, args = parser.parse_args()
    base_dir = os.path.abspath(os.path.join(__file__, os.pardir))
    source_dir = os.path.join(base_dir, options.source_dir)
    if options.create_reference is not None:
        output_dir = os.path.join(base_dir, options.create_reference)
    else:
        output_dir = os.path.join(base_dir, options.output_dir)
    template_file = os.path.join(base_dir, options.html_template)
    ref_dir = os.path.join(base_dir, options.ref_dir)
    # The output directory is recreated from scratch on every run.
    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir)
    results = []
    diff_count = 0
    if len(args) == 0:
        files = glob.glob(os.path.join(source_dir, '*.html'))
    else:
        files = [os.path.join(source_dir, arg) for arg in args]
    for filename in files:
        pdf, pages, diff = render_file(filename, output_dir, ref_dir, options)
        diff_count += diff
        results.append((pdf, pages, diff))
    num = len(results)
    if options.create_reference is not None:
        print 'Created reference for %i file%s' % (num, '' if num == 1 else 's')
    else:
        htmlfile = create_html_file(results, template_file, output_dir, options)
        if not options.quiet:
            print 'Rendered %i file%s' % (num, '' if num == 1 else 's')
            print '%i file%s differ%s from reference' % \
                (diff_count, diff_count != 1 and 's' or '',
                 diff_count == 1 and 's' or '')
            print 'Check %s for results' % htmlfile
        if diff_count:
            sys.exit(1)
# Command-line interface; `main()` reads these options via parse_args().
parser = OptionParser(
    usage='rendertest.py [options] [source_file] [source_file] ...',
    description='Renders a single html source file or all files in the data '
                'directory, converts them to PNG format and prepares a result '
                'HTML file for comparing the output with an expected result')
parser.add_option('-s', '--source-dir', dest='source_dir', default='data/source',
                  help=('Path to directory containing the html source files'))
parser.add_option('-o', '--output-dir', dest='output_dir', default='output',
                  help='Path to directory for output files. CAREFUL: this '
                       'directory will be deleted and recreated before rendering!')
parser.add_option('-r', '--ref-dir', dest='ref_dir', default='data/reference',
                  help='Path to directory containing the reference images '
                       'to compare the result with')
parser.add_option('-t', '--template', dest='html_template',
                  default='data/template.html', help='Name of HTML template file')
parser.add_option('-e', '--only-errors', dest='only_errors', action='store_true',
                  default=False, help='Only include images in HTML file which '
                                      'differ from reference')
parser.add_option('-q', '--quiet', dest='quiet', action='store_true',
                  default=False, help='Try to be quiet')
parser.add_option('--no-compare', dest='no_compare', action='store_true',
                  default=False, help='Do not compare with reference image, '
                                      'only render to png')
parser.add_option('-c', '--create-reference', dest='create_reference',
                  metavar='DIR',
                  default=None, help='Do not output anything, render source to '
                                     'specified directory for reference. CAREFUL: this directory '
                                     'will be deleted and recreated before rendering!')
parser.add_option('--debug', dest='debug', action='store_true',
                  default=False, help='More output for debugging')
parser.add_option('--convert-cmd', dest='convert_cmd', default='/usr/bin/convert',
                  help='Path to ImageMagick "convert" tool')
parser.add_option('--compare-cmd', dest='compare_cmd', default='/usr/bin/compare',
                  help='Path to ImageMagick "compare" tool')

if __name__ == '__main__':
    main()
| apache-2.0 |
fusion32/kaplar | configure.py | 1 | 4652 | #!/usr/bin/python
import sys
Usage = r'''
USAGE: python configure.py [options]
Options: (options in the same section are mutually exclusive)
-o <file> - write output to <file>
-srcdir <dir> - change source directory
-builddir <dir> - change build directory
-test - compiles ./main.c instead of <srcdir>/main.c
(this is useful for unit testing)
[compiler]:
-clang (default) -
-gcc -
[build type]:
-release (default) -
-debug -
[platform]:
-win32 (default) -
-linux -
-freebsd -
[endianess]:
-le (default) compile for little endian arch
-be compile for big endian arch
'''
MakefileHeader = r'''
CC = %s
CFLAGS = %s
LDFLAGS = %s
LDLIBS = %s
DEPS = \
%s
kaplar: \
%s
$(CC) -o %s $^ $(LDLIBS) $(LDFLAGS)
.PHONY: clean
clean:
@ rm -fR %s
'''
MakefileObject = r'''
%s: %s $(DEPS)
@mkdir -p $(@D)
$(CC) -c -o $@ $< $(CFLAGS)
'''
DEPS = [
"atomic.h", "cmdline.h", "connection.h", "log.h",
"message.h", "mmblock.h", "mm.h", "network.h",
"scheduler.h", "server.h", "system.h", "thread.h",
"types.h", "util.h", "work.h", "work_group.h",
]
COMMON = [
"adler32.o", "cmdline.o", "connection.o", "log.o",
"main.o", "message.o", "mmblock.o", "mm.o",
"protocol_game.o", "protocol_login.o", "protocol_old.o",
"protocol_test.o", "scheduler.o", "server.o", "work.o",
"work_group.o",
]
WIN32 = [
"win32/atomic.o",
"win32/system.o", "win32/thread.o", "win32/network.o",
]
LINUX = [
"linux/atomic.o",
"posix/system.o", "posix/thread.o", "linux/network.o",
]
FREEBSD = [
"freebsd/atomic.o",
"posix/system.o", "posix/thread.o", "freebsd/network.o",
]
if __name__ == "__main__":
    # parse parameters
    output = 'kaplar'
    srcdir = 'src/'
    builddir = 'build/'
    test = False
    compiler = "CLANG"
    build = "RELEASE"
    platform = "WIN32"

    # default to this platform's byte order
    byteorder = "LITTLE"
    if sys.byteorder == "big":
        # BUG FIX: this previously assigned the unused name `endianess`,
        # so the default was always little endian even on big endian hosts.
        byteorder = "BIG"

    args = iter(sys.argv[1:])
    for opt in args:
        # output name
        if opt == "-o":
            output = next(args)
        # source dir
        elif opt == "-srcdir":
            srcdir = next(args)
        # build dir
        elif opt == "-builddir":
            builddir = next(args)
        # enable unit testing
        elif opt == "-test":
            test = True
        # compilers
        elif opt == "-clang":
            compiler = "CLANG"
        elif opt == "-gcc":
            compiler = "GCC"
        # build types
        elif opt == "-release":
            build = "RELEASE"
        elif opt == "-debug":
            build = "DEBUG"
        # platforms
        elif opt == "-win32":
            platform = "WIN32"
        elif opt == "-linux":
            platform = "LINUX"
        elif opt == "-freebsd":
            platform = "FREEBSD"
        # endianess
        elif opt == "-le":
            byteorder = "LITTLE"
        elif opt == "-be":
            byteorder = "BIG"
        # invalid option
        else:
            print("[warning] Invalid option used: \"%s\"" % opt)
            print(Usage)
            sys.exit()

    # set parameters
    CC = ""
    CFLAGS = "-std=c99 -Wall -Wno-pointer-sign"
    CDEFS = "-D_XOPEN_SOURCE=700"
    LDFLAGS = ""
    LDLIBS = "-lc -lpthread"
    OBJECTS = COMMON[:]

    # check compiler
    if compiler == "GCC":
        CC = "gcc"
    elif compiler == "CLANG":
        CC = "clang"
    else:
        print("[error] invalid compiler")
        sys.exit()

    # check platform
    if platform == "WIN32":
        OBJECTS.extend(WIN32)
    elif platform == "LINUX":
        OBJECTS.extend(LINUX)
    elif platform == "FREEBSD":
        OBJECTS.extend(FREEBSD)
        CDEFS += " -D__BSD_VISIBLE=1"
    else:
        print("[error] invalid platform")
        sys.exit()

    # check build
    if build == "RELEASE":
        LDFLAGS = "-s -O2"
    elif build == "DEBUG":
        CFLAGS = "-g " + CFLAGS
        LDFLAGS = "-g"
    else:
        print("[error] invalid build type")
        sys.exit()

    # check endianess
    if byteorder == "LITTLE":
        pass
    elif byteorder == "BIG":
        CDEFS += " -D__BIG_ENDIAN__"
    else:
        print("[error] invalid byteorder")
        sys.exit()

    # concat CFLAGS and CDEFS
    CFLAGS += " " + CDEFS

    # add path to dependencies
    DEPS = [srcdir + dep for dep in DEPS]
    # create tuple (obj, src) for each object
    OBJECTS = [(builddir + "obj/" + obj, srcdir + obj[:-2] + ".c")
               for obj in OBJECTS]
    # if testing, change <srcdir>/main.c to ./main.c
    if test == True:
        for (a, b) in OBJECTS:
            if ("main.o" in a) or ("main.c" in b):
                OBJECTS.remove((a, b))
                OBJECTS.append((a, "main.c"))
                break

    # output to file
    with open("Makefile", "w") as file:
        file.write(MakefileHeader % (CC, CFLAGS, LDFLAGS, LDLIBS,
            '\t\\\n\t'.join(DEPS),
            '\t\\\n\t'.join(list(zip(*OBJECTS))[0]),
            builddir + output, builddir))
        for obj in OBJECTS:
            file.write(MakefileObject % obj)
| mit |
myerssr/volatility | volatility/plugins/connscan.py | 44 | 3853 | # Volatility
# Copyright (C) 2008-2013 Volatility Foundation
# Copyright (c) 2008 Brendan Dolan-Gavitt <bdolangavitt@wesleyan.edu>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
This module implements the fast connection scanning
@author: AAron Walters and Brendan Dolan-Gavitt
@license: GNU General Public License 2.0
@contact: awalters@4tphi.net,bdolangavitt@wesleyan.edu
@organization: Volatility Foundation
"""
#pylint: disable-msg=C0111
import volatility.scan as scan
import volatility.plugins.common as common
import volatility.cache as cache
import volatility.utils as utils
import volatility.obj as obj
import volatility.debug as debug #pylint: disable-msg=W0611
class PoolScanConnFast(scan.PoolScanner):
    """Pool scanner that locates _TCPT_OBJECT allocations via the 'TCPT' tag."""

    def object_offset(self, found, address_space):
        """ Return the offset of _TCPT_OBJECT """
        # 'found' points at the PoolTag member of _POOL_HEADER; the object
        # begins right after the header, so advance by the remainder of it.
        return found + (address_space.profile.get_obj_size("_POOL_HEADER") -
                        address_space.profile.get_obj_offset("_POOL_HEADER", "PoolTag"))

    # Per-allocation checks applied by the scanner framework:
    # tag match, minimum pool size, non-paged (or freed) pool, pool index 0.
    checks = [ ('PoolTagCheck', dict(tag = "TCPT")),
               ('CheckPoolSize', dict(condition = lambda x: x >= 0x198)),
               ('CheckPoolType', dict(non_paged = True, free = True)),
               ('CheckPoolIndex', dict(value = 0)),
               ]
class ConnScan(common.AbstractWindowsCommand):
    """ Scan Physical memory for _TCPT_OBJECT objects (tcp connections)
    """

    # Plugin provenance metadata displayed by the Volatility framework.
    meta_info = dict(
        author = 'Brendan Dolan-Gavitt',
        copyright = 'Copyright (c) 2007,2008 Brendan Dolan-Gavitt',
        contact = 'bdolangavitt@wesleyan.edu',
        license = 'GNU General Public License 2.0',
        url = 'http://moyix.blogspot.com/',
        os = 'WIN_32_XP_SP2',
        version = '1.0',
        )

    @staticmethod
    def is_valid_profile(profile):
        """Only Windows NT 5.x profiles are supported (major == 5)."""
        return (profile.metadata.get('os', 'unknown') == 'windows' and
                profile.metadata.get('major', 0) == 5)

    @cache.CacheDecorator("scans/connscan2")
    def calculate(self):
        """Scan the physical address space and yield _TCPT_OBJECT instances."""
        ## Just grab the AS and scan it using our scanner
        address_space = utils.load_as(self._config, astype = 'physical')
        if not self.is_valid_profile(address_space.profile):
            debug.error("This command does not support the selected profile.")
        scanner = PoolScanConnFast()
        for offset in scanner.scan(address_space):
            ## This yields the pool offsets - we want the actual object
            tcp_obj = obj.Object('_TCPT_OBJECT', vm = address_space,
                                 offset = offset)
            yield tcp_obj

    def render_text(self, outfd, data):
        """Render results from calculate() as a table:
        physical offset, local/remote endpoints, owning PID."""
        self.table_header(outfd,
                          [("Offset(P)", "[addrpad]"),
                           ("Local Address", "25"),
                           ("Remote Address", "25"),
                           ("Pid", "")
                           ])
        for tcp_obj in data:
            local = "{0}:{1}".format(tcp_obj.LocalIpAddress, tcp_obj.LocalPort)
            remote = "{0}:{1}".format(tcp_obj.RemoteIpAddress, tcp_obj.RemotePort)
            self.table_row(outfd,
                           tcp_obj.obj_offset,
                           local, remote,
                           tcp_obj.Pid)
| gpl-2.0 |
shadowmint/nwidget | lib/pyglet-1.4.4/tests/image/DEPTH_SAVE.py | 17 | 1679 | #!/usr/bin/env python
'''Test depth buffer save.
A scene consisting of a single coloured triangle will be rendered. The
depth buffer will then be saved to a stream and loaded as a texture.
You will see the original scene first for up to several seconds before the
depth buffer image appears (because retrieving and saving the image is
a slow operation). Messages will be printed to stdout indicating
what stage is occuring.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
from StringIO import StringIO
import unittest
import base_save
from pyglet.gl import *
from pyglet import image
class TEST_DEPTH_SAVE(base_save.TestSave):
    # The depth buffer is single-channel, so no alpha in the saved image.
    alpha = False

    def draw_original(self):
        """Draw one depth-tested triangle whose vertices span z = -1..1,
        producing a visible gradient in the depth buffer."""
        glClear(GL_DEPTH_BUFFER_BIT)
        glEnable(GL_DEPTH_TEST)
        glBegin(GL_TRIANGLES)
        glColor4f(1, 0, 0, 1)
        glVertex3f(0, 0, -1)
        glColor4f(0, 1, 0, 1)
        glVertex3f(200, 0, 0)
        glColor4f(0, 0, 1, 1)
        glVertex3f(0, 200, 1)
        glEnd()
        glDisable(GL_DEPTH_TEST)
        # Reset draw color so later texture blits are not tinted.
        glColor4f(1, 1, 1, 1)

    def load_texture(self):
        """Render the scene, save its depth buffer to an in-memory PNG,
        then reload that PNG as the texture compared by the base test."""
        print 'Drawing scene...'
        self.window.set_visible()
        self.window.dispatch_events()
        self.draw()
        print 'Saving depth image...'
        img = image.get_buffer_manager().get_depth_buffer()
        file = StringIO()
        img.save('buffer.png', file)
        print 'Loading depth image as texture...'
        # rewind the stream before re-reading what was just written
        file.seek(0)
        self.saved_texture = image.load('buffer.png', file)
        print 'Done.'
        self.window.set_visible(False)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
deathping1994/sendmail-api | venv/lib/python2.7/site-packages/wheel/bdist_wheel.py | 219 | 17006 | """
Create a wheel (.whl) distribution.
A wheel is a built archive format.
"""
import csv
import hashlib
import os
import subprocess
import warnings
import shutil
import json
import wheel
try:
import sysconfig
except ImportError: # pragma nocover
# Python < 2.7
import distutils.sysconfig as sysconfig
import pkg_resources
safe_name = pkg_resources.safe_name
safe_version = pkg_resources.safe_version
from shutil import rmtree
from email.generator import Generator
from distutils.util import get_platform
from distutils.core import Command
from distutils.sysconfig import get_python_version
from distutils import log as logger
from .pep425tags import get_abbr_impl, get_impl_ver
from .util import native, open_for_csv
from .archive import archive_wheelfile
from .pkginfo import read_pkg_info, write_pkg_info
from .metadata import pkginfo_to_dict
from . import pep425tags, metadata
def safer_name(name):
    """Return the distribution name normalized for filenames:
    pkg_resources.safe_name with dashes folded to underscores."""
    sanitized = safe_name(name)
    return sanitized.replace('-', '_')
def safer_version(version):
    """Return the distribution version normalized for filenames:
    pkg_resources.safe_version with dashes folded to underscores."""
    sanitized = safe_version(version)
    return sanitized.replace('-', '_')
class bdist_wheel(Command):
    """distutils Command that builds a PEP 427 wheel (.whl) archive from the
    project's build/install tree."""

    description = 'create a wheel distribution'

    # (long option, short option, help) triples consumed by distutils.
    user_options = [('bdist-dir=', 'b',
                     "temporary directory for creating the distribution"),
                    ('plat-name=', 'p',
                     "platform name to embed in generated filenames "
                     "(default: %s)" % get_platform()),
                    ('keep-temp', 'k',
                     "keep the pseudo-installation tree around after " +
                     "creating the distribution archive"),
                    ('dist-dir=', 'd',
                     "directory to put final built distributions in"),
                    ('skip-build', None,
                     "skip rebuilding everything (for testing/debugging)"),
                    ('relative', None,
                     "build the archive using relative paths"
                     "(default: false)"),
                    ('owner=', 'u',
                     "Owner name used when creating a tar file"
                     " [default: current user]"),
                    ('group=', 'g',
                     "Group name used when creating a tar file"
                     " [default: current group]"),
                    ('universal', None,
                     "make a universal wheel"
                     " (default: false)"),
                    ('python-tag=', None,
                     "Python implementation compatibility tag"
                     " (default: py%s)" % get_impl_ver()[0]),
                    ]

    boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal']

    def initialize_options(self):
        """Set every option to its pre-parse default (distutils protocol)."""
        self.bdist_dir = None
        self.data_dir = None
        self.plat_name = None
        self.format = 'zip'
        self.keep_temp = False
        self.dist_dir = None
        self.distinfo_dir = None
        self.egginfo_dir = None
        self.root_is_purelib = None
        self.skip_build = None
        self.relative = False
        self.owner = None
        self.group = None
        self.universal = False
        self.python_tag = 'py' + get_impl_ver()[0]

    def finalize_options(self):
        """Fill in unset options from the 'bdist' command and setup.cfg
        (distutils protocol)."""
        if self.bdist_dir is None:
            bdist_base = self.get_finalized_command('bdist').bdist_base
            self.bdist_dir = os.path.join(bdist_base, 'wheel')

        self.data_dir = self.wheel_dist_name + '.data'

        need_options = ('dist_dir', 'plat_name', 'skip_build')

        self.set_undefined_options('bdist',
                                   *zip(need_options, need_options))

        self.root_is_purelib = self.distribution.is_pure()

        # Support legacy [wheel] section for setting universal
        # (note: this local deliberately shadows the 'wheel' module import)
        wheel = self.distribution.get_option_dict('wheel')
        if 'universal' in wheel:
            # please don't define this in your global configs
            val = wheel['universal'][1].strip()
            if val.lower() in ('1', 'true', 'yes'):
                self.universal = True

    @property
    def wheel_dist_name(self):
        """Return distribution full name with - replaced with _"""
        return '-'.join((safer_name(self.distribution.get_name()),
                         safer_version(self.distribution.get_version())))

    def get_tag(self):
        """Return the (python tag, abi tag, platform tag) triple that will be
        embedded in the wheel filename and WHEEL metadata."""
        supported_tags = pep425tags.get_supported()

        if self.distribution.is_pure():
            if self.universal:
                impl = 'py2.py3'
            else:
                impl = self.python_tag
            tag = (impl, 'none', 'any')
        else:
            plat_name = self.plat_name
            if plat_name is None:
                plat_name = get_platform()
            plat_name = plat_name.replace('-', '_').replace('.', '_')
            impl_name = get_abbr_impl()
            impl_ver = get_impl_ver()
            # PEP 3149 -- no SOABI in Py 2
            # For PyPy?
            # "pp%s%s" % (sys.pypy_version_info.major,
            # sys.pypy_version_info.minor)
            abi_tag = sysconfig.get_config_vars().get('SOABI', 'none')
            if abi_tag.startswith('cpython-'):
                abi_tag = 'cp' + abi_tag.rsplit('-', 1)[-1]
            tag = (impl_name + impl_ver, abi_tag, plat_name)
            # XXX switch to this alternate implementation for non-pure:
            assert tag == supported_tags[0]
        return tag

    def get_archive_basename(self):
        """Return archive name without extension"""
        impl_tag, abi_tag, plat_tag = self.get_tag()
        archive_basename = "%s-%s-%s-%s" % (
            self.wheel_dist_name,
            impl_tag,
            abi_tag,
            plat_tag)
        return archive_basename

    def run(self):
        """Build the project, install it into a staging tree under bdist_dir,
        convert egg metadata to dist-info, and archive the tree as a .whl."""
        build_scripts = self.reinitialize_command('build_scripts')
        build_scripts.executable = 'python'

        if not self.skip_build:
            self.run_command('build')

        install = self.reinitialize_command('install',
                                            reinit_subcommands=True)
        install.root = self.bdist_dir
        install.compile = False
        install.skip_build = self.skip_build
        install.warn_dir = False

        # A wheel without setuptools scripts is more cross-platform.
        # Use the (undocumented) `no_ep` option to setuptools'
        # install_scripts command to avoid creating entry point scripts.
        install_scripts = self.reinitialize_command('install_scripts')
        install_scripts.no_ep = True

        # Use a custom scheme for the archive, because we have to decide
        # at installation time which scheme to use.
        for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'):
            setattr(install,
                    'install_' + key,
                    os.path.join(self.data_dir, key))

        basedir_observed = ''

        if os.name == 'nt':
            # win32 barfs if any of these are ''; could be '.'?
            # (distutils.command.install:change_roots bug)
            basedir_observed = os.path.join(self.data_dir, '..')
            self.install_libbase = self.install_lib = basedir_observed

        setattr(install,
                'install_purelib' if self.root_is_purelib else 'install_platlib',
                basedir_observed)

        logger.info("installing to %s", self.bdist_dir)

        self.run_command('install')

        archive_basename = self.get_archive_basename()

        pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
        if not self.relative:
            archive_root = self.bdist_dir
        else:
            archive_root = os.path.join(
                self.bdist_dir,
                self._ensure_relative(install.install_base))

        self.set_undefined_options(
            'install_egg_info', ('target', 'egginfo_dir'))
        self.distinfo_dir = os.path.join(self.bdist_dir,
                                         '%s.dist-info' % self.wheel_dist_name)
        self.egg2dist(self.egginfo_dir,
                      self.distinfo_dir)

        self.write_wheelfile(self.distinfo_dir)

        self.write_record(self.bdist_dir, self.distinfo_dir)

        # Make the archive
        if not os.path.exists(self.dist_dir):
            os.makedirs(self.dist_dir)
        wheel_name = archive_wheelfile(pseudoinstall_root, archive_root)

        # Sign the archive
        if 'WHEEL_TOOL' in os.environ:
            subprocess.call([os.environ['WHEEL_TOOL'], 'sign', wheel_name])

        # Add to 'Distribution.dist_files' so that the "upload" command works
        getattr(self.distribution, 'dist_files', []).append(
            ('bdist_wheel', get_python_version(), wheel_name))

        if not self.keep_temp:
            if self.dry_run:
                logger.info('removing %s', self.bdist_dir)
            else:
                rmtree(self.bdist_dir)

    def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel.__version__ + ')'):
        """Write the WHEEL metadata file (spec version, generator, purelib
        flag and one Tag header per expanded tag combination)."""
        from email.message import Message
        msg = Message()
        msg['Wheel-Version'] = '1.0' # of the spec
        msg['Generator'] = generator
        msg['Root-Is-Purelib'] = str(self.root_is_purelib).lower()

        # Doesn't work for bdist_wininst
        impl_tag, abi_tag, plat_tag = self.get_tag()
        # Compressed tags like 'py2.py3' expand to one Tag header each.
        for impl in impl_tag.split('.'):
            for abi in abi_tag.split('.'):
                for plat in plat_tag.split('.'):
                    msg['Tag'] = '-'.join((impl, abi, plat))

        wheelfile_path = os.path.join(wheelfile_base, 'WHEEL')
        logger.info('creating %s', wheelfile_path)
        with open(wheelfile_path, 'w') as f:
            Generator(f, maxheaderlen=0).flatten(msg)

    def _ensure_relative(self, path):
        """Strip a leading path separator so the path can be joined safely."""
        # copied from dir_util, deleted
        drive, path = os.path.splitdrive(path)
        if path[0:1] == os.sep:
            path = drive + path[1:]
        return path

    def _pkginfo_to_metadata(self, egg_info_path, pkginfo_path):
        """Convert egg PKG-INFO into wheel METADATA (delegates to metadata)."""
        return metadata.pkginfo_to_metadata(egg_info_path, pkginfo_path)

    def license_file(self):
        """Return license filename from a license-file key in setup.cfg, or None."""
        metadata = self.distribution.get_option_dict('metadata')
        if not 'license_file' in metadata:
            return None
        return metadata['license_file'][1]

    def setupcfg_requirements(self):
        """Generate requirements from setup.cfg as
        ('Requires-Dist', 'requirement; qualifier') tuples. From a metadata
        section in setup.cfg:

        [metadata]
        provides-extra = extra1
            extra2
        requires-dist = requirement; qualifier
            another; qualifier2
            unqualified

        Yields

        ('Provides-Extra', 'extra1'),
        ('Provides-Extra', 'extra2'),
        ('Requires-Dist', 'requirement; qualifier'),
        ('Requires-Dist', 'another; qualifier2'),
        ('Requires-Dist', 'unqualified')
        """
        metadata = self.distribution.get_option_dict('metadata')

        # our .ini parser folds - to _ in key names:
        for key, title in (('provides_extra', 'Provides-Extra'),
                           ('requires_dist', 'Requires-Dist')):
            if not key in metadata:
                continue
            field = metadata[key]
            for line in field[1].splitlines():
                line = line.strip()
                if not line:
                    continue
                yield (title, line)

    def add_requirements(self, metadata_path):
        """Add additional requirements from setup.cfg to file metadata_path"""
        additional = list(self.setupcfg_requirements())
        if not additional: return
        pkg_info = read_pkg_info(metadata_path)
        if 'Provides-Extra' in pkg_info or 'Requires-Dist' in pkg_info:
            warnings.warn('setup.cfg requirements overwrite values from setup.py')
            del pkg_info['Provides-Extra']
            del pkg_info['Requires-Dist']
        for k, v in additional:
            pkg_info[k] = v
        write_pkg_info(metadata_path, pkg_info)

    def egg2dist(self, egginfo_path, distinfo_path):
        """Convert an .egg-info directory into a .dist-info directory"""
        def adios(p):
            """Appropriately delete directory, file or link."""
            if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
                shutil.rmtree(p)
            elif os.path.exists(p):
                os.unlink(p)

        adios(distinfo_path)

        if not os.path.exists(egginfo_path):
            # There is no egg-info. This is probably because the egg-info
            # file/directory is not named matching the distribution name used
            # to name the archive file. Check for this case and report
            # accordingly.
            import glob
            pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
            possible = glob.glob(pat)
            err = "Egg metadata expected at %s but not found" % (egginfo_path,)
            if possible:
                alt = os.path.basename(possible[0])
                err += " (%s found - possible misnamed archive file?)" % (alt,)

            raise ValueError(err)

        if os.path.isfile(egginfo_path):
            # .egg-info is a single file
            pkginfo_path = egginfo_path
            pkg_info = self._pkginfo_to_metadata(egginfo_path, egginfo_path)
            os.mkdir(distinfo_path)
        else:
            # .egg-info is a directory
            pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
            pkg_info = self._pkginfo_to_metadata(egginfo_path, pkginfo_path)

            # ignore common egg metadata that is useless to wheel
            shutil.copytree(egginfo_path, distinfo_path,
                            ignore=lambda x, y: set(('PKG-INFO',
                                                     'requires.txt',
                                                     'SOURCES.txt',
                                                     'not-zip-safe',)))

            # delete dependency_links if it is only whitespace
            # NOTE(review): this file handle is never closed explicitly;
            # relies on CPython refcounting.
            dependency_links = os.path.join(distinfo_path, 'dependency_links.txt')
            if not open(dependency_links, 'r').read().strip():
                adios(dependency_links)

        write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)

        # XXX deprecated. Still useful for current distribute/setuptools.
        metadata_path = os.path.join(distinfo_path, 'METADATA')
        self.add_requirements(metadata_path)

        # XXX intentionally a different path than the PEP.
        metadata_json_path = os.path.join(distinfo_path, 'metadata.json')
        pymeta = pkginfo_to_dict(metadata_path,
                                 distribution=self.distribution)

        if 'description' in pymeta:
            description_filename = 'DESCRIPTION.rst'
            description_text = pymeta.pop('description')
            description_path = os.path.join(distinfo_path,
                                            description_filename)
            with open(description_path, "wb") as description_file:
                description_file.write(description_text.encode('utf-8'))
            pymeta['extensions']['python.details']['document_names']['description'] = description_filename

        # XXX heuristically copy any LICENSE/LICENSE.txt?
        license = self.license_file()
        if license:
            license_filename = 'LICENSE.txt'
            shutil.copy(license, os.path.join(self.distinfo_dir, license_filename))
            pymeta['extensions']['python.details']['document_names']['license'] = license_filename

        with open(metadata_json_path, "w") as metadata_json:
            json.dump(pymeta, metadata_json)

        adios(egginfo_path)

    def write_record(self, bdist_dir, distinfo_dir):
        """Write the RECORD file: one (path, sha256 digest, size) row per
        installed file; RECORD itself is listed with empty hash and size."""
        from wheel.util import urlsafe_b64encode

        record_path = os.path.join(distinfo_dir, 'RECORD')
        record_relpath = os.path.relpath(record_path, bdist_dir)

        def walk():
            # yield every file under the staging tree
            for dir, dirs, files in os.walk(bdist_dir):
                for f in files:
                    yield os.path.join(dir, f)

        def skip(path):
            """Wheel hashes every possible file."""
            return (path == record_relpath)

        with open_for_csv(record_path, 'w+') as record_file:
            writer = csv.writer(record_file)
            for path in walk():
                relpath = os.path.relpath(path, bdist_dir)
                if skip(relpath):
                    hash = ''
                    size = ''
                else:
                    with open(path, 'rb') as f:
                        data = f.read()
                    digest = hashlib.sha256(data).digest()
                    hash = 'sha256=' + native(urlsafe_b64encode(digest))
                    size = len(data)
                # NOTE(review): this rebinds the outer 'record_path' local
                # to the row's path (harmless: the original is not reused).
                record_path = os.path.relpath(
                    path, bdist_dir).replace(os.path.sep, '/')
                writer.writerow((record_path, hash, size))
| apache-2.0 |
USBhost/Nexus-payer-kernel | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Per-thread state for FUTEX_WAIT calls currently in flight.
process_names = {}
thread_thislock = {}   # tid -> futex address the thread is blocked on
thread_blocktime = {}  # tid -> timestamp (ns) when the thread blocked
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
# NOTE(review): process_names is assigned twice; the first binding is redundant.
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
                              nr, uaddr, op, val, utime, uaddr2, val3):
    """Record the lock address and block start time when a thread enters
    FUTEX_WAIT; other futex ops (e.g. WAKE) are ignored."""
    cmd = op & FUTEX_CMD_MASK
    if cmd != FUTEX_WAIT:
        return # we don't care about originators of WAKE events

    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
                             nr, ret):
    """On futex return, accumulate the elapsed blocked time into the
    long-lived (tid, lock) statistics and clear the in-flight state."""
    if thread_blocktime.has_key(tid):
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]
def trace_begin():
    """Called by perf when tracing starts."""
    print "Press control+C to stop and show the summary"
def trace_end():
    """Called by perf at exit: print per-(thread, lock) contention summary."""
    for (tid, lock) in lock_waits:
        min, max, avg, count = lock_waits[tid, lock]
        print "%s[%d] lock %x contended %d times, %d avg ns" % \
              (process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
ARTbio/tools-artbio | tools/manta/customConfigManta.py | 1 | 3609 | import argparse
def Parser():
    """Parse the command line into Manta workflow configuration options.

    Each option mirrors a key of the [manta] section written to
    configManta.py.ini; the defaults match the values hard-coded below.
    Returns the parsed argparse.Namespace.
    """
    the_parser = argparse.ArgumentParser()
    the_parser.add_argument(
        '--minCandidateVariantSize', type=int, default=8,
        help="Run Manta reporting for all SVs/indels at or above this size")
    the_parser.add_argument(
        '--rnaMinCandidateVariantSize', type=int, default=1000,
        help="Separate option (to provide different default) used for \
            runs in RNA-mode")
    the_parser.add_argument(
        '--minEdgeObservations', type=int, default=3,
        help="Remove all edges from the graph unless they're supported \
            by this many 'observations'")
    the_parser.add_argument(
        '--graphNodeMaxEdgeCount', type=int, default=10,
        help="If both nodes of an edge have an edge count higher than this, \
            then skip evaluation of the edge")
    the_parser.add_argument(
        '--minCandidateSpanningCount', type=int, default=3,
        help="Run discovery and candidate reporting for all SVs/indels with \
            at least this many spanning support observations")
    the_parser.add_argument(
        '--minScoredVariantSize', type=int, default=50,
        help="After candidate identification, only score and report \
            SVs/indels at or above this size")
    the_parser.add_argument(
        '--minDiploidVariantScore', type=int, default=10,
        help="minimum VCF QUAL score for a variant to be included in \
            the diploid vcf")
    the_parser.add_argument(
        '--minPassDiploidVariantScore', type=int, default=20,
        help="VCF QUAL score below which a variant is marked as \
            filtered in the diploid vcf")
    the_parser.add_argument(
        '--minPassDiploidGTScore', type=int, default=15,
        help="minimum genotype quality score below which single samples \
            are filtered for a variant in the diploid vcf")
    the_parser.add_argument(
        '--minSomaticScore', type=int, default=10,
        help="minimum VCF QUAL score for a variant to be included in the \
            diploid vcf")
    the_parser.add_argument(
        '--minPassSomaticScore', type=int, default=30,
        help="somatic quality scores below this level are filtered in the \
            somatic vcf")
    the_parser.add_argument(
        '--enableRemoteReadRetrievalForInsertionsInGermlineCallingModes',
        type=int, default=1,
        help="includes tumor-normal subtraction and tumor-only calling")
    the_parser.add_argument(
        '--enableRemoteReadRetrievalForInsertionsInCancerCallingModes',
        type=int, default=0,
        help="GermlineCallingModes includes all other calling modes")
    the_parser.add_argument(
        '--useOverlapPairEvidence', type=int, default=0,
        help="Set 1 if an overlapping read pair will be considered as \
            evidence. Set to 0 to skip overlapping read pairs")
    args = the_parser.parse_args()
    return args
if __name__ == "__main__":
    args = Parser()
    # Recover arguments as a dictionary: keys are argument names and
    # values are argument values.
    argsDict = vars(args)
    ini_lines = []
    # implement first, hard-coded ini lines
    ini_lines.append('[manta]')
    ini_lines.append('referenceFasta = /dummy/path/to/genome.fa')
    # implement the rest of the ini lines from argsDict
    for argument in argsDict:
        ini_lines.append("%s = %s" % (argument, str(argsDict[argument])))
    # Write configManta.py.ini; 'with' guarantees the handle is closed and
    # flushed even on error (the original leaked the open file object).
    with open('configManta.py.ini', 'w') as handler:
        for line in ini_lines:
            handler.write("%s\n" % line)
| mit |
jsmits/github-cli | tests/test_issues_cli.py | 1 | 2111 | import os
import sys
from nose.tools import assert_raises
from github.issues import main
repo = 'jsmits/github-cli-public-test'
prog = 'ghi'
def test_commands():
    """Nose generator: yield one sub-test per (command, expected) pair."""
    for cmd, exp in test_input:
        def run_single(cmd, exp):
            argv = [prog, '-r', repo]
            tokens = cmd.split(' ')
            # the bare `ghi` invocation splits to [''] -> add no extra args
            if tokens != ['']:
                argv.extend(tokens)
            sys.argv = argv
            if type(exp) == type(Exception):
                # an exception class means "main() must raise it"
                assert_raises(exp, main)
            else:
                assert main() == exp
        run_single.description = "command: %s %s" % (prog, cmd)
        yield run_single, cmd, exp
# Table of (command line, expected result) pairs consumed by test_commands.
# Expected values: None = command runs and returns None; a string = expected
# error output; an Exception subclass = main() must raise it.
test_input = (
    # list commands
    ('list', None), ('list -v', None), ('', None), ('-v', None),
    ('lis', "error: command 'lis' not implemented"),
    ('l', "error: command 'l' not implemented"),
    ('list -s open', None), ('list -s o', None), ('list -s closed', None),
    ('list -s c', None), ('list -s all', None), ('list -s a', None),
    ('-s a', None), ('-s a -v', None), ('list -s close', SystemExit),
    ('list -u bobdole', None),
    # show commands
    ('show 1', None), ('1', None), ('17288182', "error: server problem (HTTP"\
    " Error 404: Not Found)"), ('5', None), ('5 -v', None),
    # state modification commands
    ('close 1', None), ('open 1', None), ('c 1', None), ('close 1', None),
    ('o 1', None), ('open 1', None),
    # label commands
    ('label add testing 1', None), ('label remove testing 1', None),
    ('al testing 1', None), ('rl testing 1', None),
    ('label add testing', "error: number required\nexample: ghi label add "\
    "testing 1"),
    # help commands
    ('--help', SystemExit), ('-h', SystemExit),
    # browser commands
    ('-w', SystemExit), ('1 -w', SystemExit),
    # search commands
    ('search test', None), ('s test', None), ('search test -s open', None),
    ('search test -s o', None), ('search test -s closed', None),
    ('search test -s c', None), ('s test -s c', None), ('search', SystemExit),
)
| bsd-3-clause |
jasondunsmore/heat | heat/engine/timestamp.py | 8 | 1599 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common import exception
class Timestamp(object):
    """Descriptor that proxies a timestamp attribute to the database record.

    Reads fetch the database representation of the owning object and return
    the named attribute; writes persist the new value via update_and_save().
    """

    def __init__(self, db_fetch, attribute):
        """Store the fetch callable ``(context, id) -> db object`` and the
        name of the attribute to proxy."""
        self.db_fetch = db_fetch
        self.attribute = attribute

    def __get__(self, obj, obj_class):
        """Return the timestamp, or None when accessed on the class or on
        an object that has no database identity yet."""
        if obj is not None and obj.id is not None:
            record = self.db_fetch(obj.context, obj.id)
            return getattr(record, self.attribute)
        return None

    def __set__(self, obj, timestamp):
        """Persist a new timestamp for the given object."""
        if obj.id is None:
            raise exception.ResourceNotAvailable(resource_name=obj.name)
        record = self.db_fetch(obj.context, obj.id)
        record.update_and_save({self.attribute: timestamp})
| apache-2.0 |
izonder/intellij-community | python/helpers/pydev/pydevd_resolver.py | 38 | 19673 | try:
import StringIO
except:
import io as StringIO
import traceback
from os.path import basename
try:
__setFalse = False
except:
import __builtin__
setattr(__builtin__, 'True', 1)
setattr(__builtin__, 'False', 0)
import pydevd_constants
from pydevd_constants import DictIterItems, DictKeys, xrange
# Note: 300 is already a lot to see in the outline (after that the user should really use the shell to get things)
# and this also means we'll pass less information to the client side (which makes debugging faster).
MAX_ITEMS_TO_HANDLE = 300
TOO_LARGE_MSG = 'Too large to show contents. Max items to show: ' + str(MAX_ITEMS_TO_HANDLE)
TOO_LARGE_ATTR = 'Unable to handle:'
#=======================================================================================================================
# UnableToResolveVariableException
#=======================================================================================================================
class UnableToResolveVariableException(Exception):
    """Raised when a resolver cannot map an attribute key back to a child value."""
    pass
#=======================================================================================================================
# InspectStub
#=======================================================================================================================
class InspectStub:
    """Minimal stand-in for the 'inspect' module when it cannot be imported
    (e.g. on some interpreters); conservatively reports nothing as a
    builtin or a routine, so no attribute gets filtered out."""
    def isbuiltin(self, _args):
        return False
    def isroutine(self, object):
        return False
try:
import inspect
except:
inspect = InspectStub()
try:
import java.lang #@UnresolvedImport
except:
pass
#types does not include a MethodWrapperType
try:
MethodWrapperType = type([].__str__)
except:
MethodWrapperType = None
#=======================================================================================================================
# AbstractResolver
#=======================================================================================================================
class AbstractResolver:
    '''
    This class exists only for documentation purposes to explain how to create a resolver.

    Some examples on how to resolve things:
    - list: getDictionary could return a dict with index->item and use the index to resolve it later
    - set: getDictionary could return a dict with id(object)->object and reiterate in that array to resolve it later
    - arbitrary instance: getDictionary could return dict with attr_name->attr and use getattr to resolve it later
    '''

    def resolve(self, var, attribute):
        '''
        In this method, we'll resolve some child item given the string representation of the item in the key
        representing the previously asked dictionary.

        @param var: this is the actual variable to be resolved.
        @param attribute: this is the string representation of a key previously returned in getDictionary.
        '''
        raise NotImplementedError

    def getDictionary(self, var):
        '''
        @param var: this is the variable that should have its children gotten.

        @return: a dictionary where each pair key, value should be shown to the user as children items
        in the variables view for the given var.
        '''
        raise NotImplementedError
#=======================================================================================================================
# DefaultResolver
#=======================================================================================================================
class DefaultResolver:
    '''
    DefaultResolver is the class that'll actually resolve how to show some variable.
    '''

    def resolve(self, var, attribute):
        """Children are plain attributes, so resolution is just getattr."""
        return getattr(var, attribute)

    def getDictionary(self, var):
        """Return {name: value} children for var, choosing the CPython or
        Jython strategy (MethodWrapperType is only set on CPython)."""
        if MethodWrapperType:
            return self._getPyDictionary(var)
        else:
            return self._getJyDictionary(var)

    def _getJyDictionary(self, obj):
        """Collect name -> value for a Jython object via java reflection,
        then complete it with a plain dir() pass."""
        ret = {}
        found = java.util.HashMap()

        original = obj
        if hasattr(obj, '__class__') and obj.__class__ == java.lang.Class:

            # get info about superclasses
            classes = []
            classes.append(obj)
            c = obj.getSuperclass()
            while c is not None:
                classes.append(c)
                c = c.getSuperclass()

            # get info about interfaces
            interfs = []
            for obj in classes:
                interfs.extend(obj.getInterfaces())
            classes.extend(interfs)

            # now is the time when we actually get info on the declared methods and fields
            for obj in classes:
                declaredMethods = obj.getDeclaredMethods()
                declaredFields = obj.getDeclaredFields()
                for i in xrange(len(declaredMethods)):
                    name = declaredMethods[i].getName()
                    ret[name] = declaredMethods[i].toString()
                    found.put(name, 1)

                for i in xrange(len(declaredFields)):
                    name = declaredFields[i].getName()
                    found.put(name, 1)
                    # make private fields readable before fetching the value
                    declaredFields[i].setAccessible(True)
                    try:
                        ret[name] = declaredFields[i].get(original)
                    except:
                        ret[name] = declaredFields[i].toString()

        # this simple dir does not always get all the info, that's why we have the part before
        # (e.g.: if we do a dir on String, some methods that are from other interfaces such as
        # charAt don't appear)
        try:
            d = dir(original)
            for name in d:
                # BUGFIX: was 'found.get(name) is not 1' — identity
                # comparison against an int literal is implementation
                # dependent; compare by value instead.
                if found.get(name) != 1:
                    ret[name] = getattr(original, name)
        except:
            # sometimes we're unable to do a dir
            pass

        return ret

    def _getPyDictionary(self, var):
        """Collect name -> value for a CPython object via dir()/getattr,
        filtering out dunders, builtins and routines."""
        filterPrivate = False
        filterSpecial = True
        filterFunction = True
        filterBuiltIn = True

        names = dir(var)
        if not names and hasattr(var, '__members__'):
            names = var.__members__
        d = {}

        # Be aware that the order in which the filters are applied attempts to
        # optimize the operation by removing as many items as possible in the
        # first filters, leaving fewer items for later filters
        if filterBuiltIn or filterFunction:
            for n in names:
                if filterSpecial:
                    if n.startswith('__') and n.endswith('__'):
                        continue

                if filterPrivate:
                    if n.startswith('_') or n.endswith('__'):
                        continue

                try:
                    attr = getattr(var, n)

                    # filter builtins?
                    if filterBuiltIn:
                        if inspect.isbuiltin(attr):
                            continue

                    # filter functions?
                    if filterFunction:
                        if inspect.isroutine(attr) or isinstance(attr, MethodWrapperType):
                            continue
                except:
                    # if some error occurs getting it, let's put it to the user.
                    strIO = StringIO.StringIO()
                    traceback.print_exc(file=strIO)
                    attr = strIO.getvalue()

                d[n] = attr

        return d
#=======================================================================================================================
# DictResolver
#=======================================================================================================================
class DictResolver:
    '''Resolves a dict: each entry is shown as "key-repr (id)" -> value.'''

    def resolve(self, dict, key):
        # Synthetic entries added by getDictionary have no real value.
        if key in ('__len__', TOO_LARGE_ATTR):
            return None

        if '(' not in key:
            # We have to treat this case because the dict resolver is also
            # used to directly resolve the global and local scopes (which
            # already have the items directly).
            try:
                return dict[key]
            except:
                return getattr(dict, key)

        # The key string embeds the id() of the real key object; iterate to
        # find the entry with that identity, since that's the only way to get
        # the reference back from the string.
        expected_id = int(key.split('(')[-1][:-1])
        for entry_key, entry_val in DictIterItems(dict):
            if id(entry_key) == expected_id:
                return entry_val

        raise UnableToResolveVariableException()

    def keyStr(self, key):
        # Quote string keys so they read like literals in the variables view.
        if isinstance(key, str):
            return '%r' % key
        if not pydevd_constants.IS_PY3K and isinstance(key, unicode):
            return "u'%s'" % key
        return key

    def getDictionary(self, dict):
        contents = {}
        for count, (entry_key, entry_val) in enumerate(DictIterItems(dict), start=1):
            # Embed the id() so resolve() can find the real key object later.
            contents['%s (%s)' % (self.keyStr(entry_key), id(entry_key))] = entry_val
            if count > MAX_ITEMS_TO_HANDLE:
                contents[TOO_LARGE_ATTR] = TOO_LARGE_MSG
                break

        contents['__len__'] = len(dict)
        # In case the class extends the built-in type and has extra fields.
        contents.update(defaultResolver.getDictionary(dict))
        return contents
#=======================================================================================================================
# TupleResolver
#=======================================================================================================================
class TupleResolver:  # to enumerate tuples and lists
    '''Resolves sequences: entries are zero-padded index strings -> items.'''

    def resolve(self, var, attribute):
        '''
        @param var: that's the original attribute
        @param attribute: that's the key passed in the dict (as a string)
        '''
        # Synthetic entries added by getDictionary have no real value.
        if attribute in ('__len__', TOO_LARGE_ATTR):
            return None
        try:
            return var[int(attribute)]
        except:
            # Not an index: fall back to attribute lookup.
            return getattr(var, attribute)

    def getDictionary(self, var):
        length = len(var)
        # Pad indices so the keys sort lexicographically in display order.
        format_str = '%0' + str(int(len(str(length)))) + 'd'

        contents = {}
        for index, item in enumerate(var):
            contents[format_str % index] = item
            if index + 1 > MAX_ITEMS_TO_HANDLE:
                contents[TOO_LARGE_ATTR] = TOO_LARGE_MSG
                break
        contents['__len__'] = len(var)
        # In case the class extends the built-in type and has extra fields.
        contents.update(defaultResolver.getDictionary(var))
        return contents
#=======================================================================================================================
# SetResolver
#=======================================================================================================================
class SetResolver:
    '''
    Resolves a set as dict id(object)->object
    '''

    def resolve(self, var, attribute):
        # Synthetic entries added by getDictionary have no real value.
        if attribute in ('__len__', TOO_LARGE_ATTR):
            return None

        try:
            attribute = int(attribute)
        except:
            # Not an id: fall back to attribute lookup.
            return getattr(var, attribute)

        # Members are keyed by identity, so scan for the matching id().
        for member in var:
            if id(member) == attribute:
                return member

        raise UnableToResolveVariableException('Unable to resolve %s in %s' % (attribute, var))

    def getDictionary(self, var):
        contents = {}
        for count, member in enumerate(var, start=1):
            contents[id(member)] = member
            if count > MAX_ITEMS_TO_HANDLE:
                contents[TOO_LARGE_ATTR] = TOO_LARGE_MSG
                break

        contents['__len__'] = len(var)
        # In case the class extends the built-in type and has extra fields.
        contents.update(defaultResolver.getDictionary(var))
        return contents
#=======================================================================================================================
# InstanceResolver
#=======================================================================================================================
class InstanceResolver:
    '''Resolves a plain Jython object instance through Java reflection.'''

    def resolve(self, var, attribute):
        # Look the field up on the declaring class and force accessibility so
        # private fields can be read.
        field = var.__class__.getDeclaredField(attribute)
        field.setAccessible(True)
        return field.get(var)

    def getDictionary(self, obj):
        ret = {}

        # NOTE(review): only fields declared directly on the class are
        # listed, not inherited ones — presumably intentional; confirm.
        declaredFields = obj.__class__.getDeclaredFields()
        for i in xrange(len(declaredFields)):
            name = declaredFields[i].getName()
            try:
                declaredFields[i].setAccessible(True)
                ret[name] = declaredFields[i].get(obj)
            except:
                # Best effort: reflection may be forbidden for some fields.
                traceback.print_exc()

        return ret
#=======================================================================================================================
# JyArrayResolver
#=======================================================================================================================
class JyArrayResolver:
    '''
    This resolves a regular Object[] array from java
    '''

    def resolve(self, var, attribute):
        # '__len__' is a synthetic entry added by getDictionary, not an index.
        if attribute == '__len__':
            return None

        return var[int(attribute)]

    def getDictionary(self, obj):
        """Map index -> element for every array slot, plus a '__len__' entry."""
        ret = {}

        # COMPAT FIX: use enumerate() instead of the Python-2-only xrange();
        # iteration order and results are unchanged on Jython 2.x.
        for index, item in enumerate(obj):
            ret[index] = item

        ret['__len__'] = len(obj)
        return ret
#=======================================================================================================================
# NdArrayResolver
#=======================================================================================================================
class NdArrayResolver:
    '''
    This resolves a numpy ndarray returning some metadata about the NDArray
    '''

    def is_numeric(self, obj):
        # Only numeric dtype kinds (bool, int, uint, float, complex) support
        # cheap min()/max() computation.
        if not hasattr(obj, 'dtype'):
            return False
        return obj.dtype.kind in 'biufc'

    def resolve(self, obj, attribute):
        if attribute == '__internals__':
            return defaultResolver.getDictionary(obj)

        if attribute in ('min', 'max'):
            # min/max are only offered for numeric arrays.
            if self.is_numeric(obj):
                return getattr(obj, attribute)()
            return None

        if attribute in ('shape', 'dtype', 'size'):
            return getattr(obj, attribute)

        if attribute.startswith('['):
            # A slice entry: expose items as attributes on a plain container,
            # with zero-padded names so they display in order.
            container = NdArrayItemsContainer()
            format_str = '%0' + str(int(len(str(len(obj))))) + 'd'
            for position, item in enumerate(obj):
                setattr(container, format_str % position, item)
                if position + 1 > MAX_ITEMS_TO_HANDLE:
                    setattr(container, TOO_LARGE_ATTR, TOO_LARGE_MSG)
                    break
            return container

        return None

    def getDictionary(self, obj):
        ret = dict()
        ret['__internals__'] = defaultResolver.getDictionary(obj)
        if obj.size > 1024 * 1024:
            # Too big: computing min/max would stall the debugger.
            ret['min'] = 'ndarray too big, calculating min would slow down debugging'
            ret['max'] = 'ndarray too big, calculating max would slow down debugging'
        elif self.is_numeric(obj):
            ret['min'] = obj.min()
            ret['max'] = obj.max()
        else:
            ret['min'] = 'not a numeric object'
            ret['max'] = 'not a numeric object'
        ret['shape'] = obj.shape
        ret['dtype'] = obj.dtype
        ret['size'] = obj.size
        # First slice of items; resolve() handles expanding it further.
        ret['[0:%s] ' % (len(obj))] = list(obj[0:MAX_ITEMS_TO_HANDLE])
        return ret
class NdArrayItemsContainer: pass  # bare namespace object; NdArrayResolver sets array items on it as attributes
#=======================================================================================================================
# MultiValueDictResolver
#=======================================================================================================================
class MultiValueDictResolver(DictResolver):
    '''Resolves Django-style MultiValueDicts; values come from getlist().'''

    def resolve(self, dict, key):
        # Synthetic entries added by getDictionary have no real value.
        if key in ('__len__', TOO_LARGE_ATTR):
            return None

        # The key string embeds the id() of the real key object; iterate to
        # find the entry with that identity, since that's the only way to get
        # the reference back from the string.
        expected_id = int(key.split('(')[-1][:-1])
        for real_key in DictKeys(dict):
            if id(real_key) == expected_id:
                return dict.getlist(real_key)

        raise UnableToResolveVariableException()

    def getDictionary(self, dict):
        ret = {}
        for count, real_key in enumerate(DictKeys(dict), start=1):
            # Embed the id() so resolve() can find the real key object later.
            ret['%s (%s)' % (self.keyStr(real_key), id(real_key))] = dict.getlist(real_key)
            if count > MAX_ITEMS_TO_HANDLE:
                ret[TOO_LARGE_ATTR] = TOO_LARGE_MSG
                break

        ret['__len__'] = len(dict)
        return ret
#=======================================================================================================================
# DequeResolver
#=======================================================================================================================
class DequeResolver(TupleResolver):
    '''Deque resolution is tuple resolution plus the deque's maxlen.'''

    def getDictionary(self, var):
        contents = TupleResolver.getDictionary(self, var)
        # deques always expose maxlen; the default guards other deque-likes.
        contents['maxlen'] = getattr(var, 'maxlen', None)
        return contents
#=======================================================================================================================
# FrameResolver
#=======================================================================================================================
class FrameResolver:
    '''
    This resolves a frame.
    '''

    def resolve(self, obj, attribute):
        if attribute == '__internals__':
            return defaultResolver.getDictionary(obj)
        elif attribute == 'stack':
            return self.getFrameStack(obj)
        elif attribute == 'f_locals':
            return obj.f_locals
        return None

    def getDictionary(self, obj):
        summary = dict()
        summary['__internals__'] = defaultResolver.getDictionary(obj)
        summary['stack'] = self.getFrameStack(obj)
        summary['f_locals'] = obj.f_locals
        return summary

    def getFrameStack(self, frame):
        """Return descriptions of *frame* and all its callers, innermost first."""
        names = []
        if frame is not None:
            names.append(self.getFrameName(frame))
            parent = frame.f_back
            while parent:
                names.append(self.getFrameName(parent))
                parent = parent.f_back
        return names

    def getFrameName(self, frame):
        """One-line human-readable description of a frame."""
        if frame is None:
            return 'None'
        try:
            name = basename(frame.f_code.co_filename)
            return 'frame: %s [%s:%s] id:%s' % (frame.f_code.co_name, name, frame.f_lineno, id(frame))
        except:
            # Frames with odd code objects still get a generic label.
            return 'frame object'
# Module-level singleton instances of each resolver; the debugger selects one
# of these by the type of the variable being inspected.
defaultResolver = DefaultResolver()
dictResolver = DictResolver()
tupleResolver = TupleResolver()
instanceResolver = InstanceResolver()
jyArrayResolver = JyArrayResolver()
setResolver = SetResolver()
ndarrayResolver = NdArrayResolver()
multiValueDictResolver = MultiValueDictResolver()
dequeResolver = DequeResolver()
frameResolver = FrameResolver()
| apache-2.0 |
erkanay/django | django/contrib/auth/decorators.py | 65 | 3164 | from functools import wraps
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied
from django.utils.decorators import available_attrs
from django.utils.encoding import force_str
from django.utils.six.moves.urllib.parse import urlparse
from django.shortcuts import resolve_url
def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
    """
    Decorator for views that checks that the user passes the given test,
    redirecting to the log-in page if necessary. The test should be a callable
    that takes the user object and returns True if the user passes.
    """
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            if test_func(request.user):
                return view_func(request, *args, **kwargs)

            path = request.build_absolute_uri()
            # urlparse chokes on lazy objects in Python 3, force to str
            resolved_login_url = force_str(
                resolve_url(login_url or settings.LOGIN_URL))
            # If the login url shares the current scheme and net location,
            # the relative path is enough for the "next" url.
            login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
            current_scheme, current_netloc = urlparse(path)[:2]
            same_scheme = not login_scheme or login_scheme == current_scheme
            same_netloc = not login_netloc or login_netloc == current_netloc
            if same_scheme and same_netloc:
                path = request.get_full_path()
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(
                path, resolved_login_url, redirect_field_name)
        return _wrapped_view
    return decorator
def login_required(function=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None):
    """
    Decorator for views that checks that the user is logged in, redirecting
    to the log-in page if necessary.
    """
    actual_decorator = user_passes_test(
        lambda u: u.is_authenticated(),
        login_url=login_url,
        redirect_field_name=redirect_field_name,
    )
    # Support both @login_required and @login_required(...) usage.
    return actual_decorator(function) if function else actual_decorator
def permission_required(perm, login_url=None, raise_exception=False):
    """
    Decorator for views that checks whether a user has a particular permission
    enabled, redirecting to the log-in page if necessary.
    If the raise_exception parameter is given the PermissionDenied exception
    is raised.
    """
    def check_perms(user):
        # Accept a single permission string or a list/tuple of them.
        perms = perm if isinstance(perm, (list, tuple)) else (perm,)
        # First check if the user has the permission (even anon users).
        if user.has_perms(perms):
            return True
        # In case the 403 handler should be called raise the exception.
        if raise_exception:
            raise PermissionDenied
        # As the last resort, show the login form.
        return False
    return user_passes_test(check_perms, login_url=login_url)
| bsd-3-clause |
darkryder/django | tests/flatpages_tests/test_middleware.py | 36 | 9046 | from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from django.test.utils import ignore_warnings
from django.utils.deprecation import RemovedInDjango20Warning
from .settings import FLATPAGES_TEMPLATES
class TestDataMixin(object):
    """Shared fixtures: one Site (pk=1) and four FlatPages — two public and
    two requiring registration, each at a top-level and a nested URL."""

    @classmethod
    def setUpTestData(cls):
        # don't use the manager because we want to ensure the site exists
        # with pk=1, regardless of whether or not it already exists.
        cls.site1 = Site(pk=1, domain='example.com', name='example.com')
        cls.site1.save()
        # Public flatpages.
        cls.fp1 = FlatPage.objects.create(
            url='/flatpage/', title='A Flatpage', content="Isn't it flat!",
            enable_comments=False, template_name='', registration_required=False
        )
        cls.fp2 = FlatPage.objects.create(
            url='/location/flatpage/', title='A Nested Flatpage', content="Isn't it flat and deep!",
            enable_comments=False, template_name='', registration_required=False
        )
        # Flatpages that require an authenticated user.
        cls.fp3 = FlatPage.objects.create(
            url='/sekrit/', title='Sekrit Flatpage', content="Isn't it sekrit!",
            enable_comments=False, template_name='', registration_required=True
        )
        cls.fp4 = FlatPage.objects.create(
            url='/location/sekrit/', title='Sekrit Nested Flatpage', content="Isn't it sekrit and deep!",
            enable_comments=False, template_name='', registration_required=True
        )
        # Attach every page to the fixture site.
        cls.fp1.sites.add(cls.site1)
        cls.fp2.sites.add(cls.site1)
        cls.fp3.sites.add(cls.site1)
        cls.fp4.sites.add(cls.site1)
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
    LOGIN_URL='/accounts/login/',
    MIDDLEWARE=[
        'django.middleware.common.CommonMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
    ],
    ROOT_URLCONF='flatpages_tests.urls',
    TEMPLATES=FLATPAGES_TEMPLATES,
    SITE_ID=1,
)
class FlatpageMiddlewareTests(TestDataMixin, TestCase):
    """Flatpages served both through an explicit view and through the
    FlatpageFallbackMiddleware (which catches 404s)."""

    def test_view_flatpage(self):
        "A flatpage can be served through a view, even when the middleware is in use"
        response = self.client.get('/flatpage_root/flatpage/')
        self.assertContains(response, "<p>Isn't it flat!</p>")

    def test_view_non_existent_flatpage(self):
        "A non-existent flatpage raises 404 when served through a view, even when the middleware is in use"
        response = self.client.get('/flatpage_root/no_such_flatpage/')
        self.assertEqual(response.status_code, 404)

    def test_view_authenticated_flatpage(self):
        "A flatpage served through a view can require authentication"
        # Anonymous request is redirected to the login page first.
        response = self.client.get('/flatpage_root/sekrit/')
        self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
        user = User.objects.create_user('testuser', 'test@example.com', 's3krit')
        self.client.force_login(user)
        response = self.client.get('/flatpage_root/sekrit/')
        self.assertContains(response, "<p>Isn't it sekrit!</p>")

    def test_fallback_flatpage(self):
        "A flatpage can be served by the fallback middleware"
        response = self.client.get('/flatpage/')
        self.assertContains(response, "<p>Isn't it flat!</p>")

    def test_fallback_non_existent_flatpage(self):
        "A non-existent flatpage raises a 404 when served by the fallback middleware"
        response = self.client.get('/no_such_flatpage/')
        self.assertEqual(response.status_code, 404)

    def test_fallback_authenticated_flatpage(self):
        "A flatpage served by the middleware can require authentication"
        response = self.client.get('/sekrit/')
        self.assertRedirects(response, '/accounts/login/?next=/sekrit/')
        user = User.objects.create_user('testuser', 'test@example.com', 's3krit')
        self.client.force_login(user)
        response = self.client.get('/sekrit/')
        self.assertContains(response, "<p>Isn't it sekrit!</p>")

    def test_fallback_flatpage_special_chars(self):
        "A flatpage with special chars in the URL can be served by the fallback middleware"
        fp = FlatPage.objects.create(
            url="/some.very_special~chars-here/",
            title="A very special page",
            content="Isn't it special!",
            enable_comments=False,
            registration_required=False,
        )
        fp.sites.add(settings.SITE_ID)
        response = self.client.get('/some.very_special~chars-here/')
        self.assertContains(response, "<p>Isn't it special!</p>")
# Re-run the same test suite with the deprecated MIDDLEWARE_CLASSES setting to
# ensure the old-style middleware configuration behaves identically.
@ignore_warnings(category=RemovedInDjango20Warning)
@override_settings(
    MIDDLEWARE=None,
    MIDDLEWARE_CLASSES=[
        'django.middleware.common.CommonMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
    ],
)
class FlatpageMiddlewareClassesTests(FlatpageMiddlewareTests):
    # Inherits every test; only the settings differ.
    pass
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
    APPEND_SLASH=True,
    LOGIN_URL='/accounts/login/',
    MIDDLEWARE=[
        'django.middleware.common.CommonMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
    ],
    ROOT_URLCONF='flatpages_tests.urls',
    TEMPLATES=FLATPAGES_TEMPLATES,
    SITE_ID=1,
)
class FlatpageMiddlewareAppendSlashTests(TestDataMixin, TestCase):
    """Interaction of APPEND_SLASH with flatpage views and the fallback
    middleware: slashless URLs should 301-redirect to the slashed page."""

    def test_redirect_view_flatpage(self):
        "A flatpage can be served through a view and should add a slash"
        response = self.client.get('/flatpage_root/flatpage')
        self.assertRedirects(response, '/flatpage_root/flatpage/', status_code=301)

    def test_redirect_view_non_existent_flatpage(self):
        "A non-existent flatpage raises 404 when served through a view and should not add a slash"
        response = self.client.get('/flatpage_root/no_such_flatpage')
        self.assertEqual(response.status_code, 404)

    def test_redirect_fallback_flatpage(self):
        "A flatpage can be served by the fallback middleware and should add a slash"
        response = self.client.get('/flatpage')
        self.assertRedirects(response, '/flatpage/', status_code=301)

    def test_redirect_fallback_non_existent_flatpage(self):
        "A non-existent flatpage raises a 404 when served by the fallback middleware and should not add a slash"
        response = self.client.get('/no_such_flatpage')
        self.assertEqual(response.status_code, 404)

    def test_redirect_fallback_flatpage_special_chars(self):
        "A flatpage with special chars in the URL can be served by the fallback middleware and should add a slash"
        fp = FlatPage.objects.create(
            url="/some.very_special~chars-here/",
            title="A very special page",
            content="Isn't it special!",
            enable_comments=False,
            registration_required=False,
        )
        fp.sites.add(settings.SITE_ID)
        response = self.client.get('/some.very_special~chars-here')
        self.assertRedirects(response, '/some.very_special~chars-here/', status_code=301)

    def test_redirect_fallback_flatpage_root(self):
        "A flatpage at / should not cause a redirect loop when APPEND_SLASH is set"
        fp = FlatPage.objects.create(
            url="/",
            title="Root",
            content="Root",
            enable_comments=False,
            registration_required=False,
        )
        fp.sites.add(settings.SITE_ID)
        # '/' already ends in a slash, so the page must be served directly.
        response = self.client.get('/')
        self.assertContains(response, "<p>Root</p>")
# Re-run the APPEND_SLASH suite with the deprecated MIDDLEWARE_CLASSES setting
# to ensure the old-style middleware configuration behaves identically.
@ignore_warnings(category=RemovedInDjango20Warning)
@override_settings(
    MIDDLEWARE=None,
    MIDDLEWARE_CLASSES=[
        'django.middleware.common.CommonMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
    ],
)
class FlatpageAppendSlashMiddlewareClassesTests(FlatpageMiddlewareAppendSlashTests):
    # Inherits every test; only the settings differ.
    pass
| bsd-3-clause |
CHT5/program-y | src/test/parser/template/graph/test_random.py | 3 | 3257 | import unittest
import xml.etree.ElementTree as ET
from programy.parser.exceptions import ParserException
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.rand import TemplateRandomNode
from test.parser.template.graph.test_graph_client import TemplateGraphTestClient
class TemplateGraphRandomTests(TemplateGraphTestClient):
    """Parser tests for the AIML <random> template element: it must reject an
    empty element, build a TemplateRandomNode per <random>, and resolve to one
    of its <li> children (including when <random> elements are nested)."""

    def test_random_template_no_li(self):
        # A <random> with no <li> children is invalid AIML.
        template = ET.fromstring("""
            <template>
                <random>
                </random>
            </template>
            """)
        with self.assertRaises(ParserException):
            ast = self.parser.parse_template_expression(template)

    def test_random_template(self):
        template = ET.fromstring("""
            <template>
                <random>
                    <li>1</li>
                    <li>2</li>
                    <li>3</li>
                </random>
            </template>
            """)
        ast = self.parser.parse_template_expression(template)
        self.assertIsNotNone(ast)
        self.assertIsNotNone(ast)
        self.assertIsInstance(ast, TemplateNode)
        self.assertIsNotNone(ast.children)
        self.assertIsNotNone(ast.children[0])
        # The single child of the template is the random node with 3 options.
        self.assertIsInstance(ast.children[0], TemplateRandomNode)
        self.assertEqual(3, len(ast.children[0].children))
        self.assertIsInstance(ast.children[0].children[0], TemplateNode)
        self.assertIsInstance(ast.children[0].children[1], TemplateNode)
        self.assertIsInstance(ast.children[0].children[2], TemplateNode)
        # Resolution picks one option at random; any of the three is valid.
        selection = ast.children[0].resolve(self.test_bot, self.test_clientid)
        self.assertIsNotNone(selection)
        self.assertIn(selection, ['1', '2', '3'])

    def test_random_nested_template(self):
        template = ET.fromstring("""
            <template>
                <random>
                    <li>
                        <random>
                            <li>Say something</li>
                            <li>Say the other</li>
                        </random>
                    </li>
                    <li>
                        <random>
                            <li>Hello world!</li>
                            <li>Goodbye cruel world</li>
                        </random>
                    </li>
                </random>
            </template>
            """)
        ast = self.parser.parse_template_expression(template)
        self.assertIsNotNone(ast)
        self.assertIsInstance(ast, TemplateNode)
        self.assertIsNotNone(ast.children)
        self.assertIsNotNone(ast.children[0])
        # Outer random has two <li> children, each wrapping an inner random
        # with two options of its own.
        self.assertIsInstance(ast.children[0], TemplateRandomNode)
        self.assertEqual(2, len(ast.children[0].children))
        self.assertIsInstance(ast.children[0].children[0], TemplateNode)
        self.assertEqual(1, len(ast.children[0].children[0].children))
        self.assertIsInstance(ast.children[0].children[0].children[0], TemplateRandomNode)
        self.assertEqual(2, len(ast.children[0].children[0].children[0].children))
        self.assertIsInstance(ast.children[0].children[1], TemplateNode)
        self.assertEqual(1, len(ast.children[0].children[1].children))
        self.assertIsInstance(ast.children[0].children[1].children[0], TemplateRandomNode)
        self.assertEqual(2, len(ast.children[0].children[1].children[0].children))
        # Nested resolution must yield one of the four leaf strings.
        selection = ast.children[0].resolve(self.test_bot, self.test_clientid)
        self.assertIsNotNone(selection)
        self.assertIn(selection, ['Say something', 'Say the other', 'Hello world!', 'Goodbye cruel world'])


# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| mit |
marcoarruda/MissionPlanner | Lib/site-packages/numpy/lib/tests/test_type_check.py | 55 | 10683 | from numpy.testing import *
from numpy.lib import *
from numpy.core import *
from numpy.compat import asbytes
def assert_all(x):
    """Assert that every element of *x* is truthy; *x* is the failure message."""
    assert all(x), x
class TestCommonType(TestCase):
    """common_type() should report the promoted scalar type of each array."""

    def test_basic(self):
        ai32 = array([[1,2],[3,4]], dtype=int32)
        af32 = array([[1,2],[3,4]], dtype=float32)
        af64 = array([[1,2],[3,4]], dtype=float64)
        acs = array([[1+5j,2+6j],[3+7j,4+8j]], dtype=csingle)
        acd = array([[1+5j,2+6j],[3+7j,4+8j]], dtype=cdouble)
        # Floating and complex inputs keep their own precision.
        assert common_type(af32) == float32
        assert common_type(af64) == float64
        assert common_type(acs) == csingle
        assert common_type(acd) == cdouble
class TestMintypecode(TestCase):
    """mintypecode() should pick the minimal typecode (from 'fdFD' by
    default) able to safely hold all the given typecodes."""

    def test_default_1(self):
        # Any single integer-ish code promotes to double.
        for itype in '1bcsuwil':
            assert_equal(mintypecode(itype),'d')
        assert_equal(mintypecode('f'),'f')
        assert_equal(mintypecode('d'),'d')
        assert_equal(mintypecode('F'),'F')
        assert_equal(mintypecode('D'),'D')

    def test_default_2(self):
        # An integer code combined with a float/complex code takes the
        # float/complex side.
        for itype in '1bcsuwil':
            assert_equal(mintypecode(itype+'f'),'f')
            assert_equal(mintypecode(itype+'d'),'d')
            assert_equal(mintypecode(itype+'F'),'F')
            assert_equal(mintypecode(itype+'D'),'D')
        # Pairs of float/complex codes promote to the wider of the two.
        assert_equal(mintypecode('ff'),'f')
        assert_equal(mintypecode('fd'),'d')
        assert_equal(mintypecode('fF'),'F')
        assert_equal(mintypecode('fD'),'D')
        assert_equal(mintypecode('df'),'d')
        assert_equal(mintypecode('dd'),'d')
        #assert_equal(mintypecode('dF',savespace=1),'F')
        assert_equal(mintypecode('dF'),'D')
        assert_equal(mintypecode('dD'),'D')
        assert_equal(mintypecode('Ff'),'F')
        #assert_equal(mintypecode('Fd',savespace=1),'F')
        assert_equal(mintypecode('Fd'),'D')
        assert_equal(mintypecode('FF'),'F')
        assert_equal(mintypecode('FD'),'D')
        assert_equal(mintypecode('Df'),'D')
        assert_equal(mintypecode('Dd'),'D')
        assert_equal(mintypecode('DF'),'D')
        assert_equal(mintypecode('DD'),'D')

    def test_default_3(self):
        # Three-way combinations follow the same promotion rules.
        assert_equal(mintypecode('fdF'),'D')
        #assert_equal(mintypecode('fdF',savespace=1),'F')
        assert_equal(mintypecode('fdD'),'D')
        assert_equal(mintypecode('fFD'),'D')
        assert_equal(mintypecode('dFD'),'D')
        assert_equal(mintypecode('ifd'),'d')
        assert_equal(mintypecode('ifF'),'F')
        assert_equal(mintypecode('ifD'),'D')
        assert_equal(mintypecode('idF'),'D')
        #assert_equal(mintypecode('idF',savespace=1),'F')
        assert_equal(mintypecode('idD'),'D')
class TestIsscalar(TestCase):
    """isscalar() is true for numbers and false for containers."""

    def test_basic(self):
        assert(isscalar(3))
        assert(not isscalar([3]))
        assert(not isscalar((3,)))
        assert(isscalar(3j))
        # NOTE(review): 10L is Python-2-only long-literal syntax; this module
        # predates Python 3 support and will not parse under Python 3.
        assert(isscalar(10L))
        assert(isscalar(4.0))
class TestReal(TestCase):
    """real() returns the array unchanged for real input, and the real
    component for complex input."""

    def test_real(self):
        # rand comes from the numpy.testing star import.
        y = rand(10,)
        assert_array_equal(y,real(y))

    def test_cmplx(self):
        y = rand(10,)+1j*rand(10,)
        assert_array_equal(y.real,real(y))
class TestImag(TestCase):
    """imag() returns zeros for real input, and the imaginary component for
    complex input."""

    def test_real(self):
        y = rand(10,)
        assert_array_equal(0,imag(y))

    def test_cmplx(self):
        y = rand(10,)+1j*rand(10,)
        assert_array_equal(y.imag,imag(y))
class TestIscomplex(TestCase):
    """iscomplex() flags elements with a nonzero imaginary part."""

    def test_fail(self):
        z = array([-1,0,1])
        res = iscomplex(z)
        # NOTE(review): sometrue is a legacy alias for any(), removed in
        # modern numpy.
        assert(not sometrue(res,axis=0))

    def test_pass(self):
        z = array([-1j,1,0])
        res = iscomplex(z)
        assert_array_equal(res,[1,0,0])
class TestIsreal(TestCase):
    """isreal() flags elements whose imaginary part is zero."""

    def test_pass(self):
        z = array([-1,0,1j])
        res = isreal(z)
        assert_array_equal(res,[1,1,0])

    def test_fail(self):
        z = array([-1j,1,0])
        res = isreal(z)
        assert_array_equal(res,[0,1,1])
class TestIscomplexobj(TestCase):
    """iscomplexobj() inspects the array dtype, not the element values."""

    def test_basic(self):
        z = array([-1,0,1])
        assert(not iscomplexobj(z))
        z = array([-1j,0,-1])
        assert(iscomplexobj(z))
class TestIsrealobj(TestCase):
    """isrealobj() is the dtype-level complement of iscomplexobj()."""

    def test_basic(self):
        z = array([-1,0,1])
        assert(isrealobj(z))
        z = array([-1j,0,-1])
        assert(not isrealobj(z))
class TestIsnan(TestCase):
    """isnan() is true only for NaN, not for infinities or finite values.
    Each division-by-zero test saves/restores the numpy error state via
    seterr() so the deliberate 1/0 does not raise a warning."""

    def test_goodvalues(self):
        z = array((-1.,0.,1.))
        res = isnan(z) == 0
        assert_all(alltrue(res,axis=0))

    def test_posinf(self):
        olderr = seterr(divide='ignore')
        try:
            # 1/0 -> +inf, which is not NaN.
            assert_all(isnan(array((1.,))/0.) == 0)
        finally:
            seterr(**olderr)

    def test_neginf(self):
        olderr = seterr(divide='ignore')
        try:
            # -1/0 -> -inf, which is not NaN.
            assert_all(isnan(array((-1.,))/0.) == 0)
        finally:
            seterr(**olderr)

    def test_ind(self):
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            # 0/0 is the invalid operation producing NaN.
            assert_all(isnan(array((0.,))/0.) == 1)
        finally:
            seterr(**olderr)

    #def test_qnan(self):             log(-1) return pi*j now
    #    assert_all(isnan(log(-1.)) == 1)

    def test_integer(self):
        assert_all(isnan(1) == 0)

    def test_complex(self):
        assert_all(isnan(1+1j) == 0)

    def test_complex1(self):
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            assert_all(isnan(array(0+0j)/0.) == 1)
        finally:
            seterr(**olderr)
class TestIsfinite(TestCase):
    """isfinite() is false for both NaN and infinities. The seterr()
    save/restore pattern silences the deliberate divisions by zero."""

    def test_goodvalues(self):
        z = array((-1.,0.,1.))
        res = isfinite(z) == 1
        assert_all(alltrue(res,axis=0))

    def test_posinf(self):
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            assert_all(isfinite(array((1.,))/0.) == 0)
        finally:
            seterr(**olderr)

    def test_neginf(self):
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            assert_all(isfinite(array((-1.,))/0.) == 0)
        finally:
            seterr(**olderr)

    def test_ind(self):
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            # 0/0 -> NaN, which is not finite either.
            assert_all(isfinite(array((0.,))/0.) == 0)
        finally:
            seterr(**olderr)

    #def test_qnan(self):
    #    assert_all(isfinite(log(-1.)) == 0)

    def test_integer(self):
        assert_all(isfinite(1) == 1)

    def test_complex(self):
        assert_all(isfinite(1+1j) == 1)

    def test_complex1(self):
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            assert_all(isfinite(array(1+1j)/0.) == 0)
        finally:
            seterr(**olderr)
class TestIsinf(TestCase):
    """isinf() is true only for +/-inf (array or scalar input), and false
    for NaN and finite values."""

    def test_goodvalues(self):
        z = array((-1.,0.,1.))
        res = isinf(z) == 0
        assert_all(alltrue(res,axis=0))

    def test_posinf(self):
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            assert_all(isinf(array((1.,))/0.) == 1)
        finally:
            seterr(**olderr)

    def test_posinf_scalar(self):
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            # 0-d array input must behave the same as 1-d.
            assert_all(isinf(array(1.,)/0.) == 1)
        finally:
            seterr(**olderr)

    def test_neginf(self):
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            assert_all(isinf(array((-1.,))/0.) == 1)
        finally:
            seterr(**olderr)

    def test_neginf_scalar(self):
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            assert_all(isinf(array(-1.)/0.) == 1)
        finally:
            seterr(**olderr)

    def test_ind(self):
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            # 0/0 -> NaN, which is not infinite.
            assert_all(isinf(array((0.,))/0.) == 0)
        finally:
            seterr(**olderr)

    #def test_qnan(self):
    #    assert_all(isinf(log(-1.)) == 0)
    #    assert_all(isnan(log(-1.)) == 1)
class TestIsposinf(TestCase):
    """isposinf() flags only +inf: -inf and NaN both map to False."""

    def test_generic(self):
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            # (-1, 0, 1)/0 -> (-inf, nan, +inf)
            vals = isposinf(array((-1.,0,1))/0.)
        finally:
            seterr(**olderr)
        assert(vals[0] == 0)
        assert(vals[1] == 0)
        assert(vals[2] == 1)
class TestIsneginf(TestCase):
    """isneginf() flags only -inf: +inf and NaN both map to False."""

    def test_generic(self):
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            # (-1, 0, 1)/0 -> (-inf, nan, +inf)
            vals = isneginf(array((-1.,0,1))/0.)
        finally:
            seterr(**olderr)
        assert(vals[0] == 1)
        assert(vals[1] == 0)
        assert(vals[2] == 0)
class TestNanToNum(TestCase):
    """nan_to_num() should map NaN to 0 and +/-inf to very large finite
    numbers, for scalars, arrays, and complex values alike."""

    def test_generic(self):
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            # (-1, 0, 1)/0 -> (-inf, nan, +inf)
            vals = nan_to_num(array((-1.,0,1))/0.)
        finally:
            seterr(**olderr)
        # BUGFIX: these checks used to be chained with `and`
        # (assert_all(...) and assert_all(...)); assert_all returns None, so
        # the right-hand finiteness check was never executed. Run each
        # assertion as its own statement.
        assert_all(vals[0] < -1e10)
        assert_all(isfinite(vals[0]))
        assert(vals[1] == 0)
        assert_all(vals[2] > 1e10)
        assert_all(isfinite(vals[2]))

    def test_integer(self):
        vals = nan_to_num(1)
        assert_all(vals == 1)

    def test_complex_good(self):
        vals = nan_to_num(1+1j)
        assert_all(vals == 1+1j)

    def test_complex_bad(self):
        v = 1+1j
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            v += array(0+1.j)/0.
        finally:
            seterr(**olderr)
        vals = nan_to_num(v)
        # !! This is actually (unexpectedly) zero
        assert_all(isfinite(vals))

    def test_complex_bad2(self):
        v = 1+1j
        olderr = seterr(divide='ignore', invalid='ignore')
        try:
            v += array(-1+1.j)/0.
        finally:
            seterr(**olderr)
        vals = nan_to_num(v)
        assert_all(isfinite(vals))
        #assert_all(vals.imag > 1e10)  and assert_all(isfinite(vals))
        # !! This is actually (unexpectedly) positive
        # !! inf.  Comment out for now, and see if it
        # !! changes
        #assert_all(vals.real < -1e10) and assert_all(isfinite(vals))
class TestRealIfClose(TestCase):
    """Tests for real_if_close: drop a negligible imaginary part."""

    def test_basic(self):
        a = rand(10)
        # Imaginary part far below the default tolerance -> result is real.
        b = real_if_close(a+1e-15j)
        assert_all(isrealobj(b))
        assert_array_equal(a,b)
        # Imaginary part too large for the default tolerance -> stays complex.
        b = real_if_close(a+1e-7j)
        assert_all(iscomplexobj(b))
        # A looser explicit tolerance accepts the same imaginary part.
        b = real_if_close(a+1e-7j,tol=1e-6)
        assert_all(isrealobj(b))
class TestArrayConversion(TestCase):
    """Tests for asfarray: coerce input to a floating-point ndarray."""

    def test_asfarray(self):
        a = asfarray(array([1,2,3]))
        # Result must be a plain ndarray with a float dtype.
        assert_equal(a.__class__,ndarray)
        assert issubdtype(a.dtype,float)
class TestDateTimeData:
    """Tests for datetime_data (currently skipped: the call errors out)."""

    @dec.skipif(True, "datetime_data gives error")
    def test_basic(self):
        a = array(['1980-03-23'], dtype=datetime64)
        # Expected dtype metadata: microsecond unit with (1, 1, 1) counts.
        assert_equal(datetime_data(a.dtype), (asbytes('us'), 1, 1, 1))
if __name__ == "__main__":
    # Allow this test module to be executed directly.
    run_module_suite()
| gpl-3.0 |
octavioturra/aritial | google_appengine/lib/django/django/contrib/admin/templatetags/adminapplist.py | 33 | 3018 | from django import template
from django.db.models import get_models
register = template.Library()
class AdminApplistNode(template.Node):
    """Template node that stores the admin app list in a context variable.

    The list holds one dict per installed app for which the current user has
    at least one admin permission; each entry lists the models the user may
    add, change or delete.
    """
    def __init__(self, varname):
        # Name of the context variable the app list is stored under.
        self.varname = varname

    def render(self, context):
        # Imported here (not at module level) to avoid import-time cycles.
        from django.db import models
        from django.utils.text import capfirst
        app_list = []
        user = context['user']
        for app in models.get_apps():
            # Determine the app_label.
            app_models = get_models(app)
            if not app_models:
                continue
            app_label = app_models[0]._meta.app_label
            has_module_perms = user.has_module_perms(app_label)
            if has_module_perms:
                model_list = []
                for m in app_models:
                    # Only models registered with the admin are listed.
                    if m._meta.admin:
                        perms = {
                            'add': user.has_perm("%s.%s" % (app_label, m._meta.get_add_permission())),
                            'change': user.has_perm("%s.%s" % (app_label, m._meta.get_change_permission())),
                            'delete': user.has_perm("%s.%s" % (app_label, m._meta.get_delete_permission())),
                        }
                        # Check whether user has any perm for this module.
                        # If so, add the module to the model_list.
                        if True in perms.values():
                            model_list.append({
                                'name': capfirst(m._meta.verbose_name_plural),
                                'admin_url': '%s/%s/' % (app_label, m.__name__.lower()),
                                'perms': perms,
                            })
                if model_list:
                    # Sort using verbose decorate-sort-undecorate pattern
                    # instead of key argument to sort() for python 2.3 compatibility
                    decorated = [(x['name'], x) for x in model_list]
                    decorated.sort()
                    model_list = [x for key, x in decorated]
                    app_list.append({
                        'name': app_label.title(),
                        'has_module_perms': has_module_perms,
                        'models': model_list,
                    })
        # The node renders nothing; it only populates the context variable.
        context[self.varname] = app_list
        return ''
def get_admin_app_list(parser, token):
    """
    Returns a list of installed applications and models for which the current user
    has at least one permission.

    Syntax::

        {% get_admin_app_list as [context_var_containing_app_list] %}

    Example usage::

        {% get_admin_app_list as admin_app_list %}
    """
    # Expected token shape: ('get_admin_app_list', 'as', '<varname>').
    # NOTE: this file is Python 2 code (comma-style raise below).
    tokens = token.contents.split()
    if len(tokens) < 3:
        raise template.TemplateSyntaxError, "'%s' tag requires two arguments" % tokens[0]
    if tokens[1] != 'as':
        raise template.TemplateSyntaxError, "First argument to '%s' tag must be 'as'" % tokens[0]
    return AdminApplistNode(tokens[2])
register.tag('get_admin_app_list', get_admin_app_list)
| apache-2.0 |
cshields/satnogs-network | network/base/models.py | 1 | 9752 | from datetime import datetime, timedelta
from shortuuidfield import ShortUUIDField
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.timezone import now
from django.conf import settings
from django.utils.html import format_html
from network.users.models import User
from network.base.helpers import get_apikey
# Closed vocabularies used as `choices` for the model fields below.
RIG_TYPES = ['Radio', 'SDR']
ANTENNA_BANDS = ['HF', 'VHF', 'UHF', 'L', 'S', 'C', 'X', 'KU']
ANTENNA_TYPES = (
    ('dipole', 'Dipole'),
    ('yagi', 'Yagi'),
    ('helical', 'Helical'),
    ('parabolic', 'Parabolic'),
    # BUG FIX: display label was misspelled 'Verical'. The stored value
    # ('vertical') is unchanged, so no migration is needed.
    ('vertical', 'Vertical'),
)
OBSERVATION_STATUSES = (
    ('unknown', 'Unknown'),
    ('verified', 'Verified'),
    ('data_not_verified', 'Has Data, Not Verified'),
    ('no_data', 'No Data'),
)
class Rig(models.Model):
    """Radio rig attached to a ground station."""
    # Restricted to the RIG_TYPES vocabulary ('Radio' or 'SDR').
    name = models.CharField(choices=zip(RIG_TYPES, RIG_TYPES), max_length=10)
    # NOTE(review): presumably the hamlib 'rigctld' model number and the field
    # name is a misspelling; renaming would need a migration, so it stays.
    rictld_number = models.PositiveIntegerField(blank=True, null=True)

    def __unicode__(self):
        return '{0}: {1}'.format(self.name, self.rictld_number)
class Mode(models.Model):
    """Transmission mode referenced by transmitters; names are unique."""
    name = models.CharField(max_length=10, unique=True)

    def __unicode__(self):
        return self.name
class Antenna(models.Model):
    """Model for antennas tracked with SatNOGS."""
    # Operating frequency; must be non-negative.
    frequency = models.FloatField(validators=[MinValueValidator(0)])
    # Band and construction type are limited to the module-level vocabularies.
    band = models.CharField(choices=zip(ANTENNA_BANDS, ANTENNA_BANDS),
                            max_length=5)
    antenna_type = models.CharField(choices=ANTENNA_TYPES, max_length=15)

    def __unicode__(self):
        return '{0} - {1} - {2}'.format(self.band, self.antenna_type, self.frequency)
class Station(models.Model):
    """Model for SatNOGS ground stations."""
    owner = models.ForeignKey(User)
    name = models.CharField(max_length=45)
    image = models.ImageField(upload_to='ground_stations', blank=True)
    alt = models.PositiveIntegerField(help_text='In meters above ground')
    lat = models.FloatField(validators=[MaxValueValidator(90),
                                        MinValueValidator(-90)])
    lng = models.FloatField(validators=[MaxValueValidator(180),
                                        MinValueValidator(-180)])
    qthlocator = models.CharField(max_length=255, blank=True)
    location = models.CharField(max_length=255, blank=True)
    antenna = models.ManyToManyField(Antenna, blank=True,
                                     help_text=('If you want to add a new Antenna contact '
                                                '<a href="https://community.satnogs.org/" '
                                                'target="_blank">SatNOGS Team</a>'))
    featured_date = models.DateField(null=True, blank=True)
    created = models.DateTimeField(auto_now_add=True)
    active = models.BooleanField(default=False)
    last_seen = models.DateTimeField(null=True, blank=True)
    horizon = models.PositiveIntegerField(help_text='In degrees above 0', default=10)
    uuid = models.CharField(db_index=True, max_length=100, blank=True)
    rig = models.ForeignKey(Rig, blank=True, null=True, on_delete=models.SET_NULL)

    class Meta:
        ordering = ['-active', '-last_seen']

    def get_image(self):
        """Return the station image URL, falling back to the default image."""
        if self.image and hasattr(self.image, 'url'):
            return self.image.url
        else:
            return settings.STATION_DEFAULT_IMAGE

    @property
    def online(self):
        """Whether the station is active and checked in within the heartbeat window."""
        # BUG FIX: this used a bare `except:` which swallowed every exception,
        # including SystemExit/KeyboardInterrupt.  Handle the expected failure
        # modes explicitly instead.
        if not self.active or self.last_seen is None:
            return False
        try:
            heartbeat = self.last_seen + timedelta(
                minutes=int(settings.STATION_HEARTBEAT_TIME))
        except (AttributeError, TypeError, ValueError):
            # Missing or malformed STATION_HEARTBEAT_TIME setting; keep the
            # original best-effort behavior and report the station offline.
            return False
        return heartbeat > now()

    def state(self):
        """Return an HTML snippet rendering the online/offline state."""
        if self.online:
            return format_html('<span style="color:green">Online</span>')
        else:
            return format_html('<span style="color:red">Offline</span>')

    @property
    def success_rate(self):
        """Percentage of observations that produced a payload.

        Returns False (not 0) when the station has no observations, matching
        the existing truthiness checks in callers.
        """
        observations = self.data_set.all().count()
        success = self.data_set.exclude(payload='').count()
        if observations:
            return int(100 * (float(success) / float(observations)))
        else:
            return False

    @property
    def apikey(self):
        """API key of the station owner."""
        return get_apikey(user=self.owner)

    def __unicode__(self):
        return "%d - %s" % (self.pk, self.name)
class Satellite(models.Model):
    """Model for SatNOGS satellites."""
    norad_cat_id = models.PositiveIntegerField()
    name = models.CharField(max_length=45)
    names = models.TextField(blank=True)
    image = models.CharField(max_length=100, blank=True, null=True)
    manual_tle = models.BooleanField(default=False)

    class Meta:
        ordering = ['norad_cat_id']

    def get_image(self):
        """Return the satellite image, falling back to the default image."""
        if self.image:
            return self.image
        else:
            return settings.SATELLITE_DEFAULT_IMAGE

    @property
    def latest_tle(self):
        """Most recently updated TLE for this satellite, or False if none exists."""
        try:
            return Tle.objects.filter(satellite=self).latest('updated')
        except Tle.DoesNotExist:
            return False

    @property
    def tle_no(self):
        """Element set number from line 1 of the latest TLE, or False."""
        # BUG FIX: previously wrapped in a bare `except:`; the only expected
        # failure is latest_tle returning False, so test for that explicitly.
        tle = self.latest_tle
        if not tle:
            return False
        return tle.tle1[65:68]

    @property
    def tle_epoch(self):
        """Epoch datetime parsed from line 1 of the latest TLE, or False."""
        tle = self.latest_tle
        if not tle:
            return False
        # BUG FIX: replaced a bare `except:` with the specific exception a
        # malformed epoch field raises (bad split/strptime/float all give
        # ValueError).
        try:
            yd, s = tle.tle1[18:32].split('.')
            return (datetime.strptime(yd, "%y%j") +
                    timedelta(seconds=float("." + s) * 24 * 60 * 60))
        except ValueError:
            return False

    def __unicode__(self):
        return self.name
class Tle(models.Model):
    """Two-line element set snapshot for a satellite."""
    # tle0 is the name line; tle1/tle2 are the two orbital-element lines.
    tle0 = models.CharField(max_length=100, blank=True)
    tle1 = models.CharField(max_length=200, blank=True)
    tle2 = models.CharField(max_length=200, blank=True)
    # Refreshed on every save; Satellite.latest_tle picks the newest row.
    updated = models.DateTimeField(auto_now=True, blank=True)
    satellite = models.ForeignKey(Satellite, related_name='tles', null=True)

    class Meta:
        ordering = ['tle0']

    def __unicode__(self):
        return self.tle0
class Transmitter(models.Model):
    """Model for antennas transponders."""
    # Stable public identifier (short UUID) used to reference the transmitter.
    uuid = ShortUUIDField(db_index=True)
    description = models.TextField()
    # Whether the transmitter is believed to still be operational.
    alive = models.BooleanField(default=True)
    # Frequency bounds; *_high may be unset for fixed-frequency transmitters.
    # NOTE(review): units are presumably Hz — confirm against data importers.
    uplink_low = models.PositiveIntegerField(blank=True, null=True)
    uplink_high = models.PositiveIntegerField(blank=True, null=True)
    downlink_low = models.PositiveIntegerField(blank=True, null=True)
    downlink_high = models.PositiveIntegerField(blank=True, null=True)
    mode = models.ForeignKey(Mode, related_name='transmitters', blank=True,
                             null=True, on_delete=models.SET_NULL)
    # True for inverting transponders.
    invert = models.BooleanField(default=False)
    baud = models.FloatField(validators=[MinValueValidator(0)], null=True, blank=True)
    satellite = models.ForeignKey(Satellite, related_name='transmitters', null=True)

    def __unicode__(self):
        return self.description
class Observation(models.Model):
    """Model for SatNOGS observations."""
    satellite = models.ForeignKey(Satellite)
    transmitter = models.ForeignKey(Transmitter, null=True, related_name='observations')
    tle = models.ForeignKey(Tle, null=True)
    author = models.ForeignKey(User)
    # Scheduled observation window.
    start = models.DateTimeField()
    end = models.DateTimeField()

    class Meta:
        ordering = ['-start', '-end']

    @property
    def is_past(self):
        """True once the observation window has ended."""
        return self.end < now()

    @property
    def is_future(self):
        """True while the observation window has not yet ended."""
        return self.end > now()

    @property
    def is_deletable(self):
        """True while deletion is still allowed (before the cutoff ahead of start)."""
        deletion = self.start - timedelta(minutes=int(settings.OBSERVATION_MAX_DELETION_RANGE))
        return deletion > now()

    # observation has at least 1 payload submitted, no verification taken into account
    @property
    def has_submitted_data(self):
        return self.data_set.exclude(payload='').count()

    # observaton has at least 1 payload that has been verified good
    @property
    def has_verified_data(self):
        return self.data_set.filter(vetted_status='verified').count()

    # observation is vetted to be all bad data
    @property
    def has_no_data(self):
        return self.data_set.filter(vetted_status='no_data').count() == self.data_set.count()

    # observation has at least 1 payload left unvetted
    @property
    def has_unvetted_data(self):
        return self.data_set.filter(vetted_status='unknown').count()

    def __unicode__(self):
        return '{0}'.format(self.id)
class Data(models.Model):
    """Model for observation data."""
    # Time span this payload covers within the observation.
    start = models.DateTimeField()
    end = models.DateTimeField()
    observation = models.ForeignKey(Observation)
    ground_station = models.ForeignKey(Station)
    payload = models.FileField(upload_to='data_payloads', blank=True, null=True)
    # Vetting audit trail: when, by whom, and with what verdict.
    vetted_datetime = models.DateTimeField(null=True, blank=True)
    vetted_user = models.ForeignKey(User, related_name="vetted_user_set", null=True, blank=True)
    vetted_status = models.CharField(choices=OBSERVATION_STATUSES,
                                     max_length=10, default='unknown')

    @property
    def is_past(self):
        """True once this payload's time span has ended."""
        return self.end < now()

    # this payload has been vetted good/bad by someone
    @property
    def is_vetted(self):
        return not self.vetted_status == 'unknown'

    # this payload has been vetted as good by someone
    @property
    def is_verified(self):
        return self.vetted_status == 'verified'

    # this payload has been vetted as bad by someone
    @property
    def is_no_data(self):
        return self.vetted_status == 'no_data'

    class Meta:
        ordering = ['-start', '-end']
class DemodData(models.Model):
    """Demodulated payload derived from a Data entry."""
    data = models.ForeignKey(Data, related_name='demoddata')
    payload_demod = models.FileField(upload_to='data_payloads', blank=True, null=True)
| agpl-3.0 |
rajrohith/blobstore | azure/storage/table/tableservice.py | 1 | 53120 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from contextlib import contextmanager
from azure.common import (
AzureHttpError,
)
from .._common_conversion import (
_int_to_str,
_to_str,
)
from .._error import (
_dont_fail_not_exist,
_dont_fail_on_exist,
_validate_not_none,
_ERROR_STORAGE_MISSING_INFO,
_validate_access_policies,
)
from .._serialization import (
_get_request_body,
_update_request,
_convert_signed_identifiers_to_xml,
_convert_service_properties_to_xml,
)
from .._http import HTTPRequest
from ..models import (
Services,
ListGenerator,
_OperationContext,
)
from .models import TablePayloadFormat
from .._auth import (
_StorageSASAuthentication,
_StorageTableSharedKeyAuthentication,
)
from .._connection import _ServiceParameters
from .._deserialization import (
_convert_xml_to_service_properties,
_convert_xml_to_signed_identifiers,
_convert_xml_to_service_stats,
)
from ._serialization import (
_convert_table_to_json,
_convert_batch_to_json,
_update_storage_table_header,
_get_entity_path,
_DEFAULT_ACCEPT_HEADER,
_DEFAULT_CONTENT_TYPE_HEADER,
_DEFAULT_PREFER_HEADER,
)
from ._deserialization import (
_convert_json_response_to_entity,
_convert_json_response_to_tables,
_convert_json_response_to_entities,
_parse_batch_response,
_extract_etag,
)
from .._constants import (
SERVICE_HOST_BASE,
DEFAULT_PROTOCOL,
DEV_ACCOUNT_NAME,
)
from ._request import (
_get_entity,
_insert_entity,
_update_entity,
_merge_entity,
_delete_entity,
_insert_or_replace_entity,
_insert_or_merge_entity,
)
from ..sharedaccesssignature import (
SharedAccessSignature,
)
from ..storageclient import StorageClient
from .tablebatch import TableBatch
class TableService(StorageClient):
'''
This is the main class managing Azure Table resources.
The Azure Table service offers structured storage in the form of tables. Tables
store data as collections of entities. Entities are similar to rows. An entity
has a primary key and a set of properties. A property is a name, typed-value pair,
similar to a column. The Table service does not enforce any schema for tables,
so two entities in the same table may have different sets of properties. Developers
may choose to enforce a schema on the client side. A table may contain any number
of entities.
:ivar object key_encryption_key:
The key-encryption-key optionally provided by the user. If provided, will be used to
encrypt/decrypt in supported methods.
For methods requiring decryption, either the key_encryption_key OR the resolver must be provided.
If both are provided, the resolver will take precedence.
Must implement the following methods for APIs requiring encryption:
wrap_key(key)--wraps the specified key (bytes) using an algorithm of the user's choice. Returns the encrypted key as bytes.
get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
get_kid()--returns a string key id for this key-encryption-key.
Must implement the following methods for APIs requiring decryption:
unwrap_key(key, algorithm)--returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
get_kid()--returns a string key id for this key-encryption-key.
:ivar function key_resolver_function(kid):
A function to resolve keys optionally provided by the user. If provided, will be used to decrypt in supported methods.
For methods requiring decryption, either the key_encryption_key OR
the resolver must be provided. If both are provided, the resolver will take precedence.
It uses the kid string to return a key-encryption-key implementing the interface defined above.
:ivar function(partition_key, row_key, property_name) encryption_resolver_functions:
A function that takes in an entity's partition key, row key, and property name and returns
a boolean that indicates whether that property should be encrypted.
:ivar bool require_encryption:
A flag that may be set to ensure that all messages successfully uploaded to the queue and all those downloaded and
successfully read from the queue are/were encrypted while on the server. If this flag is set, all required
parameters for encryption/decryption must be provided. See the above comments on the key_encryption_key and resolver.
'''
def __init__(self, account_name=None, account_key=None, sas_token=None,
             is_emulated=False, protocol=DEFAULT_PROTOCOL, endpoint_suffix=SERVICE_HOST_BASE,
             request_session=None, connection_string=None, socket_timeout=None):
    '''
    :param str account_name:
        The storage account name. This is used to authenticate requests
        signed with an account key and to construct the storage endpoint. It
        is required unless a connection string is given.
    :param str account_key:
        The storage account key. This is used for shared key authentication.
    :param str sas_token:
        A shared access signature token to use to authenticate requests
        instead of the account key. If account key and sas token are both
        specified, account key will be used to sign.
    :param bool is_emulated:
        Whether to use the emulator. Defaults to False. If specified, will
        override all other parameters besides connection string and request
        session.
    :param str protocol:
        The protocol to use for requests. Defaults to https.
    :param str endpoint_suffix:
        The host base component of the url, minus the account name. Defaults
        to Azure (core.windows.net). Override this to use the China cloud
        (core.chinacloudapi.cn).
    :param requests.Session request_session:
        The session object to use for http requests.
    :param str connection_string:
        If specified, this will override all other parameters besides
        request session. See
        http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/
        for the connection string format.
    :param int socket_timeout:
        If specified, this will override the default socket timeout. The timeout specified is in seconds.
        See DEFAULT_SOCKET_TIMEOUT in _constants.py for the default value.
    '''
    service_params = _ServiceParameters.get_service_parameters(
        'table',
        account_name=account_name,
        account_key=account_key,
        sas_token=sas_token,
        is_emulated=is_emulated,
        protocol=protocol,
        endpoint_suffix=endpoint_suffix,
        request_session=request_session,
        connection_string=connection_string,
        socket_timeout=socket_timeout)
    super(TableService, self).__init__(service_params)
    # Prefer shared-key signing when an account key is available; fall back
    # to SAS-token auth.  Without either, requests cannot be signed at all.
    if self.account_key:
        self.authentication = _StorageTableSharedKeyAuthentication(
            self.account_name,
            self.account_key,
        )
    elif self.sas_token:
        self.authentication = _StorageSASAuthentication(self.sas_token)
    else:
        raise ValueError(_ERROR_STORAGE_MISSING_INFO)
    # Client-side encryption is opt-in; see the class docstring for the
    # contracts these attributes must satisfy when set by the caller.
    self.require_encryption = False
    self.key_encryption_key = None
    self.key_resolver_function = None
    self.encryption_resolver_function = None
def generate_account_shared_access_signature(self, resource_types, permission,
                                             expiry, start=None, ip=None, protocol=None):
    '''
    Generates a shared access signature for the table service.
    Use the returned signature with the sas_token parameter of TableService.

    :param ResourceTypes resource_types:
        Specifies the resource types that are accessible with the account SAS.
    :param AccountPermissions permission:
        The permissions associated with the shared access signature. The
        user is restricted to operations allowed by the permissions.
        Required unless an id is given referencing a stored access policy
        which contains this field. This field must be omitted if it has been
        specified in an associated stored access policy.
    :param expiry:
        The time at which the shared access signature becomes invalid.
        Required unless an id is given referencing a stored access policy
        which contains this field. This field must be omitted if it has
        been specified in an associated stored access policy. Azure will always
        convert values to UTC. If a date is passed in without timezone info, it
        is assumed to be UTC.
    :type expiry: date or str
    :param start:
        The time at which the shared access signature becomes valid. If
        omitted, start time for this call is assumed to be the time when the
        storage service receives the request. Azure will always convert values
        to UTC. If a date is passed in without timezone info, it is assumed to
        be UTC.
    :type start: date or str
    :param str ip:
        Specifies an IP address or a range of IP addresses from which to accept requests.
        If the IP address from which the request originates does not match the IP address
        or address range specified on the SAS token, the request is not authenticated.
        For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
        restricts the request to those IP addresses.
    :param str protocol:
        Specifies the protocol permitted for a request made. The default value
        is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
    :return: A Shared Access Signature (sas) token.
    :rtype: str
    '''
    # Account-level SAS can only be produced with shared-key credentials.
    _validate_not_none('self.account_name', self.account_name)
    _validate_not_none('self.account_key', self.account_key)
    sas = SharedAccessSignature(self.account_name, self.account_key)
    return sas.generate_account(Services.TABLE, resource_types, permission,
                                expiry, start=start, ip=ip, protocol=protocol)
def generate_table_shared_access_signature(self, table_name, permission=None,
                                           expiry=None, start=None, id=None,
                                           ip=None, protocol=None,
                                           start_pk=None, start_rk=None,
                                           end_pk=None, end_rk=None):
    '''
    Generates a shared access signature for the table.
    Use the returned signature with the sas_token parameter of TableService.

    :param str table_name:
        The name of the table to create a SAS token for.
    :param TablePermissions permission:
        The permissions associated with the shared access signature. The
        user is restricted to operations allowed by the permissions.
        Required unless an id is given referencing a stored access policy
        which contains this field. This field must be omitted if it has been
        specified in an associated stored access policy.
    :param expiry:
        The time at which the shared access signature becomes invalid.
        Required unless an id is given referencing a stored access policy
        which contains this field. This field must be omitted if it has
        been specified in an associated stored access policy. Azure will always
        convert values to UTC. If a date is passed in without timezone info, it
        is assumed to be UTC.
    :type expiry: date or str
    :param start:
        The time at which the shared access signature becomes valid. If
        omitted, start time for this call is assumed to be the time when the
        storage service receives the request. Azure will always convert values
        to UTC. If a date is passed in without timezone info, it is assumed to
        be UTC.
    :type start: date or str
    :param str id:
        A unique value up to 64 characters in length that correlates to a
        stored access policy. To create a stored access policy, use :func:`~set_table_acl`.
    :param str ip:
        Specifies an IP address or a range of IP addresses from which to accept requests.
        If the IP address from which the request originates does not match the IP address
        or address range specified on the SAS token, the request is not authenticated.
        For example, specifying sip='168.1.5.65' or sip='168.1.5.60-168.1.5.70' on the SAS
        restricts the request to those IP addresses.
    :param str protocol:
        Specifies the protocol permitted for a request made. The default value
        is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
    :param str start_pk:
        The minimum partition key accessible with this shared access
        signature. startpk must accompany startrk. Key values are inclusive.
        If omitted, there is no lower bound on the table entities that can
        be accessed.
    :param str start_rk:
        The minimum row key accessible with this shared access signature.
        startpk must accompany startrk. Key values are inclusive. If
        omitted, there is no lower bound on the table entities that can be
        accessed.
    :param str end_pk:
        The maximum partition key accessible with this shared access
        signature. endpk must accompany endrk. Key values are inclusive. If
        omitted, there is no upper bound on the table entities that can be
        accessed.
    :param str end_rk:
        The maximum row key accessible with this shared access signature.
        endpk must accompany endrk. Key values are inclusive. If omitted,
        there is no upper bound on the table entities that can be accessed.
    :return: A Shared Access Signature (sas) token.
    :rtype: str
    '''
    # Table-level SAS also requires shared-key credentials to sign.
    _validate_not_none('table_name', table_name)
    _validate_not_none('self.account_name', self.account_name)
    _validate_not_none('self.account_key', self.account_key)
    sas = SharedAccessSignature(self.account_name, self.account_key)
    return sas.generate_table(
        table_name,
        permission=permission,
        expiry=expiry,
        start=start,
        id=id,
        ip=ip,
        protocol=protocol,
        start_pk=start_pk,
        start_rk=start_rk,
        end_pk=end_pk,
        end_rk=end_rk,
    )
def get_table_service_stats(self, timeout=None):
    '''
    Retrieves statistics related to replication for the Table service. It is
    only available when read-access geo-redundant replication is enabled for
    the storage account.

    With geo-redundant replication, Azure Storage maintains your data durable
    in two locations. In both locations, Azure Storage constantly maintains
    multiple healthy replicas of your data. The location where you read,
    create, update, or delete data is the primary storage account location.
    The primary location exists in the region you choose at the time you
    create an account via the Azure Management Azure classic portal, for
    example, North Central US. The location to which your data is replicated
    is the secondary location. The secondary location is automatically
    determined based on the location of the primary; it is in a second data
    center that resides in the same region as the primary location. Read-only
    access is available from the secondary location, if read-access geo-redundant
    replication is enabled for your storage account.

    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return: The table service stats.
    :rtype: :class:`~azure.storage.models.ServiceStats`
    '''
    request = HTTPRequest()
    request.method = 'GET'
    # Stats are served only from the secondary (read-access) endpoint.
    request.host_locations = self._get_host_locations(primary=False, secondary=True)
    request.path = '/'
    request.query = {
        'restype': 'service',
        'comp': 'stats',
        'timeout': _int_to_str(timeout),
    }
    return self._perform_request(request, _convert_xml_to_service_stats)
def get_table_service_properties(self, timeout=None):
    '''
    Gets the properties of a storage account's Table service, including
    logging, analytics and CORS rules.

    :param int timeout:
        The server timeout, expressed in seconds.
    :return: The table service properties.
    :rtype: :class:`~azure.storage.models.ServiceProperties`
    '''
    request = HTTPRequest()
    request.method = 'GET'
    # Read-only call, so the secondary endpoint is an acceptable target.
    request.host_locations = self._get_host_locations(secondary=True)
    request.path = '/'
    request.query = {
        'restype': 'service',
        'comp': 'properties',
        'timeout': _int_to_str(timeout),
    }
    return self._perform_request(request, _convert_xml_to_service_properties)
def set_table_service_properties(self, logging=None, hour_metrics=None,
                                 minute_metrics=None, cors=None, timeout=None):
    '''
    Sets the properties of a storage account's Table service, including
    Azure Storage Analytics. If an element (ex Logging) is left as None, the
    existing settings on the service for that functionality are preserved.
    For more information on Azure Storage Analytics, see
    https://msdn.microsoft.com/en-us/library/azure/hh343270.aspx.

    :param Logging logging:
        The logging settings provide request logs.
    :param Metrics hour_metrics:
        The hour metrics settings provide a summary of request
        statistics grouped by API in hourly aggregates for tables.
    :param Metrics minute_metrics:
        The minute metrics settings provide request statistics
        for each minute for tables.
    :param cors:
        You can include up to five CorsRule elements in the
        list. If an empty list is specified, all CORS rules will be deleted,
        and CORS will be disabled for the service. For detailed information
        about CORS rules and evaluation logic, see
        https://msdn.microsoft.com/en-us/library/azure/dn535601.aspx.
    :type cors: list of :class:`~azure.storage.models.CorsRule`
    :param int timeout:
        The server timeout, expressed in seconds.
    '''
    request = HTTPRequest()
    request.method = 'PUT'
    # Writes must go to the primary endpoint.
    request.host_locations = self._get_host_locations()
    request.path = '/'
    request.query = {
        'restype': 'service',
        'comp': 'properties',
        'timeout': _int_to_str(timeout),
    }
    # None values are serialized as "leave the existing setting unchanged".
    request.body = _get_request_body(
        _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics, cors))
    self._perform_request(request)
def list_tables(self, num_results=None, marker=None, timeout=None):
    '''
    Returns a generator to list the tables. The generator will lazily follow
    the continuation tokens returned by the service and stop when all tables
    have been returned or num_results is reached.

    If num_results is specified and the account has more than that number of
    tables, the generator will have a populated next_marker field once it
    finishes. This marker can be used to create a new generator if more
    results are desired.

    :param int num_results:
        The maximum number of tables to return.
    :param marker:
        An opaque continuation object. This value can be retrieved from the
        next_marker field of a previous generator object if num_results was
        specified and that generator has finished enumerating results. If
        specified, this generator will begin returning results from the point
        where the previous generator stopped.
    :type marker: obj
    :param int timeout:
        The server timeout, expressed in seconds. This function may make multiple
        calls to the service in which case the timeout value specified will be
        applied to each individual call.
    :return: A generator which produces :class:`~azure.storage.models.table.Table` objects.
    :rtype: :class:`~azure.storage.models.ListGenerator`:
    '''
    # location_lock pins all continuation requests to one storage location so
    # paging stays consistent across primary/secondary endpoints.
    operation_context = _OperationContext(location_lock=True)
    kwargs = {'max_results': num_results, 'marker': marker, 'timeout': timeout,
              '_context': operation_context}
    resp = self._list_tables(**kwargs)
    # ListGenerator re-invokes _list_tables with the same kwargs to follow
    # continuation tokens lazily.
    return ListGenerator(resp, self._list_tables, (), kwargs)
def _list_tables(self, max_results=None, marker=None, timeout=None, _context=None):
    '''
    Returns a list of tables under the specified account. Makes a single list
    request to the service. Used internally by the list_tables method.

    :param int max_results:
        The maximum number of tables to return. A single list request may
        return up to 1000 tables and potentially a continuation token which
        should be followed to get additional results.
    :param marker:
        A dictionary which identifies the portion of the query to be
        returned with the next query operation. The operation returns a
        next_marker element within the response body if the list returned
        was not complete. This value may then be used as a query parameter
        in a subsequent call to request the next portion of the list of
        tables. The marker value is opaque to the client.
    :type marker: obj
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: A list of tables, potentially with a next_marker property.
    :rtype: list of :class:`~azure.storage.models.table.Table`:
    '''
    request = HTTPRequest()
    request.method = 'GET'
    request.host_locations = self._get_host_locations(secondary=True)
    request.path = '/Tables'
    # No-metadata JSON keeps the response payload minimal for listing.
    request.headers = {'Accept': TablePayloadFormat.JSON_NO_METADATA}
    request.query = {
        '$top': _int_to_str(max_results),
        'NextTableName': _to_str(marker),
        'timeout': _int_to_str(timeout),
    }
    return self._perform_request(request, _convert_json_response_to_tables,
                                 operation_context=_context)
def create_table(self, table_name, fail_on_exist=False, timeout=None):
    '''
    Creates a new table in the storage account.

    :param str table_name:
        The name of the table to create. The table name may contain only
        alphanumeric characters and cannot begin with a numeric character.
        It is case-insensitive and must be from 3 to 63 characters long.
    :param bool fail_on_exist:
        Specifies whether to throw an exception if the table already exists.
    :param int timeout:
        The server timeout, expressed in seconds.
    :return:
        A boolean indicating whether the table was created. If fail_on_exist
        was set to True, this will throw instead of returning false.
    :rtype: bool
    '''
    # Fixed: validate under the actual parameter name 'table_name' so the
    # validation error message is consistent with every sibling method
    # (exists, delete_table, ...), which all pass 'table_name' here.
    _validate_not_none('table_name', table_name)
    request = HTTPRequest()
    request.method = 'POST'
    request.host_locations = self._get_host_locations()
    request.path = '/Tables'
    request.query = {'timeout': _int_to_str(timeout)}
    request.headers = {
        _DEFAULT_CONTENT_TYPE_HEADER[0]: _DEFAULT_CONTENT_TYPE_HEADER[1],
        _DEFAULT_PREFER_HEADER[0]: _DEFAULT_PREFER_HEADER[1],
        _DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1]
    }
    # The new table's name travels in the JSON request body, not the URL.
    request.body = _get_request_body(_convert_table_to_json(table_name))
    if not fail_on_exist:
        try:
            self._perform_request(request)
            return True
        except AzureHttpError as ex:
            # Swallow only the "already exists" conflict; re-raise anything else.
            _dont_fail_on_exist(ex)
            return False
    else:
        self._perform_request(request)
        return True
def exists(self, table_name, timeout=None):
    '''
    Returns a boolean indicating whether the table exists.

    :param str table_name:
        The name of table to check for existence.
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: A boolean indicating whether the table exists.
    :rtype: bool
    '''
    _validate_not_none('table_name', table_name)
    request = HTTPRequest()
    request.method = 'GET'
    # Existence checks are read-only and may hit the secondary endpoint.
    request.host_locations = self._get_host_locations(secondary=True)
    # Fixed: coerce the name with _to_str, matching delete_table's path
    # construction; previously a non-str name would raise on concatenation.
    request.path = '/Tables(\'' + _to_str(table_name) + '\')'
    request.headers = {'Accept': TablePayloadFormat.JSON_NO_METADATA}
    request.query = {'timeout': _int_to_str(timeout)}
    try:
        self._perform_request(request)
        return True
    except AzureHttpError as ex:
        # Translate "not found" into False; re-raise other HTTP errors.
        _dont_fail_not_exist(ex)
        return False
def delete_table(self, table_name, fail_not_exist=False, timeout=None):
    '''
    Deletes the specified table and any data it contains.

    A successfully deleted table is immediately marked for deletion and
    becomes inaccessible to clients; the Table service removes it later
    during garbage collection. Deletion is likely to take at least 40
    seconds to complete, and operations attempted against a table being
    deleted raise :class:`AzureConflictHttpError`.

    :param str table_name:
        The name of the table to delete.
    :param bool fail_not_exist:
        Specifies whether to throw an exception if the table doesn't exist.
    :param int timeout:
        The server timeout, expressed in seconds.
    :return:
        A boolean indicating whether the table was deleted. If fail_not_exist
        was set to True, this will throw instead of returning false.
    :rtype: bool
    '''
    _validate_not_none('table_name', table_name)
    request = HTTPRequest()
    request.method = 'DELETE'
    request.host_locations = self._get_host_locations()
    request.path = '/Tables(\'' + _to_str(table_name) + '\')'
    request.headers = {_DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1]}
    request.query = {'timeout': _int_to_str(timeout)}
    if fail_not_exist:
        # Caller wants missing tables to raise; let the error propagate.
        self._perform_request(request)
        return True
    try:
        self._perform_request(request)
    except AzureHttpError as err:
        _dont_fail_not_exist(err)
        return False
    return True
def get_table_acl(self, table_name, timeout=None):
    '''
    Returns details about any stored access policies specified on the
    table that may be used with Shared Access Signatures.

    :param str table_name:
        The name of an existing table.
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: A dictionary of access policies associated with the table.
    :rtype: dict of str to :class:`~azure.storage.models.AccessPolicy`
    '''
    _validate_not_none('table_name', table_name)
    request = HTTPRequest()
    request.method = 'GET'
    # ACL retrieval is read-only; the secondary endpoint may serve it.
    request.host_locations = self._get_host_locations(secondary=True)
    request.path = '/' + _to_str(table_name)
    request.query = {'comp': 'acl', 'timeout': _int_to_str(timeout)}
    return self._perform_request(request, _convert_xml_to_signed_identifiers)
def set_table_acl(self, table_name, signed_identifiers=None, timeout=None):
    '''
    Sets stored access policies for the table that may be used with Shared
    Access Signatures.

    Setting permissions replaces the existing ones wholesale. To update,
    first fetch all policies with :func:`~get_table_acl`, modify the one you
    wish to change, then call this function with the complete set. A newly
    established stored access policy may take up to 30 seconds to take
    effect; during that interval a shared access signature associated with
    it throws an :class:`AzureHttpError`.

    :param str table_name:
        The name of an existing table.
    :param signed_identifiers:
        A dictionary of access policies to associate with the table. The
        dictionary may contain up to 5 elements. An empty dictionary
        will clear the access policies set on the service.
    :type signed_identifiers: dict of str to :class:`~azure.storage.models.AccessPolicy`
    :param int timeout:
        The server timeout, expressed in seconds.
    '''
    _validate_not_none('table_name', table_name)
    _validate_access_policies(signed_identifiers)
    request = HTTPRequest()
    request.method = 'PUT'
    request.host_locations = self._get_host_locations()
    request.path = '/' + _to_str(table_name)
    request.query = {'comp': 'acl', 'timeout': _int_to_str(timeout)}
    acl_xml = _convert_signed_identifiers_to_xml(signed_identifiers)
    request.body = _get_request_body(acl_xml)
    self._perform_request(request)
def query_entities(self, table_name, filter=None, select=None, num_results=None,
                   marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
                   property_resolver=None, timeout=None):
    '''
    Returns a generator to list the entities in the table specified.

    The generator lazily follows the continuation tokens returned by the
    service and stops when all entities have been returned or num_results
    is reached. If num_results is specified and the account holds more
    entities than that, the finished generator carries a populated
    next_marker which can seed a new generator for further results.

    :param str table_name:
        The name of the table to query.
    :param str filter:
        Returns only entities that satisfy the specified filter. Note that
        no more than 15 discrete comparisons are permitted within a $filter
        string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
        for more information on constructing filters.
    :param str select:
        Returns only the desired properties of an entity from the set.
    :param int num_results:
        The maximum number of entities to return.
    :param marker:
        An opaque continuation object from the next_marker of a previous,
        finished generator; resumes enumeration from where it stopped.
    :type marker: obj
    :param str accept:
        Specifies the accepted content type of the response payload. See
        :class:`~azure.storage.table.models.TablePayloadFormat` for possible
        values.
    :param property_resolver:
        A function which given the partition key, row key, property name,
        property value, and the property EdmType if returned by the service,
        returns the EdmType of the property. Generally used if accept is set
        to JSON_NO_METADATA.
    :type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type)
    :param int timeout:
        The server timeout, expressed in seconds, applied to each individual
        service call this function makes.
    :return: A generator which produces :class:`~azure.storage.table.models.Entity` objects.
    :rtype: :class:`~azure.storage.models.ListGenerator`
    '''
    operation_context = _OperationContext(location_lock=True)
    if self.key_encryption_key is not None or self.key_resolver_function is not None:
        # Decryption needs the encryption metadata columns; append them
        # unless the projection already requests every property.
        if select is not None and select != '*':
            select += ',_ClientEncryptionMetadata1,_ClientEncryptionMetadata2'
    args = (table_name,)
    kwargs = dict(filter=filter, select=select, max_results=num_results,
                  marker=marker, accept=accept,
                  property_resolver=property_resolver, timeout=timeout,
                  _context=operation_context)
    first_page = self._query_entities(*args, **kwargs)
    return ListGenerator(first_page, self._query_entities, args, kwargs)
def _query_entities(self, table_name, filter=None, select=None, max_results=None,
                    marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
                    property_resolver=None, timeout=None, _context=None):
    '''
    Perform a single Query Entities request against the specified table.

    Used internally by :func:`~query_entities`, which follows continuation
    tokens across calls.

    :param str table_name:
        The name of the table to query.
    :param str filter:
        Returns only entities that satisfy the specified filter. Note that
        no more than 15 discrete comparisons are permitted within a $filter
        string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
        for more information on constructing filters.
    :param str select:
        Returns only the desired properties of an entity from the set.
    :param int max_results:
        The maximum number of entities to return.
    :param marker:
        A dictionary identifying the portion of the query to resume;
        contains the nextpartitionkey/nextrowkey continuation values
        returned by a previous, incomplete response. Opaque to the client.
    :type marker: obj
    :param str accept:
        Specifies the accepted content type of the response payload. See
        :class:`~azure.storage.table.models.TablePayloadFormat` for possible
        values.
    :param property_resolver:
        A function which given the partition key, row key, property name,
        property value, and the property EdmType if returned by the service,
        returns the EdmType of the property. Generally used if accept is set
        to JSON_NO_METADATA.
    :type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type)
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: A list of entities, potentially with a next_marker property.
    :rtype: list of :class:`~azure.storage.table.models.Entity`
    '''
    _validate_not_none('table_name', table_name)
    _validate_not_none('accept', accept)
    # Unpack the two-part continuation token, if a marker was provided.
    if marker is None:
        next_partition_key = None
        next_row_key = None
    else:
        next_partition_key = marker.get('nextpartitionkey')
        next_row_key = marker.get('nextrowkey')
    request = HTTPRequest()
    request.method = 'GET'
    request.host_locations = self._get_host_locations(secondary=True)
    request.path = '/' + _to_str(table_name) + '()'
    request.headers = {'Accept': _to_str(accept)}
    query = {}
    query['$filter'] = _to_str(filter)
    query['$select'] = _to_str(select)
    query['$top'] = _int_to_str(max_results)
    query['NextPartitionKey'] = _to_str(next_partition_key)
    query['NextRowKey'] = _to_str(next_row_key)
    query['timeout'] = _int_to_str(timeout)
    request.query = query
    parser_args = [property_resolver, self.require_encryption,
                   self.key_encryption_key, self.key_resolver_function]
    return self._perform_request(request, _convert_json_response_to_entities,
                                 parser_args, operation_context=_context)
def commit_batch(self, table_name, batch, timeout=None):
    '''
    Commits a :class:`~azure.storage.table.TableBatch` request.

    :param str table_name:
        The name of the table to commit the batch to.
    :param TableBatch batch:
        The batch to commit.
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: A list of the batch responses corresponding to the requests in the batch.
    :rtype: list of response objects
    '''
    _validate_not_none('table_name', table_name)
    # Outer request that carries the whole multipart batch payload.
    request = HTTPRequest()
    request.method = 'POST'
    request.host_locations = self._get_host_locations()
    request.path = '/' + '$batch'
    request.query = {'timeout': _int_to_str(timeout)}
    # Rewrite each inner request with table- and client-specific paths.
    for row_key, inner in batch._requests:
        if inner.method == 'POST':
            # Inserts address the table itself...
            inner.path = '/' + _to_str(table_name)
        else:
            # ...all other verbs address a specific entity.
            inner.path = _get_entity_path(table_name, batch._partition_key, row_key)
        if self.is_emulated:
            # The storage emulator expects the dev account name as a path prefix.
            inner.path = '/' + DEV_ACCOUNT_NAME + inner.path
        _update_request(inner)
    # Serialize the inner requests into the multipart body.
    request.body, boundary = _convert_batch_to_json(batch._requests)
    request.headers = {'Content-Type': boundary}
    return self._perform_request(request, _parse_batch_response)
@contextmanager
def batch(self, table_name, timeout=None):
    '''
    Creates a batch object usable as a context manager; the batch is
    committed to the service when the ``with`` block exits.

    :param str table_name:
        The name of the table to commit the batch to.
    :param int timeout:
        The server timeout, expressed in seconds.
    '''
    new_batch = TableBatch(self.require_encryption, self.key_encryption_key,
                           self.encryption_resolver_function)
    yield new_batch
    # Reached only on normal exit; an exception in the block skips the commit.
    self.commit_batch(table_name, new_batch, timeout=timeout)
def get_entity(self, table_name, partition_key, row_key, select=None,
               accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
               property_resolver=None, timeout=None):
    '''
    Get an entity from the specified table. Throws if the entity does not exist.

    :param str table_name:
        The name of the table to get the entity from.
    :param str partition_key:
        The PartitionKey of the entity.
    :param str row_key:
        The RowKey of the entity.
    :param str select:
        Returns only the desired properties of an entity from the set.
    :param str accept:
        Specifies the accepted content type of the response payload. See
        :class:`~azure.storage.table.models.TablePayloadFormat` for possible
        values.
    :param property_resolver:
        A function which given the partition key, row key, property name,
        property value, and the property EdmType if returned by the service,
        returns the EdmType of the property. Generally used if accept is set
        to JSON_NO_METADATA.
    :type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type)
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: The retrieved entity.
    :rtype: :class:`~azure.storage.table.models.Entity`
    '''
    _validate_not_none('table_name', table_name)
    # The shared helper builds the generic GET; only the host, path and
    # timeout are filled in per service instance.
    request = _get_entity(partition_key, row_key, select, accept)
    request.host_locations = self._get_host_locations(secondary=True)
    request.path = _get_entity_path(table_name, partition_key, row_key)
    request.query['timeout'] = _int_to_str(timeout)
    parser_args = [property_resolver, self.require_encryption,
                   self.key_encryption_key, self.key_resolver_function]
    return self._perform_request(request, _convert_json_response_to_entity,
                                 parser_args)
def insert_entity(self, table_name, entity, timeout=None):
    '''
    Inserts a new entity into the table. Throws if an entity with the same
    PartitionKey and RowKey already exists.

    PartitionKey and RowKey must both be supplied; together they form the
    primary key and must be unique within the table. Both are string values
    of up to 64 KB each. Integer key values should be converted to
    fixed-width strings (e.g. 1 -> 0000001) because keys are canonically
    sorted.

    :param str table_name:
        The name of the table to insert the entity into.
    :param entity:
        The entity to insert. Could be a dict or an entity object.
        Must contain a PartitionKey and a RowKey.
    :type entity: a dict or :class:`~azure.storage.table.models.Entity`
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: The etag of the inserted entity.
    :rtype: str
    '''
    _validate_not_none('table_name', table_name)
    # Helper handles serialization and (optionally) client-side encryption.
    request = _insert_entity(entity, self.require_encryption,
                             self.key_encryption_key,
                             self.encryption_resolver_function)
    request.host_locations = self._get_host_locations()
    request.query['timeout'] = _int_to_str(timeout)
    request.path = '/' + _to_str(table_name)
    return self._perform_request(request, _extract_etag)
def update_entity(self, table_name, entity, if_match='*', timeout=None):
    '''
    Updates an existing entity in a table. Throws if the entity does not exist.

    This operation replaces the entire entity and can therefore be used to
    remove properties.

    :param str table_name:
        The name of the table containing the entity to update.
    :param entity:
        The entity to update. Could be a dict or an entity object.
        Must contain a PartitionKey and a RowKey.
    :type entity: a dict or :class:`~azure.storage.table.models.Entity`
    :param str if_match:
        The client may specify the ETag for the entity on the request in
        order to compare to the ETag maintained by the service for optimistic
        concurrency: the update is performed only if the ETags match,
        indicating the entity has not been modified since it was retrieved.
        To force an unconditional update, set If-Match to the wildcard
        character (*).
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: The etag of the entity.
    :rtype: str
    '''
    _validate_not_none('table_name', table_name)
    # Helper serializes the entity, applies the If-Match precondition and
    # performs client-side encryption when configured.
    request = _update_entity(entity, if_match, self.require_encryption,
                             self.key_encryption_key,
                             self.encryption_resolver_function)
    request.host_locations = self._get_host_locations()
    request.query['timeout'] = _int_to_str(timeout)
    request.path = _get_entity_path(table_name, entity['PartitionKey'],
                                    entity['RowKey'])
    return self._perform_request(request, _extract_etag)
def merge_entity(self, table_name, entity, if_match='*', timeout=None):
    '''
    Updates an existing entity by merging the entity's properties. Throws
    if the entity does not exist.

    Unlike :func:`~update_entity`, this does not replace the existing
    entity: properties cannot be removed, null-valued properties are
    ignored, and all other properties are updated or added.

    :param str table_name:
        The name of the table containing the entity to merge.
    :param entity:
        The entity to merge. Could be a dict or an entity object.
        Must contain a PartitionKey and a RowKey.
    :type entity: a dict or :class:`~azure.storage.table.models.Entity`
    :param str if_match:
        The client may specify the ETag for the entity on the request in
        order to compare to the ETag maintained by the service for optimistic
        concurrency: the merge is performed only if the ETags match,
        indicating the entity has not been modified since it was retrieved.
        To force an unconditional merge, set If-Match to the wildcard
        character (*).
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: The etag of the entity.
    :rtype: str
    '''
    _validate_not_none('table_name', table_name)
    request = _merge_entity(entity, if_match, self.require_encryption,
                            self.key_encryption_key)
    request.host_locations = self._get_host_locations()
    request.path = _get_entity_path(table_name, entity['PartitionKey'],
                                    entity['RowKey'])
    request.query['timeout'] = _int_to_str(timeout)
    return self._perform_request(request, _extract_etag)
def delete_entity(self, table_name, partition_key, row_key,
                  if_match='*', timeout=None):
    '''
    Deletes an existing entity in a table. Throws if the entity does not exist.

    A successfully deleted entity is immediately marked for deletion and
    becomes inaccessible to clients; the Table service removes it later
    during garbage collection.

    :param str table_name:
        The name of the table containing the entity to delete.
    :param str partition_key:
        The PartitionKey of the entity.
    :param str row_key:
        The RowKey of the entity.
    :param str if_match:
        The client may specify the ETag for the entity on the request in
        order to compare to the ETag maintained by the service for optimistic
        concurrency: the delete is performed only if the ETags match,
        indicating the entity has not been modified since it was retrieved.
        To force an unconditional delete, set If-Match to the wildcard
        character (*).
    :param int timeout:
        The server timeout, expressed in seconds.
    '''
    _validate_not_none('table_name', table_name)
    request = _delete_entity(partition_key, row_key, if_match)
    request.host_locations = self._get_host_locations()
    request.path = _get_entity_path(table_name, partition_key, row_key)
    request.query['timeout'] = _int_to_str(timeout)
    # No parser: the service returns no body for a successful delete.
    self._perform_request(request)
def insert_or_replace_entity(self, table_name, entity, timeout=None):
    '''
    Replaces an existing entity or inserts a new one if it does not exist
    in the table — an "upsert" operation.

    When used to replace an entity, any properties from the previous entity
    not defined by the new one are removed.

    :param str table_name:
        The name of the table in which to insert or replace the entity.
    :param entity:
        The entity to insert or replace. Could be a dict or an entity object.
        Must contain a PartitionKey and a RowKey.
    :type entity: a dict or :class:`~azure.storage.table.models.Entity`
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: The etag of the entity.
    :rtype: str
    '''
    _validate_not_none('table_name', table_name)
    request = _insert_or_replace_entity(entity, self.require_encryption,
                                        self.key_encryption_key,
                                        self.encryption_resolver_function)
    request.host_locations = self._get_host_locations()
    request.path = _get_entity_path(table_name, entity['PartitionKey'],
                                    entity['RowKey'])
    request.query['timeout'] = _int_to_str(timeout)
    return self._perform_request(request, _extract_etag)
def insert_or_merge_entity(self, table_name, entity, timeout=None):
    '''
    Merges an existing entity or inserts a new one if it does not exist
    in the table.

    When used to merge an entity, any properties from the previous entity
    that the request does not define or include are retained.

    :param str table_name:
        The name of the table in which to insert or merge the entity.
    :param entity:
        The entity to insert or merge. Could be a dict or an entity object.
        Must contain a PartitionKey and a RowKey.
    :type entity: a dict or :class:`~azure.storage.table.models.Entity`
    :param int timeout:
        The server timeout, expressed in seconds.
    :return: The etag of the entity.
    :rtype: str
    '''
    _validate_not_none('table_name', table_name)
    request = _insert_or_merge_entity(entity, self.require_encryption,
                                      self.key_encryption_key)
    request.host_locations = self._get_host_locations()
    request.path = _get_entity_path(table_name, entity['PartitionKey'],
                                    entity['RowKey'])
    request.query['timeout'] = _int_to_str(timeout)
    return self._perform_request(request, _extract_etag)
def _perform_request(self, request, parser=None, parser_args=None, operation_context=None):
    # Add the table-service-specific headers to the request, then delegate
    # to the base class's request pipeline.
    # Fixed: removed non-Python residue ("| apache-2.0 |") that had been
    # fused onto the return statement and broke parsing.
    _update_storage_table_header(request)
    return super(TableService, self)._perform_request(request, parser, parser_args, operation_context)
smallyear/linuxLearn | salt/salt/modules/jboss7_cli.py | 1 | 14807 | # -*- coding: utf-8 -*-
'''
Module for low-level interaction with JbossAS7 through CLI.
This module exposes two ways of interaction with the CLI, either through commands or operations.
.. note:: Following JBoss documentation (https://developer.jboss.org/wiki/CommandLineInterface):
"Operations are considered a low level but comprehensive way to manage the AS controller, i.e. if it can't be done with operations it can't be done in any other way.
Commands, on the other hand, are more user-friendly in syntax,
although most of them still translate into operation requests and some of them even into a few
composite operation requests, i.e. commands also simplify some management operations from the user's point of view."
The difference between calling a command or operation is in handling the result.
Commands return a zero return code if operation is successful or return non-zero return code and
print an error to standard output in plain text, in case of an error.
Operations return a json-like structure, that contain more information about the result.
In case of a failure, they also return a specific return code. This module parses the output from the operations and
returns it as a dictionary so that an execution of an operation can then be verified against specific errors.
In order to run each function, jboss_config dictionary with the following properties must be passed:
* cli_path: the path to jboss-cli script, for example: '/opt/jboss/jboss-7.0/bin/jboss-cli.sh'
* controller: the IP address and port of controller, for example: 10.11.12.13:9999
* cli_user: username to connect to jboss administration console if necessary
* cli_password: password to connect to jboss administration console if necessary
Example:
.. code-block:: yaml
jboss_config:
cli_path: '/opt/jboss/jboss-7.0/bin/jboss-cli.sh'
controller: 10.11.12.13:9999
cli_user: 'jbossadm'
cli_password: 'jbossadm'
'''
# Import Python libs
from __future__ import absolute_import
import logging
import re
import pprint
import time
# Import Salt libs
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
import salt.ext.six as six
log = logging.getLogger(__name__)
def run_command(jboss_config, command, fail_on_error=True):
    '''
    Execute a command against a jboss instance through the CLI interface.

    jboss_config
        Configuration dictionary with properties specified above.
    command
        Command to execute against the jboss instance.
    fail_on_error (default=True)
        If True, raise a CommandExecutionError exception when execution fails.
        If False, the 'success' property of the returned dictionary is set to False.

    CLI Example:

    .. code-block:: bash

        salt '*' jboss7_cli.run_command '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_command
    '''
    result = __call_cli(jboss_config, command)
    # Commands (unlike operations) only signal failure via the return code.
    result['success'] = result['retcode'] == 0
    if not result['success'] and fail_on_error:
        raise CommandExecutionError('''Command execution failed, return code={retcode}, stdout='{stdout}', stderr='{stderr}' '''.format(**result))
    return result
def run_operation(jboss_config, operation, fail_on_error=True, retries=1):
    '''
    Execute an operation against a jboss instance through the CLI interface.

    jboss_config
        Configuration dictionary with properties specified above.
    operation
        An operation to execute against the jboss instance.
    fail_on_error (default=True)
        If True, raise a CommandExecutionError exception when execution fails.
        If False, the 'success' property of the returned dictionary is set to False.
    retries:
        Number of retries in case of "JBAS012144: Could not connect to remote" error.

    CLI Example:

    .. code-block:: bash

        salt '*' jboss7_cli.run_operation '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' my_operation
    '''
    cli_command_result = __call_cli(jboss_config, operation, retries)
    if cli_command_result['retcode'] == 0:
        if _is_cli_output(cli_command_result['stdout']):
            cli_result = _parse(cli_command_result['stdout'])
            cli_result['success'] = cli_result['outcome'] == 'success'
        else:
            raise CommandExecutionError('Operation has returned unparseable output: {0}'.format(cli_command_result['stdout']))
    else:
        if _is_cli_output(cli_command_result['stdout']):
            cli_result = _parse(cli_command_result['stdout'])
            cli_result['success'] = False
            # Fixed: the failure description may be absent or may not start
            # with a "JBASnnnn:" code; previously match.group(1) raised
            # AttributeError in that case. Now err_code is None instead.
            failure_description = cli_result.get('failure-description', '')
            match = re.search(r'^(JBAS\d+):', str(failure_description))
            cli_result['err_code'] = match.group(1) if match else None
        else:
            if fail_on_error:
                raise CommandExecutionError('''Command execution failed, return code={retcode}, stdout='{stdout}', stderr='{stderr}' '''.format(**cli_command_result))
            else:
                cli_result = {
                    'success': False,
                    'stdout': cli_command_result['stdout'],
                    'stderr': cli_command_result['stderr'],
                    'retcode': cli_command_result['retcode']
                }
    return cli_result
def __call_cli(jboss_config, command, retries=1):
    # Run a single jboss-cli.sh invocation and return the cmd.run_all result
    # dict (retcode/stdout/stderr). Retries transient connection failures.
    # Build the CLI invocation: script path, controller address, optional
    # credentials, then the inline (escaped) command.
    command_segments = [
        jboss_config['cli_path'],
        '--connect',
        '--controller="{0}"'.format(jboss_config['controller'])
    ]
    if 'cli_user' in six.iterkeys(jboss_config):
        command_segments.append('--user="{0}"'.format(jboss_config['cli_user']))
    if 'cli_password' in six.iterkeys(jboss_config):
        command_segments.append('--password="{0}"'.format(jboss_config['cli_password']))
    # The command itself must be escaped for the shell/CLI quoting rules
    # (see __escape_command for the gory details).
    command_segments.append('--command="{0}"'.format(__escape_command(command)))
    cli_script = ' '.join(command_segments)
    cli_command_result = __salt__['cmd.run_all'](cli_script)
    log.debug('cli_command_result=%s', str(cli_command_result))
    log.debug('========= STDOUT:\n%s', cli_command_result['stdout'])
    log.debug('========= STDERR:\n%s', cli_command_result['stderr'])
    log.debug('========= RETCODE: %d', cli_command_result['retcode'])
    # retcode 127: shell could not find the jboss-cli script at cli_path.
    if cli_command_result['retcode'] == 127:
        raise CommandExecutionError('Could not execute jboss-cli.sh script. Have you specified server_dir variable correctly?\nCurrent CLI path: {cli_path}. '.format(cli_path=jboss_config['cli_path']))
    # retcode 1 + auth message: bad management-console credentials.
    if cli_command_result['retcode'] == 1 and 'Unable to authenticate against controller' in cli_command_result['stderr']:
        raise CommandExecutionError('Could not authenticate against controller, please check username and password for the management console. Err code: {retcode}, stdout: {stdout}, stderr: {stderr}'.format(**cli_command_result))
    # It may happen that eventhough server is up it may not respond to the call
    # JBAS012144 = "Could not connect to remote"; back off and retry recursively.
    if cli_command_result['retcode'] == 1 and 'JBAS012144' in cli_command_result['stderr'] and retries > 0:  # Cannot connect to cli
        log.debug('Command failed, retrying... (%d tries left)', retries)
        time.sleep(3)
        return __call_cli(jboss_config, command, retries - 1)
    return cli_command_result
def __escape_command(command):
    '''
    Escape a CLI command so it can be passed on the jboss-cli command line.

    Escaping commands passed to JBoss is extremely confusing. A single
    backslash stored in config.xml (value="a\\b") is printed by the CLI as
    two backslashes, must be written as four backslashes when the command
    comes from a --file script, and as EIGHT backslashes when the command is
    passed inline on the shell command line. To sum up, these are all the
    same thing:

    (1) 1 backslash in the configuration file
    (2) 2 backslashes when reading
    (3) 4 backslashes when writing from file
    (4) 8 backslashes when writing from command line

    The command arriving here is already in form (3); this function adds the
    final layer of escaping needed for inline command-line use, and also
    escapes double quotes so the command survives being wrapped in
    --command="...".
    '''
    # Order matters: double every backslash first, then escape the quotes
    # (otherwise the backslashes introduced for quotes would be doubled too).
    escaped = command.replace('\\', '\\\\')
    escaped = escaped.replace('"', '\\"')
    return escaped
def _is_cli_output(text):
    '''
    Return True if ``text`` looks like a jboss-cli DMR response, i.e. the
    whole payload is wrapped in curly braces (possibly multi-line).
    '''
    # DOTALL lets '.' match newlines so multi-line responses are accepted.
    cli_re = re.compile(r"^\s*{.+}\s*$", re.DOTALL)
    # Return the boolean directly instead of an if/else returning True/False.
    return cli_re.search(text) is not None
def _parse(cli_output):
    '''Parse raw jboss-cli output into a python dictionary.'''
    parsed = __process_tokens(__tokenize(cli_output))
    log.debug("=== RESULT: "+pprint.pformat(parsed))
    return parsed
def __process_tokens(tokens):
    '''Convert the token stream into a dict, discarding the token index.'''
    parsed, _unused_token_no = __process_tokens_internal(tokens)
    return parsed
def __process_tokens_internal(tokens, start_at=0):
    '''
    Recursively build a dict from the jboss-cli token stream.

    Returns a ``(result, token_no)`` pair where ``token_no`` is the index of
    the last consumed token (the ``}`` closing the current object).
    '''
    if __is_dict_start(tokens[start_at]) and start_at == 0: # the top object
        return __process_tokens_internal(tokens, start_at=1)
    log.debug("__process_tokens, start_at="+str(start_at))
    token_no = start_at
    result = {}
    current_key = None
    while token_no < len(tokens):
        token = tokens[token_no]
        log.debug("PROCESSING TOKEN %d: %s", token_no, token)
        if __is_quoted_string(token):
            log.debug(" TYPE: QUOTED STRING ")
            if current_key is None:
                # first string of a pair is the key ...
                current_key = __get_quoted_string(token)
                log.debug(" KEY: %s", current_key)
            else:
                # ... the second one is its value
                result[current_key] = __get_quoted_string(token)
                log.debug(" %s -> %s", current_key, result[current_key])
                current_key = None
        elif __is_datatype(token):
            log.debug(" TYPE: DATATYPE: %s ", token)
            result[current_key] = __get_datatype(token)
            log.debug(" %s -> %s", current_key, str(result[current_key]))
            current_key = None
        elif __is_boolean(token):
            log.debug(" TYPE: BOOLEAN ")
            result[current_key] = __get_boolean(token)
            log.debug(" %s -> %s", current_key, str(result[current_key]))
            current_key = None
        elif __is_int(token):
            log.debug(" TYPE: INT ")
            result[current_key] = __get_int(token)
            log.debug(" %s -> %s", current_key, str(result[current_key]))
            current_key = None
        elif __is_long(token):
            log.debug(" TYPE: LONG ")
            result[current_key] = __get_long(token)
            log.debug(" %s -> %s", current_key, str(result[current_key]))
            current_key = None
        elif __is_undefined(token):
            log.debug(" TYPE: UNDEFINED ")
            log.debug(" %s -> undefined (Adding as None to map)", current_key)
            result[current_key] = None
            current_key = None
        elif __is_dict_start(token):
            log.debug(" TYPE: DICT START")
            dict_value, token_no = __process_tokens_internal(tokens, start_at=token_no+1)
            log.debug(" DICT = %s ", dict_value)
            result[current_key] = dict_value
            log.debug(" %s -> %s", current_key, str(result[current_key]))
            current_key = None
        elif __is_dict_end(token):
            log.debug(" TYPE: DICT END")
            return result, token_no
        elif __is_assignment(token):
            # '=>' separators carry no information.  (The old code set a
            # dead ``is_assignment`` flag here that was never read.)
            log.debug(" TYPE: ASSIGNMENT")
        else:
            raise CommandExecutionError('Unknown token! Token: {0}'.format(token))
        token_no = token_no + 1
    # Token stream exhausted without a closing '}' (malformed output):
    # return what was parsed so far instead of implicitly returning None,
    # which would crash tuple-unpacking callers with a TypeError.
    return result, token_no
def __tokenize(cli_output):
    '''Split raw jboss-cli output into a flat list of lexical tokens.'''
    # A single alternation covering every token kind: quoted strings (with
    # escaped quotes/backslashes), '=>', braces, booleans, 'undefined' and
    # bare alphanumeric words.  In the pattern, \\ means a single backslash.
    token_pattern = re.compile(
        r'("(?:[^"\\]|\\"|\\\\)*"|=>|{|}|true|false|undefined|[0-9A-Za-z]+)',
        re.DOTALL)
    found = token_pattern.findall(cli_output)
    log.debug("tokens=%s", str(found))
    return found
def __is_dict_start(token):
    # '{' opens a nested DMR object.
    return token == '{'
def __is_dict_end(token):
    # '}' closes the current DMR object.
    return token == '}'
def __is_boolean(token):
    # DMR booleans are the bare words 'true' / 'false'.
    return token == 'true' or token == 'false'
def __get_boolean(token):
    # anything other than 'true' maps to False
    return token == 'true'
def __is_int(token):
    # plain run of digits (no sign, no suffix)
    return token.isdigit()
def __get_int(token):
    return int(token)
def __is_long(token):
    # DMR longs are digits followed by an 'L' suffix, e.g. '123L'
    return token[0:-1].isdigit() and token[-1] == 'L'
def __get_long(token):
    # Python 3 has no separate long type; a plain int covers the range
    if six.PY2:
        return long(token[0:-1])
    else:
        return int(token[0:-1])
def __is_datatype(token):
    # bare type names that may appear as DMR values
    return token in ("INT", "BOOLEAN", "STRING", "OBJECT")
def __get_datatype(token):
    return token
def __is_undefined(token):
    return token == 'undefined'
def __is_quoted_string(token):
    # the tokenizer guarantees balanced quotes, so checking the ends suffices
    return token[0] == '"' and token[-1] == '"'
def __get_quoted_string(token):
    result = token[1:-1] # remove quotes
    result = result.replace('\\\\', '\\') # unescape the output, by default all the string are escaped in the output
    return result
def __is_assignment(token):
    return token == '=>'
| apache-2.0 |
jgrocha/QGIS | python/plugins/grassprovider/ext/r_null.py | 45 | 2083 | # -*- coding: utf-8 -*-
"""
***************************************************************************
r_null.py
---------------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
def checkParameterValuesBeforeExecuting(alg, parameters, context):
    """Check that at least one of 'setnull'/'null' was provided.

    Returns (True, None) when the algorithm can run, otherwise
    (False, <translated error message>).
    """
    has_required = any(
        alg.parameterAsString(parameters, name, context)
        for name in ('setnull', 'null'))
    if not has_required:
        return False, alg.tr("You need to set at least 'setnull' or 'null' parameters for this algorithm!")
    return True, None
def processInputs(alg, parameters, context, feedback):
    """Prepare the GRASS import commands"""
    # Layer already imported by an earlier step: nothing to do.
    if 'map' in alg.exportedLayers:
        return
    # We need to import without r.external
    # (the trailing False presumably forces a real import rather than an
    # external link, since r.null must modify the raster -- TODO confirm)
    alg.loadRasterLayerFromParameter('map', parameters, context, False)
    alg.postInputs(context)
def processCommand(alg, parameters, context, feedback):
    """Run the r.null command itself."""
    # We temporary remove the output 'sequence'
    # (the trailing True presumably tells the base implementation to skip
    # output handling; outputs are exported separately -- TODO confirm)
    alg.processCommand(parameters, context, feedback, True)
def processOutputs(alg, parameters, context, feedback):
    """Export the processed raster back out of the GRASS mapset."""
    destination = alg.parameterAsOutputLayer(parameters, 'output', context)
    source = alg.exportedLayers['map']
    alg.exportRasterLayer(source, destination, False)
| gpl-2.0 |
sunclx/anki | tests/test_collection.py | 19 | 3795 | # coding: utf-8
import os, tempfile
from tests.shared import assertException, getEmptyCol
from anki.stdmodels import addBasicModel
from anki import Collection as aopen
# Shared between test_create and test_open: the path and modification time
# of the collection created by test_create (the tests are order-dependent).
newPath = None
newMod = None
def test_create():
    """Creating a collection at a fresh path should succeed."""
    global newPath, newMod
    (fd, path) = tempfile.mkstemp(suffix=".anki2", prefix="test_attachNew")
    try:
        # we only want the unique name; the collection creates the file itself
        os.close(fd)
        os.unlink(path)
    except OSError:
        pass
    deck = aopen(path)
    # for open()
    newPath = deck.path
    deck.close()
    newMod = deck.mod
    del deck
def test_open():
    """Reopening the collection from test_create preserves its mod time."""
    deck = aopen(newPath)
    assert deck.mod == newMod
    deck.close()
def test_openReadOnly():
    """Opening an unwritable directory or file must raise."""
    # non-writeable dir
    assertException(Exception,
                    lambda: aopen("/attachroot.anki2"))
    # reuse tmp file from before, test non-writeable file
    os.chmod(newPath, 0)
    assertException(Exception,
                    lambda: aopen(newPath))
    # 0o666 instead of the Python-2-only literal 0666: same value, but the
    # file also parses under Python 3 (0oNNN is valid from Python 2.6 on).
    os.chmod(newPath, 0o666)
    os.unlink(newPath)
def test_noteAddDelete():
    """Adding notes, extra templates, and duplicate/empty detection."""
    deck = getEmptyCol()
    # add a note
    f = deck.newNote()
    f['Front'] = u"one"; f['Back'] = u"two"
    n = deck.addNote(f)
    assert n == 1
    # test multiple cards - add another template
    m = deck.models.current(); mm = deck.models
    t = mm.newTemplate("Reverse")
    t['qfmt'] = "{{Back}}"
    t['afmt'] = "{{Front}}"
    mm.addTemplate(m, t)
    mm.save(m)
    # the default save doesn't generate cards
    assert deck.cardCount() == 1
    # but when templates are edited such as in the card layout screen, it
    # should generate cards on close
    mm.save(m, templates=True)
    assert deck.cardCount() == 2
    # creating new notes should use both cards
    f = deck.newNote()
    f['Front'] = u"three"; f['Back'] = u"four"
    n = deck.addNote(f)
    assert n == 2
    assert deck.cardCount() == 4
    # check q/a generation
    c0 = f.cards()[0]
    assert "three" in c0.q()
    # it should not be a duplicate
    assert not f.dupeOrEmpty()
    # now let's make a duplicate
    f2 = deck.newNote()
    f2['Front'] = u"one"; f2['Back'] = u""
    assert f2.dupeOrEmpty()
    # empty first field should not be permitted either
    f2['Front'] = " "
    assert f2.dupeOrEmpty()
def test_fieldChecksum():
    """The first-field checksum stored in the db follows edits."""
    deck = getEmptyCol()
    f = deck.newNote()
    f['Front'] = u"new"; f['Back'] = u"new2"
    deck.addNote(f)
    # expected csum values are precomputed sha1 prefixes of the first field
    assert deck.db.scalar(
        "select csum from notes") == int("c2a6b03f", 16)
    # changing the val should change the checksum
    f['Front'] = u"newx"
    f.flush()
    assert deck.db.scalar(
        "select csum from notes") == int("302811ae", 16)
def test_addDelTags():
    """Bulk tag addition applies only to the given note ids."""
    deck = getEmptyCol()
    f = deck.newNote()
    f['Front'] = u"1"
    deck.addNote(f)
    f2 = deck.newNote()
    f2['Front'] = u"2"
    deck.addNote(f2)
    # adding for a given id
    deck.tags.bulkAdd([f.id], "foo")
    f.load(); f2.load()
    assert "foo" in f.tags
    assert "foo" not in f2.tags
    # should be canonified
    deck.tags.bulkAdd([f.id], "foo aaa")
    f.load()
    assert f.tags[0] == "aaa"
    assert len(f.tags) == 2
def test_timestamps():
    """Adding many models must not collide on their timestamp-based ids."""
    deck = getEmptyCol()
    assert len(deck.models.models) == 4
    for i in range(100):
        addBasicModel(deck)
    assert len(deck.models.models) == 104
def test_furigana():
    """The {{kana:...}} template filter strips readings and sound tags."""
    deck = getEmptyCol()
    mm = deck.models
    m = mm.current()
    # filter should work
    m['tmpls'][0]['qfmt'] = '{{kana:Front}}'
    mm.save(m)
    n = deck.newNote()
    n['Front'] = 'foo[abc]'
    deck.addNote(n)
    c = n.cards()[0]
    assert c.q().endswith("abc")
    # and should avoid sound
    n['Front'] = 'foo[sound:abc.mp3]'
    n.flush()
    assert "sound:" in c.q(reload=True)
    # it shouldn't throw an error while people are editing
    m['tmpls'][0]['qfmt'] = '{{kana:}}'
    mm.save(m)
    c.q(reload=True)
| agpl-3.0 |
alexproca/askbot-devel | askbot/utils/functions.py | 9 | 6664 | import re
import random
import datetime
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.utils.html import escape
def get_from_dict_or_object(source, key):
    """Look up ``key`` in ``source``, which may be a mapping (tried first)
    or an object with a matching attribute.

    Raises AttributeError when neither lookup succeeds.
    """
    try:
        return source[key]
    except (TypeError, KeyError, IndexError):
        # not subscriptable, or key missing -- fall back to attribute access.
        # (The original bare ``except:`` also swallowed KeyboardInterrupt
        # and SystemExit; catching the specific lookup errors is safer.)
        return getattr(source, key)
def enumerate_string_list(strings):
    """Number the given strings, e.g. ('one', 'two') -> ['1) one', '2) two']."""
    return ['%d) %s' % (position, text)
            for position, text in enumerate(strings, start=1)]
def pad_string(text):
    """Collapse runs of whitespace in ``text`` to single spaces and wrap the
    result in one leading and one trailing space.

    A string containing no words collapses to ''.
    """
    words = text.strip().split()
    # a non-empty list is truthy -- clearer than ``len(words) > 0``
    if words:
        return ' ' + ' '.join(words) + ' '
    return ''
def split_list(text):
    """Turn loosely formatted list text (words separated by commas,
    semicolons and/or whitespace) into a list() of words.
    """
    for delimiter in (',', ';'):
        text = text.replace(delimiter, ' ')
    return text.strip().split()
def is_iterable(thing):
    # Objects exposing __iter__ are iterable.
    if hasattr(thing, '__iter__'):
        return True
    else:
        # NOTE(review): ``basestring`` is Python-2-only; py2 ``str`` lacks
        # __iter__, so plain strings are special-cased here.  Under Python 3
        # this branch would raise NameError (py3 str is caught above).
        return isinstance(thing, basestring)
# User agents matching this are treated as bots/scripts.  Compiled with
# IGNORECASE because user-agent strings use arbitrary capitalisation.
# (The old code passed re.IGNORECASE as the second positional argument of
# Pattern.match(), which is the *start position*, not a flags value -- so
# matching silently started at offset 2 and was never case-insensitive.)
BOT_REGEX = re.compile(
    r'bot|http|\.com|crawl|spider|python|curl|yandex',
    re.IGNORECASE
)
# Desktop browser user agents.
BROWSER_REGEX = re.compile(
    r'^(Mozilla.*(Gecko|KHTML|MSIE|Presto|Trident)|Opera).*$'
)
# Mobile device user agents.
MOBILE_REGEX = re.compile(
    r'(BlackBerry|HTC|LG|MOT|Nokia|NOKIAN|PLAYSTATION|PSP|SAMSUNG|SonyEricsson)'
)
def strip_plus(text):
    """returns text with redundant spaces replaced with just one,
    and stripped leading and the trailing spaces"""
    # raw string avoids the invalid-escape warning for '\s' in newer Pythons
    return re.sub(r'\s+', ' ', text).strip()
def not_a_robot_request(request):
    """Best-effort check that ``request`` comes from a human browser.

    Returns True for recognisable mobile/desktop browser user agents,
    False for missing headers or bot-like user agents.
    """
    # real browsers send an Accept-Language header; most bots do not
    if 'HTTP_ACCEPT_LANGUAGE' not in request.META:
        return False
    user_agent = request.META.get('HTTP_USER_AGENT', None)
    if user_agent is None:
        return False
    # BUGFIX: the flags now live in the compiled BOT_REGEX pattern above
    if BOT_REGEX.match(user_agent):
        return False
    if MOBILE_REGEX.match(user_agent):
        return True
    if BROWSER_REGEX.search(user_agent):
        return True
    return False
def diff_date(date, use_on_prefix = False):
    """Return a localized human-friendly age for ``date``: '5 mins ago',
    'yesterday', '2 days ago', or the formatted date itself when older
    than two days (optionally prefixed with 'on').
    """
    now = datetime.datetime.now()#datetime(*time.localtime()[0:6])#???
    diff = now - date
    days = diff.days
    hours = int(diff.seconds/3600)
    minutes = int(diff.seconds/60)
    if days > 2:
        # too old for a relative phrase: show the date, omitting the year
        # when it is the current one
        if date.year == now.year:
            date_token = date.strftime("%b %d")
        else:
            date_token = date.strftime("%b %d '%y")
        if use_on_prefix:
            return _('on %(date)s') % { 'date': date_token }
        else:
            return date_token
    elif days == 2:
        return _('2 days ago')
    elif days == 1:
        return _('yesterday')
    elif minutes >= 60:
        return ungettext(
            '%(hr)d hour ago',
            '%(hr)d hours ago',
            hours
        ) % {'hr':hours}
    else:
        return ungettext(
            '%(min)d min ago',
            '%(min)d mins ago',
            minutes
        ) % {'min':minutes}
#todo: this function may need to be removed to simplify the paginator functionality
# Tuning knobs for the digg-style pagination window.
LEADING_PAGE_RANGE_DISPLAYED = TRAILING_PAGE_RANGE_DISPLAYED = 5
LEADING_PAGE_RANGE = TRAILING_PAGE_RANGE = 4
NUM_PAGES_OUTSIDE_RANGE = 1
ADJACENT_PAGES = 2
def setup_paginator(context):
    """
    custom paginator tag
    Inspired from http://blog.localkinegrinds.com/2007/09/06/digg-style-pagination-in-django/

    ``context`` must provide "is_paginated", "pages", "current_page_number",
    "page_object" and "base_url".  Returns a dict of template variables,
    or None (implicitly) when the view is not paginated.
    """
    if (context["is_paginated"]):
        " Initialize variables "
        in_leading_range = in_trailing_range = False
        pages_outside_leading_range = pages_outside_trailing_range = range(0)
        # Pick the window of page numbers to display depending on whether the
        # current page is near the start, near the end, or in the middle.
        if (context["pages"] <= LEADING_PAGE_RANGE_DISPLAYED):
            in_leading_range = in_trailing_range = True
            page_numbers = [n for n in range(1, context["pages"] + 1) if n > 0 and n <= context["pages"]]
        elif (context["current_page_number"] <= LEADING_PAGE_RANGE):
            in_leading_range = True
            page_numbers = [n for n in range(1, LEADING_PAGE_RANGE_DISPLAYED + 1) if n > 0 and n <= context["pages"]]
            pages_outside_leading_range = [n + context["pages"] for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]
        elif (context["current_page_number"] > context["pages"] - TRAILING_PAGE_RANGE):
            in_trailing_range = True
            page_numbers = [n for n in range(context["pages"] - TRAILING_PAGE_RANGE_DISPLAYED + 1, context["pages"] + 1) if n > 0 and n <= context["pages"]]
            pages_outside_trailing_range = [n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]
        else:
            page_numbers = [n for n in range(context["current_page_number"] - ADJACENT_PAGES, context["current_page_number"] + ADJACENT_PAGES + 1) if n > 0 and n <= context["pages"]]
            pages_outside_leading_range = [n + context["pages"] for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]
            pages_outside_trailing_range = [n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]
        page_object = context['page_object']
        #patch for change in django 1.5
        # (next/previous_page_number raise when absent, so guard with has_*)
        if page_object.has_previous():
            previous_page_number = page_object.previous_page_number()
        else:
            previous_page_number = None
        if page_object.has_next():
            next_page_number = page_object.next_page_number()
        else:
            next_page_number = None
        return {
            "base_url": escape(context["base_url"]),
            "is_paginated": context["is_paginated"],
            "previous": previous_page_number,
            "has_previous": page_object.has_previous(),
            "next": next_page_number,
            "has_next": page_object.has_next(),
            "page": context["current_page_number"],
            "pages": context["pages"],
            "page_numbers": page_numbers,
            "in_leading_range" : in_leading_range,
            "in_trailing_range" : in_trailing_range,
            "pages_outside_leading_range": pages_outside_leading_range,
            "pages_outside_trailing_range": pages_outside_trailing_range,
        }
def get_admin():
    """Return an admin (superuser) account, useful for raising flags.

    Raises Exception when no superuser exists.
    """
    try:
        from django.contrib.auth.models import User
        return User.objects.filter(is_superuser=True)[0]
    except IndexError:
        # narrowed from a bare ``except:`` -- only "no superuser row" should
        # produce this message; import/database failures propagate unmasked
        raise Exception('there is no admin users')
def generate_random_key(length=16):
    """Return a random lowercase hex string.

    ``length`` is the number of random *bytes* drawn; the returned string
    therefore contains ``2 * length`` hex characters.

    Raises TypeError when ``length`` is not an int.
    """
    if not isinstance(length, int):
        # explicit check: ``assert`` would be stripped under ``python -O``
        raise TypeError('length must be an int')
    # SystemRandom draws from os.urandom, so the keys are suitable for
    # security-sensitive use and there is no need to reseed on every call
    # as the old code did.
    rng = random.SystemRandom()
    format_string = '%0' + str(2*length) + 'x'
    return format_string % rng.getrandbits(length*8)
| gpl-3.0 |
guijomatos/SickRage | lib/requests/packages/chardet/universaldetector.py | 1776 | 6840 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2122, etc.
import re
# A charset guess below this confidence is rejected in close().
MINIMUM_THRESHOLD = 0.20
# Input-state machine values used by feed()/close():
ePureAscii = 0   # only 7-bit ASCII bytes seen so far
eEscAscii = 1    # ASCII plus escape/shift sequences (ISO-2022 family etc.)
eHighbyte = 2    # at least one byte >= 0x80 seen
class UniversalDetector:
    """Incremental character-set detector.

    Feed chunks of bytes with feed(); call close() to finalize, then read
    ``self.result`` -- a {'encoding': ..., 'confidence': ...} dict.
    """
    def __init__(self):
        # matches any non-ASCII byte
        self._highBitDetector = re.compile(b'[\x80-\xFF]')
        # matches an ESC byte or '~{' -- markers of escape/shift encodings
        # handled by EscCharSetProber
        self._escDetector = re.compile(b'(\033|~{)')
        self._mEscCharSetProber = None
        self._mCharSetProbers = []
        self.reset()
    def reset(self):
        """Reset all state so the detector can be reused for a new document."""
        self.result = {'encoding': None, 'confidence': 0.0}
        self.done = False
        self._mStart = True
        self._mGotData = False
        self._mInputState = ePureAscii
        self._mLastChar = b''
        if self._mEscCharSetProber:
            self._mEscCharSetProber.reset()
        for prober in self._mCharSetProbers:
            prober.reset()
    def feed(self, aBuf):
        """Consume the next chunk of bytes.

        May set ``self.done`` and ``self.result`` as soon as the encoding
        is unambiguous (e.g. a BOM, or a prober reporting eFoundIt).
        """
        if self.done:
            return
        aLen = len(aBuf)
        if not aLen:
            return
        if not self._mGotData:
            # If the data starts with BOM, we know it is UTF
            if aBuf[:3] == codecs.BOM_UTF8:
                # EF BB BF  UTF-8 with BOM
                self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
            elif aBuf[:4] == codecs.BOM_UTF32_LE:
                # FF FE 00 00  UTF-32, little-endian BOM
                self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
            elif aBuf[:4] == codecs.BOM_UTF32_BE:
                # 00 00 FE FF  UTF-32, big-endian BOM
                self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
            elif aBuf[:4] == b'\xFE\xFF\x00\x00':
                # FE FF 00 00  UCS-4, unusual octet order BOM (3412)
                self.result = {
                    'encoding': "X-ISO-10646-UCS-4-3412",
                    'confidence': 1.0
                }
            elif aBuf[:4] == b'\x00\x00\xFF\xFE':
                # 00 00 FF FE  UCS-4, unusual octet order BOM (2143)
                self.result = {
                    'encoding': "X-ISO-10646-UCS-4-2143",
                    'confidence': 1.0
                }
            elif aBuf[:2] == codecs.BOM_LE:
                # FF FE  UTF-16, little endian BOM
                self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
            elif aBuf[:2] == codecs.BOM_BE:
                # FE FF  UTF-16, big endian BOM
                self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
        self._mGotData = True
        if self.result['encoding'] and (self.result['confidence'] > 0.0):
            self.done = True
            return
        # Promote the input state as soon as evidence of high-bit bytes or
        # escape sequences appears; _mLastChar covers chunk boundaries.
        if self._mInputState == ePureAscii:
            if self._highBitDetector.search(aBuf):
                self._mInputState = eHighbyte
            elif ((self._mInputState == ePureAscii) and
                    self._escDetector.search(self._mLastChar + aBuf)):
                self._mInputState = eEscAscii
        self._mLastChar = aBuf[-1:]
        if self._mInputState == eEscAscii:
            if not self._mEscCharSetProber:
                self._mEscCharSetProber = EscCharSetProber()
            if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
                self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
                               'confidence': self._mEscCharSetProber.get_confidence()}
                self.done = True
        elif self._mInputState == eHighbyte:
            # lazily create the multi-byte / single-byte / latin-1 probers
            if not self._mCharSetProbers:
                self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
                                         Latin1Prober()]
            for prober in self._mCharSetProbers:
                if prober.feed(aBuf) == constants.eFoundIt:
                    self.result = {'encoding': prober.get_charset_name(),
                                   'confidence': prober.get_confidence()}
                    self.done = True
                    break
    def close(self):
        """Finalize detection.

        Returns ``self.result`` for the pure-ASCII and high-byte paths;
        other paths return None even though ``self.result`` may already
        have been populated by feed() -- callers should read self.result.
        """
        if self.done:
            return
        if not self._mGotData:
            if constants._debug:
                sys.stderr.write('no data received!\n')
            return
        self.done = True
        if self._mInputState == ePureAscii:
            self.result = {'encoding': 'ascii', 'confidence': 1.0}
            return self.result
        if self._mInputState == eHighbyte:
            # pick the most confident prober, if it clears the threshold
            proberConfidence = None
            maxProberConfidence = 0.0
            maxProber = None
            for prober in self._mCharSetProbers:
                if not prober:
                    continue
                proberConfidence = prober.get_confidence()
                if proberConfidence > maxProberConfidence:
                    maxProberConfidence = proberConfidence
                    maxProber = prober
            if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
                self.result = {'encoding': maxProber.get_charset_name(),
                               'confidence': maxProber.get_confidence()}
                return self.result
        if constants._debug:
            sys.stderr.write('no probers hit minimum threshhold\n')
            for prober in self._mCharSetProbers[0].mProbers:
                if not prober:
                    continue
                sys.stderr.write('%s confidence = %s\n' %
                                 (prober.get_charset_name(),
                                  prober.get_confidence()))
| gpl-3.0 |
bfeist/Apollo17 | Processing_Scripts/! Scripted used for previous transcript scrubbing steps/cleanPAOcsv.py | 1 | 3526 | import csv
import sys
import re
import os
def scrub_callsign(callsign):
    """Normalize an OCR'd transcript callsign: trim whitespace and map the
    'MCC' alias onto 'CC' (Mission Control).  Removed the commented-out
    upper() dead code from the original.
    """
    callsign = callsign.strip()
    if callsign == "MCC":
        callsign = "CC"
    return callsign
# --- Script body (Python 2) -----------------------------------------------
# Cleans OCR'd Apollo 17 PAO commentary CSVs: extracts a GET timestamp from
# each "APOLLO 17 MISSION COMMENTARY" page header, tags every utterance with
# it, and writes a cleaned pipe-delimited file plus a PAO-only variant.
readPageCounter = 1
curRow = 0
errorCount = 0
newTimestamp = 0
lastWasPAO = 0
output_file_name_and_path = "F:\ApolloGit\Apollo17\Processing_Output\A17_PAO_cleaned.csv"
output_file_name_and_path_PAO_only = "F:\ApolloGit\Apollo17\Processing_Output\A17_PAO_cleaned_pao_only.csv"
outputFile = open(output_file_name_and_path, "w")
PAOOnlyOutputFile = open(output_file_name_and_path_PAO_only, "w")
PAOOnlyOutputLine = ""
outputLine = ""
pageTitle = ""
# Known speaker callsigns; rows whose first field starts with one of these
# are treated as new utterances, everything else as OCR noise/continuation.
callsignList = [ "LAUNCH CNTL", "CAPCOM", "PAO", "SC", "AMERICA", "CERNAN", "SCHMITT", "CHALLENGER", "RECOVERY", "SPEAKER", "GREEN", "ECKER", "BUTTS", "JONES", "EVANS", "HONEYSUCKLE" ]
for curFile in [ "A17_PAO.csv" ]:
    inputFilePath = "F:\ApolloGit\Apollo17\OCR_Output\\" + curFile
    reader = csv.reader(open(inputFilePath, "rU"), delimiter='|')
    for row in reader:
        curRow += 1
        #print row[1]
        if len(row) > 1:
            if row[1].startswith("APOLLO 17 MISSION COMMENTARY") :
                # page header: remember it and pull out the GET timestamp
                #pageTitle = "||" + row[1] + "|Page " + str(readPageCounter)
                pageTitle = "||" + row[1]
                readPageCounter += 1
                #parse out mission elapsed time
                cstTimeStr = row[1][row[1].index("CST") + 4:row[1].index("CST") + 10]
                getTimeStr = row[1][row[1].index("GET") + 4:row[1].index("GET") + 10]
                if getTimeStr.find("/") != -1 or getTimeStr.find("MC") != -1 or cstTimeStr.find("/") != -1 or cstTimeStr.find("MC") != -1:
                    #time must be to the left of 'GET'
                    getTimeStr = row[1][row[1].index("GET") -8 :row[1].index("GET")]
                getTimeStr = max(getTimeStr.strip().split(" "), key=len).replace(",","").zfill(6) + ":00" #strip longest list item and remove commas - this is the timestamp
                print str(curRow) + " " + getTimeStr
                #outputFile.write("TIMESTAMP|" + row[1] + "\n")
                newTimestamp = 1
            elif row[1].startswith("END OF TAPE"):
                pass #delete END OF TAPE rows
            else:
                if any(row[0].startswith(curCallsign) for curCallsign in callsignList):
                    #print "-----------------------" + "Callsign Found.\n"
                    outputFile.write("\n")
                    if newTimestamp == 1 :
                        outputLine = pageTitle + "\n" #print the complete typed pages title
                        outputLine += '{0}|{1}|{2}'.format(getTimeStr,row[0],row[1]) #print the first dialogue with the timestamp from the title
                        newTimestamp = 0
                    else :
                        #outputLine = '|{0}|{1}'.format(row[0],row[1]) #print a regular line of dialog with callsign
                        outputLine += '{0}|{1}|{2}'.format(getTimeStr,row[0],row[1]) #print a regular line of dialogue with the timestamp from the title
                    if row[0].startswith("PAO"):
                        PAOOnlyOutputFile.write("\n")
                        lastWasPAO = 1
                        PAOOnlyOutputLine = outputLine
                    else:
                        lastWasPAO = 0
                else:
                    print str(curRow) + "-----------------------" + "Callsign Not Found. " + str(len(row)) + " " + str(row)
                    errorCount += 1
                    #print row
                    #print outputLine
                    pass
        else:
            #--only one record, so it must be a continuation of the previous line
            outputLine += ' ' + row[0]
            if lastWasPAO == 1:
                PAOOnlyOutputLine += ' ' + row[0]
                lastWasPAO = 0
            #print str(curRow) + " concatted line: " + outputLine
        #if readPageCounter % 60 == 0:
        #    break
        # flush the (possibly continued) line buffers once per input row
        outputFile.write(outputLine)
        outputLine = ""
        PAOOnlyOutputFile.write(PAOOnlyOutputLine)
        PAOOnlyOutputLine = ""
        #print outputLine
#outputFile.close() | agpl-3.0 |
chaincoin/chaincoin | test/functional/feature_maxuploadtarget.py | 27 | 6608 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
"""
from collections import defaultdict
import time
from test_framework.messages import CInv, msg_getdata
from test_framework.mininode import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, mine_large_block
class TestP2PConn(P2PInterface):
    """P2P connection that counts how often each block is received."""
    def __init__(self):
        super().__init__()
        # block sha256 -> number of times that block message was received
        self.block_receive_map = defaultdict(int)
    def on_inv(self, message):
        # inv announcements are ignored; the test requests blocks explicitly
        pass
    def on_block(self, message):
        message.block.calc_sha256()
        self.block_receive_map[message.block.sha256] += 1
class MaxUploadTest(BitcoinTestFramework):
    """Functional test for the -maxuploadtarget bandwidth limit."""
    def set_test_params(self):
        # single node with an 800MB/day upload target
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [["-maxuploadtarget=800"]]
        # Cache for utxos, as the listunspent may take a long time later in the test
        self.utxo_cache = []
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        """Exercise the upload target with three peers (old blocks, new
        blocks, counter reset), then retest with -whitelist enabled."""
        # Before we connect anything, we first set the time on the node
        # to be in the past, otherwise things break because the CNode
        # time counters can't be reset backward after initialization
        old_time = int(time.time() - 2*60*60*24*7)
        self.nodes[0].setmocktime(old_time)
        # Generate some old blocks
        self.nodes[0].generate(130)
        # p2p_conns[0] will only request old blocks
        # p2p_conns[1] will only request new blocks
        # p2p_conns[2] will test resetting the counters
        p2p_conns = []
        for _ in range(3):
            p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))
        # Now mine a big block
        mine_large_block(self.nodes[0], self.utxo_cache)
        # Store the hash; we'll request this later
        big_old_block = self.nodes[0].getbestblockhash()
        old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
        big_old_block = int(big_old_block, 16)
        # Advance to two days ago
        self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
        # Mine one more block, so that the prior block looks old
        mine_large_block(self.nodes[0], self.utxo_cache)
        # We'll be requesting this new block too
        big_new_block = self.nodes[0].getbestblockhash()
        big_new_block = int(big_new_block, 16)
        # p2p_conns[0] will test what happens if we just keep requesting the
        # the same big old block too many times (expect: disconnect)
        getdata_request = msg_getdata()
        getdata_request.inv.append(CInv(2, big_old_block))
        max_bytes_per_day = 800*1024*1024
        daily_buffer = 144 * 4000000
        max_bytes_available = max_bytes_per_day - daily_buffer
        success_count = max_bytes_available // old_block_size
        # 576MB will be reserved for relaying new blocks, so expect this to
        # succeed for ~235 tries.
        for i in range(success_count):
            p2p_conns[0].send_message(getdata_request)
            p2p_conns[0].sync_with_ping()
            assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)
        assert_equal(len(self.nodes[0].getpeerinfo()), 3)
        # At most a couple more tries should succeed (depending on how long
        # the test has been running so far).
        for i in range(3):
            p2p_conns[0].send_message(getdata_request)
        p2p_conns[0].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 2)
        self.log.info("Peer 0 disconnected after downloading old block too many times")
        # Requesting the current block on p2p_conns[1] should succeed indefinitely,
        # even when over the max upload target.
        # We'll try 800 times
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(800):
            p2p_conns[1].send_message(getdata_request)
            p2p_conns[1].sync_with_ping()
            assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)
        self.log.info("Peer 1 able to repeatedly download new block")
        # But if p2p_conns[1] tries for an old block, it gets disconnected too.
        getdata_request.inv = [CInv(2, big_old_block)]
        p2p_conns[1].send_message(getdata_request)
        p2p_conns[1].wait_for_disconnect()
        assert_equal(len(self.nodes[0].getpeerinfo()), 1)
        self.log.info("Peer 1 disconnected after trying to download old block")
        self.log.info("Advancing system time on node to clear counters...")
        # If we advance the time by 24 hours, then the counters should reset,
        # and p2p_conns[2] should be able to retrieve the old block.
        self.nodes[0].setmocktime(int(time.time()))
        p2p_conns[2].sync_with_ping()
        p2p_conns[2].send_message(getdata_request)
        p2p_conns[2].sync_with_ping()
        assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)
        self.log.info("Peer 2 able to download old block")
        self.nodes[0].disconnect_p2ps()
        #stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
        self.log.info("Restarting nodes with -whitelist=127.0.0.1")
        self.stop_node(0)
        self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1"])
        # Reconnect to self.nodes[0]
        self.nodes[0].add_p2p_connection(TestP2PConn())
        #retrieve 20 blocks which should be enough to break the 1MB limit
        getdata_request.inv = [CInv(2, big_new_block)]
        for i in range(20):
            self.nodes[0].p2p.send_message(getdata_request)
            self.nodes[0].p2p.sync_with_ping()
            assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1)
        getdata_request.inv = [CInv(2, big_old_block)]
        self.nodes[0].p2p.send_and_ping(getdata_request)
        assert_equal(len(self.nodes[0].getpeerinfo()), 1) #node is still connected because of the whitelist
        self.log.info("Peer still connected after trying to download old block (whitelisted)")
if __name__ == '__main__':
    MaxUploadTest().main()
| mit |
e-koch/pyspeckit | pyspeckit/spectrum/models/formaldehyde.py | 1 | 23649 | """
===========================
Formaldehyde cm-line fitter
===========================
This is a formaldehyde 1_11-1_10 / 2_12-2_11 fitter. It includes hyperfine
components of the formaldehyde lines and has both LTE and RADEX LVG based
models
Module API
^^^^^^^^^^
"""
from __future__ import print_function
import numpy as np
from ...mpfit import mpfit
from .. import units
from . import fitter,model,modelgrid
import matplotlib.cbook as mpcb
import copy
from . import hyperfine
from ...specwarnings import warn
from astropy.extern.six.moves import xrange
try: # for model grid reading
import astropy.io.fits as pyfits
except ImportError:
import pyfits
try:
import scipy.interpolate
import scipy.ndimage
scipyOK = True
except ImportError:
scipyOK=False
# Hyperfine component names used by the fitter.  (A previous assignment of
# the three plain transition names on the line above was dead code --
# immediately overwritten by this list -- and has been removed.)
line_names = ['oneone_f10', 'oneone_f01', 'oneone_f22', 'oneone_f21',
              'oneone_f12', 'oneone_f11', 'twotwo_f11', 'twotwo_f12',
              'twotwo_f21', 'twotwo_f32', 'twotwo_f33', 'twotwo_f22',
              'twotwo_f23']
# http://adsabs.harvard.edu/abs/1971ApJ...169..429T has the most accurate freqs
# http://adsabs.harvard.edu/abs/1972ApJ...174..463T [twotwo]
central_freq_dict = {
'oneone': 4.82965996e9,
'twotwo': 14.48847881e9,
'threethree': 28.97480e9,
}
line_strength_dict={
'oneone_f10': 4.,
'oneone_f01': 4.,
'oneone_f22': 15.,
'oneone_f21': 5.,
'oneone_f12': 5.,
'oneone_f11': 3.,
'twotwo_f11': 15.,
'twotwo_f12': 5.,
'twotwo_f21': 5.,
'twotwo_f32': 5.19,
'twotwo_f33': 41.48,
'twotwo_f22': 23.15,
'twotwo_f23': 5.19,
'threethree_f22':1,
'threethree_f44':1,
'threethree_f33':1,
}
relative_strength_total_degeneracy={
'oneone_f10': 36.,
'oneone_f01': 36.,
'oneone_f22': 36.,
'oneone_f21': 36.,
'oneone_f12': 36.,
'oneone_f11': 36.,
'twotwo_f11': 100.01,
'twotwo_f12': 100.01,
'twotwo_f21': 100.01,
'twotwo_f32': 100.01,
'twotwo_f33': 100.01,
'twotwo_f22': 100.01,
'twotwo_f23': 100.01,
'threethree_f22':3.0,
'threethree_f44':3.0,
'threethree_f33':3.0,
}
hf_freq_dict={
'oneone_f10':4.82965996e9 - 18.53e3,
'oneone_f01':4.82965996e9 - 1.34e3,
'oneone_f22':4.82965996e9 - 0.35e3,
'oneone_f21':4.82965996e9 + 4.05e3,
'oneone_f12':4.82965996e9 + 6.48e3,
'oneone_f11':4.82965996e9 + 11.08e3,
'twotwo_f11':14.48847881e9 - 19.97e3,
'twotwo_f12':14.48847881e9 - 7.03e3,
'twotwo_f21':14.48847881e9 - 2.20e3,
'twotwo_f32':14.48847881e9 + 0.12e3,
'twotwo_f33':14.48847881e9 + 0.89e3,
'twotwo_f22':14.48847881e9 + 10.74e3,
'twotwo_f23':14.48847881e9 + 11.51e3,
'threethree_f22':28.97478e9,
'threethree_f44':28.97480e9,
'threethree_f33':28.97481e9,
}
freq_dict = copy.copy(hf_freq_dict)
freq_dict.update(central_freq_dict)
aval_dict = {
'oneone': 10**-8.44801, #64*!pi**4/(3*h*c**3)*nu11**3*mu0**2*(1/2.)
'twotwo': 10**-7.49373, #64*!pi**4/(3*h*c**3)*nu22**3*mu0**2*(2/3.)
'threethree': 10**-6.89179, #64*!pi**4/(3*h*c**3)*nu33**3*mu0**2*(3/4.)
}
hf_aval_dict={
'oneone_f10':10**-8.92509,
'oneone_f01':10**-8.44797,
'oneone_f22':10**-8.57294,
'oneone_f21':10**-9.05004,
'oneone_f12':10**-8.82819,
'oneone_f11':10**-9.05009,
'twotwo_f11':10**-7.61876,
'twotwo_f12':10**-8.09586,
'twotwo_f21':10**-8.31771,
'twotwo_f32':10**-8.44804,
'twotwo_f33':10**-7.54494,
'twotwo_f22':10**-7.65221,
'twotwo_f23':10**-8.30191,
'threethree_f22':10**-6.94294,
'threethree_f44':10**-6.91981,
'threethree_f33':10**-6.96736,
}
ortho_dict = {
'oneone': False,
'twotwo': False,
'threethree': False,
}
n_ortho = np.arange(0,28,3) # 0..3..27
n_para = np.array([x for x in range(28) if x % 3 != 0])
# Velocity offset (km/s) of each hyperfine component from the line centroid;
# the sign convention is opposite to that of the frequency offset.  (An
# earlier voff_lines_dict keyed by transition name was dead code --
# immediately overwritten by this literal -- and has been removed.)
voff_lines_dict={ # opposite signs of freq offset
    'oneone_f10': + 18.53e3/freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
    'oneone_f01': + 1.34e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
    'oneone_f22': + 0.35e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
    'oneone_f21': - 4.05e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
    'oneone_f12': - 6.48e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
    'oneone_f11': - 11.08e3/freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
    'twotwo_f11': + 19.97e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
    'twotwo_f12': + 7.03e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
    'twotwo_f21': + 2.20e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
    'twotwo_f32': - 0.12e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
    'twotwo_f33': - 0.89e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
    'twotwo_f22': - 10.74e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
    'twotwo_f23': - 11.51e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
    # NOTE(review): the three entries below are raw rest frequencies in Hz,
    # not velocity offsets like every other entry -- looks like a
    # placeholder or bug; confirm before fitting the threethree lines.
    'threethree_f22':28.97478e9,
    'threethree_f44':28.97480e9,
    'threethree_f33':28.97481e9,
    }
# Build the hyperfine line model from the tables above and expose the
# standard fitter variants (plain, +height, +background).
formaldehyde_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict,
                                             freq_dict, line_strength_dict,
                                             relative_strength_total_degeneracy)
formaldehyde_vtau_fitter = formaldehyde_vtau.fitter
formaldehyde_vtau_vheight_fitter = formaldehyde_vtau.vheight_fitter
formaldehyde_vtau_tbg_fitter = formaldehyde_vtau.background_fitter
def formaldehyde_radex(xarr, density=4, column=13, xoff_v=0.0, width=1.0,
                       grid_vwidth=1.0, grid_vwidth_scale=False, texgrid=None,
                       taugrid=None, hdr=None, path_to_texgrid='',
                       path_to_taugrid='', temperature_gridnumber=3,
                       debug=False, verbose=False, **kwargs):
    """
    Use a grid of RADEX-computed models to make a model line spectrum

    The RADEX models have to be available somewhere.
    OR they can be passed as arrays.  If as arrays, the form should be:
    texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))

    xarr must be a SpectroscopicAxis instance
    xoff_v, width are both in km/s

    grid_vwidth is the velocity assumed when computing the grid in km/s
    this is important because tau = modeltau / width (see, e.g.,
    Draine 2011 textbook pgs 219-230)
    grid_vwidth_scale is True or False: False for LVG, True for Sphere
    """
    # Load the tau/tex grids from FITS files on disk, or accept them as
    # (minfreq, maxfreq, grid) tuples passed in directly.
    if texgrid is None and taugrid is None:
        if path_to_texgrid == '' or path_to_taugrid=='':
            raise IOError("Must specify model grids to use.")
        else:
            taugrid = [pyfits.getdata(path_to_taugrid)]
            texgrid = [pyfits.getdata(path_to_texgrid)]
            hdr = pyfits.getheader(path_to_taugrid)
            yinds,xinds = np.indices(taugrid[0].shape[1:])
            # Reconstruct linear WCS axes from the FITS header keywords.
            densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
            columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
            minfreq = (4.8,)
            maxfreq = (5.0,)
    elif len(taugrid)==len(texgrid) and hdr is not None:
        minfreq,maxfreq,texgrid = zip(*texgrid)
        minfreq,maxfreq,taugrid = zip(*taugrid)
        yinds,xinds = np.indices(taugrid[0].shape[1:])
        densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
        columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
    else:
        # NOTE(review): a bare Exception with no message makes this failure
        # mode hard to diagnose.
        raise Exception

    # Convert X-units to frequency in GHz
    xarr = xarr.as_unit('Hz', quiet=True)

    # NOTE(review): tau_nu_cumul is assigned but never used in this function.
    tau_nu_cumul = np.zeros(len(xarr))

    # Fractional (index-space) grid coordinates of the requested point.
    # NOTE(review): np.interp clips out-of-range inputs rather than returning
    # NaN, so the isnan guard below may never fire -- confirm intent.
    gridval1 = np.interp(density, densityarr[0,:], xinds[0,:])
    gridval2 = np.interp(column, columnarr[:,0], yinds[:,0])
    if np.isnan(gridval1) or np.isnan(gridval2):
        raise ValueError("Invalid column/density")

    if scipyOK:
        # Bilinear interpolation within the 2x2 cell around the point, on a
        # single fixed-temperature plane of the grid.
        # NOTE(review): indexing with a *list* of slices (tg[slices]) and
        # float-valued slice bounds are rejected by modern numpy; this code
        # targets the older numpy API (tuple(slices) / int floors needed now).
        slices = [temperature_gridnumber] + [slice(np.floor(gv),np.floor(gv)+2) for gv in (gridval2,gridval1)]
        tau = [scipy.ndimage.map_coordinates(tg[slices],np.array([[gridval2%1],[gridval1%1]]),order=1) for tg in taugrid]
        tex = [scipy.ndimage.map_coordinates(tg[slices],np.array([[gridval2%1],[gridval1%1]]),order=1) for tg in texgrid]
    else:
        raise ImportError("Couldn't import scipy, therefore cannot interpolate")
    #tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
    #tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])

    if verbose:
        # NOTE(review): tau and tex are *lists* here, so "%20.12g" formatting
        # will raise TypeError when verbose=True -- verify before relying on it.
        print("density %20.12g column %20.12g: tau %20.12g tex %20.12g" % (density, column, tau, tex))

    if debug:
        import pdb; pdb.set_trace()

    # Sum the per-line hyperfine models, each masked to its frequency window.
    # NOTE(review): xrange is Python 2 only.
    spec = np.sum([(formaldehyde_vtau(xarr,Tex=float(tex[ii]),tau=float(tau[ii]),xoff_v=xoff_v,width=width, **kwargs)
            * (xarr.as_unit('GHz')>minfreq[ii]) * (xarr.as_unit('GHz')<maxfreq[ii])) for ii in xrange(len(tex))],
            axis=0)

    return spec
def formaldehyde_radex_orthopara_temp(xarr, density=4, column=13,
                                      orthopara=1.0, temperature=15.0,
                                      xoff_v=0.0, width=1.0,
                                      Tbackground1=2.73,
                                      Tbackground2=2.73,
                                      grid_vwidth=1.0,
                                      grid_vwidth_scale=False, texgrid=None,
                                      taugrid=None, hdr=None,
                                      path_to_texgrid='', path_to_taugrid='',
                                      debug=False, verbose=False,
                                      getpars=False, **kwargs):
    """
    Use a grid of RADEX-computed models to make a model line spectrum

    The RADEX models have to be available somewhere.
    OR they can be passed as arrays.  If as arrays, the form should be:
    texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))

    xarr must be a SpectroscopicAxis instance
    xoff_v, width are both in km/s

    grid_vwidth is the velocity assumed when computing the grid in km/s
    this is important because tau = modeltau / width (see, e.g.,
    Draine 2011 textbook pgs 219-230)
    grid_vwidth_scale is True or False: False for LVG, True for Sphere
    """
    # Load the 4-D grids (opr, temperature, column, density) from FITS files
    # on disk, or accept them as (minfreq, maxfreq, grid) tuples.
    if texgrid is None and taugrid is None:
        if path_to_texgrid == '' or path_to_taugrid=='':
            raise IOError("Must specify model grids to use.")
        else:
            taugrid = [pyfits.getdata(path_to_taugrid)]
            texgrid = [pyfits.getdata(path_to_texgrid)]
            hdr = pyfits.getheader(path_to_taugrid)
            minfreq = (4.8,)
            maxfreq = (5.0,)
    elif len(taugrid)==len(texgrid) and hdr is not None:
        minfreq,maxfreq,texgrid = zip(*texgrid)
        minfreq,maxfreq,taugrid = zip(*taugrid)
    else:
        # NOTE(review): a bare Exception with no message makes this failure
        # mode hard to diagnose.
        raise Exception

    # Linear WCS axes for each of the four grid dimensions, reconstructed
    # from the FITS header keywords.
    densityarr = (np.arange(taugrid[0].shape[3])+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
    columnarr  = (np.arange(taugrid[0].shape[2])+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
    temparr    = (np.arange(taugrid[0].shape[1])+hdr['CRPIX3']-1)*hdr['CDELT3']+hdr['CRVAL3'] # temperature
    oprarr     = (np.arange(taugrid[0].shape[0])+hdr['CRPIX4']-1)*hdr['CDELT4']+hdr['CRVAL4'] # log ortho/para ratio

    # Fractional (index-space) coordinates of the requested point on each axis.
    gridval1 = np.interp(density, densityarr, np.arange(len(densityarr)))
    gridval2 = np.interp(column, columnarr, np.arange(len(columnarr)))
    gridval3 = np.interp(temperature, temparr, np.arange(len(temparr)))
    gridval4 = np.interp(orthopara, oprarr, np.arange(len(oprarr)))
    # NOTE(review): only density/column are NaN-checked; temperature and
    # ortho/para ratio are not validated -- confirm this is intentional.
    if np.isnan(gridval1) or np.isnan(gridval2):
        raise ValueError("Invalid column/density")

    if scipyOK:
        # Quadrilinear interpolation within the 2^4 hypercube around the
        # requested point.
        # NOTE(review): tg[slices] indexes with a *list* of slices, which
        # modern numpy rejects; tuple(slices) would be required.
        slices = [slice(int(np.floor(gv)),int(np.floor(gv)+2))
                  for gv in (gridval4,gridval3,gridval2,gridval1)]
        tau = [scipy.ndimage.map_coordinates(tg[slices],
                                             np.array([[gridval4 % 1],
                                                       [gridval3 % 1],
                                                       [gridval2 % 1],
                                                       [gridval1 % 1]]),
                                             order=1, prefilter=False)
               for tg in taugrid]
        tex = [scipy.ndimage.map_coordinates(tg[slices],
                                             np.array([[gridval4 % 1],
                                                       [gridval3 % 1],
                                                       [gridval2 % 1],
                                                       [gridval1 % 1]]),
                                             order=1,prefilter=False)
               for tg in texgrid]
    else:
        raise ImportError("Couldn't import scipy, therefore cannot interpolate")
    #tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
    #tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])

    # there can be different background temperatures at each frequency
    tbg = [Tbackground1,Tbackground2]

    if verbose:
        print("density %20.12g column: %20.12g temperature: %20.12g opr: %20.12g xoff_v: %20.12g width: %20.12g" % (density, column, temperature, orthopara, xoff_v, width))
        print("tau: ",tau," tex: ",tex)
        print("minfreq: ",minfreq," maxfreq: ",maxfreq)
        print("tbg: ",tbg)

    if debug > 1:
        import pdb; pdb.set_trace()

    # Diagnostic shortcut: return the interpolated (tau, tex) without
    # synthesizing a spectrum.
    if getpars:
        return tau,tex

    # Sum the per-line hyperfine models, each masked to its frequency window
    # and given its own background temperature.
    # NOTE(review): xrange is Python 2 only.
    spec = np.sum([(formaldehyde_vtau(xarr.as_unit('Hz', quiet=True),
                                      Tex=float(tex[ii]), tau=float(tau[ii]),
                                      Tbackground=tbg[ii], xoff_v=xoff_v,
                                      width=width, **kwargs)
                    * (xarr.as_unit('GHz')>minfreq[ii])
                    * (xarr.as_unit('GHz')<maxfreq[ii]))
                   for ii in xrange(len(tex))],
                  axis=0)

    return spec
def formaldehyde(xarr, amp=1.0, xoff_v=0.0, width=1.0,
                 return_hyperfine_components=False, texscale=0.01, tau=0.01, **kwargs):
    """
    Generate a model Formaldehyde spectrum based on simple gaussian parameters.

    The "amplitude" is an essentially arbitrary parameter; it is defined to
    be the Tex obtained for tau=0.01, and the final spectrum is rescaled so
    that its peak equals ``amp``.
    """
    model = formaldehyde_vtau(xarr, Tex=amp * texscale, tau=tau,
                              xoff_v=xoff_v, width=width, return_tau=True,
                              return_hyperfine_components=return_hyperfine_components,
                              **kwargs)

    # Measure the peak of the summed hyperfine components when they are
    # returned separately; otherwise the peak of the single model spectrum.
    if return_hyperfine_components:
        peak = np.abs(model).squeeze().sum(axis=0).max()
    else:
        peak = np.abs(model).max()

    # Rescale so the model peaks at exactly `amp`; skip for an all-zero model.
    if peak > 0:
        model *= amp / peak

    return model
def formaldehyde_pyradex(xarr, density=4, column=13, temperature=20,
                         xoff_v=0.0, opr=1.0, width=1.0, tbackground=2.73,
                         grid_vwidth=1.0, debug=False, verbose=False,
                         **kwargs):
    """
    Use a grid of RADEX-computed models to make a model line spectrum

    The RADEX models have to be available somewhere.
    OR they can be passed as arrays.  If as arrays, the form should be:
    texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))

    xarr must be a SpectroscopicAxis instance
    xoff_v, width are both in km/s

    grid_vwidth is the velocity assumed when computing the grid in km/s
    this is important because tau = modeltau / width (see, e.g.,
    Draine 2011 textbook pgs 219-230)
    """
    # Unconditional raise: this function is an unfinished stub.  Everything
    # below is unreachable scaffolding for a future pyradex implementation.
    raise NotImplementedError("Not done yet.")

    # --- unreachable below this point ---
    import pyradex

    # Convert X-units to frequency in GHz
    xarr = xarr.as_unit('Hz', quiet=True)

    tb_nu_cumul = np.zeros(len(xarr))

    R = pyradex.Radex(molecule='oh2co-h2', column=column,
                      temperature=temperature, density=10**density,
                      tbackground=tbackground,)

    # NOTE(review): tex, tau, minfreq, and maxfreq are never defined in this
    # body and R is unused; this must be completed before removing the raise.
    spec = np.sum([(formaldehyde_vtau(xarr,Tex=float(tex[ii]),tau=float(tau[ii]),xoff_v=xoff_v,width=width, **kwargs)
            * (xarr.as_unit('GHz')>minfreq[ii]) * (xarr.as_unit('GHz')<maxfreq[ii])) for ii in xrange(len(tex))],
            axis=0)

    return spec
class formaldehyde_model(model.SpectralModel):
    """SpectralModel subclass that adds a (stubbed) line-integral method."""

    def formaldehyde_integral(self, modelpars, linename='oneone'):
        """
        Return the integral of the individual components (ignoring height)
        """
        # Unconditional raise: the code below is kept as an unreachable
        # reference implementation.
        raise NotImplementedError("Not implemented, but the integral is just amplitude * width * sqrt(2*pi)")

        # --- unreachable below this point ---
        # produced by directly computing the integral of gaussians and formaldehydeians as a function of
        # line width and then fitting that with a broken logarithmic power law
        # The errors are <0.5% for all widths
        formaldehyde_to_gaussian_ratio_coefs = {
            'lt0.1_oneone': np.array([ -5.784020,-40.058798,-111.172706,-154.256411,-106.593122,-28.933119]),
            'gt0.1_oneone': np.array([ 0.038548, -0.071162, -0.045710, 0.183828, -0.145429, 0.040039]),
            'lt0.1_twotwo': np.array([ 1.156561, 6.638570, 11.782065, -0.429536,-24.860297,-27.902274, -9.510288]),
            'gt0.1_twotwo': np.array([ -0.090646, 0.078204, 0.123181, -0.175590, 0.089506, -0.034687, 0.008676]),
        }

        integ = 0
        # modelpars is a flat [amp, center, width, amp, center, width, ...]
        # sequence; process one gaussian triple at a time.
        if len(modelpars) % 3 == 0:
            # NOTE(review): len(modelpars)/3 is float division on Python 3,
            # which np.reshape rejects; // would be needed there.
            for amp,cen,width in np.reshape(modelpars,[len(modelpars)/3,3]):
                gaussint = amp*width*np.sqrt(2.0*np.pi)
                cftype = "gt0.1_"+linename if width > 0.1 else "lt0.1_"+linename
                correction_factor = 10**np.polyval(formaldehyde_to_gaussian_ratio_coefs[cftype], np.log10(width) )
                # debug statement print("Two components of the integral: amp %g, width %g, gaussint %g, correction_factor %g " % (amp,width,gaussint,correction_factor))
                integ += gaussint*correction_factor

        return integ
# 3-parameter (amp, center, width) fitter built on the simple `formaldehyde`
# model; only width is limited (to be non-negative).
formaldehyde_fitter = formaldehyde_model(formaldehyde, 3,
        parnames=['amp','center','width'],
        parlimited=[(False,False),(False,False), (True,False)],
        parlimits=[(0,0), (0,0), (0,0)],
        shortvarnames=("A","v","\\sigma"), # specify the parameter names (TeX is OK)
        fitunit='Hz' )

# Same model wrapped with a constant-baseline "height" parameter (4 params).
formaldehyde_vheight_fitter = formaldehyde_model(fitter.vheightmodel(formaldehyde), 4,
        parnames=['height','amp','center','width'],
        parlimited=[(False,False),(False,False),(False,False), (True,False)],
        parlimits=[(0,0), (0,0), (0,0), (0,0)],
        shortvarnames=("H","A","v","\\sigma"), # specify the parameter names (TeX is OK)
        fitunit='Hz' )
# Create a tau-only fit:
def formaldehyde_radex_tau(xarr, density=4, column=13, xoff_v=0.0, width=1.0,
                           grid_vwidth=1.0, grid_vwidth_scale=False,
                           taugrid=None, hdr=None, path_to_taugrid='',
                           temperature_gridnumber=3, debug=False,
                           verbose=False, return_hyperfine_components=False,
                           **kwargs):
    """
    Use a grid of RADEX-computed models to make a model line spectrum

    * uses hyperfine components
    * assumes *tau* varies but *tex* does not!

    The RADEX models have to be available somewhere.
    OR they can be passed as arrays.  If as arrays, the form should be:
    texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))

    xarr must be a SpectroscopicAxis instance
    xoff_v, width are both in km/s

    grid_vwidth is the velocity assumed when computing the grid in km/s
    this is important because tau = modeltau / width (see, e.g.,
    Draine 2011 textbook pgs 219-230)
    grid_vwidth_scale is True or False: False for LVG, True for Sphere
    """
    if verbose:
        print("Parameters: dens=%f, column=%f, xoff=%f, width=%f" % (density, column, xoff_v, width))

    # Load the tau grid from a FITS file on disk, or accept it as
    # (minfreq, maxfreq, grid) tuples.  No tex grid here: tex is fixed.
    if taugrid is None:
        if path_to_taugrid=='':
            raise IOError("Must specify model grids to use.")
        else:
            taugrid = [pyfits.getdata(path_to_taugrid)]
            hdr = pyfits.getheader(path_to_taugrid)
            yinds,xinds = np.indices(taugrid[0].shape[1:])
            # Linear WCS axes reconstructed from the FITS header keywords.
            densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
            columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
            minfreq = (4.8,)
            maxfreq = (5.0,)
    elif hdr is not None:
        minfreq,maxfreq,taugrid = zip(*taugrid)
        yinds,xinds = np.indices(taugrid[0].shape[1:])
        densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
        columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
    else:
        # NOTE(review): a bare Exception with no message makes this failure
        # mode hard to diagnose.
        raise Exception

    # Convert X-units to frequency in GHz
    xarr = xarr.as_unit('Hz', quiet=True)

    # Fractional (index-space) grid coordinates of the requested point.
    # NOTE(review): np.interp clips out-of-range inputs rather than returning
    # NaN, so the isnan guard below may never fire -- confirm intent.
    gridval1 = np.interp(density, densityarr[0,:], xinds[0,:])
    gridval2 = np.interp(column, columnarr[:,0], yinds[:,0])
    if np.isnan(gridval1) or np.isnan(gridval2):
        raise ValueError("Invalid column/density")

    if scipyOK:
        # Bilinear interpolation at a fixed temperature plane.
        # NOTE(review): list-of-slices indexing (tg[slices]) and float slice
        # bounds are rejected by modern numpy (tuple/int needed).
        slices = [temperature_gridnumber] + [slice(np.floor(gv),np.floor(gv)+2) for gv in (gridval2,gridval1)]
        tau = [scipy.ndimage.map_coordinates(tg[slices],np.array([[gridval2%1],[gridval1%1]]),order=1) for tg in taugrid]
    else:
        raise ImportError("Couldn't import scipy, therefore cannot interpolate")

    # let the hyperfine module determine the hyperfine components, and pass all of them here
    # NOTE(review): xrange is Python 2 only.
    spec_components = [(formaldehyde_vtau(xarr.as_unit('Hz', quiet=True),
                                          tau=float(tau[ii]), xoff_v=xoff_v, width=width,
                                          return_tau=True, return_hyperfine_components=True, **kwargs) *
                        (xarr.as_unit('GHz')>minfreq[ii]) *
                        (xarr.as_unit('GHz')<maxfreq[ii]))
                       for ii in xrange(len(tau))]

    # get an array of [n_lines, n_hyperfine, len(xarr)]
    if return_hyperfine_components:
        # Sum over lines, keeping the hyperfine axis.
        return np.array(spec_components).sum(axis=0)
    else:
        # Sum over both lines and hyperfine components.
        return np.sum(spec_components, axis=0).sum(axis=0)
# Optional pymodelfit wrappers: defined only when pymodelfit is installed.
try:
    import pymodelfit

    class pmfFormaldehydeModel(pymodelfit.FunctionModel1DAuto):
        """pymodelfit wrapper around the amplitude-based `formaldehyde` model."""
        def f(self, x, amp0=1.0, xoff_v0=0.0,width0=1.0):
            return formaldehyde(x,
                    amp=amp0,
                    xoff_v=xoff_v0,width=width0)

    class pmfFormaldehydeModelVtau(pymodelfit.FunctionModel1DAuto):
        """pymodelfit wrapper around the (Tex, tau) `formaldehyde_vtau` model."""
        def f(self, x, Tex0=1.0, tau0=0.01, xoff_v0=0.0, width0=1.0):
            return formaldehyde_vtau(x,
                    Tex=Tex0, tau=tau0,
                    xoff_v=xoff_v0,width=width0)
except ImportError:
    pass
| mit |
bullocke/ROI_Plugin | help/source/conf.py | 2 | 7088 | # -*- coding: utf-8 -*-
#
# ROITool documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 12 17:11:03 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ROITool'
copyright = u'2013, Eric Bullock, Chris Holden'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TemplateClassdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ROITool.tex', u'ROITool Documentation',
u'Eric Bullock, Chris Holden', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'TemplateClass', u'ROITool Documentation',
[u'Eric Bullock, Chris Holden'], 1)
]
| gpl-2.0 |
takeshineshiro/nova | nova/api/openstack/urlmap.py | 39 | 10464 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo_log import log as logging
import paste.urlmap
import six
if six.PY3:
from urllib import request as urllib2
else:
import urllib2
from nova.api.openstack import wsgi
LOG = logging.getLogger(__name__)
# Matches an RFC 2616 quoted-string: "..." allowing backslash-escaped chars.
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'

# Matches one "; key[=value]" piece of a parameterized header, where both
# the key and the value may be a bare token or a quoted-string.
_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*'
                                     r'(?:=\s*([^;]+|%s))?\s*' %
                                     (_quoted_string_re, _quoted_string_re))
def unquote_header_value(value):
    """Unquotes a header value.

    This does not use the real unquoting but what browsers are actually
    using for quoting: a single layer of surrounding double quotes is
    stripped, with no backslash unescaping.

    :param value: the header value to unquote.
    """
    # Empty/None values pass through untouched.
    if not value:
        return value
    # Real RFC unquoting (backslash handling) is deliberately skipped so
    # that IE-style filenames like "C:\foo\bar.txt" survive intact.
    if value.startswith('"') and value.endswith('"'):
        return value[1:-1]
    return value
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Quotes are removed automatically after parsing.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    :param value: a string with a list header.
    :return: :class:`list`
    """
    # parse_http_list splits on commas while respecting quoted strings;
    # quote-wrapped elements are then stripped and browser-style unquoted.
    return [unquote_header_value(item[1:-1])
            if item[:1] == item[-1:] == '"' else item
            for item in urllib2.parse_http_list(value)]
def parse_options_header(value):
    """Parse a ``Content-Type`` like header into a tuple with the content
    type and the options:

    >>> parse_options_header('Content-Type: text/html; mimetype=text/html')
    ('Content-Type:', {'mimetype': 'text/html'})

    :param value: the header to parse.
    :return: (str, options)
    """
    if not value:
        return '', {}

    def _pieces(string):
        # Each regex match is one "; key[=value]" segment; both halves are
        # browser-style unquoted before being yielded.
        for match in _option_header_piece_re.finditer(string):
            key, val = match.groups()
            yield (unquote_header_value(key),
                   unquote_header_value(val) if val is not None else None)

    # Prepend ';' so the leading content type matches the same
    # "; key[=value]" pattern as the options that follow it.
    parts = _pieces(';' + value)
    name = next(parts)[0]
    return name, dict(parts)
class Accept(object):
    """Parsed HTTP Accept header with best-match content-type selection."""

    def __init__(self, value):
        # One (content_type_or_mask, params) tuple per comma-separated
        # entry in the Accept header value.
        self._content_types = [parse_options_header(v) for v in
                               parse_list_header(value)]

    def best_match(self, supported_content_types):
        """Return (best_content_type, params) for the supported types, or
        (None, {}) when nothing in the header matches."""
        # FIXME: Should we have a more sophisticated matching algorithm that
        # takes into account the version as well?
        best_quality = -1
        best_content_type = None
        best_params = {}
        best_match = '*/*'
        for content_type in supported_content_types:
            for content_mask, params in self._content_types:
                try:
                    quality = float(params.get('q', 1))
                except ValueError:
                    # Malformed q-value: skip this header entry.
                    continue

                if quality < best_quality:
                    continue
                elif best_quality == quality:
                    # Tie on quality: keep the current winner unless the new
                    # mask is strictly more specific (fewer wildcards).
                    if best_match.count('*') <= content_mask.count('*'):
                        continue

                if self._match_mask(content_mask, content_type):
                    best_quality = quality
                    best_content_type = content_type
                    best_params = params
                    best_match = content_mask

        return best_content_type, best_params

    def _match_mask(self, mask, content_type):
        """True when `content_type` matches `mask`: exact, '*/*', or 'major/*'."""
        if '*' not in mask:
            return content_type == mask
        if mask == '*/*':
            return True
        # 'major/*' form: compare only the major type.
        mask_major = mask[:-2]
        content_type_major = content_type.split('/', 1)[0]
        return content_type_major == mask_major
def urlmap_factory(loader, global_conf, **local_conf):
    """Paste app factory: build a URLMap from `path = app_name` conf entries.

    An optional 'not_found_app' (taken from local conf first, then global
    conf) handles requests that match no mounted path.
    """
    if 'not_found_app' in local_conf:
        not_found_app = local_conf.pop('not_found_app')
    else:
        not_found_app = global_conf.get('not_found_app')

    if not_found_app:
        not_found_app = loader.get_app(not_found_app, global_conf=global_conf)

    urlmap = URLMap(not_found_app=not_found_app)
    for path, app_name in local_conf.items():
        # parse_path_expression turns "domain example.com /v1" style keys
        # into the (domain, url) form paste's URLMap expects.
        path = paste.urlmap.parse_path_expression(path)
        app = loader.get_app(app_name, global_conf=global_conf)
        urlmap[path] = app
    return urlmap
class URLMap(paste.urlmap.URLMap):
    """URLMap that also negotiates the response MIME type and API version.

    Routing precedence (see __call__): URL path (suffix for MIME type,
    prefix for version), then the Content-Type header, then the Accept
    header, finally falling back to the default mount.
    """

    def _match(self, host, port, path_info):
        """Find longest match for a given URL path."""
        for (domain, app_url), app in self.applications:
            # Skip apps bound to a different domain (with or without port).
            if domain and domain != host and domain != host + ':' + port:
                continue
            if (path_info == app_url
                or path_info.startswith(app_url + '/')):
                return app, app_url

        return None, None

    def _set_script_name(self, app, app_url):
        """Wrap `app` so SCRIPT_NAME includes the mount point `app_url`."""
        def wrap(environ, start_response):
            environ['SCRIPT_NAME'] += app_url
            return app(environ, start_response)

        return wrap

    def _munge_path(self, app, path_info, app_url):
        """Wrap `app`, moving the `app_url` prefix from PATH_INFO to SCRIPT_NAME."""
        def wrap(environ, start_response):
            environ['SCRIPT_NAME'] += app_url
            environ['PATH_INFO'] = path_info[len(app_url):]
            return app(environ, start_response)

        return wrap

    def _path_strategy(self, host, port, path_info):
        """Check path suffix for MIME type and path prefix for API version."""
        mime_type = app = app_url = None

        # A trailing ".json"/".xml"-style suffix selects the MIME type,
        # but only if it maps to a supported content type.
        parts = path_info.rsplit('.', 1)
        if len(parts) > 1:
            possible_type = 'application/' + parts[1]
            if possible_type in wsgi.get_supported_content_types():
                mime_type = possible_type

        parts = path_info.split('/')
        if len(parts) > 1:
            possible_app, possible_app_url = self._match(host, port, path_info)
            # Don't use prefix if it ends up matching default
            if possible_app and possible_app_url:
                app_url = possible_app_url
                app = self._munge_path(possible_app, path_info, app_url)

        return mime_type, app, app_url

    def _content_type_strategy(self, host, port, environ):
        """Check Content-Type header for API version."""
        app = None
        # e.g. "application/json;version=1.1" -> mount point "/v1.1"
        params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1]
        if 'version' in params:
            app, app_url = self._match(host, port, '/v' + params['version'])
            if app:
                app = self._set_script_name(app, app_url)

        return app

    def _accept_strategy(self, host, port, environ, supported_content_types):
        """Check Accept header for best matching MIME type and API version."""
        accept = Accept(environ.get('HTTP_ACCEPT', ''))

        app = None

        # Find the best match in the Accept header
        mime_type, params = accept.best_match(supported_content_types)
        if 'version' in params:
            app, app_url = self._match(host, port, '/v' + params['version'])
            if app:
                app = self._set_script_name(app, app_url)

        return mime_type, app

    def __call__(self, environ, start_response):
        # Normalize host/port: HTTP_HOST may or may not carry an explicit
        # port; default it from the URL scheme when absent.
        host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower()
        if ':' in host:
            host, port = host.split(':', 1)
        else:
            if environ['wsgi.url_scheme'] == 'http':
                port = '80'
            else:
                port = '443'

        path_info = environ['PATH_INFO']
        path_info = self.normalize_url(path_info, False)[1]

        # The MIME type for the response is determined in one of two ways:
        # 1) URL path suffix (eg /servers/detail.json)
        # 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2)

        # The API version is determined in one of three ways:
        # 1) URL path prefix (eg /v1.1/tenant/servers/detail)
        # 2) Content-Type header (eg application/json;version=1.1)
        # 3) Accept header (eg application/json;q=0.8;version=1.1)

        supported_content_types = list(wsgi.get_supported_content_types())

        mime_type, app, app_url = self._path_strategy(host, port, path_info)

        # Accept application/atom+xml for the index query of each API
        # version mount point as well as the root index
        if (app_url and app_url + '/' == path_info) or path_info == '/':
            supported_content_types.append('application/atom+xml')

        if not app:
            app = self._content_type_strategy(host, port, environ)

        if not mime_type or not app:
            possible_mime_type, possible_app = self._accept_strategy(
                host, port, environ, supported_content_types)
            if possible_mime_type and not mime_type:
                mime_type = possible_mime_type
            if possible_app and not app:
                app = possible_app

        if not mime_type:
            mime_type = 'application/json'

        if not app:
            # Didn't match a particular version, probably matches default
            app, app_url = self._match(host, port, path_info)
            if app:
                app = self._munge_path(app, path_info, app_url)

        if app:
            environ['nova.best_content_type'] = mime_type
            return app(environ, start_response)

        LOG.debug('Could not find application for %s', environ['PATH_INFO'])
        environ['paste.urlmap_object'] = self
        return self.not_found_application(environ, start_response)
| apache-2.0 |
sdague/home-assistant | tests/components/datadog/test_init.py | 5 | 5392 | """The tests for the Datadog component."""
from unittest import mock
import homeassistant.components.datadog as datadog
from homeassistant.const import (
EVENT_LOGBOOK_ENTRY,
EVENT_STATE_CHANGED,
STATE_OFF,
STATE_ON,
)
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from tests.async_mock import MagicMock, patch
from tests.common import assert_setup_component
async def test_invalid_config(hass):
    """Test invalid configuration."""
    # An unknown key ("host1") must cause setup to report zero configured
    # components and return False.
    with assert_setup_component(0):
        assert not await async_setup_component(
            hass, datadog.DOMAIN, {datadog.DOMAIN: {"host1": "host1"}}
        )
async def test_datadog_setup_full(hass):
    """Test setup with all data."""
    config = {datadog.DOMAIN: {"host": "host", "port": 123, "rate": 1, "prefix": "foo"}}

    hass.bus.listen = MagicMock()

    # Patch the datadog client so no real statsd/UDP traffic is generated.
    with patch("homeassistant.components.datadog.initialize") as mock_init, patch(
        "homeassistant.components.datadog.statsd"
    ):
        assert await async_setup_component(hass, datadog.DOMAIN, config)

        # The client must be initialized exactly once with the configured
        # host/port, and both event listeners registered in order.
        assert mock_init.call_count == 1
        assert mock_init.call_args == mock.call(statsd_host="host", statsd_port=123)

        assert hass.bus.listen.called
        assert EVENT_LOGBOOK_ENTRY == hass.bus.listen.call_args_list[0][0][0]
        assert EVENT_STATE_CHANGED == hass.bus.listen.call_args_list[1][0][0]
async def test_datadog_setup_defaults(hass):
    """Test setup with defaults."""
    hass.bus.listen = mock.MagicMock()

    # Patch the datadog client so no real statsd/UDP traffic is generated.
    with patch("homeassistant.components.datadog.initialize") as mock_init, patch(
        "homeassistant.components.datadog.statsd"
    ):
        assert await async_setup_component(
            hass,
            datadog.DOMAIN,
            {
                datadog.DOMAIN: {
                    "host": "host",
                    "port": datadog.DEFAULT_PORT,
                    "prefix": datadog.DEFAULT_PREFIX,
                }
            },
        )

        # Default port (8125) must be passed through to the client.
        assert mock_init.call_count == 1
        assert mock_init.call_args == mock.call(statsd_host="host", statsd_port=8125)
        assert hass.bus.listen.called
async def test_logbook_entry(hass):
    """Test event listener."""
    hass.bus.listen = mock.MagicMock()

    with patch("homeassistant.components.datadog.initialize"), patch(
        "homeassistant.components.datadog.statsd"
    ) as mock_statsd:
        assert await async_setup_component(
            hass,
            datadog.DOMAIN,
            {datadog.DOMAIN: {"host": "host", "rate": datadog.DEFAULT_RATE}},
        )

        assert hass.bus.listen.called
        # First registered listener is the logbook-entry handler; invoke it
        # directly with a fake event instead of firing on the bus.
        handler_method = hass.bus.listen.call_args_list[0][0][1]

        event = {
            "domain": "automation",
            "entity_id": "sensor.foo.bar",
            "message": "foo bar biz",
            "name": "triggered something",
        }
        handler_method(mock.MagicMock(data=event))

        # One datadog event, formatted with the %%%-delimited markdown body
        # and tagged with entity and domain.
        assert mock_statsd.event.call_count == 1
        assert mock_statsd.event.call_args == mock.call(
            title="Home Assistant",
            text="%%% \n **{}** {} \n %%%".format(event["name"], event["message"]),
            tags=["entity:sensor.foo.bar", "domain:automation"],
        )

        mock_statsd.event.reset_mock()
async def test_state_changed(hass):
    """State-changed listener forwards numeric states and attributes to statsd gauges."""
    hass.bus.listen = mock.MagicMock()
    with patch("homeassistant.components.datadog.initialize"), patch(
        "homeassistant.components.datadog.statsd"
    ) as mock_statsd:
        assert await async_setup_component(
            hass,
            datadog.DOMAIN,
            {
                datadog.DOMAIN: {
                    "host": "host",
                    "prefix": "ha",
                    "rate": datadog.DEFAULT_RATE,
                }
            },
        )
        assert hass.bus.listen.called
        # The second registered listener handles state-changed events.
        handler_method = hass.bus.listen.call_args_list[1][0][1]
        # States that must be coerced to a numeric gauge value.
        valid = {"1": 1, "1.0": 1.0, STATE_ON: 1, STATE_OFF: 0}
        attributes = {"elevation": 3.2, "temperature": 5.0, "up": True, "down": False}
        for in_, out in valid.items():
            state = mock.MagicMock(
                domain="sensor",
                entity_id="sensor.foo.bar",
                state=in_,
                attributes=attributes,
            )
            handler_method(mock.MagicMock(data={"new_state": state}))
            # One gauge for the state itself plus one per attribute (4).
            assert mock_statsd.gauge.call_count == 5
            for attribute, value in attributes.items():
                # Booleans are reported as 0/1 integers.
                value = int(value) if isinstance(value, bool) else value
                mock_statsd.gauge.assert_has_calls(
                    [
                        mock.call(
                            f"ha.sensor.{attribute}",
                            value,
                            sample_rate=1,
                            tags=[f"entity:{state.entity_id}"],
                        )
                    ]
                )
            assert mock_statsd.gauge.call_args == mock.call(
                "ha.sensor",
                out,
                sample_rate=1,
                tags=[f"entity:{state.entity_id}"],
            )
            mock_statsd.gauge.reset_mock()
        # Non-numeric states must be ignored entirely.
        for invalid in ("foo", "", object):
            handler_method(
                mock.MagicMock(data={"new_state": ha.State("domain.test", invalid, {})})
            )
        assert not mock_statsd.gauge.called
| apache-2.0 |
schmidsi/django-registration | registration/forms.py | 1 | 3698 | """
Forms and validation code for user registration.
"""
from django.contrib.auth.models import User
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.forms import UserCreationForm
# I put this on all required fields, because it's easier to pick up
# on them with CSS or JavaScript if they have a class of "required"
# in the HTML. Your mileage may vary. If/when Django ticket #3515
# lands in trunk, this will no longer be necessary.
attrs_dict = {'class': 'required'}
class RegistrationForm(UserCreationForm):
    """
    Form for registering a new user account.

    Ensures the requested username is available and asks for the
    password twice to catch typos.  Subclasses may add any extra
    validation they need, but should not define ``save()``: persisting
    the collected data is the job of the active registration backend.
    """
    username = forms.RegexField(
        regex=r'^[\w.@+-]+$',
        max_length=30,
        label=_("Username"),
        help_text=_("Required. 30 characters or fewer. Letters (without umlaute), digits and @/./+/-/_ only."),
        error_messages={'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})
    email = forms.EmailField(
        label=_("Email address"),
        widget=forms.TextInput(attrs=dict(attrs_dict, maxlength=75)))

    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']
class RegistrationFormTermsOfService(RegistrationForm):
    """
    ``RegistrationForm`` variant that additionally requires the user to
    tick a Terms-of-Service checkbox before registering.
    """
    tos = forms.BooleanField(
        label=_(u'I have read and agree to the Terms of Service'),
        widget=forms.CheckboxInput(attrs=attrs_dict),
        error_messages={'required': _("You must agree to the terms to register")})
class RegistrationFormUniqueEmail(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which enforces uniqueness of
    email addresses.
    """
    def clean_email(self):
        """
        Validate that the supplied email address is unique for the
        site (compared case-insensitively via ``iexact``).

        Raises ``forms.ValidationError`` when another account already
        uses the address; otherwise returns the cleaned address.
        """
        # .exists() issues a cheap EXISTS query instead of evaluating the
        # whole queryset just to test its truthiness.
        if User.objects.filter(email__iexact=self.cleaned_data['email']).exists():
            raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
        return self.cleaned_data['email']
class RegistrationFormNoFreeEmail(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which disallows registration with
    email addresses from popular free webmail services; moderately
    useful for preventing automated spam registrations.

    To change the list of banned domains, subclass this form and
    override the attribute ``bad_domains``.
    """
    bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
                   'googlemail.com', 'hotmail.com', 'hushmail.com',
                   'msn.com', 'mail.ru', 'mailinator.com', 'live.com',
                   'yahoo.com']

    def clean_email(self):
        """
        Check the supplied email address against a list of known free
        webmail domains and reject it when the domain is banned.
        """
        # BUG FIX: domains are case-insensitive, so "user@GMAIL.com" must be
        # rejected just like "user@gmail.com"; lowercase before comparing
        # against the (all-lowercase) bad_domains list.
        email_domain = self.cleaned_data['email'].split('@')[1].lower()
        if email_domain in self.bad_domains:
            raise forms.ValidationError(_("Registration using free email addresses is prohibited. Please supply a different email address."))
        return self.cleaned_data['email']
| bsd-3-clause |
herzi/ccc | python/demo/fifteen_grid.py | 1 | 2765 |
try:
import cccanvas
except:
import sys
sys.path.insert(0,"./../.libs/")
import cccanvas
import fifteen_item
import random
import gtk
import pango
TILE_SIZE = 50.0
class FifteenGrid(cccanvas.Rectangle):
    """4x4 fifteen-puzzle board: 15 numbered tiles plus one empty slot (None)."""

    def __init__(self, font_desc):
        global TILE_SIZE  # BUG FIX: was "global TITLE_SIZE", a typo'd, unused name
        cccanvas.Rectangle.__init__(self)
        self.elements = []
        self.font_description = font_desc
        color = cccanvas.color_new_rgb(0, 0, 0)
        brush = cccanvas.BrushColor(color)
        self.set_brush_border(brush)
        self.set_position(0.0, 0.0, 4 * TILE_SIZE, 4 * TILE_SIZE)
        self.set_grid_aligned(True)
        for i in range(15):
            self.elements.append(fifteen_item.FifteenItem(i + 1))
            self.elements[i].set_grid_aligned(True)
            self.elements[i].set_brush_border(brush)
            self.append(self.elements[i])
            self.elements[i].move(i, TILE_SIZE)
            self.elements[i].connect("button-press-event", self.fg_element_clicked)
            self.elements[i].set_font(self.font_description)
        # Slot 15 starts out as the hole.
        self.elements.append(None)

    def fg_position(self, item):
        """Return the board index (0..15) of *item*, or False when absent."""
        for i in range(16):
            if self.elements[i] == item:
                return i
        return False

    def fg_element_clicked(self, item, view, event):
        """Slide the clicked tile into the adjacent hole, if one is adjacent."""
        can_move = False
        direction = None
        move_value = {gtk.DIR_UP: -4,
                      gtk.DIR_RIGHT: 1,
                      gtk.DIR_DOWN: 4,
                      gtk.DIR_LEFT: -1}
        i = self.fg_position(item)
        # BUG FIX: row index uses floor division (//). Under Python 3 the
        # original "i/4" yields a float, so "i/4 > 0" was wrongly true for
        # every tile outside column 0 of the top row.
        if (i % 4 and not self.elements[i - 1]):
            can_move = True
            direction = gtk.DIR_LEFT
        elif ((i % 4 < 3) and not self.elements[i + 1]):
            can_move = True
            direction = gtk.DIR_RIGHT
        elif ((i // 4 > 0) and not self.elements[i - 4]):
            can_move = True
            direction = gtk.DIR_UP
        elif ((i // 4 < 3) and not self.elements[i + 4]):
            can_move = True
            direction = gtk.DIR_DOWN
        if (can_move):
            new_i = i + move_value[direction]
            self.elements[new_i] = item
            self.elements[i] = None
            item.move(new_i, TILE_SIZE)
        # True when the click produced no move.
        return (not can_move)

    def scramble(self):
        """Shuffle all 16 slots (Fisher-Yates) and reposition the tiles."""
        global TILE_SIZE
        elements = {}
        for i in range(16, 0, -1):
            picked = random.randint(0, i - 1)
            elements[i - 1] = self.elements[picked]
            self.elements[picked] = self.elements[i - 1]
        for i in range(16):
            self.elements[i] = elements[i]
            if (elements[i]):
                elements[i].move(i, TILE_SIZE)
| lgpl-2.1 |
spasal/fall-detector | core/fall_detection.py | 1 | 2634 | import numpy as np
class FallDetector():
    """
    Rolling-window aggregator for fall-detection features.

    Each frame contributes a mean displacement vector, a PCA-magnitude
    delta and a pair of PCA angles.  The detector keeps the last
    ``var_length`` samples of each series and exposes the mean
    frame-to-frame differences (the delta-PCA mean is taken over raw
    values, matching the original behaviour).
    """

    def __init__(self):
        self._mean_vecs = [[], []]   # paired history: [x-series, y-series]
        self._delta_pcas = []        # scalar history
        self._angle_pcas = [[], []]  # paired history of the two PCA angles
        self.var_length = 30         # sliding-window length per history

    def calculate_values(self, mean_vec, delta_pca, vector_angles):
        """
        Record one frame of features and return the refreshed means.

        mean_vec      : [[x, y]] mean displacement vector of the frame
        delta_pca     : scalar PCA-magnitude change
        vector_angles : [angle1, angle2] PCA axis angles
        Returns (mean_direction_diff_vec, mean_delta_pca, mean_anlge_pcas).
        """
        self.__update_values(mean_vec, delta_pca, vector_angles)
        self.__calculate_mean_values()
        print("mean_vec: %s delta_pca: %s pcas: %s" % (self.mean_direction_diff_vec, self.mean_delta_pca, self.mean_anlge_pcas))
        return self.mean_direction_diff_vec, self.mean_delta_pca, self.mean_anlge_pcas

    def __calculate_mean_values(self):
        def calculate_differences(seq):
            # Frame-to-frame differences of a series.
            return [j - i for i, j in zip(seq[:-1], seq[1:])]

        v1 = calculate_differences(self._mean_vecs[0])
        v2 = calculate_differences(self._mean_vecs[1])
        self.mean_direction_diff_vec = [np.mean(v1), np.mean(v2)]
        self.mean_delta_pca = np.mean(self._delta_pcas)
        a1 = calculate_differences(self._angle_pcas[0])
        a2 = calculate_differences(self._angle_pcas[1])
        self.mean_anlge_pcas = [np.mean(a1), np.mean(a2)]

    def __update_values(self, mean_vec, delta_pca, vector_angles):
        # Append the new sample, then trim each history to the window.
        self.__push_propval(self._mean_vecs, mean_vec[0], True)  # mean_vec is arr in arr
        self.__push_propval(self._delta_pcas, delta_pca)
        self.__push_propval(self._angle_pcas, vector_angles, True)
        self.__pop_propval(self._mean_vecs, True)
        self.__pop_propval(self._delta_pcas)
        self.__pop_propval(self._angle_pcas, True)

    def __pop_propval(self, prop, is_pair=False):
        """Drop the oldest sample once a history exceeds the window length."""
        # BUG FIX: for paired histories the outer list always has length 2,
        # so the original `len(prop) > var_length` test never fired and the
        # paired histories grew without bound; measure the inner series.
        length = len(prop[0]) if is_pair else len(prop)
        if length > self.var_length:
            if not is_pair:
                prop.pop(0)
            else:
                prop[0].pop(0)
                prop[1].pop(0)

    def __push_propval(self, prop, val, is_pair=False):
        if not is_pair:
            prop.append(val)
        else:
            prop[0].append(val[0])
            prop[1].append(val[1])
fall_detection = FallDetector()
| mit |
karthik-sethuraman/Snowmass-ONFOpenTransport | RI/flask_server/tapi_server/models/tapi_notification_createnotificationsubscriptionservice_input.py | 4 | 3924 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_notification_subscription_filter import TapiNotificationSubscriptionFilter # noqa: F401,E501
from tapi_server.models.tapi_notification_subscription_state import TapiNotificationSubscriptionState # noqa: F401,E501
from tapi_server import util
class TapiNotificationCreatenotificationsubscriptionserviceInput(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
    Do not edit the class manually.
    """
    def __init__(self, subscription_state=None, subscription_filter=None): # noqa: E501
        """TapiNotificationCreatenotificationsubscriptionserviceInput - a model defined in OpenAPI
        :param subscription_state: The subscription_state of this TapiNotificationCreatenotificationsubscriptionserviceInput. # noqa: E501
        :type subscription_state: TapiNotificationSubscriptionState
        :param subscription_filter: The subscription_filter of this TapiNotificationCreatenotificationsubscriptionserviceInput. # noqa: E501
        :type subscription_filter: TapiNotificationSubscriptionFilter
        """
        # Attribute name -> declared OpenAPI model type (used by the
        # generated (de)serialisation machinery in tapi_server.util).
        self.openapi_types = {
            'subscription_state': TapiNotificationSubscriptionState,
            'subscription_filter': TapiNotificationSubscriptionFilter
        }
        # Python attribute name -> JSON/YAML field name on the wire.
        self.attribute_map = {
            'subscription_state': 'subscription-state',
            'subscription_filter': 'subscription-filter'
        }
        self._subscription_state = subscription_state
        self._subscription_filter = subscription_filter
    @classmethod
    def from_dict(cls, dikt) -> 'TapiNotificationCreatenotificationsubscriptionserviceInput':
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The tapi.notification.createnotificationsubscriptionservice.Input of this TapiNotificationCreatenotificationsubscriptionserviceInput. # noqa: E501
        :rtype: TapiNotificationCreatenotificationsubscriptionserviceInput
        """
        return util.deserialize_model(dikt, cls)
    @property
    def subscription_state(self):
        """Gets the subscription_state of this TapiNotificationCreatenotificationsubscriptionserviceInput.
        :return: The subscription_state of this TapiNotificationCreatenotificationsubscriptionserviceInput.
        :rtype: TapiNotificationSubscriptionState
        """
        return self._subscription_state
    @subscription_state.setter
    def subscription_state(self, subscription_state):
        """Sets the subscription_state of this TapiNotificationCreatenotificationsubscriptionserviceInput.
        :param subscription_state: The subscription_state of this TapiNotificationCreatenotificationsubscriptionserviceInput.
        :type subscription_state: TapiNotificationSubscriptionState
        """
        self._subscription_state = subscription_state
    @property
    def subscription_filter(self):
        """Gets the subscription_filter of this TapiNotificationCreatenotificationsubscriptionserviceInput.
        :return: The subscription_filter of this TapiNotificationCreatenotificationsubscriptionserviceInput.
        :rtype: TapiNotificationSubscriptionFilter
        """
        return self._subscription_filter
    @subscription_filter.setter
    def subscription_filter(self, subscription_filter):
        """Sets the subscription_filter of this TapiNotificationCreatenotificationsubscriptionserviceInput.
        :param subscription_filter: The subscription_filter of this TapiNotificationCreatenotificationsubscriptionserviceInput.
        :type subscription_filter: TapiNotificationSubscriptionFilter
        """
        self._subscription_filter = subscription_filter
| apache-2.0 |
tdsymonds/relativity | relativity/special/special_relativity.py | 1 | 1128 | # -*- coding: utf-8 -*-
from __future__ import division
from relativity import constants
import math
class LorentzFactor(object):
    """Helpers around the Lorentz factor gamma = 1 / sqrt(1 - beta**2)."""

    @staticmethod
    def get_beta(velocity, is_percent):
        """Return beta = v/c; ``is_percent`` means *velocity* is already a fraction of c."""
        return velocity if is_percent else velocity / constants.SPEED_OF_LIGHT

    @staticmethod
    def lorentz_factor(velocity, is_percent):
        """Return gamma for the given velocity."""
        beta = LorentzFactor.get_beta(velocity, is_percent)
        return 1 / math.sqrt(1 - beta ** 2)


class TimeDilation(LorentzFactor):
    """Time-dilation formulas."""

    @staticmethod
    def get_proper_time(time, velocity, is_percent=True):
        """Scale *time* up by gamma."""
        gamma = TimeDilation.lorentz_factor(velocity, is_percent)
        return time * gamma

    @staticmethod
    def get_time_relative_ex_observer(time, velocity, is_percent=True):
        """
        Dilation relative to an external observer: scale *time* down by gamma.
        """
        gamma = TimeDilation.lorentz_factor(velocity, is_percent)
        return time / gamma


class LengthContradiction(LorentzFactor):
    """Length-contraction formula (class name kept for API compatibility)."""

    @staticmethod
    def get_proper_length(length, velocity, is_percent=True):
        """Scale *length* down by gamma."""
        return length / LengthContradiction.lorentz_factor(velocity, is_percent)
| mit |
ArianeFire/HaniCam | facerec-master/py/facerec/dataset.py | 1 | 1275 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Philipp Wagner. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
class NumericDataSet(object):
    """
    Labelled image collection with a stable string<->numeric label mapping.

    Images are grouped per string label; each distinct label receives a
    numeric identifier (0, 1, 2, ...) in first-seen order.
    """

    def __init__(self):
        self.data = {}                # label -> list of images
        self.str_to_num_mapping = {}  # label -> numeric id
        self.num_to_str_mapping = {}  # numeric id -> label

    def add(self, label, image):
        """Add *image* under *label*, registering the label on first use."""
        try:
            self.data[label].append(image)
        except KeyError:  # was a bare `except:`, which also hid real errors
            self.data[label] = [image]
        # BUG FIX: the original re-assigned an identifier on every add, so a
        # repeated label received a fresh (wrong) id and stale entries were
        # left behind in num_to_str_mapping.  Assign only for new labels.
        if label not in self.str_to_num_mapping:
            numerical_identifier = len(self.str_to_num_mapping)
            self.str_to_num_mapping[label] = numerical_identifier
            self.num_to_str_mapping[numerical_identifier] = label

    def get(self):
        """Return (X, y): all images and their numeric labels, grouped by label."""
        X = []
        y = []
        # .items() (not the Python-2-only .iteritems()) keeps this portable.
        for name, num in self.str_to_num_mapping.items():
            for image in self.data[name]:
                X.append(image)
                y.append(num)
        return X, y

    def resolve_by_str(self, label):
        """Numeric id for *label* (raises KeyError if unknown)."""
        return self.str_to_num_mapping[label]

    def resolve_by_num(self, numerical_identifier):
        """Label for a numeric id (raises KeyError if unknown)."""
        return self.num_to_str_mapping[numerical_identifier]

    def length(self):
        """Number of distinct labels."""
        return len(self.data)

    def __repr__(self):
        # BUG FIX: __repr__ used a Python-2 print statement and implicitly
        # returned None, so repr() raised TypeError; return the string.
        return "NumericDataSet"
| mit |
xjchensz/LSFS | LSFS/LSFS_FUN.py | 1 | 5171 | #!usr/bin/python
# -*- coding:utf-8 -*-
import pandas as pd
import numpy as np
import scipy as sp
import os
import random
import time
import sys
from LSFS_TEST import print_W
from EProjSimplex_new import *
def append_module_path():
    """Extend sys.path with the sibling helper-package directories."""
    import sys
    for extra in ("../gen_data", "../evaluate", "../read_data"):
        if extra not in sys.path:
            sys.path.append(extra)
append_module_path()
import gen_data
import evaluate
import read_data
def norm_2_1(a):
    """L2,1 norm of ``a``: the sum of the Euclidean norms of its rows."""
    return np.linalg.norm(a, ord=2, axis=1).sum()
def fun22_value(W, X, H, Q, Y):
    """
    Objective of Eq. (22):
        || H * X.T * W - H * Y ||_F^2  +  gama * trace(W.T * Q * W)
    """
    # BUG FIX: the original "10^-6" is bitwise XOR in Python (it equals -16),
    # which made the regulariser *reward* large weights; 1e-6 was intended.
    gama = 1e-6
    fit = np.linalg.norm(np.dot(np.dot(H, X.T), W) - np.dot(H, Y), ord="fro") ** 2
    reg = gama * np.trace(np.dot(np.dot(W.T, Q), W))
    return fit + reg
def fun8_value(X, Y, W, b):
    """
    Objective of Eq. (8):
        || X.T * W + 1 * b.T - Y ||_2^2  +  gama * || W ||_F^2
    X : d x n (columns are samples); the "1" is the n x 1 all-ones vector.
    """
    # BUG FIX: "10^-6" is bitwise XOR in Python (== -16), not 10**-6.
    gama = 1e-6
    n = X.shape[1]
    residual = np.dot(X.T, W) + np.dot(np.ones((n, 1)), b.T) - Y
    return np.linalg.norm(residual, ord=2) ** 2 + gama * (np.linalg.norm(W, ord="fro") ** 2)
def compute_W(X, Y, H, Q):
    """
    Closed-form weight update:
        W = (X * H * X.T + gama * Q)^-1 * X * H * Y
    with the regularisation weight gama fixed at 60.
    """
    gama = 60
    XH = np.dot(X, H)
    system = np.dot(XH, X.T) + gama * Q
    return np.dot(np.dot(np.linalg.inv(system), XH), Y)
def compute_H(n):
    """
    Centering matrix H = I - (1/n) * 1 * 1.T for n samples
    (I is n x n identity, 1 the n x 1 all-ones vector).
    """
    return np.eye(n) - np.full((n, n), 1 / n)
def compute_Q(W):
    """
    Re-weighting matrix for the L2,1 regulariser:
        Q_jj = ||W||_{2,1} / ||w^j||_2
    where w^j is the j-th row of W.
    """
    row_norms = np.linalg.norm(W, ord=2, axis=1)
    # ||W||_{2,1} is simply the sum of the row norms (helper inlined).
    return np.diag(row_norms.sum() / row_norms)
def get_W(X, Y):
    """
    Solve for the feature-weight matrix W by alternating optimisation.

    d = number of features, c = number of classes, n = number of samples.
    X : (d x n) data matrix -- note the algorithm uses X with features as rows.
    Y : (n x c) label matrix.
    """
    d, n = X.shape
    c = Y.shape[1]
    # Q starts as the identity matrix; it is re-estimated from W each round.
    Q = np.eye(d)
    # H never changes, so compute the centering matrix once.
    H = compute_H(n)
    W = compute_W(X, Y, H, Q)
    Q = compute_Q(W)
    pre_f = cur_f = fun22_value(W, X, H, Q, Y)
    NITER = 900
    epsilon = 10**-8
    # Alternate W- and Q-updates until the relative change of the
    # objective drops below epsilon (or NITER iterations are reached).
    for i in range(NITER):
        pre_f = cur_f
        W = compute_W(X, Y, H, Q)
        Q = compute_Q(W)
        cur_f = fun22_value(W, X, H, Q, Y)
        if abs((cur_f - pre_f) / cur_f) < epsilon:
            break
    return W
def compute_YU(X, W, b):
    """
    Predict soft labels for the unlabelled samples.

    X : (d x n) unlabelled data, columns are samples.
    Returns YU : (n x c); each row is the linear prediction projected
    onto the probability simplex via EProjSimplex_new.
    """
    c = W.shape[1]
    YU = np.zeros((X.shape[1], c))
    # For each sample xi (taken as a 1 x d row vector):
    for i in range(X.shape[1]):
        """
        min ( ||(xi.T) * W + b.T - yi.T||的F范数 ^ 2 )
        s.t. yi>=0, 1*yi=1
        """
        # Project the raw prediction onto the simplex (yi >= 0, sum(yi) = 1).
        ad = np.dot(X[:,i:i+1].T, W) + b.T
        ad_new, ft = EProjSimplex_new(ad)
        # ad_new is a numpy matrix; .A converts it back to an ndarray row.
        YU[i:i+1,:] = ad_new.A
    return YU
def compute_b(X, Y, W):
    """
    Bias vector b = (1/n) * (Y.T * 1 - W.T * X * 1).

    X : d x n, Y : n x c, W : d x c; the "1" is the n x 1 all-ones vector.
    """
    n = X.shape[1]
    ones = np.ones((n, 1))
    return (np.dot(Y.T, ones) - np.dot(np.dot(W.T, X), ones)) / n
def get_new_X_Y_YU_W_f(X, Y, XL, YL, XU):
    """
    One outer LSFS iteration: refit (W, b) on the current (X, Y),
    relabel the unlabelled pool, and rebuild the stacked matrices.

    X : d x n, Y : n x c  -- current (labelled + predicted) data/labels
    XL : nl x d, YL : nl x c -- labelled data/labels (samples as rows)
    XU : nu x d -- unlabelled data (samples as rows)
    Returns (X, Y, YU, W, cur_f) where cur_f is the Eq. (8) objective.
    """
    # n = X.shape[1]
    W = get_W(X, Y)
    # print_W(W)
    # b = 1/n*(np.dot(Y.T, np.ones((n,1))) - np.dot(np.dot(W.T, X), np.ones((n,1))))
    b = compute_b(X, Y, W)
    # Predict soft labels for the unlabelled samples (XU.T makes columns samples).
    YU = compute_YU(XU.T, W, b)
    X = sp.concatenate((XL, XU), axis = 0)
    Y = sp.concatenate((YL, YU), axis = 0)
    # Transpose back to the (d x n) layout the solver expects.
    X = X.T
    cur_f = fun8_value(X, Y, W, b)
    return X, Y, YU, W, cur_f
def compute_thea(W):
    """
    Per-feature importance scores:
        thea_j = ||w_j||_2 / sum_k ||w_k||_2,   j = 1..d
    where w_j is the j-th row of W.
    """
    row_norms = np.linalg.norm(W, ord=2, axis=1)
    return row_norms / row_norms.sum()
def lsfs(XL, YL, XU, output_file_name="feature_order"):
    """
    Run LSFS feature selection and write the ranked feature indices to a file.

    XL : nl x d labelled data, YL : nl x c labels, XU : nu x d unlabelled data.
    Returns (feature_order, time_dual): feature indices sorted by decreasing
    score, and the elapsed wall-clock time in seconds.
    """
    # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for elapsed-time measurement.
    start_time = time.perf_counter()
    X, Y, YU, W, cur_f = get_new_X_Y_YU_W_f(XL.T, YL, XL, YL, XU)
    print_W(W)
    NITER = 100
    epsilon = 10**-8
    # Iterate until the objective's relative change converges.
    for i in range(NITER):
        pre_f = cur_f
        X, Y, YU, W, cur_f = get_new_X_Y_YU_W_f(X, Y, XL, YL, XU)
        print_W(W)
        # coverage
        if abs((cur_f - pre_f) / cur_f) < epsilon:
            break
    s = compute_thea(W)
    # argsort is ascending; reverse for best-feature-first order.
    feature_order = list(np.argsort(s))
    feature_order = feature_order[::-1]
    time_dual = time.perf_counter() - start_time
    with open(output_file_name, "w+") as result_file:
        print("\n".join([str(w) for w in feature_order]), file=result_file)
    return feature_order, time_dual
| gpl-3.0 |
vitovitolo/yama | yama/urlquery.py | 1 | 1027 | """
urlquery module
"""
import cache
import database
def compose_response(is_malware=False):
    """Build the reply payload: {"malware": "True"} or {"malware": "False"}."""
    return {"malware": str(is_malware)}
def exists_in_cache(url, cache_hostname):
    # Fast path: membership test against the cache backend.
    return cache.exists(url, cache_hostname)
def exists_in_database(url, db_hostname, table_name, conf):
    # Authoritative (slower) lookup in the malware-URL database;
    # normalised to a strict bool for callers.
    is_malware = database.query_url(url, db_hostname, table_name, conf)
    if is_malware:
        return True
    else:
        return False
def write_to_cache(url, hostname, conf):
    # Remember a positive hit so later lookups avoid the database.
    return cache.write(url, hostname, conf)
def delete_from_cache(url, hostname):
    return cache.delete(url, hostname)
def process_url(url, hostname, table_name, conf):
    """Classify *url*: consult the cache first, then fall back to the
    database, caching positive hits for cheap subsequent lookups."""
    if exists_in_cache(url, hostname):
        return compose_response(is_malware=True)
    else:
        is_malware = exists_in_database(url, hostname, table_name, conf)
        if is_malware:
            write_to_cache(url, hostname, conf)
            return compose_response(is_malware)
        else:
            return compose_response(is_malware)
| mit |
ntt-pf-lab/backup_keystone | keystone/test/sampledata.py | 1 | 5138 | import keystone.manage
DEFAULT_FIXTURE = [
# Tenants
('tenant', 'add', '1234'),
('tenant', 'add', 'ANOTHER:TENANT'),
('tenant', 'add', '0000'),
('tenant', 'disable', '0000'),
# Users
('user', 'add', 'joeuser', 'secrete', '1234'),
('user', 'add', 'joeadmin', 'secrete', '1234'),
('user', 'add', 'admin', 'secrete'),
('user', 'add', 'serviceadmin', 'secrete', '1234'),
('user', 'add', 'disabled', 'secrete', '1234'),
('user', 'disable', 'disabled'),
# Roles
('role', 'add', 'Admin'),
('role', 'add', 'KeystoneServiceAdmin'),
('role', 'grant', 'Admin', 'admin'),
('role', 'grant', 'KeystoneServiceAdmin', 'serviceadmin'),
('role', 'grant', 'Admin', 'joeadmin', '1234'),
('role', 'grant', 'Admin', 'joeadmin', 'ANOTHER:TENANT'),
('role', 'add', 'Member'),
('role', 'grant', 'Member', 'joeuser', '1234'),
# Add Services
#1 Service Name:exampleservice Type:example type
('service', 'add', 'exampleservice',
'example type', 'example description'),
#2 Service Name:swift Type:object-store
('service', 'add', 'swift',
'object-store', 'Swift-compatible service'),
#3 Service Name:cdn Type:object-store
('service', 'add', 'cdn',
'object-store', 'Swift-compatible service'),
#4 Service Name:nova Type:compute
('service', 'add', 'nova',
'compute', 'OpenStack Compute Service'),
#5 Service Name:nova_compat Type:Compute
('service', 'add', 'nova_compat',
'compute', 'OpenStack Compute Service'),
#6 Service Name:glance Type:image-service
('service', 'add', 'glance',
'image-service', 'OpenStack Compute Service'),
#7 Service Name:glance Type:image-service
('service', 'add', 'identity',
'identity-service', 'OpenStack Compute Service'),
# Keeping for compatibility for a while till dashboard catches up
('endpointTemplates', 'add', 'RegionOne', '2',
'http://swift.publicinternets.com/v1/AUTH_%tenant_id%',
'http://swift.admin-nets.local:8080/',
'http://127.0.0.1:8080/v1/AUTH_%tenant_id%', '1', '0'),
('endpointTemplates', 'add', 'RegionOne', '5',
'http://nova.publicinternets.com/v1.0/',
'http://127.0.0.1:8774/v1.0', 'http://localhost:8774/v1.0', '1', '0'),
('endpointTemplates', 'add', 'RegionOne', '4',
'http://nova.publicinternets.com/v1.1/', 'http://127.0.0.1:8774/v1.1',
'http://localhost:8774/v1.1', '1', '0'),
('endpointTemplates', 'add', 'RegionOne', '6',
'http://glance.publicinternets.com/v1.1/%tenant_id%',
'http://nova.admin-nets.local/v1.1/%tenant_id%',
'http://127.0.0.1:9292/v1.1/%tenant_id%', '1', '0'),
('endpointTemplates', 'add', 'RegionOne', '3',
'http://cdn.publicinternets.com/v1.1/%tenant_id%',
'http://cdn.admin-nets.local/v1.1/%tenant_id%',
'http://127.0.0.1:7777/v1.1/%tenant_id%', '1', '0'),
# endpointTemplates
('endpointTemplates', 'add', 'RegionOne', '2',
'http://swift.publicinternets.com/v1/AUTH_%tenant_id%',
'http://swift.admin-nets.local:8080/',
'http://127.0.0.1:8080/v1/AUTH_%tenant_id%', '1', '0'),
('endpointTemplates', 'add', 'RegionOne', '4',
'http://nova.publicinternets.com/v1.0/', 'http://127.0.0.1:8774/v1.0',
'http://localhost:8774/v1.0', '1', '0'),
('endpointTemplates', 'add', 'RegionOne', '5',
'http://nova.publicinternets.com/v1.1/', 'http://127.0.0.1:8774/v1.1',
'http://localhost:8774/v1.1', '1', '0'),
('endpointTemplates', 'add', 'RegionOne', '6',
'http://glance.publicinternets.com/v1.1/%tenant_id%',
'http://nova.admin-nets.local/v1.1/%tenant_id%',
'http://127.0.0.1:9292/v1.1/%tenant_id%', '1', '0'),
('endpointTemplates', 'add', 'RegionOne', '3',
'http://cdn.publicinternets.com/v1.1/%tenant_id%',
'http://cdn.admin-nets.local/v1.1/%tenant_id%',
'http://127.0.0.1:7777/v1.1/%tenant_id%', '1', '0'),
# Global endpointTemplate
('endpointTemplates', 'add', 'RegionOne', '7',
'http://keystone.publicinternets.com/v2.0',
'http://127.0.0.1:5001/v2.0', 'http://127.0.0.1:5000/v2.0', '1', '1'),
# Tokens
('token', 'add', '887665443383838', 'joeuser', '1234', '2012-02-05T00:00'),
('token', 'add', '999888777666', 'admin', '1234', '2015-02-05T00:00'),
('token', 'add', '111222333444', 'serviceadmin', '1234',
'2015-02-05T00:00'),
('token', 'add', '000999', 'admin', '1234', '2010-02-05T00:00'),
('token', 'add', '999888777', 'disabled', '1234', '2015-02-05T00:00'),
# Tenant endpointsGlobal endpoint not added
('endpoint', 'add', '1234', '1'),
('endpoint', 'add', '1234', '2'),
('endpoint', 'add', '1234', '3'),
('endpoint', 'add', '1234', '4'),
('endpoint', 'add', '1234', '5'),
# Add Credentials
('credentials', 'add', 'admin', 'EC2', 'admin:admin', 'admin', '1'),
]
def load_fixture(fixture=DEFAULT_FIXTURE, args=None):
    """Feed every (command, *arguments) tuple of *fixture* through keystone-manage."""
    keystone.manage.parse_args(args)
    for cmd in fixture:
        keystone.manage.process(*cmd)
def main():
    """Populate a keystone instance with the default sample data."""
    load_fixture()
if __name__ == '__main__':
    main()
| apache-2.0 |
apllicationCOM/youtube-dl-api-server | youtube_dl_server/youtube_dl/extractor/khanacademy.py | 128 | 2740 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
unified_strdate,
)
class KhanAcademyIE(InfoExtractor):
    """Extractor for khanacademy.org single-video pages and topic playlists."""
    _VALID_URL = r'^https?://(?:(?:www|api)\.)?khanacademy\.org/(?P<key>[^/]+)/(?:[^/]+/){,2}(?P<id>[^?#/]+)(?:$|[?#])'
    IE_NAME = 'KhanAcademy'
    _TESTS = [{
        'url': 'http://www.khanacademy.org/video/one-time-pad',
        'md5': '7021db7f2d47d4fff89b13177cb1e8f4',
        'info_dict': {
            'id': 'one-time-pad',
            'ext': 'mp4',
            'title': 'The one-time pad',
            'description': 'The perfect cipher',
            'duration': 176,
            'uploader': 'Brit Cruise',
            'uploader_id': 'khanacademy',
            'upload_date': '20120411',
        },
        'add_ie': ['Youtube'],
    }, {
        'url': 'https://www.khanacademy.org/math/applied-math/cryptography',
        'info_dict': {
            'id': 'cryptography',
            'title': 'Journey into cryptography',
            'description': 'How have humans protected their secret messages through history? What has changed today?',
        },
        'playlist_mincount': 3,
    }]
    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')
        if m.group('key') == 'video':
            # Single clip: fetch its metadata and hand the media URL off to
            # another extractor via 'url_transparent' (keeping our metadata).
            data = self._download_json(
                'http://api.khanacademy.org/api/v1/videos/' + video_id,
                video_id, 'Downloading video info')
            upload_date = unified_strdate(data['date_added'])
            uploader = ', '.join(data['author_names'])
            return {
                '_type': 'url_transparent',
                'url': data['url'],
                'id': video_id,
                'title': data['title'],
                'thumbnail': data['image_url'],
                'duration': data['duration'],
                'description': data['description'],
                'uploader': uploader,
                'upload_date': upload_date,
            }
        else:
            # topic: emit a playlist of the topic's Video/Topic children.
            data = self._download_json(
                'http://api.khanacademy.org/api/v1/topic/' + video_id,
                video_id, 'Downloading topic info')
            entries = [
                {
                    '_type': 'url',
                    'url': c['url'],
                    'id': c['id'],
                    'title': c['title'],
                }
                for c in data['children'] if c['kind'] in ('Video', 'Topic')]
            return {
                '_type': 'playlist',
                'id': video_id,
                'title': data['title'],
                'description': data['description'],
                'entries': entries,
            }
| unlicense |
labase/impressious | tests/impressious_tests.py | 1 | 9494 | # -*- coding: UTF8 -*-
# Este arquivo é parte do programa Impressious
# Copyright 2013-2014 Carlo Oliveira <carlo@nce.ufrj.br>,
# `Labase <http://labase.selfip.org/>`__; `GPL <http://is.gd/3Udt>`__.
#
# Impressious é um software livre; você pode redistribuí-lo e/ou
# modificá-lo dentro dos termos da Licença Pública Geral GNU como
# publicada pela Fundação do Software Livre (FSF); na versão 2 da
# Licença.
#
# Este programa é distribuído na esperança de que possa ser útil,
# mas SEM NENHUMA GARANTIA; sem uma garantia implícita de ADEQUAÇÃO
# a qualquer MERCADO ou APLICAÇÃO EM PARTICULAR. Veja a
# Licença Pública Geral GNU para maiores detalhes.
#
# Você deve ter recebido uma cópia da Licença Pública Geral GNU
# junto com este programa, se não, veja em <http://www.gnu.org/licenses/>
"""
############################################################
Impressious - Teste Principal
############################################################
Verifica a funcionalidade do cliente web.
"""
import unittest
from impressious.core import Impressious, Slide, Sprite, Menu, LOREM, DIM, GUI
from impressious import main
import sys
if sys.version_info[0] == 2:
from mock import MagicMock, patch, ANY
else:
from unittest.mock import MagicMock, patch, ANY
WIKI = "https://activufrj.nce.ufrj.br/rest/wiki/activlets/Provas_2014_2"
WCONT = '{"status": 0, "result": {"wikidata": {"conteudo": "<h1>Carlo Emmanoel Tolla de Oliveira<\/h1><ol>' \
'<li>Society is intrinsically<\/li><li> responsible for capitalism; says Sartre;<\/li>' \
'<li>Society is intrinsically<\/li><li> responsible for capitalism; says Sartre;<\/li>' \
'<li>Society is intrinsically<\/li><li> responsible for capitalism; says Sartre;<\/li>' \
'<li>Society is intrinsically<\/li><li> responsible for capitalism; says Sartre;<\/li>' \
'<li>Society is intrinsically<\/li><li> responsible for capitalism; says Sartre;<\/li>' \
'</ol>"}}}'
ICONT = 'Society is intrinsically'
class ImpressiousTest(unittest.TestCase):
    class Evento:
        # Minimal stand-in for a browser mouse event: only .x / .y are read.
        x = y = 42
    EV = Evento()  # shared event instance; re-centred in setUp before each test
def setUp(self):
self.gui = MagicMock(name="gui")
modules = {
'urllib': self.gui,
'urllib.request': self.gui.request,
'urllib.request.urlopen': self.gui.request.urlopen
}
uop = MagicMock(name="file")
self.gui.request.urlopen.return_value = (uop, 0, 0)
uop.read = MagicMock(name="data", return_value=WCONT)
self.module_patcher = patch.dict('sys.modules', modules)
self.module_patcher.start()
self.gui.__le__ = MagicMock(name="APPEND")
self.gui.svg = self.gui.svg.svg = self.gui
self.gui.side_effect = lambda *a, **k: self.gui
self.gui.g = MagicMock(name="gui_g")
self.gui.g.__le__ = MagicMock(name="APPENDG")
self.gui.g.side_effect = lambda *a, **k: self.gui.g
self.gui.document.__getitem__.return_value = self.gui
self.app = Impressious(self.gui)
Impressious.SLIDES = []
self.EV.x = self.EV.y = 42
    def test_main(self):
        """Creates an SVG canvas (original docstring: "cria um canvas svg")."""
        imp = main(self.gui)
        self.assertIsInstance(imp, Impressious, "Intância não criada")
        # main() must build an 800x600 svg surface on the mocked GUI.
        self.gui.g.assert_called_with()
        self.gui.assert_called_with(width=800, height=600)
        self.assertEqual(imp.gui, self.gui)
        self.assertIsInstance(imp.svgcanvas, MagicMock, "Svgcanvas is not as expected: %s" % imp.svgcanvas)
        imp.svgcanvas.__le__.assert_called_with(self.gui.g)
    def test_menu(self):
        """Creates the application menu (was: "cria o menu do aplicativo")."""
        self.app.build_base()
        self.assertIs(self.app.div, self.gui)
        lclick = lambda e: self.gui.click()
        # menu = dict(dash=dict(save=lclick, load=lclick))
        menu = dict(save=lclick, load=lclick)
        icon = Sprite("test.jpg", 37, 43, 12, 10, 2)
        icons = icon.sprites(dash=[4, 1], save=[0, 0], load=[0, 0])
        render = MagicMock(name="render")
        # Expected CSS style of the (initially hidden) menu container.
        style = {'border': '1px solid #d0d0d0', 'left': '0', 'bottom': '0', 'position': 'absolute',
                 'width': '37px', 'height': '86px', 'top': '0', 'display': "none", 'margin': 'auto'}
        with patch("impressious.core.Sprite.render") as render:
            self.app.build_menu(menu, icons)
        self.gui.html.DIV.assert_called_with(Id='divdash', style=style, Class='deafault')
        icon.render.assert_called_with(ANY, 0, 0, ANY, list(menu.keys())[1])
        m = self.app.menu
        # The dash menu must be registered globally and hold a mocked widget.
        self.assertIn("dash", Menu.MENU)
        self.assertIsInstance(Menu.MENU["dash"].menu, MagicMock)
        self.assertIsInstance(m, Menu, "Menu is not as expected: %s" % m)
    def test_slide(self):
        """Creates one slide holding text (was: "cria um slide com texto")."""
        self.app.build_base()
        g = self.app.slide()
        # The first slide is placed at the origin offset (10, 10).
        self.gui.svg.g.assert_called_with(transform='translate (10, 10)')
        self.gui.svg.foreignObject.assert_called_with(x=0, y=0, width=DIM[0], height=DIM[1])
        self.assertIsInstance(g, Slide, "Slide is not as expected: %s" % g)
    def test_two_slide(self):
        """Creates two text slides (was: "cria dois slides com texto")."""
        self.app.build_base()
        self.app.slide()
        g = self.app.slide()
        dx = 200 # DIM[0] * 2 + 30
        # The second slide must be shifted horizontally to x = 200.
        self.gui.svg.g.assert_called_with(transform='translate (200, 10)')
        self.assertIsInstance(g, Slide, "Slide is not as expected: %s" % g)
    def test_read_from_wiki(self):
        """Reads a text from the wiki (was: "le um texto da wiki")."""
        self.app.build_base()
        # The mocked urlopen (see setUp) serves WCONT for this URL.
        w = self.app.read_wiki(WIKI)
        self.assertIn(ICONT, w, "Wiki is not as expected: %s" % w)
    def test_parse_from_wiki(self):
        """Splits a wiki text into items (was: "separa um texto da wiki em itens")."""
        self.app.build_base()
        w = self.app.read_wiki(WIKI)
        l = self.app.parse_wiki(w)
        # WCONT carries ten <li> entries; the markup must be stripped from each.
        self.assertEqual(10, len(l), "list is not as expected: %s" % l)
        self.assertNotIn('<li', l[0], "item is not as expected: %s" % l[0])
    def test_load_slides_from_list(self):
        """Splits a wiki text into items (was: "separa um texto da wiki em itens")."""
        # NOTE(review): this duplicates test_parse_from_wiki and never calls
        # load_slides_from_wiki despite its name — looks like an unfinished test.
        self.app.build_base()
        w = self.app.read_wiki(WIKI)
        l = self.app.parse_wiki(w)
        self.assertEqual(10, len(l), "list is not as expected: %s" % l)
        self.assertNotIn('<li', l[0], "item is not as expected: %s" % l[0])
    def test_select_slide_and_show_cursor(self):
        """Selects a slide and shows the cursor."""
        self.app.build_base()
        l = self.app.load_slides_from_wiki(['Society is intrinsically<\/li>'])
        self.assertEqual(1, len(l), "list is not as expected: %s" % l)
        self.app.SLIDES[0]._select(None)
        # The cursor is drawn as a rotated translucent square plus a
        # group anchored at the selected slide's position.
        self.gui.svg.rect.assert_called_with(
            width=35, x=-35, style={'opacity': 0.5, 'fill': '#b3b3b3'}, transform='rotate (45 0 0)', height=35, y=-35)
        self.gui.svg.g.assert_called_with(transform="translate (100 100)", Id='cursor')
    def test_select_slide_and_switch_cursor(self):
        """Selects a slide, then a second one; the cursor moves over."""
        self.app.build_base()
        l = self.app.load_slides_from_wiki(['Society is intrinsically<\/li>', 'Society is intrinsically<\/li>'])
        self.assertEqual(2, len(l), "list is not as expected: %s" % l)
        self.app.SLIDES[0]._select(None)
        self.app.SLIDES[1]._select(None)
        self.gui.svg.g.assert_called_with(transform="translate (100 100)", Id='cursor')
        # Switching selection repositions the existing cursor group
        # instead of creating a new one.
        self.gui.svg.g.setAttribute.assert_called_with('transform', "translate (290 100)")
        #assert self.gui.svg.g.transform == 'translate (290 100)', self.gui.svg.g.transform
    def _create_and_select_a_slide(self):
        """Helper: create one slide from wiki text, select it, return it."""
        self.app.build_base()
        l = self.app.load_slides_from_wiki(['Society is intrinsically'])
        slide = self.app.SLIDES[0]
        slide._select(None)
        return slide
    def test_select_slide_move(self):
        """Selects a slide and drags it; the slide follows the cursor."""
        self._create_and_select_a_slide()
        # self.EV is presumably a mock pointer event with .x/.y set to
        # (42, 42) in the fixture -- set up outside this chunk.
        self.app.cursor._cursor_start_move(self.EV, self.app.cursor._move_slide)
        self.assertEqual(self.app.cursor._mouse_pos, (42, 42))
        self.EV.x, self.EV.y = (84, 84)
        self.app.cursor._move(self.EV)
        self.assertEqual(self.app.cursor._mouse_pos, (84, 84), "but mouse pos is %d %d " % self.app.cursor._mouse_pos)
        self.assertEqual(self.app.SLIDES[0].position, (-6, -6), "but slide pos is %d %d " % self.app.SLIDES[0].position)
        self.gui.svg.g.setAttribute.assert_called_with('transform', "translate (-6 -6)")
        #self.assertEqual(self.gui.svg.g.mock_calls, [], "but slide pos is %s " % self.gui.svg.g.mock_calls)
    def test_select_widen_slide(self):
        """Selects a slide and drags the resize handle to widen it."""
        self._create_and_select_a_slide()
        self.app.cursor._cursor_start_move(self.EV, self.app.cursor._widen_slide)
        self.assertEqual(self.app.cursor._mouse_pos, (42, 42))
        self.EV.x, self.EV.y = (84, 84)
        self.app.cursor._move(self.EV)
        self.assertEqual(self.app.cursor._mouse_pos, (84, 84), "but mouse pos is %d %d " % self.app.cursor._mouse_pos)
        # A 42px drag grows the slide's dimension to (222, 180).
        self.assertEqual(self.app.SLIDES[0].dimension, (222, 180),
                         "but slide pos is %d %d " % self.app.SLIDES[0].position)
        #self.assertEqual(self.gui.svg.g.mock_calls, [], "but slide pos is %s " % self.gui.svg.g.mock_calls)
        # setAttribute receives the width as a string.
        self.gui.svg.rect().setAttribute.assert_called_with('width', "222")
if __name__ == '__main__':
unittest.main() | gpl-3.0 |
zitouni/gnuradio-3.6.1 | gnuradio-core/src/python/gnuradio/gr/qa_integrate.py | 18 | 2228 | #!/usr/bin/env python
#
# Copyright 2008,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import math
class test_integrate (gr_unittest.TestCase):
    """QA for the gr.integrate_* blocks: with decimation 3 each output
    item is the sum of 3 consecutive input items, so (1..6) -> (6, 15).
    One test per supported item type (short, int, float, complex)."""

    def setUp (self):
        self.tb = gr.top_block ()

    def tearDown (self):
        self.tb = None

    def test_000_ss(self):
        src_data = (1, 2, 3, 4, 5, 6)
        dst_data = (6, 15)
        src = gr.vector_source_s(src_data)
        itg = gr.integrate_ss(3)
        dst = gr.vector_sink_s()
        self.tb.connect(src, itg, dst)
        self.tb.run()
        # Sinks return tuples, so dst_data must stay a tuple.
        self.assertEqual(dst_data, dst.data())

    def test_001_ii(self):
        src_data = (1, 2, 3, 4, 5, 6)
        dst_data = (6, 15)
        src = gr.vector_source_i(src_data)
        itg = gr.integrate_ii(3)
        dst = gr.vector_sink_i()
        self.tb.connect(src, itg, dst)
        self.tb.run()
        self.assertEqual(dst_data, dst.data())

    def test_002_ff(self):
        src_data = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
        dst_data = [6.0, 15.0]
        src = gr.vector_source_f(src_data)
        itg = gr.integrate_ff(3)
        dst = gr.vector_sink_f()
        self.tb.connect(src, itg, dst)
        self.tb.run()
        # Float comparison to 6 decimal places.
        self.assertFloatTuplesAlmostEqual(dst_data, dst.data(), 6)

    def test_003_cc(self):
        src_data = [1.0+1.0j, 2.0+2.0j, 3.0+3.0j, 4.0+4.0j, 5.0+5.0j, 6.0+6.0j]
        dst_data = [6.0+6.0j, 15.0+15.0j]
        src = gr.vector_source_c(src_data)
        itg = gr.integrate_cc(3)
        dst = gr.vector_sink_c()
        self.tb.connect(src, itg, dst)
        self.tb.run()
        self.assertComplexTuplesAlmostEqual(dst_data, dst.data(), 6)

if __name__ == '__main__':
    gr_unittest.run(test_integrate, "test_integrate.xml")
| gpl-3.0 |
googleapis/googleapis-gen | google/cloud/aiplatform/v1beta1/aiplatform-v1beta1-py/google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py | 1 | 1920 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import artifact
from google.cloud.aiplatform_v1beta1.types import event
from google.cloud.aiplatform_v1beta1.types import execution
# Proto-plus module declaration; appears to be generated code
# (googleapis-gen) -- keep edits to documentation only.
__protobuf__ = proto.module(
    package='google.cloud.aiplatform.v1beta1',
    manifest={
        'LineageSubgraph',
    },
)


class LineageSubgraph(proto.Message):
    r"""A subgraph of the overall lineage graph. Event edges connect
    Artifact and Execution nodes.

    Attributes:
        artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]):
            The Artifact nodes in the subgraph.
        executions (Sequence[google.cloud.aiplatform_v1beta1.types.Execution]):
            The Execution nodes in the subgraph.
        events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]):
            The Event edges between Artifacts and
            Executions in the subgraph.
    """

    # Field numbers (1, 2, 3) are part of the wire format; never renumber.
    artifacts = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message=artifact.Artifact,
    )
    executions = proto.RepeatedField(
        proto.MESSAGE,
        number=2,
        message=execution.Execution,
    )
    events = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message=event.Event,
    )


__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
Verteiron/JContainers | JContainers/lib/boost/tools/build/v2/test/core_update_now.py | 45 | 6202 | #!/usr/bin/python
# Copyright 2011 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
import os
def basic():
    """UPDATE_NOW runs the target's actions immediately; the normal
    build pass afterwards finds nothing left to update.

    Expected-stdout strings must match b2's output byte-for-byte.
    """
    t = BoostBuild.Tester(pass_toolset=0, pass_d0=False)
    t.write("file.jam", """\
actions do-print
{
echo updating $(<)
}
NOTFILE target1 ;
ALWAYS target1 ;
do-print target1 ;
UPDATE_NOW target1 ;
DEPENDS all : target1 ;
""")
    t.run_build_system(["-ffile.jam"], stdout="""\
...found 1 target...
...updating 1 target...
do-print target1
updating target1
...updated 1 target...
...found 1 target...
""")
    t.cleanup()
def ignore_minus_n():
    """With the ignore-minus-n flag, UPDATE_NOW executes actions even
    under -n (dry run); the action text is echoed and then run."""
    t = BoostBuild.Tester(pass_toolset=0, pass_d0=False)
    t.write("file.jam", """\
actions do-print
{
echo updating $(<)
}
NOTFILE target1 ;
ALWAYS target1 ;
do-print target1 ;
UPDATE_NOW target1 : : ignore-minus-n ;
DEPENDS all : target1 ;
""")
    t.run_build_system(["-ffile.jam", "-n"], stdout="""\
...found 1 target...
...updating 1 target...
do-print target1
echo updating target1
updating target1
...updated 1 target...
...found 1 target...
""")
    t.cleanup()
def failed_target():
    """A target that fails inside UPDATE_NOW does not stop independent
    targets from being updated in the later pass (under -n the dependent
    target2 is still 'updated' since its action is not really run)."""
    t = BoostBuild.Tester(pass_toolset=0, pass_d0=False)
    t.write("file.jam", """\
actions fail
{
exit 1
}
NOTFILE target1 ;
ALWAYS target1 ;
fail target1 ;
actions do-print
{
echo updating $(<)
}
NOTFILE target2 ;
do-print target2 ;
DEPENDS target2 : target1 ;
UPDATE_NOW target1 : : ignore-minus-n ;
DEPENDS all : target1 target2 ;
""")
    t.run_build_system(["-ffile.jam", "-n"], stdout="""\
...found 1 target...
...updating 1 target...
fail target1
exit 1
...failed fail target1...
...failed updating 1 target...
...found 2 targets...
...updating 1 target...
do-print target2
echo updating target2
...updated 1 target...
""")
    t.cleanup()
def missing_target():
    """UPDATE_NOW on a target with no way to make it reports the problem
    and the overall build exits with status 1."""
    t = BoostBuild.Tester(pass_toolset=0, pass_d0=False)
    t.write("file.jam", """\
actions do-print
{
echo updating $(<)
}
NOTFILE target2 ;
do-print target2 ;
DEPENDS target2 : target1 ;
UPDATE_NOW target1 : : ignore-minus-n ;
DEPENDS all : target1 target2 ;
""")
    t.run_build_system(["-ffile.jam", "-n"], status=1, stdout="""\
don't know how to make target1
...found 1 target...
...can't find 1 target...
...found 2 targets...
...can't make 1 target...
""")
    t.cleanup()
def build_once():
    """
    Make sure that if we call UPDATE_NOW with ignore-minus-n, the target gets
    updated exactly once regardless of previous calls to UPDATE_NOW with -n in
    effect.
    """
    t = BoostBuild.Tester(pass_toolset=0, pass_d0=False)
    t.write("file.jam", """\
actions do-print
{
echo updating $(<)
}
NOTFILE target1 ;
ALWAYS target1 ;
do-print target1 ;
UPDATE_NOW target1 ;
UPDATE_NOW target1 : : ignore-minus-n ;
UPDATE_NOW target1 : : ignore-minus-n ;
DEPENDS all : target1 ;
""")
    # Expected: one dry-run update (echo only) followed by exactly one
    # real update; the third UPDATE_NOW is a no-op.
    t.run_build_system(["-ffile.jam", "-n"], stdout="""\
...found 1 target...
...updating 1 target...
do-print target1
echo updating target1
...updated 1 target...
do-print target1
echo updating target1
updating target1
...updated 1 target...
...found 1 target...
""")
    t.cleanup()
def return_status():
    """
    Make sure that UPDATE_NOW returns a failure status if
    the target failed in a previous call to UPDATE_NOW
    """
    t = BoostBuild.Tester(pass_toolset=0, pass_d0=False)
    t.write("file.jam", """\
actions fail
{
exit 1
}
NOTFILE target1 ;
ALWAYS target1 ;
fail target1 ;
ECHO update1: [ UPDATE_NOW target1 ] ;
ECHO update2: [ UPDATE_NOW target1 ] ;
DEPENDS all : target1 ;
""")
    # Both ECHOs print an empty result: the first UPDATE_NOW fails and
    # the second must report the cached failure without re-running.
    t.run_build_system(["-ffile.jam"], status=1, stdout="""\
...found 1 target...
...updating 1 target...
fail target1
exit 1
...failed fail target1...
...failed updating 1 target...
update1:
update2:
...found 1 target...
""")
    t.cleanup()
def save_restore():
    """Tests that ignore-minus-n and ignore-minus-q are
    local to the call to UPDATE_NOW"""
    t = BoostBuild.Tester(pass_toolset=0, pass_d0=False)
    t.write("actions.jam", """\
rule fail
{
NOTFILE $(<) ;
ALWAYS $(<) ;
}
actions fail
{
exit 1
}
rule pass
{
NOTFILE $(<) ;
ALWAYS $(<) ;
}
actions pass
{
echo updating $(<)
}
""")
    # targets 1/2 go through an UPDATE_NOW whose flags come from the
    # command line (-s variables); targets 3/4 go through a plain
    # UPDATE_NOW and must see the global -n / -q behavior restored.
    t.write("file.jam", """
include actions.jam ;
fail target1 ;
fail target2 ;
UPDATE_NOW target1 target2 : : $(IGNORE_MINUS_N) : $(IGNORE_MINUS_Q) ;
fail target3 ;
fail target4 ;
UPDATE_NOW target3 target4 ;
UPDATE ;
""")
    # ignore-minus-n inside UPDATE_NOW: 1/2 really run (and fail);
    # 3/4 are dry-run only, so they "succeed".
    t.run_build_system(['-n', '-sIGNORE_MINUS_N=1', '-ffile.jam'],
        stdout='''...found 2 targets...
...updating 2 targets...
fail target1
exit 1
...failed fail target1...
fail target2
exit 1
...failed fail target2...
...failed updating 2 targets...
...found 2 targets...
...updating 2 targets...
fail target3
exit 1
fail target4
exit 1
...updated 2 targets...
''')
    # Under -q both calls stop at their first failure.
    t.run_build_system(['-q', '-sIGNORE_MINUS_N=1', '-ffile.jam'],
        status=1, stdout='''...found 2 targets...
...updating 2 targets...
fail target1
exit 1
...failed fail target1...
...failed updating 1 target...
...found 2 targets...
...updating 2 targets...
fail target3
exit 1
...failed fail target3...
...failed updating 1 target...
''')
    # IGNORE_MINUS_Q has no visible effect under -n: nothing runs.
    t.run_build_system(['-n', '-sIGNORE_MINUS_Q=1', '-ffile.jam'],
        stdout='''...found 2 targets...
...updating 2 targets...
fail target1
exit 1
fail target2
exit 1
...updated 2 targets...
...found 2 targets...
...updating 2 targets...
fail target3
exit 1
fail target4
exit 1
...updated 2 targets...
''')
    # ignore-minus-q inside the first UPDATE_NOW: both 1 and 2 are
    # attempted despite -q; the second call stops after target3.
    t.run_build_system(['-q', '-sIGNORE_MINUS_Q=1', '-ffile.jam'],
        status=1, stdout='''...found 2 targets...
...updating 2 targets...
fail target1
exit 1
...failed fail target1...
fail target2
exit 1
...failed fail target2...
...failed updating 2 targets...
...found 2 targets...
...updating 2 targets...
fail target3
exit 1
...failed fail target3...
...failed updating 1 target...
''')
    t.cleanup()
# Run every scenario; each one creates and cleans up its own Tester.
basic()
ignore_minus_n()
failed_target()
missing_target()
build_once()
return_status()
save_restore()
| mit |
QuantSoftware/QuantSoftwareToolkit | Examples/Basic/tutorial4.py | 5 | 2590 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on January, 24, 2013
@author: Sourabh Bajaj
@contact: sourabhbajaj@gatech.edu
@summary: Example tutorial code.
'''
# QSTK Imports
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
# Third Party Imports
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import cPickle
def main():
    '''Build a random monthly allocation table for 20 S&P symbols plus
    cash and pickle it to allocation.pkl.

    Fix: the output file is now opened with a context manager so the
    handle is flushed and closed even if cPickle.dump() raises (the
    original opened it and never closed it).
    '''
    # Start and End date of the charts
    dt_start = dt.datetime(2004, 1, 1)
    dt_end = dt.datetime(2009, 12, 31)

    # We need closing prices so the timestamp should be hours=16.
    dt_timeofday = dt.timedelta(hours=16)

    # Get a list of trading days between the start and the end.
    ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)

    # Creating an object of the dataaccess class with Yahoo as the source.
    c_dataobj = da.DataAccess('Yahoo')

    # List of symbols - First 20
    ls_symbols = c_dataobj.get_symbols_from_list('sp5002012')
    ls_symbols = ls_symbols[:20]
    ls_symbols.append('_CASH')

    # Creating the first allocation row
    na_vals = np.random.randint(0, 1000, len(ls_symbols))
    # Normalize the row - Typecasting as everything is int.
    na_vals = na_vals / float(sum(na_vals))
    # Reshape to a 2D matrix to append into dataframe.
    na_vals = na_vals.reshape(1, -1)

    # Creating Allocation DataFrames
    df_alloc = pd.DataFrame(na_vals, index=[ldt_timestamps[0]],
                            columns=ls_symbols)
    dt_last_date = ldt_timestamps[0]

    # Looping through all dates and creating a fresh random allocation
    # on the first trading day of each month.
    for dt_date in ldt_timestamps[1:]:
        if dt_last_date.month != dt_date.month:
            # Create allocation
            na_vals = np.random.randint(0, 1000, len(ls_symbols))
            na_vals = na_vals / float(sum(na_vals))
            na_vals = na_vals.reshape(1, -1)
            # Append to the dataframe
            df_new_row = pd.DataFrame(na_vals, index=[dt_date],
                                      columns=ls_symbols)
            df_alloc = df_alloc.append(df_new_row)
        dt_last_date = dt_date

    # Write the allocation dataframe; 'with' guarantees the file is
    # closed even on error.
    with open('allocation.pkl', 'wb') as output:
        cPickle.dump(df_alloc, output)

if __name__ == '__main__':
    main()
| bsd-3-clause |
kisel/trex-core | scripts/external_libs/pyyaml-3.11/python3/yaml/representer.py | 238 | 13528 |
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
from .error import *
from .nodes import *
import datetime, sys, copyreg, types, base64
class RepresenterError(YAMLError):
    """Raised when a Python object cannot be converted to a YAML node."""
    pass
class BaseRepresenter:
    """Core object-graph -> YAML-node machinery.

    Converts Python objects into ScalarNode/SequenceNode/MappingNode
    trees, dispatching on type via two class-level registries:

    - yaml_representers: exact-type match only
    - yaml_multi_representers: walked along the MRO, so they also
      match subclasses

    The id()-keyed cache `represented_objects` lets shared/recursive
    objects become YAML anchors and aliases.
    """

    yaml_representers = {}
    yaml_multi_representers = {}

    def __init__(self, default_style=None, default_flow_style=None):
        self.default_style = default_style
        self.default_flow_style = default_flow_style
        # id(obj) -> node cache for the current represent() call.
        self.represented_objects = {}
        # Keeps represented objects alive so their id()s stay unique
        # for the duration of the document.
        self.object_keeper = []
        self.alias_key = None

    def represent(self, data):
        node = self.represent_data(data)
        # serialize() is provided by the Serializer class mixed in by
        # the Dumper -- presumably; confirm against dumper.py.
        self.serialize(node)
        # Reset per-document state.
        self.represented_objects = {}
        self.object_keeper = []
        self.alias_key = None

    def represent_data(self, data):
        if self.ignore_aliases(data):
            self.alias_key = None
        else:
            self.alias_key = id(data)
        if self.alias_key is not None:
            # Reuse the already-built node so the emitter produces an
            # alias instead of re-serializing the object.
            if self.alias_key in self.represented_objects:
                node = self.represented_objects[self.alias_key]
                #if node is None:
                #    raise RepresenterError("recursive objects are not allowed: %r" % data)
                return node
            #self.represented_objects[alias_key] = None
            self.object_keeper.append(data)
        # Dispatch: exact type first, then MRO walk over the multi
        # registry, then the catch-all None entries, then str() fallback.
        data_types = type(data).__mro__
        if data_types[0] in self.yaml_representers:
            node = self.yaml_representers[data_types[0]](self, data)
        else:
            for data_type in data_types:
                if data_type in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[data_type](self, data)
                    break
            else:
                if None in self.yaml_multi_representers:
                    node = self.yaml_multi_representers[None](self, data)
                elif None in self.yaml_representers:
                    node = self.yaml_representers[None](self, data)
                else:
                    node = ScalarNode(None, str(data))
        #if alias_key is not None:
        #    self.represented_objects[alias_key] = node
        return node

    @classmethod
    def add_representer(cls, data_type, representer):
        # Copy-on-write: give this class its own registry dict the first
        # time it registers, so registrations on a subclass do not leak
        # into the base class.
        if not 'yaml_representers' in cls.__dict__:
            cls.yaml_representers = cls.yaml_representers.copy()
        cls.yaml_representers[data_type] = representer

    @classmethod
    def add_multi_representer(cls, data_type, representer):
        if not 'yaml_multi_representers' in cls.__dict__:
            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
        cls.yaml_multi_representers[data_type] = representer

    def represent_scalar(self, tag, value, style=None):
        if style is None:
            style = self.default_style
        node = ScalarNode(tag, value, style=style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        return node

    def represent_sequence(self, tag, sequence, flow_style=None):
        value = []
        node = SequenceNode(tag, value, flow_style=flow_style)
        # Register the node *before* recursing so self-referencing
        # sequences resolve to an alias instead of recursing forever.
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        for item in sequence:
            node_item = self.represent_data(item)
            if not (isinstance(node_item, ScalarNode) and not node_item.style):
                best_style = False
            value.append(node_item)
        if flow_style is None:
            # Default: flow style only when every item is a plain scalar.
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

    def represent_mapping(self, tag, mapping, flow_style=None):
        value = []
        node = MappingNode(tag, value, flow_style=flow_style)
        if self.alias_key is not None:
            self.represented_objects[self.alias_key] = node
        best_style = True
        if hasattr(mapping, 'items'):
            mapping = list(mapping.items())
            try:
                # Sort keys for deterministic output; leave unorderable
                # key sets in iteration order.
                mapping = sorted(mapping)
            except TypeError:
                pass
        for item_key, item_value in mapping:
            node_key = self.represent_data(item_key)
            node_value = self.represent_data(item_value)
            if not (isinstance(node_key, ScalarNode) and not node_key.style):
                best_style = False
            if not (isinstance(node_value, ScalarNode) and not node_value.style):
                best_style = False
            value.append((node_key, node_value))
        if flow_style is None:
            if self.default_flow_style is not None:
                node.flow_style = self.default_flow_style
            else:
                node.flow_style = best_style
        return node

    def ignore_aliases(self, data):
        # Subclasses decide which values never get anchors/aliases.
        return False
class SafeRepresenter(BaseRepresenter):
    """Representers for the 'safe' subset of Python types -- the types
    that map onto core YAML tags (used by yaml.safe_dump())."""

    def ignore_aliases(self, data):
        # Small immutable values never get anchors/aliases.
        # NOTE(review): falls through returning None (falsy) for other
        # types, which callers treat the same as False.
        if data in [None, ()]:
            return True
        if isinstance(data, (str, bytes, bool, int, float)):
            return True

    def represent_none(self, data):
        return self.represent_scalar('tag:yaml.org,2002:null',
                'null')

    def represent_str(self, data):
        return self.represent_scalar('tag:yaml.org,2002:str', data)

    def represent_binary(self, data):
        # base64.encodebytes replaced encodestring; keep the fallback
        # for older Python 3 versions.
        if hasattr(base64, 'encodebytes'):
            data = base64.encodebytes(data).decode('ascii')
        else:
            data = base64.encodestring(data).decode('ascii')
        return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')

    def represent_bool(self, data):
        if data:
            value = 'true'
        else:
            value = 'false'
        return self.represent_scalar('tag:yaml.org,2002:bool', value)

    def represent_int(self, data):
        return self.represent_scalar('tag:yaml.org,2002:int', str(data))

    # Compute +inf portably at class-definition time: keep squaring
    # until repr() stops changing (i.e. the value saturates to inf).
    inf_value = 1e300
    while repr(inf_value) != repr(inf_value*inf_value):
        inf_value *= inf_value

    def represent_float(self, data):
        # `data != data` is the standard NaN test; the second clause
        # presumably guards platforms with broken NaN comparisons (it
        # can never hold for an ordinary float) -- TODO confirm.
        if data != data or (data == 0.0 and data == 1.0):
            value = '.nan'
        elif data == self.inf_value:
            value = '.inf'
        elif data == -self.inf_value:
            value = '-.inf'
        else:
            value = repr(data).lower()
            # Note that in some cases `repr(data)` represents a float number
            # without the decimal parts. For instance:
            #   >>> repr(1e17)
            #   '1e17'
            # Unfortunately, this is not a valid float representation according
            # to the definition of the `!!float` tag. We fix this by adding
            # '.0' before the 'e' symbol.
            if '.' not in value and 'e' in value:
                value = value.replace('e', '.0e', 1)
        return self.represent_scalar('tag:yaml.org,2002:float', value)

    def represent_list(self, data):
        #pairs = (len(data) > 0 and isinstance(data, list))
        #if pairs:
        #    for item in data:
        #        if not isinstance(item, tuple) or len(item) != 2:
        #            pairs = False
        #            break
        #if not pairs:
            return self.represent_sequence('tag:yaml.org,2002:seq', data)
        #value = []
        #for item_key, item_value in data:
        #    value.append(self.represent_mapping(u'tag:yaml.org,2002:pairs',
        #        [(item_key, item_value)]))
        #return SequenceNode(u'tag:yaml.org,2002:pairs', value)

    def represent_dict(self, data):
        return self.represent_mapping('tag:yaml.org,2002:map', data)

    def represent_set(self, data):
        # YAML sets are mappings with null values.
        value = {}
        for key in data:
            value[key] = None
        return self.represent_mapping('tag:yaml.org,2002:set', value)

    def represent_date(self, data):
        value = data.isoformat()
        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)

    def represent_datetime(self, data):
        # Space separator (not 'T'), per the YAML timestamp style used here.
        value = data.isoformat(' ')
        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)

    def represent_yaml_object(self, tag, data, cls, flow_style=None):
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__.copy()
        return self.represent_mapping(tag, state, flow_style=flow_style)

    def represent_undefined(self, data):
        raise RepresenterError("cannot represent an object: %s" % data)
# Register the safe-type representers; the None entry is the catch-all
# that rejects anything not handled above.
SafeRepresenter.add_representer(type(None),
        SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str,
        SafeRepresenter.represent_str)
SafeRepresenter.add_representer(bytes,
        SafeRepresenter.represent_binary)
SafeRepresenter.add_representer(bool,
        SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int,
        SafeRepresenter.represent_int)
SafeRepresenter.add_representer(float,
        SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list,
        SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple,
        SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict,
        SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set,
        SafeRepresenter.represent_set)
SafeRepresenter.add_representer(datetime.date,
        SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime,
        SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None,
        SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
    """Adds representers for Python-specific types (complex, tuple,
    classes/functions by name, modules, and arbitrary objects via the
    __reduce__ protocol), emitting !!python/* tags.  Output from this
    class must only be loaded with the unsafe loader."""

    def represent_complex(self, data):
        # Pick the shortest exact textual form: pure real, pure
        # imaginary, or the full a+bj / a-bj.
        if data.imag == 0.0:
            data = '%r' % data.real
        elif data.real == 0.0:
            data = '%rj' % data.imag
        elif data.imag > 0:
            data = '%r+%rj' % (data.real, data.imag)
        else:
            data = '%r%rj' % (data.real, data.imag)
        return self.represent_scalar('tag:yaml.org,2002:python/complex', data)

    def represent_tuple(self, data):
        return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)

    def represent_name(self, data):
        # Classes/functions are stored by dotted import path only.
        name = '%s.%s' % (data.__module__, data.__name__)
        return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')

    def represent_module(self, data):
        return self.represent_scalar(
                'tag:yaml.org,2002:python/module:'+data.__name__, '')

    def represent_object(self, data):
        # We use __reduce__ API to save the data. data.__reduce__ returns
        # a tuple of length 2-5:
        #   (function, args, state, listitems, dictitems)
        # For reconstructing, we calls function(*args), then set its state,
        # listitems, and dictitems if they are not None.
        # A special case is when function.__name__ == '__newobj__'. In this
        # case we create the object with args[0].__new__(*args).
        # Another special case is when __reduce__ returns a string - we don't
        # support it.
        # We produce a !!python/object, !!python/object/new or
        # !!python/object/apply node.
        cls = type(data)
        if cls in copyreg.dispatch_table:
            reduce = copyreg.dispatch_table[cls](data)
        elif hasattr(data, '__reduce_ex__'):
            reduce = data.__reduce_ex__(2)
        elif hasattr(data, '__reduce__'):
            reduce = data.__reduce__()
        else:
            raise RepresenterError("cannot represent object: %r" % data)
        # Pad the reduce tuple to exactly five elements.
        reduce = (list(reduce)+[None]*5)[:5]
        function, args, state, listitems, dictitems = reduce
        args = list(args)
        if state is None:
            state = {}
        if listitems is not None:
            listitems = list(listitems)
        if dictitems is not None:
            dictitems = dict(dictitems)
        if function.__name__ == '__newobj__':
            function = args[0]
            args = args[1:]
            tag = 'tag:yaml.org,2002:python/object/new:'
            newobj = True
        else:
            tag = 'tag:yaml.org,2002:python/object/apply:'
            newobj = False
        function_name = '%s.%s' % (function.__module__, function.__name__)
        # Compact forms when possible: plain state dict -> !!python/object;
        # args only -> sequence node; otherwise an explicit mapping.
        if not args and not listitems and not dictitems \
                and isinstance(state, dict) and newobj:
            return self.represent_mapping(
                    'tag:yaml.org,2002:python/object:'+function_name, state)
        if not listitems and not dictitems  \
                and isinstance(state, dict) and not state:
            return self.represent_sequence(tag+function_name, args)
        value = {}
        if args:
            value['args'] = args
        if state or not isinstance(state, dict):
            value['state'] = state
        if listitems:
            value['listitems'] = listitems
        if dictitems:
            value['dictitems'] = dictitems
        return self.represent_mapping(tag+function_name, value)
# Register the Python-specific representers; the multi-representer on
# `object` is the last-resort __reduce__-based fallback.
Representer.add_representer(complex,
        Representer.represent_complex)
Representer.add_representer(tuple,
        Representer.represent_tuple)
Representer.add_representer(type,
        Representer.represent_name)
Representer.add_representer(types.FunctionType,
        Representer.represent_name)
Representer.add_representer(types.BuiltinFunctionType,
        Representer.represent_name)
Representer.add_representer(types.ModuleType,
        Representer.represent_module)
Representer.add_multi_representer(object,
        Representer.represent_object)
| apache-2.0 |
alx/torrentflux | TF_BitTornado/BitTornado/CurrentRateMeasure.py | 15 | 1029 | # Written by Bram Cohen
# see LICENSE.txt for license information
from clock import clock
class Measure:
    """Windowed transfer-rate meter.

    Tracks a running byte total and a smoothed rate over a sliding
    window of at most ``max_rate_period`` seconds, using the monotonic
    ``clock()`` helper imported at the top of this module.
    """

    def __init__(self, max_rate_period, fudge = 1):
        self.max_rate_period = max_rate_period
        # Start the window `fudge` seconds in the past so the first
        # update does not divide by a near-zero interval.
        self.ratesince = clock() - fudge
        self.last = self.ratesince
        self.rate = 0.0
        # Fix: was `0l` -- a lowercase-L long literal, which PEP 8
        # forbids (easily misread as `01`) and Python 3 rejects.  A
        # plain int is equivalent; Python 2 auto-promotes to long.
        self.total = 0

    def update_rate(self, amount):
        """Fold `amount` new bytes into the total and the smoothed rate."""
        self.total += amount
        t = clock()
        # Weighted average of the old rate over the elapsed window plus
        # the new bytes; +0.0001 guards against a zero denominator.
        self.rate = (self.rate * (self.last - self.ratesince) +
                     amount) / (t - self.ratesince + 0.0001)
        self.last = t
        # Clamp the window start so old traffic ages out.
        if self.ratesince < t - self.max_rate_period:
            self.ratesince = t - self.max_rate_period

    def get_rate(self):
        """Return the current rate, decayed for time with no traffic."""
        self.update_rate(0)
        return self.rate

    def get_rate_noupdate(self):
        """Return the last computed rate without touching the clock."""
        return self.rate

    def time_until_rate(self, newrate):
        """Seconds of silence needed for the rate to decay to `newrate`."""
        if self.rate <= newrate:
            return 0
        t = clock() - self.ratesince
        # Solve rate*t / (t + x) = newrate for the extra idle time x.
        return ((self.rate * t) / newrate) - t

    def get_total(self):
        """Return the total number of bytes recorded so far."""
        return self.total
mosajjal/mitmproxy | test/mitmproxy/addons/test_dumper.py | 2 | 5945 | import io
import shutil
import pytest
from unittest import mock
from mitmproxy.test import tflow
from mitmproxy.test import taddons
from mitmproxy.test import tutils
from mitmproxy.addons import dumper
from mitmproxy import exceptions
from mitmproxy.tools import dump
from mitmproxy import http
def test_configure():
    """filtstr compiles into d.filter and gates match(); an invalid
    filter expression raises OptionsError and leaves the filter unset."""
    d = dumper.Dumper()
    with taddons.context(options=dump.Options()) as ctx:
        ctx.configure(d, filtstr="~b foo")
        assert d.filter
        f = tflow.tflow(resp=True)
        assert not d.match(f)
        f.response.content = b"foo"
        assert d.match(f)
        ctx.configure(d, filtstr=None)
        assert not d.filter
        with pytest.raises(exceptions.OptionsError):
            ctx.configure(d, filtstr="~~")
        assert not d.filter
def test_simple():
    """Output volume scales with flow_detail; detail >= 4 echoes bodies.

    NOTE(review): sio.truncate(0) does not rewind the stream position,
    so later writes are padded with NULs -- harmless here because the
    assertions only check truthiness / substring presence.
    """
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(options=dump.Options()) as ctx:
        # Detail 0: nothing is printed.
        ctx.configure(d, flow_detail=0)
        d.response(tflow.tflow(resp=True))
        assert not sio.getvalue()
        sio.truncate(0)

        ctx.configure(d, flow_detail=1)
        d.response(tflow.tflow(resp=True))
        assert sio.getvalue()
        sio.truncate(0)

        ctx.configure(d, flow_detail=1)
        d.error(tflow.tflow(err=True))
        assert sio.getvalue()
        sio.truncate(0)

        ctx.configure(d, flow_detail=4)
        d.response(tflow.tflow(resp=True))
        assert sio.getvalue()
        sio.truncate(0)

        # Detail 4 prints the response body marker "<<".
        ctx.configure(d, flow_detail=4)
        d.response(tflow.tflow(resp=True))
        assert "<<" in sio.getvalue()
        sio.truncate(0)

        ctx.configure(d, flow_detail=4)
        d.response(tflow.tflow(err=True))
        assert "<<" in sio.getvalue()
        sio.truncate(0)

        # Replayed response with sticky cookie and a redirect status.
        ctx.configure(d, flow_detail=4)
        flow = tflow.tflow()
        flow.request = tutils.treq()
        flow.request.stickycookie = True
        flow.client_conn = mock.MagicMock()
        flow.client_conn.address[0] = "foo"
        flow.response = tutils.tresp(content=None)
        flow.response.is_replay = True
        flow.response.status_code = 300
        d.response(flow)
        assert sio.getvalue()
        sio.truncate(0)

        # Invalid JSON body must still be rendered without crashing.
        ctx.configure(d, flow_detail=4)
        flow = tflow.tflow(resp=tutils.tresp(content=b"{"))
        flow.response.headers["content-type"] = "application/json"
        flow.response.status_code = 400
        d.response(flow)
        assert sio.getvalue()
        sio.truncate(0)

        # Missing bodies are reported explicitly.
        ctx.configure(d, flow_detail=4)
        flow = tflow.tflow()
        flow.request.content = None
        flow.response = http.HTTPResponse.wrap(tutils.tresp())
        flow.response.content = None
        d.response(flow)
        assert "content missing" in sio.getvalue()
        sio.truncate(0)
def test_echo_body():
    """Long bodies are truncated and marked as cut off at detail 3."""
    f = tflow.tflow(client_conn=True, server_conn=True, resp=True)
    f.response.headers["content-type"] = "text/html"
    f.response.content = b"foo bar voing\n" * 100
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(options=dump.Options()) as ctx:
        ctx.configure(d, flow_detail=3)
        d._echo_message(f.response)
        t = sio.getvalue()
        assert "cut off" in t
def test_echo_request_line():
    """Request-line rendering: replay marker, nonstandard HTTP version,
    and URL truncation to the terminal width at detail 0."""
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(options=dump.Options()) as ctx:
        ctx.configure(d, flow_detail=3, showhost=True)
        f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
        f.request.is_replay = True
        d._echo_request_line(f)
        assert "[replay]" in sio.getvalue()
        sio.truncate(0)

        f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
        f.request.is_replay = False
        d._echo_request_line(f)
        assert "[replay]" not in sio.getvalue()
        sio.truncate(0)

        f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
        f.request.http_version = "nonstandard"
        d._echo_request_line(f)
        assert "nonstandard" in sio.getvalue()
        sio.truncate(0)

        # At detail 0 the URL is clipped to the terminal width, so the
        # tail of an over-long URL must not appear.
        ctx.configure(d, flow_detail=0, showhost=True)
        f = tflow.tflow(client_conn=None, server_conn=True, resp=True)
        terminalWidth = max(shutil.get_terminal_size()[0] - 25, 50)
        f.request.url = "http://address:22/" + ("x" * terminalWidth) + "textToBeTruncated"
        d._echo_request_line(f)
        assert "textToBeTruncated" not in sio.getvalue()
        sio.truncate(0)
class TestContentView:
    """A content-view failure is logged, not raised."""

    @mock.patch("mitmproxy.contentviews.auto.ViewAuto.__call__")
    def test_contentview(self, view_auto):
        # Force the auto view to blow up; the dumper must log instead
        # of propagating the exception.
        view_auto.side_effect = exceptions.ContentViewException("")
        sio = io.StringIO()
        d = dumper.Dumper(sio)
        with taddons.context(options=dump.Options()) as ctx:
            ctx.configure(d, flow_detail=4, verbosity=3)
            d.response(tflow.tflow())
            assert "Content viewer failed" in ctx.master.event_log[0][1]
def test_tcp():
    """TCP messages are echoed at detail 3; TCP errors are reported."""
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(options=dump.Options()) as ctx:
        ctx.configure(d, flow_detail=3, showhost=True)
        f = tflow.ttcpflow()
        d.tcp_message(f)
        assert "it's me" in sio.getvalue()
        sio.truncate(0)

        f = tflow.ttcpflow(client_conn=True, err=True)
        d.tcp_error(f)
        assert "Error in TCP" in sio.getvalue()
def test_websocket():
    """WebSocket messages, close events and errors are all rendered."""
    sio = io.StringIO()
    d = dumper.Dumper(sio)
    with taddons.context(options=dump.Options()) as ctx:
        ctx.configure(d, flow_detail=3, showhost=True)
        f = tflow.twebsocketflow()
        d.websocket_message(f)
        assert "it's me" in sio.getvalue()
        sio.truncate(0)

        d.websocket_end(f)
        assert "WebSocket connection closed by" in sio.getvalue()

        f = tflow.twebsocketflow(client_conn=True, err=True)
        d.websocket_error(f)
        assert "Error in WebSocket" in sio.getvalue()
| mit |
rzhxeo/youtube-dl | youtube_dl/extractor/ro220.py | 176 | 1451 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
class Ro220IE(InfoExtractor):
    """Extractor for videos hosted on 220.ro."""
    IE_NAME = '220.ro'
    _VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/',
        'md5': '03af18b73a07b4088753930db7a34add',
        'info_dict': {
            'id': 'LYV6doKo7f',
            'ext': 'mp4',
            'title': 'Luati-le Banii sez 4 ep 1',
            'description': 're:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The media URL is embedded, percent-encoded, in an inline JS
        # player config of the form: clip : { ... url : '<encoded>' }
        url = compat_urllib_parse_unquote(self._search_regex(
            r'(?s)clip\s*:\s*{.*?url\s*:\s*\'([^\']+)\'', webpage, 'url'))
        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
        thumbnail = self._og_search_thumbnail(webpage)

        # Only a single SD MP4 rendition is exposed by the page.
        formats = [{
            'format_id': 'sd',
            'url': url,
            'ext': 'mp4',
        }]

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
        }
| unlicense |
0k/OpenUpgrade | addons/account/report/account_financial_report.py | 380 | 6365 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
from common_report_header import common_report_header
from openerp.tools.translate import _
from openerp.osv import osv
class report_account_common(report_sxw.rml_parse, common_report_header):
    """RML parser for the configurable financial reports (balance sheet,
    profit & loss, ...) defined via ``account.financial.report`` records."""

    def __init__(self, cr, uid, name, context=None):
        super(report_account_common, self).__init__(cr, uid, name, context=context)
        # Expose the helpers the QWeb/RML template calls while rendering.
        self.localcontext.update( {
            'get_lines': self.get_lines,
            'time': time,
            'get_fiscalyear': self._get_fiscalyear,
            'get_account': self._get_account,
            'get_start_period': self.get_start_period,
            'get_end_period': self.get_end_period,
            'get_filter': self._get_filter,
            'get_start_date':self._get_start_date,
            'get_end_date':self._get_end_date,
            'get_target_move': self._get_target_move,
        })
        self.context = context

    def set_context(self, objects, data, ids, report_type=None):
        """Rebind the report to the chart of accounts selected in the
        wizard when the report is launched from the menu."""
        new_ids = ids
        if (data['model'] == 'ir.ui.menu'):
            new_ids = 'chart_account_id' in data['form'] and [data['form']['chart_account_id']] or []
            objects = self.pool.get('account.account').browse(self.cr, self.uid, new_ids)
        return super(report_account_common, self).set_context(objects, data, new_ids, report_type=report_type)

    def get_lines(self, data):
        """Build the list of report lines (dicts) for the financial report
        selected in ``data['form']``, including per-account detail lines
        when the report node asks for them."""
        lines = []
        account_obj = self.pool.get('account.account')
        currency_obj = self.pool.get('res.currency')
        # Children of the selected report node, in display order.
        ids2 = self.pool.get('account.financial.report')._get_children_by_order(self.cr, self.uid, [data['form']['account_report_id'][0]], context=data['form']['used_context'])
        for report in self.pool.get('account.financial.report').browse(self.cr, self.uid, ids2, context=data['form']['used_context']):
            vals = {
                'name': report.name,
                'balance': report.balance * report.sign or 0.0,
                'type': 'report',
                'level': bool(report.style_overwrite) and report.style_overwrite or report.level,
                'account_type': report.type =='sum' and 'view' or False, #used to underline the financial report balances
            }
            if data['form']['debit_credit']:
                vals['debit'] = report.debit
                vals['credit'] = report.credit
            if data['form']['enable_filter']:
                # Comparison column: same node re-read in the comparison context.
                vals['balance_cmp'] = self.pool.get('account.financial.report').browse(self.cr, self.uid, report.id, context=data['form']['comparison_context']).balance * report.sign or 0.0
            lines.append(vals)
            account_ids = []
            if report.display_detail == 'no_detail':
                #the rest of the loop is used to display the details of the financial report, so it's not needed here.
                continue
            if report.type == 'accounts' and report.account_ids:
                account_ids = account_obj._get_children_and_consol(self.cr, self.uid, [x.id for x in report.account_ids])
            elif report.type == 'account_type' and report.account_type_ids:
                account_ids = account_obj.search(self.cr, self.uid, [('user_type','in', [x.id for x in report.account_type_ids])])
            if account_ids:
                for account in account_obj.browse(self.cr, self.uid, account_ids, context=data['form']['used_context']):
                    #if there are accounts to display, we add them to the lines with a level equals to their level in
                    #the COA + 1 (to avoid having them with a too low level that would conflicts with the level of data
                    #financial reports for Assets, liabilities...)
                    if report.display_detail == 'detail_flat' and account.type == 'view':
                        continue
                    flag = False
                    vals = {
                        'name': account.code + ' ' + account.name,
                        'balance': account.balance != 0 and account.balance * report.sign or account.balance,
                        'type': 'account',
                        'level': report.display_detail == 'detail_with_hierarchy' and min(account.level + 1,6) or 6, #account.level + 1
                        'account_type': account.type,
                    }
                    if data['form']['debit_credit']:
                        vals['debit'] = account.debit
                        vals['credit'] = account.credit
                    # Only show the account line if at least one column is
                    # non-zero in the company currency.
                    if not currency_obj.is_zero(self.cr, self.uid, account.company_id.currency_id, vals['balance']):
                        flag = True
                    if data['form']['enable_filter']:
                        vals['balance_cmp'] = account_obj.browse(self.cr, self.uid, account.id, context=data['form']['comparison_context']).balance * report.sign or 0.0
                        if not currency_obj.is_zero(self.cr, self.uid, account.company_id.currency_id, vals['balance_cmp']):
                            flag = True
                    if flag:
                        lines.append(vals)
        return lines
class report_financial(osv.AbstractModel):
    """QWeb report binding: renders the 'account.report_financial'
    template through the legacy RML parser defined above."""
    _name = 'report.account.report_financial'
    _inherit = 'report.abstract_report'
    _template = 'account.report_financial'
    _wrapped_report_class = report_account_common
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jonathonwalz/ansible | lib/ansible/modules/monitoring/zabbix_maintenance.py | 35 | 12164 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: zabbix_maintenance
short_description: Create Zabbix maintenance windows
description:
- This module will let you create Zabbix maintenance windows.
version_added: "1.8"
author: "Alexander Bulimov (@abulimov)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
state:
description:
- Create or remove a maintenance window.
required: false
default: present
choices: [ "present", "absent" ]
server_url:
description:
- Url of Zabbix server, with protocol (http or https).
C(url) is an alias for C(server_url).
required: true
default: null
aliases: [ "url" ]
login_user:
description:
- Zabbix user name.
required: true
login_password:
description:
- Zabbix user password.
required: true
http_login_user:
description:
- Basic Auth login
required: false
default: None
version_added: "2.1"
http_login_password:
description:
- Basic Auth password
required: false
default: None
version_added: "2.1"
host_names:
description:
- Hosts to manage maintenance window for.
Separate multiple hosts with commas.
C(host_name) is an alias for C(host_names).
B(Required) option when C(state) is I(present)
and no C(host_groups) specified.
required: false
default: null
aliases: [ "host_name" ]
host_groups:
description:
- Host groups to manage maintenance window for.
Separate multiple groups with commas.
C(host_group) is an alias for C(host_groups).
B(Required) option when C(state) is I(present)
and no C(host_names) specified.
required: false
default: null
aliases: [ "host_group" ]
minutes:
description:
- Length of maintenance window in minutes.
required: false
default: 10
name:
description:
- Unique name of maintenance window.
required: true
desc:
description:
- Short description of maintenance window.
required: true
default: Created by Ansible
collect_data:
description:
- Type of maintenance. With data collection, or without.
required: false
default: "true"
timeout:
description:
- The timeout of API request (seconds).
default: 10
version_added: "2.1"
required: false
notes:
- Useful for setting hosts in maintenance mode before big update,
and removing maintenance window after update.
- Module creates maintenance window from now() to now() + minutes,
so if Zabbix server's time and host's time are not synchronized,
you will get strange results.
- Install required module with 'pip install zabbix-api' command.
- Checks existence only by maintenance name.
'''
EXAMPLES = '''
- name: Create a named maintenance window for host www1 for 90 minutes
zabbix_maintenance:
name: Update of www1
host_name: www1.example.com
state: present
minutes: 90
server_url: https://monitoring.example.com
login_user: ansible
login_password: pAsSwOrD
- name: Create a named maintenance window for host www1 and host groups Office and Dev
zabbix_maintenance:
name: Update of www1
host_name: www1.example.com
host_groups:
- Office
- Dev
state: present
server_url: https://monitoring.example.com
login_user: ansible
login_password: pAsSwOrD
- name: Create a named maintenance window for hosts www1 and db1, without data collection.
zabbix_maintenance:
name: update
host_names:
- www1.example.com
- db1.example.com
state: present
collect_data: False
server_url: https://monitoring.example.com
login_user: ansible
login_password: pAsSwOrD
- name: Remove maintenance window by name
zabbix_maintenance:
name: Test1
state: absent
server_url: https://monitoring.example.com
login_user: ansible
login_password: pAsSwOrD
'''
import datetime
import time
try:
from zabbix_api import ZabbixAPI
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
def create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc):
    """Create a Zabbix maintenance window lasting *period* seconds.

    Returns an (rc, data, error) triple: rc 0 on success, or rc 1 with
    the stringified exception in *error* on failure.
    """
    end_time = start_time + period
    request = {
        "groupids": group_ids,
        "hostids": host_ids,
        "name": name,
        "maintenance_type": maintenance_type,
        "active_since": str(start_time),
        "active_till": str(end_time),
        "description": desc,
        # A single one-shot time period covering the whole window.
        "timeperiods": [{
            "timeperiod_type": "0",
            "start_date": str(start_time),
            "period": str(period),
        }],
    }
    try:
        zbx.maintenance.create(request)
    except BaseException as e:
        return 1, None, str(e)
    return 0, None, None
def get_maintenance_id(zbx, name):
    """Look up all maintenance windows matching *name* exactly.

    Returns (rc, maintenance_ids, error): rc 0 with a (possibly empty)
    list of ids on success, rc 1 with the error text on API failure.
    """
    try:
        matches = zbx.maintenance.get({"filter": {"name": name}})
    except BaseException as e:
        return 1, None, str(e)
    return 0, [entry["maintenanceid"] for entry in matches], None
def delete_maintenance(zbx, maintenance_id):
    """Remove an existing Zabbix maintenance window.

    Returns (rc, data, error): rc 0 on success, rc 1 with the
    stringified exception in *error* on failure.
    """
    rc, data, error = 0, None, None
    try:
        zbx.maintenance.delete(maintenance_id)
    except BaseException as e:
        rc, error = 1, str(e)
    return rc, data, error
def get_group_ids(zbx, host_groups):
    """Resolve host group names to Zabbix group ids.

    Returns (rc, group_ids, error). Fails with rc 1 on the first API
    error or the first group name with no match.
    """
    group_ids = []
    for group in host_groups:
        query = {
            "output": "extend",
            "filter": {"name": group},
        }
        try:
            found = zbx.hostgroup.get(query)
        except BaseException as e:
            return 1, None, str(e)
        if not found:
            return 1, None, "Group id for group %s not found" % group
        group_ids.append(found[0]["groupid"])
    return 0, group_ids, None
def get_host_ids(zbx, host_names):
    """Resolve host names to Zabbix host ids.

    Returns (rc, host_ids, error). Fails with rc 1 on the first API
    error or the first host name with no match.
    """
    host_ids = []
    for host in host_names:
        query = {
            "output": "extend",
            "filter": {"name": host},
        }
        try:
            found = zbx.host.get(query)
        except BaseException as e:
            return 1, None, str(e)
        if not found:
            return 1, None, "Host id for host %s not found" % host
        host_ids.append(found[0]["hostid"])
    return 0, host_ids, None
def main():
    """Module entry point: parse arguments, connect to Zabbix, and
    create or remove the named maintenance window."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            server_url=dict(type='str', required=True, default=None, aliases=['url']),
            host_names=dict(type='list', required=False, default=None, aliases=['host_name']),
            minutes=dict(type='int', required=False, default=10),
            host_groups=dict(type='list', required=False, default=None, aliases=['host_group']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            name=dict(type='str', required=True),
            desc=dict(type='str', required=False, default="Created by Ansible"),
            collect_data=dict(type='bool', required=False, default=True),
            timeout=dict(type='int', default=10),
        ),
        supports_check_mode=True,
    )

    if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")

    host_names = module.params['host_names']
    host_groups = module.params['host_groups']
    state = module.params['state']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    minutes = module.params['minutes']
    name = module.params['name']
    desc = module.params['desc']
    server_url = module.params['server_url']
    collect_data = module.params['collect_data']
    timeout = module.params['timeout']

    # Zabbix maintenance_type: 0 = with data collection, 1 = without.
    if collect_data:
        maintenance_type = 0
    else:
        maintenance_type = 1

    try:
        zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
        zbx.login(login_user, login_password)
    except BaseException as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    changed = False

    if state == "present":
        # Window runs from "now" (local server clock) for `minutes`.
        now = datetime.datetime.now()
        start_time = time.mktime(now.timetuple())
        period = 60 * int(minutes)  # N * 60 seconds
        if host_groups:
            (rc, group_ids, error) = get_group_ids(zbx, host_groups)
            if rc != 0:
                module.fail_json(msg="Failed to get group_ids: %s" % error)
        else:
            group_ids = []
        if host_names:
            (rc, host_ids, error) = get_host_ids(zbx, host_names)
            if rc != 0:
                module.fail_json(msg="Failed to get host_ids: %s" % error)
        else:
            host_ids = []

        # Idempotency is by maintenance name only: if a window with this
        # name already exists, nothing is created.
        (rc, maintenance, error) = get_maintenance_id(zbx, name)
        if rc != 0:
            module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error))

        if not maintenance:
            if not host_names and not host_groups:
                module.fail_json(msg="At least one host_name or host_group must be defined for each created maintenance.")

            if module.check_mode:
                changed = True
            else:
                (rc, _, error) = create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc)
                if rc == 0:
                    changed = True
                else:
                    module.fail_json(msg="Failed to create maintenance: %s" % error)

    if state == "absent":
        (rc, maintenance, error) = get_maintenance_id(zbx, name)
        if rc != 0:
            module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error))

        if maintenance:
            if module.check_mode:
                changed = True
            else:
                # `maintenance` is the full list of matching ids.
                (rc, _, error) = delete_maintenance(zbx, maintenance)
                if rc == 0:
                    changed = True
                else:
                    module.fail_json(msg="Failed to remove maintenance: %s" % error)

    module.exit_json(changed=changed)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
upliftaero/MissionPlanner | Lib/site-packages/numpy/lib/function_base.py | 53 | 108301 | __docformat__ = "restructuredtext en"
__all__ = ['select', 'piecewise', 'trim_zeros', 'copy', 'iterable',
'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex',
'disp', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp']
import warnings
import types
import sys
import numpy.core.numeric as _nx
from numpy.core import linspace
from numpy.core.numeric import ones, zeros, arange, concatenate, array, \
asarray, asanyarray, empty, empty_like, ndarray, around
from numpy.core.numeric import ScalarType, dot, where, newaxis, intp, \
integer, isscalar
from numpy.core.umath import pi, multiply, add, arctan2, \
frompyfunc, isnan, cos, less_equal, sqrt, sin, mod, exp, log10
from numpy.core.fromnumeric import ravel, nonzero, choose, sort, mean
from numpy.core.numerictypes import typecodes, number
from numpy.core import atleast_1d, atleast_2d
from numpy.lib.twodim_base import diag
if sys.platform != 'cli':
from _compiled_base import _insert, add_docstring
from _compiled_base import digitize, bincount, interp as compiled_interp
else:
from _compiled_base import _insert, bincount
# TODO: Implement these
def add_docstring(*args, **kw):
pass
def digitize(*args, **kw):
raise NotImplementedError()
def compiled_interp(*args, **kw):
raise NotImplementedError()
from arraysetops import setdiff1d
from utils import deprecate
import numpy as np
def iterable(y):
    """
    Check whether or not an object can be iterated over.

    Parameters
    ----------
    y : object
        Input object.

    Returns
    -------
    b : {0, 1}
        Return 1 if the object has an iterator method or is a sequence,
        and 0 otherwise.

    Examples
    --------
    >>> np.iterable([1, 2, 3])
    1
    >>> np.iterable(2)
    0

    """
    try:
        iter(y)
    except TypeError:
        # Only TypeError means "not iterable"; the previous bare except
        # also swallowed KeyboardInterrupt/SystemExit.
        return 0
    return 1
def histogram(a, bins=10, range=None, normed=False, weights=None):
    """
    Compute the histogram of a set of data.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a sequence,
        it defines the bin edges, including the rightmost edge, allowing
        for non-uniform bin widths.
    range : (float, float), optional
        The lower and upper range of the bins.  If not provided, range
        is simply ``(a.min(), a.max())``.  Values outside the range are
        ignored.
    normed : bool, optional
        If False, the result will contain the number of samples
        in each bin.  If True, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.
    weights : array_like, optional
        An array of weights, of the same shape as `a`.  Each value in `a`
        only contributes its associated weight towards the bin count
        (instead of 1).  If `normed` is True, the weights are normalized,
        so that the integral of the density over the range remains 1

    Returns
    -------
    hist : array
        The values of the histogram. See `normed` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.

    See Also
    --------
    histogramdd, bincount, searchsorted

    Notes
    -----
    All but the last (righthand-most) bin is half-open.  In other words, if
    `bins` is::

      [1, 2, 3, 4]

    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
    second ``[2, 3)``.  The last bin, however, is ``[3, 4]``, which *includes*
    4.

    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), normed=True)
    (array([ 0.25,  0.25,  0.25,  0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))

    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, normed=True)
    >>> hist
    array([ 0.5,  0. ,  0.5,  0. ,  0. ,  0.5,  0. ,  0.5,  0. ,  0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist*np.diff(bin_edges))
    1.0

    """
    a = asarray(a)
    if weights is not None:
        weights = asarray(weights)
        if np.any(weights.shape != a.shape):
            raise ValueError(
                    'weights should have the same shape as a.')
        weights = weights.ravel()
    a = a.ravel()

    if (range is not None):
        mn, mx = range
        if (mn > mx):
            raise AttributeError(
                'max must be larger than min in range parameter.')

    if not iterable(bins):
        if range is None:
            range = (a.min(), a.max())
        mn, mx = [mi+0.0 for mi in range]
        if mn == mx:
            # Degenerate range: widen it so linspace produces real bins.
            mn -= 0.5
            mx += 0.5
        bins = linspace(mn, mx, bins+1, endpoint=True)
        uniform = True
    else:
        bins = asarray(bins)
        uniform = False
        if (np.diff(bins) < 0).any():
            raise AttributeError(
                'bins must increase monotonically.')

    # Histogram is an integer or a float array depending on the weights.
    if weights is None:
        ntype = int
    else:
        ntype = weights.dtype
    n = np.zeros(bins.shape, ntype)

    # Process the data in 64k chunks to bound the sort's working memory;
    # per-bin counts are accumulated as cumulative counts and diffed below.
    block = 65536
    if weights is None:
        for i in arange(0, len(a), block):
            sa = sort(a[i:i+block])
            n += np.r_[sa.searchsorted(bins[:-1], 'left'), \
                sa.searchsorted(bins[-1], 'right')]
    else:
        zero = array(0, dtype=ntype)
        for i in arange(0, len(a), block):
            tmp_a = a[i:i+block]
            tmp_w = weights[i:i+block]
            sorting_index = np.argsort(tmp_a)
            sa = tmp_a[sorting_index]
            sw = tmp_w[sorting_index]
            # Cumulative weights let searchsorted positions index the
            # total weight below each bin edge.
            cw = np.concatenate(([zero,], sw.cumsum()))
            bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), \
                sa.searchsorted(bins[-1], 'right')]
            n += cw[bin_index]

    # Convert cumulative counts at the edges into per-bin counts.
    n = np.diff(n)

    if normed:
        db = array(np.diff(bins), float)
        if not uniform:
            warnings.warn("""
            This release of NumPy fixes a normalization bug in histogram
            function occuring with non-uniform bin widths. The returned
            value is now a density: n / (N * bin width), where n is the
            bin count and N the total number of points.
            """)
        return n/db/n.sum(), bins
    else:
        return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
    """
    Compute the multidimensional histogram of some data.

    Parameters
    ----------
    sample : array_like
        The data to be histogrammed. It must be an (N,D) array or data
        that can be converted to such. The rows of the resulting array
        are the coordinates of points in a D dimensional polytope.
    bins : sequence or int, optional
        The bin specification:

        * A sequence of arrays describing the bin edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... =bins)
        * The number of bins for all dimensions (nx=ny=...=bins).

    range : sequence, optional
        A sequence of lower and upper bin edges to be used if the edges are
        not given explicitely in `bins`. Defaults to the minimum and maximum
        values along each dimension.
    normed : boolean, optional
        If False, returns the number of samples in each bin. If True, returns
        the bin density, ie, the bin count divided by the bin hypervolume.
    weights : array_like (N,), optional
        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
        Weights are normalized to 1 if normed is True. If normed is False, the
        values of the returned histogram are equal to the sum of the weights
        belonging to the samples falling into each bin.

    Returns
    -------
    H : ndarray
        The multidimensional histogram of sample x. See normed and weights for
        the different possible semantics.
    edges : list
        A list of D arrays describing the bin edges for each dimension.

    See Also
    --------
    histogram: 1D histogram
    histogram2d: 2D histogram

    Examples
    --------
    >>> r = np.random.randn(100,3)
    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
    ((5, 8, 4), 6, 9, 5)

    """
    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = atleast_2d(sample).T
        N, D = sample.shape

    nbin = empty(D, int)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        weights = asarray(weights)

    try:
        M = len(bins)
        if M != D:
            raise AttributeError(
                'The dimension of bins must be equal'\
                ' to the dimension of the sample x.')
    except TypeError:
        # bins is a single int: use it for every dimension.
        bins = D*[bins]

    # Select range for each dimension
    # Used only if number of bins is given.
    if range is None:
        smin = atleast_1d(array(sample.min(0), float))
        smax = atleast_1d(array(sample.max(0), float))
    else:
        smin = zeros(D)
        smax = zeros(D)
        for i in arange(D):
            smin[i], smax[i] = range[i]

    # Make sure the bins have a finite width.
    for i in arange(len(smin)):
        if smin[i] == smax[i]:
            smin[i] = smin[i] - .5
            smax[i] = smax[i] + .5

    # Create edge arrays
    for i in arange(D):
        if isscalar(bins[i]):
            nbin[i] = bins[i] + 2 # +2 for outlier bins
            edges[i] = linspace(smin[i], smax[i], nbin[i]-1)
        else:
            edges[i] = asarray(bins[i], float)
            nbin[i] = len(edges[i])+1  # +1 for outlier bins
        dedges[i] = diff(edges[i])

    nbin = asarray(nbin)

    # Compute the bin number each sample falls into.
    Ncount = {}
    for i in arange(D):
        Ncount[i] = digitize(sample[:,i], edges[i])

    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right
    # edge to be counted in the last bin, and not as an outlier.
    outliers = zeros(N, int)
    for i in arange(D):
        # Rounding precision
        decimal = int(-log10(dedges[i].min())) +6
        # Find which points are on the rightmost edge.
        on_edge = where(around(sample[:,i], decimal) == around(edges[i][-1],
                                                               decimal))[0]
        # Shift these points one bin to the left.
        Ncount[i][on_edge] -= 1

    # Flattened histogram matrix (1D)
    # Reshape is used so that overlarge arrays
    # will raise an error.
    hist = zeros(nbin, float).reshape(-1)

    # Compute the sample indices in the flattened histogram matrix.
    ni = nbin.argsort()
    shape = []
    xy = zeros(N, int)
    for i in arange(0, D-1):
        xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
    xy += Ncount[ni[-1]]

    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    if len(xy) == 0:
        return zeros(nbin-2, int), edges

    flatcount = bincount(xy, weights)
    a = arange(len(flatcount))
    hist[a] = flatcount

    # Shape into a proper matrix
    hist = hist.reshape(sort(nbin))
    # Undo the argsort-induced axis permutation so axes match the input
    # dimension order.
    for i in arange(nbin.size):
        j = ni.argsort()[i]
        hist = hist.swapaxes(i,j)
        ni[i],ni[j] = ni[j],ni[i]

    # Remove outliers (indices 0 and -1 for each dimension).
    core = D*[slice(1,-1)]
    hist = hist[core]

    # Normalize if normed is True
    if normed:
        s = hist.sum()
        for i in arange(D):
            shape = ones(D, int)
            shape[i] = nbin[i] - 2
            hist = hist / dedges[i].reshape(shape)
        hist /= s

    if (hist.shape != nbin - 2).any():
        raise RuntimeError(
            "Internal Shape Error")
    return hist, edges
def average(a, axis=None, weights=None, returned=False):
    """
    Compute the weighted average along the specified axis.

    Parameters
    ----------
    a : array_like
        Array containing data to be averaged. If `a` is not an array, a
        conversion is attempted.
    axis : int, optional
        Axis along which to average `a`. If `None`, averaging is done over
        the flattened array.
    weights : array_like, optional
        An array of weights associated with the values in `a`. Each value in
        `a` contributes to the average according to its associated weight.
        The weights array can either be 1-D (in which case its length must be
        the size of `a` along the given axis) or of the same shape as `a`.
        If `weights=None`, then all data in `a` are assumed to have a
        weight equal to one.
    returned : bool, optional
        Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
        is returned, otherwise only the average is returned.
        If `weights=None`, `sum_of_weights` is equivalent to the number of
        elements over which the average is taken.

    Returns
    -------
    average, [sum_of_weights] : {array_type, double}
        Return the average along the specified axis. When returned is `True`,
        return a tuple with the average as the first element and the sum
        of the weights as the second element. The return type is `Float`
        if `a` is of integer type, otherwise it is of the same type as `a`.
        `sum_of_weights` is of the same type as `average`.

    Raises
    ------
    ZeroDivisionError
        When all weights along axis are zero. See `numpy.ma.average` for a
        version robust to this type of error.
    TypeError
        When the length of 1D `weights` is not the same as the shape of `a`
        along axis.

    See Also
    --------
    mean

    ma.average : average for masked arrays

    Examples
    --------
    >>> data = range(1,5)
    >>> data
    [1, 2, 3, 4]
    >>> np.average(data)
    2.5
    >>> np.average(range(1,11), weights=range(10,0,-1))
    4.0

    >>> data = np.arange(6).reshape((3,2))
    >>> data
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> np.average(data, axis=1, weights=[1./4, 3./4])
    array([ 0.75,  2.75,  4.75])
    >>> np.average(data, weights=[1./4, 3./4])
    Traceback (most recent call last):
    ...
    TypeError: Axis must be specified when shapes of a and weights differ.

    """
    # Preserve matrix subclass semantics; everything else goes through asarray.
    if not isinstance(a, np.matrix) :
        a = np.asarray(a)

    if weights is None :
        avg = a.mean(axis)
        # Unweighted case: the "sum of weights" is just the element count.
        scl = avg.dtype.type(a.size/avg.size)
    else :
        # Force a floating result so integer inputs don't truncate.
        a = a + 0.0
        wgt = np.array(weights, dtype=a.dtype, copy=0)

        # Sanity checks
        if a.shape != wgt.shape :
            if axis is None :
                raise TypeError(
                    "Axis must be specified when shapes of a "\
                    "and weights differ.")
            if wgt.ndim != 1 :
                raise TypeError(
                    "1D weights expected when shapes of a and "\
                    "weights differ.")
            if wgt.shape[0] != a.shape[axis] :
                raise ValueError(
                    "Length of weights not compatible with "\
                    "specified axis.")

            # setup wgt to broadcast along axis
            wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)

        scl = wgt.sum(axis=axis)
        if (scl == 0.0).any():
            raise ZeroDivisionError(
                "Weights sum to zero, can't be normalized")

        avg = np.multiply(a, wgt).sum(axis)/scl

    if returned:
        # Broadcast scl to the shape of avg before returning the pair.
        scl = np.multiply(avg, 0) + scl
        return avg, scl
    else:
        return avg
def asarray_chkfinite(a, dtype=None, order=None):
    """
    Convert the input to an array, checking for NaNs or Infs.

    Parameters
    ----------
    a : array_like
        Input data, in any form that can be converted to an array.  This
        includes lists, lists of tuples, tuples, tuples of tuples, tuples
        of lists and ndarrays.  Success requires no NaNs or Infs.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Whether to use row-major ('C') or column-major ('FORTRAN') memory
        representation.  Defaults to 'C'.

    Returns
    -------
    out : ndarray
        Array interpretation of `a`.  No copy is performed if the input
        is already an ndarray.  If `a` is a subclass of ndarray, a base
        class ndarray is returned.

    Raises
    ------
    ValueError
        Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).

    See Also
    --------
    asarray : Create and array.
    asanyarray : Similar function which passes through subclasses.
    ascontiguousarray : Convert input to a contiguous array.
    asfarray : Convert input to a floating point ndarray.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    fromiter : Create an array from an iterator.
    fromfunction : Construct an array by executing a function on grid
                   positions.

    Examples
    --------
    Convert a list into an array.  If all elements are finite
    ``asarray_chkfinite`` is identical to ``asarray``.

    >>> a = [1, 2]
    >>> np.asarray_chkfinite(a)
    array([1, 2])

    Raises ValueError if array_like contains Nans or Infs.

    >>> a = [1, 2, np.inf]
    >>> try:
    ...     np.asarray_chkfinite(a)
    ... except ValueError:
    ...     print 'ValueError'
    ...
    ValueError

    """
    # Accept dtype/order and forward them to asarray; the docstring
    # already documented these parameters but the old signature did not
    # accept them. Defaults preserve the previous behavior exactly.
    a = asarray(a, dtype=dtype, order=order)
    # Only float/complex dtypes can hold non-finite values, so the scan
    # is skipped entirely for integer, bool and object arrays.
    if (a.dtype.char in typecodes['AllFloat']) \
            and (_nx.isnan(a).any() or _nx.isinf(a).any()):
        raise ValueError(
            "array must not contain infs or NaNs")
    return a
def piecewise(x, condlist, funclist, *args, **kw):
    """
    Evaluate a piecewise-defined function.

    Given a set of conditions and corresponding functions, evaluate
    each function on the input data wherever its condition is true.

    Parameters
    ----------
    x : ndarray
        The input domain.
    condlist : list of bool arrays
        One boolean array per function, each of the same shape as `x`;
        wherever ``condlist[i]`` is True, ``funclist[i](x)`` supplies
        the output.  If one extra function is given
        (``len(funclist) - len(condlist) == 1``) it is the default,
        used wherever all conditions are false.
    funclist : list of callables ``f(x, *args, **kw)``, or scalars
        Evaluated over `x` where the matching condition is True.  A
        scalar entry behaves as a constant function.
    args : tuple, optional
        Extra positional arguments forwarded to every callable.
    kw : dict, optional
        Extra keyword arguments forwarded to every callable.

    Returns
    -------
    out : ndarray
        Same shape and type as `x`; portions covered by no condition
        are left at zero.

    Examples
    --------
    >>> x = np.arange(6) - 2.5
    >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
    array([-1., -1., -1.,  1.,  1.,  1.])
    """
    x = asanyarray(x)
    n2 = len(funclist)
    if isscalar(condlist) or \
           not (isinstance(condlist[0], list) or
                isinstance(condlist[0], ndarray)):
        condlist = [condlist]
    condlist = [asarray(c, dtype=bool) for c in condlist]
    n = len(condlist)
    if n == n2-1:  # compute the "otherwise" condition.
        # BUG FIX: accumulate the union into a *copy*.  The previous
        # code bound ``totlist = condlist[0]`` and then used ``|=``,
        # mutating condlist[0] (and possibly the caller's own boolean
        # array, since asarray does not copy bool input) into the union
        # of all conditions, so funclist[0] was evaluated outside its
        # piece as well.
        totlist = condlist[0].copy()
        for k in range(1, n):
            totlist |= condlist[k]
        condlist.append(~totlist)
        n += 1
    if (n != n2):
        raise ValueError(
                "function list and condition list must be the same")
    zerod = False
    # This is a hack to work around problems with NumPy's
    # handling of 0-d arrays and boolean indexing with
    # numpy.bool_ scalars: promote everything to 1-d and squeeze at
    # the end.
    if x.ndim == 0:
        x = x[None]
        zerod = True
        newcondlist = []
        for k in range(n):
            if condlist[k].ndim == 0:
                condition = condlist[k][None]
            else:
                condition = condlist[k]
            newcondlist.append(condition)
        condlist = newcondlist
    y = zeros(x.shape, x.dtype)
    for k in range(n):
        item = funclist[k]
        if not callable(item):
            # Scalar entry: constant value over its piece.
            y[condlist[k]] = item
        else:
            # Evaluate the callable only on the selected elements.
            vals = x[condlist[k]]
            if vals.size > 0:
                y[condlist[k]] = item(vals, *args, **kw)
    if zerod:
        y = y.squeeze()
    return y
def select(condlist, choicelist, default=0):
    """
    Return an array drawn from elements in choicelist, depending on conditions.
    Parameters
    ----------
    condlist : list of bool ndarrays
        The list of conditions which determine from which array in `choicelist`
        the output elements are taken. When multiple conditions are satisfied,
        the first one encountered in `condlist` is used.
    choicelist : list of ndarrays
        The list of arrays from which the output elements are taken. It has
        to be of the same length as `condlist`.
    default : scalar, optional
        The element inserted in `output` when all conditions evaluate to False.
    Returns
    -------
    output : ndarray
        The output at position m is the m-th element of the array in
        `choicelist` where the m-th element of the corresponding array in
        `condlist` is True.
    See Also
    --------
    where : Return elements from one of two arrays depending on condition.
    take, choose, compress, diag, diagonal
    Examples
    --------
    >>> x = np.arange(10)
    >>> condlist = [x<3, x>5]
    >>> choicelist = [x, x**2]
    >>> np.select(condlist, choicelist)
    array([ 0,  1,  2,  0,  0,  0, 36, 49, 64, 81])
    """
    n = len(condlist)
    n2 = len(choicelist)
    if n2 != n:
        raise ValueError(
                "list of cases must be same length as list of conditions")
    # Prepend the default so that an encoded index of 0 (no condition
    # true anywhere) selects it via `choose` below.
    choicelist = [default] + choicelist
    S = 0
    pfac = 1
    # Encode, per element, the 1-based index of the *first* true
    # condition into S.  pfac is 1 until some earlier condition has
    # matched and 0 afterwards, so later true conditions contribute
    # nothing — this implements "first one encountered wins".
    for k in range(1, n+1):
        S += k * pfac * asarray(condlist[k-1])
        if k < n:
            pfac *= (1-asarray(condlist[k-1]))
    # handle special case of a 1-element condition but
    # a multi-element choice
    if type(S) in ScalarType or max(asarray(S).shape)==1:
        # Sum all choices purely to discover their common broadcast
        # shape, then expand the scalar/1-element S to that shape so
        # `choose` can index element-wise.
        pfac = asarray(1)
        for k in range(n2+1):
            pfac = pfac + asarray(choicelist[k])
        if type(S) in ScalarType:
            S = S*ones(asarray(pfac).shape, type(S))
        else:
            S = S*ones(asarray(pfac).shape, S.dtype)
    return choose(S, tuple(choicelist))
def copy(a):
    """
    Return an array copy of the given object.

    Equivalent to ``np.array(a, copy=True)``.

    Parameters
    ----------
    a : array_like
        Input data.

    Returns
    -------
    arr : ndarray
        A new array holding the same data as `a`; later modifications
        of `a` do not affect the returned array.

    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> z = np.copy(x)
    >>> x[0] = 10
    >>> x[0] == z[0]
    False
    """
    duplicate = array(a, copy=True)
    return duplicate
# Basic operations
def gradient(f, *varargs):
    """
    Return the gradient of an N-dimensional array.

    The gradient is computed using central differences in the interior
    and first differences at the boundaries, so the result has the same
    shape as the input array.

    Parameters
    ----------
    f : array_like
        An N-dimensional array containing samples of a scalar function.
    varargs : scalars, optional
        0, 1, or N scalars giving the sample distance per direction
        (`dx`, `dy`, `dz`, ...).  The default distance is 1.

    Returns
    -------
    g : ndarray or list of ndarray
        The derivative of `f` with respect to each dimension; a single
        array when `f` is one-dimensional, otherwise a list of N arrays.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=float)
    >>> np.gradient(x)
    array([ 1. ,  1.5,  2.5,  3.5,  4.5,  5. ])
    >>> np.gradient(x, 2)
    array([ 0.5 ,  0.75,  1.25,  1.75,  2.25,  2.5 ])
    """
    N = len(f.shape)  # number of dimensions
    n = len(varargs)
    if n == 0:
        dx = [1.0]*N
    elif n == 1:
        # one scalar spacing applies to every axis
        dx = [varargs[0]]*N
    elif n == N:
        dx = list(varargs)
    else:
        # Historical interface: an argument-count mismatch raises
        # SyntaxError (kept for backward compatibility with callers
        # that catch it).
        raise SyntaxError(
            "invalid number of arguments")
    # use central differences on interior and first differences on endpoints
    outvals = []
    # create slice objects --- initially all are [:, :, ..., :]
    slice1 = [slice(None)]*N
    slice2 = [slice(None)]*N
    slice3 = [slice(None)]*N
    # Compute in floating point; integer/other input promotes to 'd'.
    otype = f.dtype.char
    if otype not in ['f', 'd', 'F', 'D']:
        otype = 'd'
    for axis in range(N):
        # select out appropriate parts for this dimension
        out = np.zeros_like(f).astype(otype)
        slice1[axis] = slice(1, -1)
        slice2[axis] = slice(2, None)
        slice3[axis] = slice(None, -2)
        # 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0
        # FIX: index with tuples -- indexing with a *list* of slices is
        # deprecated/rejected by modern NumPy (sibling `diff` already
        # converts its slice lists to tuples); tuples work on all
        # versions.
        out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)])/2.0
        slice1[axis] = 0
        slice2[axis] = 1
        slice3[axis] = 0
        # 1D equivalent -- out[0] = (f[1] - f[0])
        out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)])
        slice1[axis] = -1
        slice2[axis] = -1
        slice3[axis] = -2
        # 1D equivalent -- out[-1] = (f[-1] - f[-2])
        out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)])
        # divide by step size
        outvals.append(out / dx[axis])
        # reset the slice object in this dimension to ":"
        slice1[axis] = slice(None)
        slice2[axis] = slice(None)
        slice3[axis] = slice(None)
    if N == 1:
        return outvals[0]
    else:
        return outvals
def diff(a, n=1, axis=-1):
    """
    Calculate the n-th order discrete difference along the given axis.

    The first-order difference is ``out[i] = a[i+1] - a[i]`` along
    `axis`; higher orders difference repeatedly.

    Parameters
    ----------
    a : array_like
        Input array.
    n : int, optional
        Number of times values are differenced; ``n=0`` returns `a`
        unchanged.
    axis : int, optional
        Axis along which to difference; default is the last axis.

    Returns
    -------
    out : ndarray
        The n-th order differences; same shape as `a` except along
        `axis`, where the dimension is smaller by `n`.

    See Also
    --------
    gradient, ediff1d

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.diff(x)
    array([ 1,  2,  3, -7])
    >>> np.diff(x, n=2)
    array([  1,   1, -10])
    """
    if n == 0:
        return a
    if n < 0:
        raise ValueError(
            "order must be non-negative but got " + repr(n))
    result = asanyarray(a)
    # Apply the first-order difference n times instead of recursing.
    for _ in range(n):
        ndim = len(result.shape)
        upper = [slice(None)] * ndim
        lower = [slice(None)] * ndim
        upper[axis] = slice(1, None)
        lower[axis] = slice(None, -1)
        result = result[tuple(upper)] - result[tuple(lower)]
    return result
def interp(x, xp, fp, left=None, right=None):
    """
    One-dimensional linear interpolation.

    Return the piecewise-linear interpolant to a function with values
    `fp` at the data points `xp`, evaluated at `x`.

    Parameters
    ----------
    x : array_like
        The x-coordinates of the interpolated values.
    xp : 1-D sequence of floats
        The x-coordinates of the data points; must be increasing (this
        is not checked — a decreasing `xp` yields nonsense).
    fp : 1-D sequence of floats
        The y-coordinates of the data points, same length as `xp`.
    left : float, optional
        Value returned for ``x < xp[0]``; default is ``fp[0]``.
    right : float, optional
        Value returned for ``x > xp[-1]``; default is ``fp[-1]``.

    Returns
    -------
    y : float or ndarray
        The interpolated values, same shape as `x`.

    Raises
    ------
    ValueError
        If `xp` and `fp` have different lengths.

    Examples
    --------
    >>> np.interp(2.5, [1, 2, 3], [3, 2, 0])
    1.0
    """
    # Scalar-like input (python scalars or 0-d arrays) is wrapped in a
    # one-element list for the C routine, then unwrapped with .item().
    scalar_like = isinstance(x, (float, int, number)) or \
        (isinstance(x, np.ndarray) and x.ndim == 0)
    if scalar_like:
        return compiled_interp([x], xp, fp, left, right).item()
    return compiled_interp(x, xp, fp, left, right)
def angle(z, deg=0):
    """
    Return the angle of the complex argument.

    Parameters
    ----------
    z : array_like
        A complex number or sequence of complex numbers.
    deg : bool, optional
        Return the angle in degrees if true, radians otherwise
        (default).

    Returns
    -------
    angle : ndarray or scalar
        The counterclockwise angle from the positive real axis on the
        complex plane.

    See Also
    --------
    arctan2, absolute

    Examples
    --------
    >>> np.angle(1+1j, deg=True)
    45.0
    """
    scale = 180/pi if deg else 1.0
    zarr = asarray(z)
    if issubclass(zarr.dtype.type, _nx.complexfloating):
        return arctan2(zarr.imag, zarr.real) * scale
    # Real input: imaginary part is identically zero.
    return arctan2(0, zarr) * scale
def unwrap(p, discont=pi, axis=-1):
    """
    Unwrap by changing deltas between values to 2*pi complement.

    Unwrap radian phase `p` by changing absolute jumps greater than
    `discont` to their 2*pi complement along the given axis.

    Parameters
    ----------
    p : array_like
        Input array.
    discont : float, optional
        Maximum discontinuity between values, default is ``pi``.
    axis : int, optional
        Axis along which unwrap will operate, default is the last axis.

    Returns
    -------
    out : ndarray
        Output array (float64).

    Notes
    -----
    Jumps smaller than ``pi`` but larger than `discont` are left alone,
    because taking the 2*pi complement would only enlarge them.

    Examples
    --------
    >>> phase = np.linspace(0, np.pi, num=5)
    >>> phase[3:] += np.pi
    >>> np.unwrap(phase)
    array([ 0.        ,  0.78539816,  1.57079633, -0.78539816,  0.        ])
    """
    p = asarray(p)
    nd = len(p.shape)
    dd = diff(p, axis=axis)
    # Index selecting everything but the first element along `axis`.
    # FIX: converted to a tuple -- indexing with a *list* of slices is
    # deprecated/rejected by modern NumPy (sibling `diff` already does
    # this conversion); tuples behave identically on all versions.
    slice1 = [slice(None, None)]*nd     # full slices
    slice1[axis] = slice(1, None)
    slice1 = tuple(slice1)
    ddmod = mod(dd+pi, 2*pi)-pi
    # A jump of exactly -pi with a positive delta maps to +pi so the
    # complement keeps the original sign.
    _nx.putmask(ddmod, (ddmod==-pi) & (dd > 0), pi)
    ph_correct = ddmod - dd
    # Leave jumps smaller than `discont` untouched.
    _nx.putmask(ph_correct, abs(dd)<discont, 0)
    up = array(p, copy=True, dtype='d')
    up[slice1] = p[slice1] + ph_correct.cumsum(axis)
    return up
def sort_complex(a):
    """
    Sort a complex array using the real part first, then the imaginary
    part.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    out : complex ndarray
        Always a sorted array of complex dtype, regardless of the
        input dtype.

    Examples
    --------
    >>> np.sort_complex([5, 3, 6, 2, 1])
    array([ 1.+0.j,  2.+0.j,  3.+0.j,  5.+0.j,  6.+0.j])
    """
    result = array(a, copy=True)
    result.sort()
    if issubclass(result.dtype.type, _nx.complexfloating):
        return result
    # Promote real input to a complex dtype wide enough to hold it:
    # small ints -> single complex, long double -> clongdouble,
    # everything else -> double complex.
    if result.dtype.char in 'bhBH':
        return result.astype('F')
    if result.dtype.char == 'g':
        return result.astype('G')
    return result.astype('D')
def trim_zeros(filt, trim='fb'):
    """
    Trim the leading and/or trailing zeros from a 1-D array or sequence.

    Parameters
    ----------
    filt : 1-D array or sequence
        Input data.
    trim : str, optional
        'f' trims zeros from the front, 'b' from the back; the default
        'fb' trims both ends.

    Returns
    -------
    trimmed : 1-D array or sequence
        A slice of `filt` with the requested zeros removed; the input
        type (array, list, tuple, ...) is preserved.

    Examples
    --------
    >>> np.trim_zeros([0, 1, 2, 0])
    [1, 2]
    """
    mode = trim.upper()
    start = 0
    if 'F' in mode:
        # Count leading zeros; stop at the first non-zero entry.
        for value in filt:
            if value != 0.:
                break
            start += 1
    stop = len(filt)
    if 'B' in mode:
        # Count trailing zeros by walking the reversed sequence.
        for value in filt[::-1]:
            if value != 0.:
                break
            stop -= 1
    return filt[start:stop]
import sys
# Compatibility shim: the built-in ``set`` type first appeared in
# Python 2.4 (hexversion 0x2040000); on older interpreters substitute
# the equivalent class from the (long-deprecated) ``sets`` module.
if sys.hexversion < 0x2040000:
    from sets import Set as set
@deprecate
def unique(x):
    """
    Deprecated: use numpy.lib.arraysetops.unique() instead.

    Return the sorted unique elements of `x`.
    """
    try:
        flat = x.flatten()
        if flat.size == 0:
            return flat
        flat.sort()
        # Keep the first occurrence of each run of equal values.
        keep = concatenate(([True], flat[1:] != flat[:-1]))
        return flat[keep]
    except AttributeError:
        # Not an ndarray: fall back to Python set semantics.
        return asarray(sorted(set(x)))
def extract(condition, arr):
    """
    Return the elements of an array that satisfy some condition.

    Equivalent to ``np.compress(ravel(condition), ravel(arr))``; when
    `condition` is boolean this matches ``arr[condition]``.

    Parameters
    ----------
    condition : array_like
        Nonzero or True entries indicate the elements of `arr` to
        extract.
    arr : array_like
        Input array of the same size as `condition`.

    Returns
    -------
    ndarray
        1-D array of the selected elements.

    See Also
    --------
    take, put, putmask, compress

    Examples
    --------
    >>> arr = np.arange(12).reshape((3, 4))
    >>> np.extract(np.mod(arr, 3) == 0, arr)
    array([0, 3, 6, 9])
    """
    flat_condition = ravel(condition)
    flat_values = ravel(arr)
    return _nx.take(flat_values, nonzero(flat_condition)[0])
def place(arr, mask, vals):
    """
    Change elements of an array based on a conditional mask and input
    values.

    Similar to ``np.putmask(arr, mask, vals)``, except that `place`
    consumes the first N elements of `vals`, where N is the number of
    True entries in `mask` (repeating `vals` if it is shorter), while
    `putmask` uses the elements where `mask` is True.  `extract`
    performs the exact opposite operation.

    Parameters
    ----------
    arr : array_like
        Array to put data into.
    mask : array_like
        Boolean mask array; must have the same size as `arr`.
    vals : 1-D sequence
        Values to insert into `arr`.

    See Also
    --------
    putmask, put, take, extract

    Examples
    --------
    >>> arr = np.arange(6).reshape(2, 3)
    >>> np.place(arr, arr > 2, [44, 55])
    >>> arr
    array([[ 0,  1,  2],
           [44, 55, 44]])
    """
    # All of the insert/repeat logic lives in the C helper.
    return _insert(arr, mask, vals)
def _nanop(op, fill, a, axis=None):
    """
    General operation on arrays with not-a-number values.
    Parameters
    ----------
    op : callable
        Operation to perform.
    fill : float
        NaN values are set to fill before doing the operation.
    a : array-like
        Input array.
    axis : {int, None}, optional
        Axis along which the operation is computed.
        By default the input is flattened.
    Returns
    -------
    y : {ndarray, scalar}
        Processed data.
    """
    # Copy so the fill below never touches the caller's data;
    # subok=True preserves ndarray subclasses (e.g. MaskedArray).
    y = array(a, subok=True)
    # We only need to take care of NaN's in floating point arrays
    if np.issubdtype(y.dtype, np.integer):
        return op(y, axis=axis)
    mask = isnan(a)
    # y[mask] = fill
    # We can't use fancy indexing here as it'll mess w/ MaskedArrays
    # Instead, let's fill the array directly...
    np.putmask(y, mask, fill)
    res = op(y, axis=axis)
    mask_all_along_axis = mask.all(axis=axis)
    # Along some axes, only nan's were encountered. As such, any values
    # calculated along that axis should be set to nan.
    if mask_all_along_axis.any():
        if np.isscalar(res):
            # Fully-NaN input reduced to a scalar: the answer is NaN.
            res = np.nan
        else:
            res[mask_all_along_axis] = np.nan
    return res
def nansum(a, axis=None):
    """
    Return the sum of array elements over a given axis, treating
    Not a Numbers (NaNs) as zero.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose sum is desired; non-arrays are
        converted.
    axis : int, optional
        Axis along which the sum is computed; by default the flattened
        array is summed.

    Returns
    -------
    y : ndarray or scalar
        The sum with NaNs treated as zero.  Slices containing only
        NaNs yield NaN.

    See Also
    --------
    numpy.sum, isnan, isfinite

    Examples
    --------
    >>> np.nansum([1, np.nan])
    1.0
    """
    # NaNs are replaced by 0, the additive identity, before summing.
    return _nanop(np.sum, 0, a, axis)
def nanmin(a, axis=None):
    """
    Return the minimum of an array, or along an axis, ignoring any NaNs.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose minimum is desired.
    axis : int, optional
        Axis along which the minimum is computed; by default the
        flattened array is used.

    Returns
    -------
    nanmin : ndarray or scalar
        The minimum with NaNs ignored.  Slices containing only NaNs
        yield NaN.

    See Also
    --------
    numpy.amin, nanmax, isnan, isfinite

    Examples
    --------
    >>> np.nanmin([[1, 2], [3, np.nan]])
    1.0
    """
    # NaNs are replaced by +inf so they can never win the minimum.
    return _nanop(np.min, np.inf, a, axis)
def nanargmin(a, axis=None):
    """
    Return indices of the minimum values over an axis, ignoring NaNs.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate; by default the flattened input is
        used.

    Returns
    -------
    index_array : ndarray
        An array of indices, or a single index value.

    See Also
    --------
    argmin, nanargmax

    Examples
    --------
    >>> np.nanargmin(np.array([[np.nan, 4], [2, 3]]))
    2
    """
    # NaNs are replaced by +inf so argmin never selects them.
    return _nanop(np.argmin, np.inf, a, axis)
def nanmax(a, axis=None):
    """
    Return the maximum of an array, or along an axis, ignoring any NaNs.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose maximum is desired; non-arrays
        are converted.
    axis : int, optional
        Axis along which the maximum is computed; by default the
        flattened array is used.

    Returns
    -------
    nanmax : ndarray or scalar
        The maximum with NaNs ignored.  Slices containing only NaNs
        yield NaN.

    See Also
    --------
    numpy.amax, nanmin, isnan, isfinite

    Examples
    --------
    >>> np.nanmax([[1, 2], [3, np.nan]])
    3.0
    """
    # NaNs are replaced by -inf so they can never win the maximum.
    return _nanop(np.max, -np.inf, a, axis)
def nanargmax(a, axis=None):
    """
    Return indices of the maximum values over an axis, ignoring NaNs.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate; by default the flattened input is
        used.

    Returns
    -------
    index_array : ndarray
        An array of indices, or a single index value.

    See Also
    --------
    argmax, nanargmin

    Examples
    --------
    >>> np.nanargmax(np.array([[np.nan, 4], [2, 3]]))
    1
    """
    # NaNs are replaced by -inf so argmax never selects them.
    return _nanop(np.argmax, -np.inf, a, axis)
def disp(mesg, device=None, linefeed=True):
    """
    Display a message on a device.

    Parameters
    ----------
    mesg : str
        Message to display.
    device : object, optional
        Device to write the message to; must provide ``write()`` and
        ``flush()`` methods.  Defaults to ``sys.stdout``, behaving much
        like ``print``.
    linefeed : bool, optional
        Whether to append a line feed.  Defaults to True.

    Raises
    ------
    AttributeError
        If `device` lacks a ``write()`` or ``flush()`` method.

    Examples
    --------
    >>> from StringIO import StringIO
    >>> buf = StringIO()
    >>> np.disp('"Display" in a file', device=buf)
    >>> buf.getvalue()
    '"Display" in a file\\n'
    """
    if device is None:
        import sys
        device = sys.stdout
    text = ('%s\n' % mesg) if linefeed else ('%s' % mesg)
    device.write(text)
    device.flush()
    return
# return number of input arguments and
# number of default arguments
def _get_nargs(obj):
    # Determine how many arguments a callable accepts and how many of
    # them have defaults, returning the pair (nargs, ndefaults).
    import re
    # Parses CPython's TypeError message for a call with the wrong
    # argument count, e.g. "f() takes exactly 2 arguments (3 given)".
    terr = re.compile(r'.*? takes (exactly|at least) (?P<exargs>(\d+)|(\w+))' +
                      r' argument(s|) \((?P<gargs>(\d+)|(\w+)) given\)')
    def _convert_to_int(strval):
        # The counts in the message may be digits or spelled-out words.
        try:
            result = int(strval)
        except ValueError:
            if strval=='zero':
                result = 0
            elif strval=='one':
                result = 1
            elif strval=='two':
                result = 2
            # How high to go? English only?
            else:
                raise
        return result
    if not callable(obj):
        raise TypeError(
            "Object is not callable.")
    if sys.version_info[0] >= 3:
        # inspect currently fails for binary extensions
        # like math.cos. So fall back to other methods if
        # it fails.
        import inspect
        try:
            spec = inspect.getargspec(obj)
            nargs = len(spec.args)
            if spec.defaults:
                ndefaults = len(spec.defaults)
            else:
                ndefaults = 0
            if inspect.ismethod(obj):
                # bound methods receive 'self' implicitly
                nargs -= 1
            return nargs, ndefaults
        except:
            pass
    if hasattr(obj,'func_code'):
        # Python 2 function object: read the counts off the code object.
        fcode = obj.func_code
        nargs = fcode.co_argcount
        if obj.func_defaults is not None:
            ndefaults = len(obj.func_defaults)
        else:
            ndefaults = 0
        if isinstance(obj, types.MethodType):
            nargs -= 1
        return nargs, ndefaults
    # Last resort (e.g. C-implemented callables): call with no
    # arguments and parse the resulting TypeError message.
    # NOTE: Python 2-only ``except E, name`` syntax below; this block
    # cannot be parsed by Python 3 without translation (2to3).
    try:
        obj()
        return 0, 0
    except TypeError, msg:
        m = terr.match(str(msg))
        if m:
            nargs = _convert_to_int(m.group('exargs'))
            ndefaults = _convert_to_int(m.group('gargs'))
            # NOTE(review): 'gargs' is the "given" count from the error
            # message, not a defaults count -- looks suspicious; confirm
            # against upstream intent.
            if isinstance(obj, types.MethodType):
                nargs -= 1
            return nargs, ndefaults
    raise ValueError(
        "failed to determine the number of arguments for %s" % (obj))
class vectorize(object):
    """
    vectorize(pyfunc, otypes='', doc=None)
    Generalized function class.
    Define a vectorized function which takes a nested sequence
    of objects or numpy arrays as inputs and returns a
    numpy array as output. The vectorized function evaluates `pyfunc` over
    successive tuples of the input arrays like the python map function,
    except it uses the broadcasting rules of numpy.
    The data type of the output of `vectorized` is determined by calling
    the function with the first element of the input. This can be avoided
    by specifying the `otypes` argument.
    Parameters
    ----------
    pyfunc : callable
        A python function or method.
    otypes : str or list of dtypes, optional
        The output data type. It must be specified as either a string of
        typecode characters or a list of data type specifiers. There should
        be one data type specifier for each output.
    doc : str, optional
        The docstring for the function. If None, the docstring will be the
        `pyfunc` one.
    Examples
    --------
    >>> def myfunc(a, b):
    ...     \"\"\"Return a-b if a>b, otherwise return a+b\"\"\"
    ...     if a > b:
    ...         return a - b
    ...     else:
    ...         return a + b
    >>> vfunc = np.vectorize(myfunc)
    >>> vfunc([1, 2, 3, 4], 2)
    array([3, 4, 1, 2])
    The docstring is taken from the input function to `vectorize` unless it
    is specified
    >>> vfunc.__doc__
    'Return a-b if a>b, otherwise return a+b'
    >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
    >>> vfunc.__doc__
    'Vectorized `myfunc`'
    The output type is determined by evaluating the first element of the input,
    unless it is specified
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.int32'>
    >>> vfunc = np.vectorize(myfunc, otypes=[np.float])
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <type 'numpy.float64'>
    """
    def __init__(self, pyfunc, otypes='', doc=None):
        # the wrapped python callable
        self.thefunc = pyfunc
        # frompyfunc-built ufunc; created lazily in __call__ and reset
        # whenever the argument count changes
        self.ufunc = None
        nin, ndefault = _get_nargs(pyfunc)
        if nin == 0 and ndefault == 0:
            # argument count unknown (e.g. C callable): accept anything
            self.nin = None
            self.nin_wo_defaults = None
        else:
            self.nin = nin
            # minimum number of arguments a caller must supply
            self.nin_wo_defaults = nin - ndefault
        # number of outputs; discovered on the first call
        self.nout = None
        if doc is None:
            self.__doc__ = pyfunc.__doc__
        else:
            self.__doc__ = doc
        if isinstance(otypes, str):
            self.otypes = otypes
            # validate every requested output typecode
            for char in self.otypes:
                if char not in typecodes['All']:
                    raise ValueError(
                        "invalid otype specified")
        elif iterable(otypes):
            # list of dtype specifiers -> string of typecode characters
            self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
        else:
            raise ValueError(
                "Invalid otype specification")
        # argument count of the previous call (cache key for the ufunc)
        self.lastcallargs = 0
    def __call__(self, *args):
        # get number of outputs and output types by calling
        # the function on the first entries of args
        nargs = len(args)
        if self.nin:
            if (nargs > self.nin) or (nargs < self.nin_wo_defaults):
                raise ValueError(
                    "Invalid number of arguments")
        # we need a new ufunc if this is being called with more arguments.
        if (self.lastcallargs != nargs):
            self.lastcallargs = nargs
            self.ufunc = None
            self.nout = None
        if self.nout is None or self.otypes == '':
            # Probe: call the python function once on the first element
            # of each argument to discover nout and the output dtypes.
            newargs = []
            for arg in args:
                newargs.append(asarray(arg).flat[0])
            theout = self.thefunc(*newargs)
            if isinstance(theout, tuple):
                self.nout = len(theout)
            else:
                self.nout = 1
                theout = (theout,)
            if self.otypes == '':
                otypes = []
                for k in range(self.nout):
                    otypes.append(asarray(theout[k]).dtype.char)
                self.otypes = ''.join(otypes)
        # Create ufunc if not already created
        if (self.ufunc is None):
            self.ufunc = frompyfunc(self.thefunc, nargs, self.nout)
        # Convert to object arrays first
        newargs = [array(arg,copy=False,subok=True,dtype=object) for arg in args]
        if self.nout == 1:
            # cast the object-dtype ufunc result back to the real dtype
            _res = array(self.ufunc(*newargs),copy=False,
                         subok=True,dtype=self.otypes[0])
        else:
            _res = tuple([array(x,copy=False,subok=True,dtype=c) \
                          for x, c in zip(self.ufunc(*newargs), self.otypes)])
        return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
    """
    Estimate a covariance matrix, given data.

    For N-dimensional samples :math:`X = [x_1, x_2, ... x_N]^T`, the
    element :math:`C_{ij}` is the covariance of :math:`x_i` and
    :math:`x_j`, and :math:`C_{ii}` is the variance of :math:`x_i`.

    Parameters
    ----------
    m : array_like
        A 1-D or 2-D array of variables and observations.  Each row of
        `m` is a variable and each column an observation of all those
        variables (see `rowvar`).
    y : array_like, optional
        An additional set of variables and observations, same form as
        `m`.
    rowvar : int, optional
        Non-zero (default): rows are variables, columns observations.
        Zero: the relationship is transposed.
    bias : int, optional
        Default normalization is by ``(N - 1)`` (unbiased); with
        ``bias=1`` normalization is by ``N``.  Overridden by `ddof`.
    ddof : int, optional
        .. versionadded:: 1.5
        If not ``None``, normalization is by ``(N - ddof)``, overriding
        the value implied by `bias`.  Default ``None``.

    Returns
    -------
    out : ndarray
        The covariance matrix of the variables.

    See Also
    --------
    corrcoef : Normalized covariance matrix

    Examples
    --------
    >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
    >>> np.cov(x)
    array([[ 1., -1.],
           [-1.,  1.]])
    """
    # Check inputs
    if ddof is not None and ddof != int(ddof):
        raise ValueError("ddof must be integer")
    data = array(m, ndmin=2, dtype=float)
    if data.shape[0] == 1:
        # a single row is always treated as one variable
        rowvar = 1
    if rowvar:
        axis, tup = 0, (slice(None), newaxis)
    else:
        axis, tup = 1, (newaxis, slice(None))
    if y is not None:
        extra = array(y, copy=False, ndmin=2, dtype=float)
        data = concatenate((data, extra), axis)
    # Center each variable on its mean across observations.
    data -= data.mean(axis=1 - axis)[tup]
    count = data.shape[1] if rowvar else data.shape[0]
    if ddof is None:
        ddof = 1 if bias == 0 else 0
    fact = float(count - ddof)
    if rowvar:
        return (dot(data, data.T.conj()) / fact).squeeze()
    return (dot(data.T, data.conj()) / fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
    """
    Return correlation coefficients.

    Please refer to the documentation for `cov` for more detail.  The
    relationship between the correlation coefficient matrix, `P`, and the
    covariance matrix, `C`, is

    .. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }

    The values of `P` are between -1 and 1, inclusive.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row represents a variable, and each column a single
        observation of all those variables (see `rowvar`).
    y : array_like, optional
        An additional set of variables and observations, same shape as `x`.
    rowvar : int, optional
        If non-zero (default), each row is a variable and observations are
        in the columns; otherwise the relationship is transposed.
    bias : int, optional
        Default normalization is by ``(N - 1)``; if `bias` is 1,
        normalization is by ``N``.  May be overridden via `ddof`.
    ddof : {None, int}, optional
        .. versionadded:: 1.5
        If not ``None``, normalization is by ``(N - ddof)``, overriding
        the value implied by `bias`.  Default is ``None``.

    Returns
    -------
    out : ndarray
        The correlation coefficient matrix of the variables.

    See Also
    --------
    cov : Covariance matrix

    """
    covariance = cov(x, y, rowvar, bias, ddof)
    try:
        variances = diag(covariance)
    except ValueError:
        # 0-d covariance: a single variable correlates perfectly with itself.
        return 1
    scale = sqrt(multiply.outer(variances, variances))
    return covariance / scale
def blackman(M):
    """
    Return the Blackman window.

    The Blackman window is a taper formed from the first three terms of a
    summation of cosines, designed to have close to the minimal leakage
    possible.  It is close to optimal, only slightly worse than a Kaiser
    window.

    Parameters
    ----------
    M : int
        Number of points in the output window.  If zero or less, an empty
        array is returned.

    Returns
    -------
    out : ndarray
        The window, normalized to one (the value one appears only if the
        number of samples is odd).

    See Also
    --------
    bartlett, hamming, hanning, kaiser

    Notes
    -----
    The Blackman window is defined as

    .. math::  w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)

    References
    ----------
    Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
    spectra, Dover Publications, New York.

    Oppenheim, A.V., and R.W. Schafer.  Discrete-Time Signal Processing.
    Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.

    """
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    # Doubling theta is an exact power-of-two scaling, so cos(2*theta)
    # matches cos(4*pi*n/(M-1)) bit-for-bit.
    theta = 2.0 * pi * samples / (M - 1)
    return 0.42 - 0.5 * cos(theta) + 0.08 * cos(2.0 * theta)
def bartlett(M):
    """
    Return the Bartlett window.

    The Bartlett window is very similar to a triangular window, except
    that the end points are at zero.  It is often used in signal
    processing for tapering a signal without generating too much ripple
    in the frequency domain.

    Parameters
    ----------
    M : int
        Number of points in the output window.  If zero or less, an
        empty array is returned.

    Returns
    -------
    out : array
        The triangular window, normalized to one (the value one appears
        only if the number of samples is odd), with the first and last
        samples equal to zero.

    See Also
    --------
    blackman, hamming, hanning, kaiser

    Notes
    -----
    The Bartlett window is defined as

    .. math:: w(n) = \\frac{2}{M-1} \\left(
              \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
              \\right)

    Convolution with this window produces linear interpolation; the
    Fourier transform of the Bartlett window is the product of two sinc
    functions.

    References
    ----------
    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
           Biometrika 37, 1-16, 1950.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 109-110.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    """
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    # Rising ramp on the first half, mirrored on the second half.
    rising = 2.0 * samples / (M - 1)
    return where(less_equal(samples, (M - 1) / 2.0), rising, 2.0 - rising)
def hanning(M):
    """
    Return the Hanning window.

    The Hanning window is a taper formed by using a weighted cosine.

    Parameters
    ----------
    M : int
        Number of points in the output window.  If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray, shape(M,)
        The window, normalized to one (the value one appears only if
        `M` is odd).

    See Also
    --------
    bartlett, blackman, hamming, kaiser

    Notes
    -----
    The Hanning window is defined as

    .. math::  w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
               \\qquad 0 \\leq n \\leq M-1

    The window was named for Julius van Hann, an Austrian meteorologist,
    and is also known as the Cosine Bell.  Some authors prefer the name
    Hann window to avoid confusion with the similar Hamming window.

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 106-108.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    """
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    return 0.5 - 0.5 * cos(2.0 * pi * samples / (M - 1))
def hamming(M):
    """
    Return the Hamming window.

    The Hamming window is a taper formed by using a weighted cosine.

    Parameters
    ----------
    M : int
        Number of points in the output window.  If zero or less, an
        empty array is returned.

    Returns
    -------
    out : ndarray
        The window, normalized to one (the value one appears only if
        the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hanning, kaiser

    Notes
    -----
    The Hamming window is defined as

    .. math::  w(n) = 0.54 + 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
               \\qquad 0 \\leq n \\leq M-1

    The window was named for R. W. Hamming, an associate of J. W. Tukey,
    and is described in Blackman and Tukey.  It was recommended for
    smoothing the truncated autocovariance function in the time domain.

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
           University of Alberta Press, 1975, pp. 109-110.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    """
    if M < 1:
        return array([])
    if M == 1:
        return ones(1, float)
    samples = arange(0, M)
    return 0.54 - 0.46 * cos(2.0 * pi * samples / (M - 1))
## Code from cephes for i0
# Chebyshev series coefficients (lowest degree last) for evaluating
# exp(-x) * I0(x) on the interval [0, 8]; _i0_1 maps its argument to
# x/2 - 2 before feeding these to _chbevl.
_i0A = [
    -4.41534164647933937950E-18,
    3.33079451882223809783E-17,
    -2.43127984654795469359E-16,
    1.71539128555513303061E-15,
    -1.16853328779934516808E-14,
    7.67618549860493561688E-14,
    -4.85644678311192946090E-13,
    2.95505266312963983461E-12,
    -1.72682629144155570723E-11,
    9.67580903537323691224E-11,
    -5.18979560163526290666E-10,
    2.65982372468238665035E-9,
    -1.30002500998624804212E-8,
    6.04699502254191894932E-8,
    -2.67079385394061173391E-7,
    1.11738753912010371815E-6,
    -4.41673835845875056359E-6,
    1.64484480707288970893E-5,
    -5.75419501008210370398E-5,
    1.88502885095841655729E-4,
    -5.76375574538582365885E-4,
    1.63947561694133579842E-3,
    -4.32430999505057594430E-3,
    1.05464603945949983183E-2,
    -2.37374148058994688156E-2,
    4.93052842396707084878E-2,
    -9.49010970480476444210E-2,
    1.71620901522208775349E-1,
    -3.04682672343198398683E-1,
    6.76795274409476084995E-1]
# Chebyshev series coefficients (lowest degree last) for evaluating
# exp(-x) * sqrt(x) * I0(x) for x > 8; _i0_2 maps its argument to
# 32/x - 2 before feeding these to _chbevl.
_i0B = [
    -7.23318048787475395456E-18,
    -4.83050448594418207126E-18,
    4.46562142029675999901E-17,
    3.46122286769746109310E-17,
    -2.82762398051658348494E-16,
    -3.42548561967721913462E-16,
    1.77256013305652638360E-15,
    3.81168066935262242075E-15,
    -9.55484669882830764870E-15,
    -4.15056934728722208663E-14,
    1.54008621752140982691E-14,
    3.85277838274214270114E-13,
    7.18012445138366623367E-13,
    -1.79417853150680611778E-12,
    -1.32158118404477131188E-11,
    -3.14991652796324136454E-11,
    1.18891471078464383424E-11,
    4.94060238822496958910E-10,
    3.39623202570838634515E-9,
    2.26666899049817806459E-8,
    2.04891858946906374183E-7,
    2.89137052083475648297E-6,
    6.88975834691682398426E-5,
    3.36911647825569408990E-3,
    8.04490411014108831608E-1]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in xrange(1,len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
    # Expansion for the interval [0, 8]: the argument is mapped to
    # x/2 - 2 before evaluating the _i0A Chebyshev series.
    mapped = x / 2.0 - 2
    return exp(x) * _chbevl(mapped, _i0A)
def _i0_2(x):
    # Expansion for x > 8: the argument is mapped to 32/x - 2 before
    # evaluating the _i0B Chebyshev series, then scaled by exp(x)/sqrt(x).
    mapped = 32.0 / x - 2.0
    return exp(x) * _chbevl(mapped, _i0B) / sqrt(x)
def i0(x):
    """
    Modified Bessel function of the first kind, order 0.

    Usually denoted :math:`I_0`.  This function does broadcast, but will
    *not* "up-cast" int dtype arguments unless accompanied by at least one
    float or complex dtype argument (see Raises below).

    Parameters
    ----------
    x : array_like, dtype float or complex
        Argument of the Bessel function.

    Returns
    -------
    out : ndarray, shape = x.shape, dtype = x.dtype
        The modified Bessel function evaluated at each of the elements
        of `x`.

    Raises
    ------
    TypeError: array cannot be safely cast to required type
        If argument consists exclusively of int dtypes.

    See Also
    --------
    scipy.special.iv, scipy.special.ive

    Notes
    -----
    Uses the algorithm published by Clenshaw [1]_ and referenced by
    Abramowitz and Stegun [2]_: the domain is partitioned into [0, 8]
    and (8, inf), with a Chebyshev polynomial expansion on each interval.

    References
    ----------
    .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions,"
           in *National Physical Laboratory Mathematical Tables*, vol. 5,
           London: Her Majesty's Stationery Office, 1962.
    .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
           Functions*, 10th printing, New York: Dover, 1964, pp. 379.

    """
    x = atleast_1d(x).copy()
    result = empty_like(x)
    # I0 is an even function, so fold negative arguments onto [0, inf).
    negative = (x < 0)
    x[negative] = -x[negative]
    # Pick the Chebyshev expansion by interval: [0, 8] versus (8, inf).
    small = (x <= 8.0)
    large = ~small
    result[small] = _i0_1(x[small])
    result[large] = _i0_2(x[large])
    return result.squeeze()
## End of cephes code for i0
def kaiser(M, beta):
    """
    Return the Kaiser window.

    The Kaiser window is a taper formed by using a Bessel function.

    Parameters
    ----------
    M : int
        Number of points in the output window.  If zero or less, an
        empty array is returned.
    beta : float
        Shape parameter for window.

    Returns
    -------
    out : array
        The window, normalized to one (the value one appears only if
        the number of samples is odd).

    See Also
    --------
    bartlett, blackman, hamming, hanning

    Notes
    -----
    The Kaiser window is defined as

    .. math::  w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
               \\right)/I_0(\\beta)

    with

    .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},

    where :math:`I_0` is the modified zeroth-order Bessel function.

    The Kaiser window is a very good approximation to the Digital Prolate
    Spheroidal Sequence (Slepian) window, and can approximate many other
    windows by varying `beta` (0: rectangular; 5: ~Hamming; 6: ~Hanning;
    8.6: ~Blackman).  A beta value of 14 is probably a good starting
    point.  As beta gets large the window narrows, so M must be large
    enough to sample the spike, otherwise NaNs are returned.

    References
    ----------
    .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
           digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
           John Wiley and Sons, New York, (1966).
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
           University of Alberta Press, 1975, pp. 177-178.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function

    """
    # numpy.dual was deprecated in NumPy 1.20 and removed in 1.26; fall
    # back to numpy's own i0 so this keeps working on modern releases.
    try:
        from numpy.dual import i0
    except ImportError:
        from numpy import i0
    if M == 1:
        return np.array([1.])
    n = arange(0, M)
    alpha = (M - 1) / 2.0
    return i0(beta * sqrt(1 - ((n - alpha) / alpha)**2.0)) / i0(float(beta))
def sinc(x):
    """
    Return the sinc function.

    The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.

    Parameters
    ----------
    x : ndarray
        Array (possibly multi-dimensional) of values for which to
        calculate ``sinc(x)``.

    Returns
    -------
    out : ndarray
        ``sinc(x)``, which has the same shape as the input.

    Notes
    -----
    ``sinc(0)`` is the limit value 1.

    The name sinc is short for "sine cardinal" or "sinus cardinalis".
    The sinc function is used in various signal processing applications,
    including anti-aliasing, construction of a Lanczos resampling filter,
    and interpolation.  For bandlimited interpolation of discrete-time
    signals, the ideal interpolation kernel is proportional to the sinc
    function.

    References
    ----------
    .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram
           Web Resource. http://mathworld.wolfram.com/SincFunction.html
    .. [2] Wikipedia, "Sinc function",
           http://en.wikipedia.org/wiki/Sinc_function

    """
    x = np.asanyarray(x)
    # Substitute a tiny value for exact zeros so the division never hits
    # 0/0; sin(pi*eps)/(pi*eps) rounds to the limit value 1.
    nonzero = where(x == 0, 1.0e-20, x)
    argument = pi * nonzero
    return sin(argument) / argument
def msort(a):
    """
    Return a copy of an array sorted along the first axis.

    Parameters
    ----------
    a : array_like
        Array to be sorted.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.

    See Also
    --------
    sort

    Notes
    -----
    ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.

    """
    result = array(a, subok=True, copy=True)
    result.sort(axis=0)
    return result
def median(a, axis=None, out=None, overwrite_input=False):
    """
    Compute the median along the specified axis.

    Returns the median of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : {None, int}, optional
        Axis along which the medians are computed. The default (axis=None)
        is to compute the median along a flattened version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : {False, True}, optional
        If True, then allow use of memory of input array (a) for
        calculations. The input array will be modified by the call to
        median. This will save memory when you do not need to preserve
        the contents of the input array. Treat the input as undefined,
        but it will probably be fully or partially sorted. Default is
        False. Note that, if `overwrite_input` is True and the input
        is not already an ndarray, an error will be raised.

    Returns
    -------
    median : ndarray
        A new array holding the result (unless `out` is specified, in
        which case that array is returned instead). If the input contains
        integers, or floats of smaller precision than 64, then the output
        data-type is float64. Otherwise, the output data-type is the same
        as that of the input.

    See Also
    --------
    mean, percentile

    Notes
    -----
    Given a vector V of length N, the median of V is the middle value of
    a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N
    is odd.  When N is even, it is the average of the two middle values of
    ``V_sorted``.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.median(a)
    3.5
    >>> np.median(a, axis=0)
    array([ 6.5,  4.5,  2.5])
    >>> np.median(a, axis=1)
    array([ 7.,  2.])

    """
    # ``sorted_a`` instead of ``sorted``: don't shadow the builtin.
    if overwrite_input:
        if axis is None:
            sorted_a = a.ravel()
            sorted_a.sort()
        else:
            a.sort(axis=axis)
            sorted_a = a
    else:
        sorted_a = sort(a, axis=axis)
    if axis is None:
        axis = 0
    indexer = [slice(None)] * sorted_a.ndim
    index = int(sorted_a.shape[axis] / 2)
    if sorted_a.shape[axis] % 2 == 1:
        # Odd length: keep the single middle element as a length-1 slice
        # so that mean() below still reduces over `axis`.
        indexer[axis] = slice(index, index + 1)
    else:
        # Even length: the two middle elements.
        indexer[axis] = slice(index - 1, index + 1)
    # Index with a tuple — a list of slices is an error on modern NumPy.
    # mean() coerces the data type in both cases and honors `out`.
    return mean(sorted_a[tuple(indexer)], axis=axis, out=out)
def percentile(a, q, axis=None, out=None, overwrite_input=False):
    """
    Compute the qth percentile of the data along the specified axis.

    Returns the qth percentile of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    q : float in range of [0,100] (or sequence of floats)
        percentile to compute which must be between 0 and 100 inclusive
    axis : {None, int}, optional
        Axis along which the percentiles are computed. The default
        (axis=None) is to compute the percentile along a flattened
        version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : {False, True}, optional
        If True, then allow use of memory of input array (a) for
        calculations. The input array will be modified by the call to
        percentile. This will save memory when you do not need to
        preserve the contents of the input array. Treat the input as
        undefined, but it will probably be fully or partially sorted.
        Default is False. Note that, if `overwrite_input` is True and
        the input is not already an ndarray, an error will be raised.

    Returns
    -------
    pcntile : ndarray
        A new array holding the result (unless `out` is specified, in
        which case that array is returned instead). If the input contains
        integers, or floats of smaller precision than 64, then the output
        data-type is float64. Otherwise, the output data-type is the same
        as that of the input.

    See Also
    --------
    mean, median

    Notes
    -----
    Given a vector V of length N, the qth percentile of V is the qth
    ranked value in a sorted copy of V.  A weighted average of the two
    nearest neighbors is used if the normalized ranking does not match q
    exactly.  The result is the same as the median if q is 50, the same
    as the minimum if q is 0, and the same as the maximum if q is 100.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.percentile(a, 50)
    3.5
    >>> np.percentile(a, 50, axis=0)
    array([ 6.5,  4.5,  2.5])
    >>> np.percentile(a, 50, axis=1)
    array([ 7.,  2.])

    """
    a = np.asarray(a)

    # Degenerate percentiles short-circuit to min/max — no sort needed.
    if q == 0:
        return a.min(axis=axis, out=out)
    elif q == 100:
        return a.max(axis=axis, out=out)

    # ``sorted_a`` instead of ``sorted``: don't shadow the builtin.
    if overwrite_input:
        if axis is None:
            sorted_a = a.ravel()
            sorted_a.sort()
        else:
            a.sort(axis=axis)
            sorted_a = a
    else:
        sorted_a = sort(a, axis=axis)
    if axis is None:
        axis = 0

    return _compute_qth_percentile(sorted_a, q, axis, out)
# handle sequence of q's without calling sort multiple times
def _compute_qth_percentile(sorted, q, axis, out):
if not isscalar(q):
p = [_compute_qth_percentile(sorted, qi, axis, None)
for qi in q]
if out is not None:
out.flat = p
return p
q = q / 100.0
if (q < 0) or (q > 1):
raise ValueError, "percentile must be either in the range [0,100]"
indexer = [slice(None)] * sorted.ndim
Nx = sorted.shape[axis]
index = q*(Nx-1)
i = int(index)
if i == index:
indexer[axis] = slice(i, i+1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i+2)
j = i + 1
weights = array([(j - index), (index - i)],float)
wshape = [1]*sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use add.reduce in both cases to coerce data type as well as
# check and use out array.
return add.reduce(sorted[indexer]*weights, axis=axis, out=out)/sumval
def trapz(y, x=None, dx=1.0, axis=-1):
    """
    Integrate along the given axis using the composite trapezoidal rule.

    Integrate `y` (`x`) along given axis.

    Parameters
    ----------
    y : array_like
        Input array to integrate.
    x : array_like, optional
        If `x` is None, then spacing between all `y` elements is `dx`.
    dx : scalar, optional
        If `x` is None, spacing given by `dx` is assumed. Default is 1.
    axis : int, optional
        Specify the axis.

    Returns
    -------
    out : float
        Definite integral as approximated by trapezoidal rule.

    See Also
    --------
    sum, cumsum

    References
    ----------
    .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule

    Examples
    --------
    >>> np.trapz([1,2,3])
    4.0
    >>> np.trapz([1,2,3], x=[4,6,8])
    8.0
    >>> np.trapz([1,2,3], dx=2)
    8.0

    """
    y = asanyarray(y)
    if x is None:
        d = dx
    else:
        x = asanyarray(x)
        if x.ndim == 1:
            d = diff(x)
            # Reshape the 1-D spacing so it broadcasts along `axis`.
            shape = [1] * y.ndim
            shape[axis] = d.shape[0]
            d = d.reshape(shape)
        else:
            d = diff(x, axis=axis)
    nd = y.ndim
    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    # Convert to tuples before indexing: a list of slices is rejected by
    # modern NumPy (and the failure there is not the ValueError caught
    # below, so the fallback would not save it).
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)
    try:
        ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
    except ValueError:
        # Operations didn't work (e.g. subclasses without ufunc support);
        # cast to plain ndarrays and retry.
        d = np.asarray(d)
        y = np.asarray(y)
        ret = add.reduce(d * (y[slice1] + y[slice2]) / 2.0, axis)
    return ret
#always succeed
def add_newdoc(place, obj, doc):
    """Add documentation to `obj`, which lives in module `place`.

    If `doc` is a string, attach it to `obj` as a docstring.
    If `doc` is a tuple ``(attr, docstring)``, attach the docstring to the
    named attribute of `obj`.
    If `doc` is a list of ``(attr, docstring)`` pairs, apply each in turn.

    This routine never raises an error.
    """
    try:
        new = {}
        # The exec() call form parses under both Python 2 and Python 3;
        # the original ``exec '...' in new`` statement is Python-2-only syntax.
        exec('from %s import %s' % (place, obj), new)
        if isinstance(doc, str):
            add_docstring(new[obj], doc.strip())
        elif isinstance(doc, tuple):
            add_docstring(getattr(new[obj], doc[0]), doc[1].strip())
        elif isinstance(doc, list):
            for val in doc:
                add_docstring(getattr(new[obj], val[0]), val[1].strip())
    except:
        # Deliberate swallow-all: doc attachment must never break an import.
        pass
# From matplotlib
def meshgrid(x,y):
    """
    Return coordinate matrices from two coordinate vectors.

    Parameters
    ----------
    x, y : ndarray
        Two 1-D arrays giving the x and y coordinates of a grid.

    Returns
    -------
    X, Y : ndarray
        Arrays of shape ``(len(y), len(x))``: `X` repeats `x` down the
        rows, `Y` repeats `y` across the columns, so ``(X[i, j], Y[i, j])``
        enumerates every grid point.

    Examples
    --------
    >>> X, Y = np.meshgrid([1,2,3], [4,5,6,7])
    >>> X
    array([[1, 2, 3],
           [1, 2, 3],
           [1, 2, 3],
           [1, 2, 3]])
    >>> Y
    array([[4, 4, 4],
           [5, 5, 5],
           [6, 6, 6],
           [7, 7, 7]])
    """
    x = asarray(x)
    y = asarray(y)
    # Note the reversal: the row count comes from `y`, columns from `x`.
    n_rows = len(y)
    n_cols = len(x)
    X = x.reshape(1, n_cols).repeat(n_rows, axis=0)
    Y = y.reshape(n_rows, 1).repeat(n_cols, axis=1)
    return X, Y
def delete(arr, obj, axis=None):
    """
    Return a new array with sub-arrays along an axis deleted.

    Parameters
    ----------
    arr : array_like
        Input array.
    obj : slice, int or array of ints
        Indicate which sub-arrays to remove.
    axis : int, optional
        The axis along which to delete the subarray defined by `obj`.
        If `axis` is None, `obj` is applied to the flattened array.

    Returns
    -------
    out : ndarray
        A copy of `arr` with the elements specified by `obj` removed. Note
        that `delete` does not occur in-place. If `axis` is None, `out` is
        a flattened array.

    See Also
    --------
    insert : Insert elements into an array.
    append : Append elements at the end of an array.

    Examples
    --------
    >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
    >>> np.delete(arr, 1, 0)
    array([[ 1,  2,  3,  4],
           [ 9, 10, 11, 12]])
    >>> np.delete(arr, np.s_[::2], 1)
    array([[ 2,  4],
           [ 6,  8],
           [10, 12]])
    >>> np.delete(arr, [1,3,5], None)
    array([ 1,  3,  5,  7,  8,  9, 10, 11, 12])
    """
    # NOTE: this is Python-2-era NumPy code; `long` and `xrange` are the
    # Python 2 builtins and list-of-slices indexing was still legal then.
    wrap = None
    if type(arr) is not ndarray:
        # Preserve subclass wrapping (e.g. np.matrix) via __array_wrap__.
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass
    arr = asarray(arr)
    ndim = arr.ndim
    if axis is None:
        # Flattened-view semantics: operate on the last (only) axis.
        if ndim != 1:
            arr = arr.ravel()
        ndim = arr.ndim
        axis = ndim - 1
    if ndim == 0:
        # 0-d input: nothing indexable to delete; return a copy.
        if wrap:
            return wrap(arr)
        else:
            return arr.copy()
    slobj = [slice(None)]*ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)
    if isinstance(obj, (int, long, integer)):
        # Single index: negative values wrap once, then must be in range.
        if (obj < 0): obj += N
        if (obj < 0 or obj >= N):
            raise ValueError(
                    "invalid entry")
        newshape[axis] -= 1
        new = empty(newshape, arr.dtype, arr.flags.fnc)
        # copy everything before the deleted index ...
        slobj[axis] = slice(None, obj)
        new[slobj] = arr[slobj]
        # ... then everything after it, shifted left by one.
        slobj[axis] = slice(obj, None)
        slobj2 = [slice(None)]*ndim
        slobj2[axis] = slice(obj+1, None)
        new[slobj] = arr[slobj2]
    elif isinstance(obj, slice):
        start, stop, step = obj.indices(N)
        numtodel = len(xrange(start, stop, step))
        if numtodel <= 0:
            # BUGFIX: the original did ``return wrap(new)`` here, but `new`
            # is not defined yet in this branch -> NameError. Wrap a copy of
            # the untouched input instead, matching the non-wrap path.
            if wrap:
                return wrap(arr.copy())
            else:
                return arr.copy()
        newshape[axis] -= numtodel
        new = empty(newshape, arr.dtype, arr.flags.fnc)
        # copy initial chunk
        if start == 0:
            pass
        else:
            slobj[axis] = slice(None, start)
            new[slobj] = arr[slobj]
        # copy end chunk
        if stop == N:
            pass
        else:
            slobj[axis] = slice(stop-numtodel, None)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = slice(stop, None)
            new[slobj] = arr[slobj2]
        # copy middle pieces
        if step == 1:
            pass
        else:  # use array indexing: keep the indices NOT hit by the slice
            obj = arange(start, stop, step, dtype=intp)
            all = arange(start, stop, dtype=intp)
            obj = setdiff1d(all, obj)
            slobj[axis] = slice(start, stop-numtodel)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = obj
            new[slobj] = arr[slobj2]
    else:  # default behavior: fancy-index with the complement of `obj`
        obj = array(obj, dtype=intp, copy=0, ndmin=1)
        all = arange(N, dtype=intp)
        obj = setdiff1d(all, obj)
        slobj[axis] = obj
        new = arr[slobj]
    if wrap:
        return wrap(new)
    else:
        return new
def insert(arr, obj, values, axis=None):
    """
    Insert values along the given axis before the given indices.
    Parameters
    ----------
    arr : array_like
        Input array.
    obj : int, slice or sequence of ints
        Object that defines the index or indices before which `values` is
        inserted.
    values : array_like
        Values to insert into `arr`. If the type of `values` is different
        from that of `arr`, `values` is converted to the type of `arr`.
    axis : int, optional
        Axis along which to insert `values`.  If `axis` is None then `arr`
        is flattened first.
    Returns
    -------
    out : ndarray
        A copy of `arr` with `values` inserted.  Note that `insert`
        does not occur in-place: a new array is returned. If
        `axis` is None, `out` is a flattened array.
    See Also
    --------
    append : Append elements at the end of an array.
    delete : Delete elements from an array.
    Examples
    --------
    >>> a = np.array([[1, 1], [2, 2], [3, 3]])
    >>> a
    array([[1, 1],
           [2, 2],
           [3, 3]])
    >>> np.insert(a, 1, 5)
    array([1, 5, 1, 2, 2, 3, 3])
    >>> np.insert(a, 1, 5, axis=1)
    array([[1, 5, 1],
           [2, 5, 2],
           [3, 5, 3]])
    >>> b = a.flatten()
    >>> b
    array([1, 1, 2, 2, 3, 3])
    >>> np.insert(b, [2, 2], [5, 6])
    array([1, 1, 5, 6, 2, 2, 3, 3])
    >>> np.insert(b, slice(2, 4), [5, 6])
    array([1, 1, 5, 2, 6, 2, 3, 3])
    >>> np.insert(b, [2, 2], [7.13, False]) # type casting
    array([1, 1, 7, 0, 2, 2, 3, 3])
    >>> x = np.arange(8).reshape(2, 4)
    >>> idx = (1, 3)
    >>> np.insert(x, idx, 999, axis=1)
    array([[  0, 999,   1,   2, 999,   3],
           [  4, 999,   5,   6, 999,   7]])
    """
    # Preserve subclass wrapping (e.g. np.matrix) via __array_wrap__.
    wrap = None
    if type(arr) is not ndarray:
        try:
            wrap = arr.__array_wrap__
        except AttributeError:
            pass
    arr = asarray(arr)
    ndim = arr.ndim
    if axis is None:
        # axis=None means: flatten first, then insert along the only axis.
        if ndim != 1:
            arr = arr.ravel()
        ndim = arr.ndim
        axis = ndim-1
    if (ndim == 0):
        # 0-d input: "inserting" just overwrites the scalar with `values`.
        arr = arr.copy()
        arr[...] = values
        if wrap:
            return wrap(arr)
        else:
            return arr
    slobj = [slice(None)]*ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)
    if isinstance(obj, (int, long, integer)):
        # Single index: negative values wrap once; 0 <= obj <= N allowed
        # (obj == N appends at the end).  NOTE: `long`/`integer` are the
        # Python-2-era accepted integer types.
        if (obj < 0): obj += N
        if obj < 0 or obj > N:
            raise ValueError(
                "index (%d) out of range (0<=index<=%d) "\
                "in dimension %d" % (obj, N, axis))
        newshape[axis] += 1;
        new = empty(newshape, arr.dtype, arr.flags.fnc)
        # copy [0, obj), place `values` at obj, then copy [obj, N) shifted by 1
        slobj[axis] = slice(None, obj)
        new[slobj] = arr[slobj]
        slobj[axis] = obj
        new[slobj] = values
        slobj[axis] = slice(obj+1,None)
        slobj2 = [slice(None)]*ndim
        slobj2[axis] = slice(obj,None)
        new[slobj] = arr[slobj2]
        if wrap:
            return wrap(new)
        return new
    elif isinstance(obj, slice):
        # turn it into a range object
        obj = arange(*obj.indices(N),**{'dtype':intp})
        # get two sets of indices
        # one is the indices which will hold the new stuff
        # two is the indices where arr will be copied over
        # NOTE(review): as written, a sequence `obj` (neither int nor slice)
        # skips this branch entirely and the function falls off the end,
        # returning None -- yet the docstring examples show sequence support.
        # Confirm the intended indentation of this tail against upstream.
        obj = asarray(obj, dtype=intp)
        numnew = len(obj)
        # Each requested position shifts by the number of insertions before it.
        index1 = obj + arange(numnew)
        index2 = setdiff1d(arange(numnew+N),index1)
        newshape[axis] += numnew
        new = empty(newshape, arr.dtype, arr.flags.fnc)
        slobj2 = [slice(None)]*ndim
        slobj[axis] = index1
        slobj2[axis] = index2
        new[slobj] = values
        new[slobj2] = arr
        if wrap:
            return wrap(new)
        return new
def append(arr, values, axis=None):
    """
    Append values to the end of an array.

    Parameters
    ----------
    arr : array_like
        Values are appended to a copy of this array.
    values : array_like
        Appended to a copy of `arr`.  With an explicit `axis` the shape
        must match `arr` on every other axis; with ``axis=None`` any shape
        is accepted and flattened first.
    axis : int, optional
        Axis along which to append.  If omitted, both `arr` and `values`
        are flattened before use.

    Returns
    -------
    out : ndarray
        A new array holding `arr` followed by `values`; the input is never
        modified in place.  With ``axis=None`` the result is 1-D.

    See Also
    --------
    insert : Insert elements into an array.
    delete : Delete elements from an array.

    Examples
    --------
    >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
    array([1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
    array([[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]])
    """
    arr = asanyarray(arr)
    if axis is not None:
        # Explicit axis: concatenate handles all shape validation.
        return concatenate((arr, values), axis=axis)
    # No axis given: operate on flattened views of both operands.
    flat = arr if arr.ndim == 1 else arr.ravel()
    return concatenate((flat, ravel(values)), axis=flat.ndim - 1)
| gpl-3.0 |
stephane-martin/salt-debian-packaging | salt-2016.3.2/tests/unit/modules/mac_assistive_test.py | 2 | 5515 | # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch
)
ensure_in_syspath('../../')
# Import Salt Libs
from salt.exceptions import CommandExecutionError
from salt.modules import mac_assistive as assistive
assistive.__salt__ = {}
assistive.__grains__ = {}
class AssistiveTestCase(TestCase):
    '''
    Unit tests for salt.modules.mac_assistive.

    All shell interaction is stubbed: ``cmd.run_all`` in ``assistive.__salt__``
    is replaced with a MagicMock, and the private ``_get_assistive_access``
    helper is patched where a canned TCC database listing is needed, so no
    real macOS commands run.
    '''
    def test_install_assistive_bundle(self):
        '''
        Test installing a bundle ID as being allowed to run with assistive access
        '''
        # retcode 0 simulates the sqlite insert succeeding.
        mock_ret = MagicMock(return_value={'retcode': 0})
        with patch.dict(assistive.__salt__, {'cmd.run_all': mock_ret}):
            with patch.dict(assistive.__grains__, {'osrelease': '10.11.3'}):
                self.assertTrue(assistive.install('foo'))
    def test_install_assistive_error(self):
        '''
        Test installing a bundle ID as being allowed to run with assistive access
        '''
        # Non-zero retcode must surface as CommandExecutionError.
        mock_ret = MagicMock(return_value={'retcode': 1})
        with patch.dict(assistive.__salt__, {'cmd.run_all': mock_ret}):
            with patch.dict(assistive.__grains__, {'osrelease': '10.11.3'}):
                self.assertRaises(CommandExecutionError, assistive.install, 'foo')
    @patch('salt.modules.mac_assistive._get_assistive_access', MagicMock(return_value=[('foo', 0)]))
    def test_installed_bundle(self):
        '''
        Test checking to see if a bundle id is installed as being able to use assistive access
        '''
        self.assertTrue(assistive.installed('foo'))
    @patch('salt.modules.mac_assistive._get_assistive_access',
           MagicMock(return_value=[]))
    def test_installed_bundle_not(self):
        '''
        Test checking to see if a bundle id is installed as being able to use assistive access
        '''
        self.assertFalse(assistive.installed('foo'))
    @patch('salt.modules.mac_assistive._get_assistive_access',
           MagicMock(return_value=[('foo', 0)]))
    def test_enable_assistive(self):
        '''
        Test enabling a bundle ID as being allowed to run with assistive access
        '''
        mock_ret = MagicMock(return_value={'retcode': 0})
        with patch.dict(assistive.__salt__, {'cmd.run_all': mock_ret}):
            self.assertTrue(assistive.enable('foo', True))
    @patch('salt.modules.mac_assistive._get_assistive_access',
           MagicMock(return_value=[('foo', 0)]))
    def test_enable_error(self):
        '''
        Test enabled a bundle ID that throws a command error
        '''
        mock_ret = MagicMock(return_value={'retcode': 1})
        with patch.dict(assistive.__salt__, {'cmd.run_all': mock_ret}):
            self.assertRaises(CommandExecutionError,
                              assistive.enable,
                              'foo')
    @patch('salt.modules.mac_assistive._get_assistive_access',
           MagicMock(return_value=[]))
    def test_enable_false(self):
        '''
        Test return of enable function when app isn't found.
        '''
        self.assertFalse(assistive.enable('foo'))
    @patch('salt.modules.mac_assistive._get_assistive_access',
           MagicMock(return_value=[('foo', '1')]))
    def test_enabled_assistive(self):
        '''
        Test enabling a bundle ID as being allowed to run with assistive access
        '''
        self.assertTrue(assistive.enabled('foo'))
    @patch('salt.modules.mac_assistive._get_assistive_access',
           MagicMock(return_value=[]))
    def test_enabled_assistive_false(self):
        '''
        Test if a bundle ID is disabled for assistive access
        '''
        self.assertFalse(assistive.enabled('foo'))
    def test_remove_assistive(self):
        '''
        Test removing an assitive bundle.
        '''
        mock_ret = MagicMock(return_value={'retcode': 0})
        with patch.dict(assistive.__salt__, {'cmd.run_all': mock_ret}):
            self.assertTrue(assistive.remove('foo'))
    def test_remove_assistive_error(self):
        '''
        Test removing an assitive bundle.
        '''
        mock_ret = MagicMock(return_value={'retcode': 1})
        with patch.dict(assistive.__salt__, {'cmd.run_all': mock_ret}):
            self.assertRaises(CommandExecutionError,
                              assistive.remove,
                              'foo')
    def test_get_assistive_access(self):
        '''
        Test if a bundle ID is enabled for assistive access
        '''
        # Pipe-delimited rows mimic the sqlite query output of TCC.db.
        mock_out = 'kTCCServiceAccessibility|/bin/bash|1|1|1|\n' \
                   'kTCCServiceAccessibility|/usr/bin/osascript|1|1|1|'
        mock_ret = MagicMock(return_value={'retcode': 0, 'stdout': mock_out})
        expected = [('/bin/bash', '1'), ('/usr/bin/osascript', '1')]
        with patch.dict(assistive.__salt__, {'cmd.run_all': mock_ret}):
            self.assertEqual(assistive._get_assistive_access(), expected)
    def test_get_assistive_access_error(self):
        '''
        Test a CommandExecutionError is raised when something goes wrong.
        '''
        mock_ret = MagicMock(return_value={'retcode': 1})
        with patch.dict(assistive.__salt__, {'cmd.run_all': mock_ret}):
            self.assertRaises(CommandExecutionError,
                              assistive._get_assistive_access)
if __name__ == '__main__':
    # Allow running this module directly through Salt's test runner.
    from integration import run_tests
    run_tests(AssistiveTestCase, needs_daemon=False)
| apache-2.0 |
yakovenkodenis/rethinkdb | test/rql_test/connections/http_support/flask/_compat.py | 783 | 2164 | # -*- coding: utf-8 -*-
"""
flask._compat
~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
# True when running under a Python 2.x interpreter; selects which set of
# compatibility shims below gets defined.
PY2 = sys.version_info[0] == 2
# No-op decorator used where Python 3 needs no wrapping.
_identity = lambda x: x
if not PY2:
    text_type = str
    string_types = (str,)
    integer_types = (int, )
    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())
    from io import StringIO
    def reraise(tp, value, tb=None):
        # Re-raise `value` with traceback `tb` (Python 3 chained-raise form).
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    # Python 3: __str__ already returns text, nothing to wrap.
    implements_to_string = _identity
else:
    text_type = unicode
    string_types = (str, unicode)
    integer_types = (int, long)
    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()
    from cStringIO import StringIO
    # The three-argument raise is a syntax error on Python 3, so it must be
    # hidden inside exec() to keep this file importable on both versions.
    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
    def implements_to_string(cls):
        # Python 2: route __str__ through __unicode__ and encode to UTF-8.
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls
def with_metaclass(meta, *bases):
    """Create a base class with metaclass `meta` for use in a class statement.

    Works identically on Python 2 and 3 (six-style): the returned temporary
    class replaces itself with `meta(name, bases, d)` when subclassed.
    """
    # This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instantiation that replaces
    # itself with the actual metaclass.  Because of internal type checks
    # we also need to make sure that we downgrade the custom metaclass
    # for one level to something closer to type (that's why __call__ and
    # __init__ comes back from type etc.).
    #
    # This has the advantage over six.with_metaclass in that it does not
    # introduce dummy classes into the final MRO.
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            if this_bases is None:
                # Creating the temporary class itself (see call below).
                return type.__new__(cls, name, (), d)
            # Subclassing the temporary class: build the real class with
            # the real metaclass and the originally requested bases.
            return meta(name, bases, d)
    return metaclass('temporary_class', None, {})
| agpl-3.0 |
maciekcc/tensorflow | tensorflow/python/kernel_tests/edit_distance_op_test.py | 139 | 8145 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.edit_distance_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def ConstantOf(x):
  """Wrap `x` in a TF constant, coercing non-string data to int64."""
  arr = np.asarray(x)
  # dtype chars 'S' (bytes) and 'U' (unicode) stay as-is; everything else
  # is forced to int64 so indices/shapes get the dtype the ops expect.
  if arr.dtype.char not in "SU":
    arr = np.asarray(arr, dtype=np.int64)
  return constant_op.constant(arr)
class EditDistanceTest(test.TestCase):
  """Tests for tf.edit_distance over SparseTensor hypothesis/truth pairs."""

  def _testEditDistanceST(self,
                          hypothesis_st,
                          truth_st,
                          normalize,
                          expected_output,
                          expected_shape,
                          expected_err_re=None):
    # Core driver: builds the op from already-constructed sparse tensors and
    # either checks the result or asserts the expected op error.
    edit_distance = array_ops.edit_distance(
        hypothesis=hypothesis_st, truth=truth_st, normalize=normalize)
    if expected_err_re is None:
      self.assertEqual(edit_distance.get_shape(), expected_shape)
      output = edit_distance.eval()
      self.assertAllClose(output, expected_output)
    else:
      with self.assertRaisesOpError(expected_err_re):
        edit_distance.eval()
  def _testEditDistance(self,
                        hypothesis,
                        truth,
                        normalize,
                        expected_output,
                        expected_err_re=None):
    # hypothesis/truth are (indices, values, dense_shape) triples; run the
    # same case through both SparseTensorValue and SparseTensor inputs.
    # Shape inference figures out the shape from the shape variables
    # Explicit tuple() needed since zip returns an iterator in Python 3.
    expected_shape = [
        max(h, t) for h, t in tuple(zip(hypothesis[2], truth[2]))[:-1]
    ]
    # SparseTensorValue inputs.
    with ops.Graph().as_default() as g, self.test_session(g):
      # hypothesis and truth are (index, value, shape) tuples
      self._testEditDistanceST(
          hypothesis_st=sparse_tensor.SparseTensorValue(
              *[ConstantOf(x) for x in hypothesis]),
          truth_st=sparse_tensor.SparseTensorValue(
              *[ConstantOf(x) for x in truth]),
          normalize=normalize,
          expected_output=expected_output,
          expected_shape=expected_shape,
          expected_err_re=expected_err_re)
    # SparseTensor inputs.
    with ops.Graph().as_default() as g, self.test_session(g):
      # hypothesis and truth are (index, value, shape) tuples
      self._testEditDistanceST(
          hypothesis_st=sparse_tensor.SparseTensor(
              *[ConstantOf(x) for x in hypothesis]),
          truth_st=sparse_tensor.SparseTensor(*[ConstantOf(x) for x in truth]),
          normalize=normalize,
          expected_output=expected_output,
          expected_shape=expected_shape,
          expected_err_re=expected_err_re)
  def testEditDistanceNormalized(self):
    hypothesis_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
    hypothesis_values = [0, 1, 1, -1]
    hypothesis_shape = [2, 2]
    truth_indices = [[0, 0], [1, 0], [1, 1]]
    truth_values = [0, 1, 1]
    truth_shape = [2, 2]
    # Distances divided by the truth lengths.
    expected_output = [1.0, 0.5]
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_output)
  def testEditDistanceUnnormalized(self):
    hypothesis_indices = [[0, 0], [1, 0], [1, 1]]
    hypothesis_values = [10, 10, 11]
    hypothesis_shape = [2, 2]
    truth_indices = [[0, 0], [0, 1], [1, 0], [1, 1]]
    truth_values = [1, 2, 1, -1]
    truth_shape = [2, 3]
    expected_output = [2.0, 2.0]
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=False,
        expected_output=expected_output)
  def testEditDistanceProperDistance(self):
    # In this case, the values are individual characters stored in the
    # SparseTensor (type DT_STRING)
    hypothesis_indices = ([[0, i] for i, _ in enumerate("algorithm")] +
                          [[1, i] for i, _ in enumerate("altruistic")])
    hypothesis_values = [x for x in "algorithm"] + [x for x in "altruistic"]
    hypothesis_shape = [2, 11]
    truth_indices = ([[0, i] for i, _ in enumerate("altruistic")] +
                     [[1, i] for i, _ in enumerate("algorithm")])
    truth_values = [x for x in "altruistic"] + [x for x in "algorithm"]
    truth_shape = [2, 11]
    # Levenshtein distance between "algorithm" and "altruistic" is 6.
    expected_unnormalized = [6.0, 6.0]
    expected_normalized = [6.0 / len("altruistic"), 6.0 / len("algorithm")]
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=False,
        expected_output=expected_unnormalized)
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_normalized)
  def testEditDistance3D(self):
    hypothesis_indices = [[0, 0, 0], [1, 0, 0]]
    hypothesis_values = [0, 1]
    hypothesis_shape = [2, 1, 1]
    truth_indices = [[0, 1, 0], [1, 0, 0], [1, 1, 0]]
    truth_values = [0, 1, 1]
    truth_shape = [2, 2, 1]
    expected_output = [
        [np.inf, 1.0],  # (0,0): no truth, (0,1): no hypothesis
        [0.0, 1.0]
    ]  # (1,0): match,    (1,1): no hypothesis
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_output)
  def testEditDistanceZeroLengthHypothesis(self):
    hypothesis_indices = np.empty((0, 2), dtype=np.int64)
    hypothesis_values = []
    hypothesis_shape = [1, 0]
    truth_indices = [[0, 0]]
    truth_values = [0]
    truth_shape = [1, 1]
    expected_output = [1.0]
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_output)
  def testEditDistanceZeroLengthTruth(self):
    hypothesis_indices = [[0, 0]]
    hypothesis_values = [0]
    hypothesis_shape = [1, 1]
    truth_indices = np.empty((0, 2), dtype=np.int64)
    truth_values = []
    truth_shape = [1, 0]
    expected_output = [np.inf]  # Normalized, loss is 1/0 = inf
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_output)
  def testEditDistanceZeroLengthHypothesisAndTruth(self):
    hypothesis_indices = np.empty((0, 2), dtype=np.int64)
    hypothesis_values = []
    hypothesis_shape = [1, 0]
    truth_indices = np.empty((0, 2), dtype=np.int64)
    truth_values = []
    truth_shape = [1, 0]
    expected_output = [0]  # Normalized is 0 because of exact match
    self._testEditDistance(
        hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
        truth=(truth_indices, truth_values, truth_shape),
        normalize=True,
        expected_output=expected_output)
if __name__ == "__main__":
  # Standard TF test entry point; discovers and runs the TestCase above.
  test.main()
| apache-2.0 |
mith1979/pynet_course | netmiko/tests/old_format/test_base_connection.py | 14 | 7119 | #!/usr/bin/env python
'''
This module runs tests against Cisco IOS devices.
setup_module: setup variables for later use.
test_init_method: verify attributes get set properly in init
test_session_preparation: verify session_preparation method
test_establish_connection: verify SSH connection gets established
test_disable_paging: disable paging
test_set_base_prompt: verify the base prompt is properly set
test_find_prompt: verify find prompt method
test_clear_buffer: clear SSH buffer
test_send_command: send a command
test_send_command_expect: send a command using expect-like method
test_normalize_linefeeds: ensure \n is the only line termination character in output
test_disconnect: cleanly disconnect the SSH session
'''
from os import path
import time
import pytest
from netmiko import ConnectHandler
from test_utils import parse_yaml
def setup_module(module):
    '''
    Setup variables for tests.

    Loads expected responses, commands, and device credentials from the
    etc/*.yml fixtures and opens ONE shared SSH session (module.net_connect)
    that every test in this module reuses.
    '''
    test_type = 'cisco_ios'
    pwd = path.dirname(path.realpath(__file__))
    responses = parse_yaml(pwd + "/etc/responses.yml")
    module.EXPECTED_RESPONSES = responses[test_type]
    commands = parse_yaml(pwd + "/etc/commands.yml")
    module.commands = commands[test_type]
    test_devices = parse_yaml(pwd + "/etc/test_devices.yml")
    module.device = test_devices[test_type]
    # `device` resolves via the module attribute assigned just above.
    device['verbose'] = False
    module.net_connect = ConnectHandler(**device)
def test_init_method():
    '''
    Verify attributes assigned in __init__ method match the device dict.
    '''
    assert net_connect.ip == device['ip']
    # Port falls back to the SSH default when absent from the fixture.
    assert net_connect.port == device.get('port', 22)
    assert net_connect.username == device['username']
    assert net_connect.password == device['password']
    assert net_connect.secret == device['secret']
    assert net_connect.device_type == device['device_type']
    assert net_connect.ansi_escape_codes == False
    assert net_connect.base_prompt == EXPECTED_RESPONSES['base_prompt']
def test_session_preparation():
    '''
    Paging should be disabled and base_prompt should be set
    '''
    assert net_connect.base_prompt == EXPECTED_RESPONSES['base_prompt']
    # 'Configuration register is' sits at the very end of `show version`,
    # so seeing it proves the full (unpaged) output came back.
    show_version = net_connect.send_command(commands['version'])
    assert 'Configuration register is' in show_version
def test_establish_connection():
    '''
    Verify connection gets established by running a basic show command.
    '''
    show_ip = net_connect.send_command(commands['basic'])
    assert EXPECTED_RESPONSES['interface_ip'] in show_ip
def test_disable_paging():
    '''
    Verify paging is disabled: multi-page output must arrive in full
    without any --More-- interaction.
    '''
    multiple_line_output = net_connect.send_command(commands['extended_output'])
    assert 'Configuration register is' in multiple_line_output
def test_set_base_prompt():
    '''
    Verify the set_base_prompt() method stored the expected prompt stub.
    '''
    assert net_connect.base_prompt == EXPECTED_RESPONSES['base_prompt']
def test_find_prompt():
    '''
    Verify the find_prompt() method returns the current prompt
    (base prompt plus trailing mode character, e.g. '#').
    '''
    assert net_connect.find_prompt() == EXPECTED_RESPONSES['router_prompt']
def test_clear_buffer():
    '''
    Verify the clear_buffer() method removes any outstanding data from the SSH channel
    '''
    # Manually send a command down the channel so that data needs read.
    net_connect.remote_conn.sendall(commands["basic"] + '\n')
    # Give the device time to produce output before draining it.
    time.sleep(2)
    net_connect.clear_buffer()
    # Should not be anything there on the second pass
    clear_buffer_check = net_connect.clear_buffer()
    assert clear_buffer_check is None
def test_send_command():
    '''
    Verify send_command() method with default parameters works properly
    Verify send_command() with additional delay works properly
    Verify send_command() with less delay works properly
    Verify send_command() with lower max_loops works properly
    Verify strip_prompt / strip_command toggles control what stays in output
    '''
    basic_command = commands.get('basic')
    # Same command under different timing knobs must yield the same content.
    send_command_std = net_connect.send_command(basic_command)
    send_command_slow = net_connect.send_command(basic_command, delay_factor=2)
    send_command_fast = net_connect.send_command(basic_command, delay_factor=.25)
    send_command_max_loops = net_connect.send_command(basic_command, max_loops=10)
    strip_prompt_true = send_command_std
    strip_prompt_false = net_connect.send_command(basic_command,
                                                  strip_prompt=False)
    strip_command_true = send_command_std
    strip_command_false = net_connect.send_command(basic_command,
                                                   strip_command=False)
    assert EXPECTED_RESPONSES['interface_ip'] in send_command_std
    assert EXPECTED_RESPONSES['interface_ip'] in send_command_slow
    assert EXPECTED_RESPONSES['interface_ip'] in send_command_fast
    assert EXPECTED_RESPONSES['interface_ip'] in send_command_max_loops
    # Prompt/echoed-command only appear when stripping is disabled.
    assert EXPECTED_RESPONSES['base_prompt'] not in strip_prompt_true
    assert EXPECTED_RESPONSES['base_prompt'] in strip_prompt_false
    assert basic_command not in strip_command_true
    assert basic_command in strip_command_false
def test_send_command_expect():
    '''
    Verify send_command_expect() method with default parameters works properly
    Verify send_command_expect() method with a different expect string
    Verify send_command_expect() with additional delay works properly
    Verify send_command_expect() with less delay works properly
    Verify send_command_expect() with lower max_loops works properly
    '''
    basic_command = commands.get('basic')
    cmd_expect_std = net_connect.send_command_expect(basic_command)
    # Stop reading as soon as the interface name appears instead of the prompt.
    cmd_expect_short = net_connect.send_command_expect(basic_command,
                                                       expect_string=commands.get("interface_name"))
    cmd_expect_slow = net_connect.send_command_expect(basic_command, delay_factor=2)
    cmd_expect_fast = net_connect.send_command_expect(basic_command, delay_factor=.25)
    cmd_expect_max_loops = net_connect.send_command_expect(basic_command, max_loops=10)
    expect_strip_prompt_true = cmd_expect_std
    expect_strip_prompt_false = net_connect.send_command_expect(basic_command,
                                                                strip_prompt=False)
    expect_strip_command_true = cmd_expect_std
    expect_strip_command_false = net_connect.send_command_expect(basic_command,
                                                                 strip_command=False)
    assert EXPECTED_RESPONSES['interface_ip'] in cmd_expect_std
    # assert EXPECTED_RESPONSES['interface_ip'] not in cmd_expect_short
    assert EXPECTED_RESPONSES['interface_ip'] in cmd_expect_slow
    assert EXPECTED_RESPONSES['interface_ip'] in cmd_expect_fast
    assert EXPECTED_RESPONSES['interface_ip'] in cmd_expect_max_loops
    assert EXPECTED_RESPONSES['base_prompt'] not in expect_strip_prompt_true
    assert EXPECTED_RESPONSES['base_prompt'] in expect_strip_prompt_false
    assert basic_command not in expect_strip_command_true
    assert basic_command in expect_strip_command_false
def test_normalize_linefeeds():
    '''
    Verify that '\r\n' are converted to '\n' by the normal send path,
    while the raw channel still carries '\r\n'.
    '''
    show_ip = net_connect.send_command(commands['basic'])
    # Bypass send_command() to capture un-normalized channel output.
    net_connect.remote_conn.sendall('show ip int brief\n')
    time.sleep(1)
    raw_output = net_connect.clear_buffer()
    assert '\r\n' in raw_output
    assert '\r\n' not in show_ip
def test_disconnect():
    '''
    Terminate the SSH session (must run last: it tears down the shared
    module-level connection).
    '''
    net_connect.disconnect()
| apache-2.0 |
nitinmeharia/django-rest-swagger | rest_framework_swagger/docgenerator.py | 5 | 13636 | """Generates API documentation by introspection."""
from django.contrib.auth.models import AnonymousUser
import rest_framework
from rest_framework import viewsets
from rest_framework.serializers import BaseSerializer
from .introspectors import (
APIViewIntrospector,
BaseMethodIntrospector,
IntrospectorHelper,
ViewSetIntrospector,
WrappedAPIViewIntrospector,
get_data_type,
get_default_value,
)
from .compat import OrderedDict
class DocumentationGenerator(object):
# Serializers defined in docstrings
explicit_serializers = set()
# Serializers defined in fields
fields_serializers = set()
# Response classes defined in docstrings
explicit_response_types = dict()
def __init__(self, for_user=None):
self.user = for_user or AnonymousUser()
def generate(self, apis):
"""
Returns documentation for a list of APIs
"""
api_docs = []
for api in apis:
api_docs.append({
'description': IntrospectorHelper.get_summary(api['callback']),
'path': api['path'],
'operations': self.get_operations(api, apis),
})
return api_docs
def get_introspector(self, api, apis):
path = api['path']
pattern = api['pattern']
callback = api['callback']
if callback.__module__ == 'rest_framework.decorators':
return WrappedAPIViewIntrospector(callback, path, pattern, self.user)
elif issubclass(callback, viewsets.ViewSetMixin):
patterns = [a['pattern'] for a in apis
if a['callback'] == callback]
return ViewSetIntrospector(callback, path, pattern, self.user, patterns=patterns)
else:
return APIViewIntrospector(callback, path, pattern, self.user)
def get_operations(self, api, apis=None):
"""
Returns docs for the allowed methods of an API endpoint
"""
if apis is None:
apis = [api]
operations = []
introspector = self.get_introspector(api, apis)
for method_introspector in introspector:
if not isinstance(method_introspector, BaseMethodIntrospector) or \
method_introspector.get_http_method() == "OPTIONS":
continue # No one cares. I impose JSON.
doc_parser = method_introspector.get_yaml_parser()
serializer = self._get_method_serializer(method_introspector)
response_type = self._get_method_response_type(
doc_parser, serializer, introspector, method_introspector)
operation = {
'method': method_introspector.get_http_method(),
'summary': method_introspector.get_summary(),
'nickname': method_introspector.get_nickname(),
'notes': method_introspector.get_notes(),
'type': response_type,
}
if doc_parser.yaml_error is not None:
operation['notes'] += "<pre>YAMLError:\n {err}</pre>".format(
err=doc_parser.yaml_error)
response_messages = doc_parser.get_response_messages()
parameters = doc_parser.discover_parameters(
inspector=method_introspector)
operation['parameters'] = parameters or []
if response_messages:
operation['responseMessages'] = response_messages
# operation.consumes
consumes = doc_parser.get_consumes()
if consumes:
operation['consumes'] = consumes
# operation.produces
produces = doc_parser.get_produces()
if produces:
operation['produces'] = produces
operations.append(operation)
return operations
def get_models(self, apis):
    """
    Builds a list of Swagger 'models'. These represent
    DRF serializers and their fields
    """
    serializers = self._get_serializer_set(apis)
    serializers.update(self.explicit_serializers)
    serializers.update(self._find_field_serializers(serializers))

    models = {}
    for serializer in serializers:
        data = self._get_serializer_fields(serializer)
        serializer_name = IntrospectorHelper.get_serializer_name(serializer)
        # Two model variants are registered per serializer: DRF does not
        # output write_only fields in responses, nor does it require
        # read_only fields in complex input, so the read and write
        # property subsets differ.
        write_name = "Write{serializer}".format(serializer=serializer_name)
        writable = OrderedDict(
            (key, value) for key, value in data['fields'].items()
            if key not in data['read_only'])
        models[write_name] = {
            'id': write_name,
            'required': [f for f in data['required'] if f in writable.keys()],
            'properties': writable,
        }

        # Reading variant keeps the plain serializer name and drops the
        # write_only fields.
        readable = OrderedDict(
            (key, value) for key, value in data['fields'].items()
            if key not in data['write_only'])
        models[serializer_name] = {
            'id': serializer_name,
            'required': list(readable.keys()),
            'properties': readable,
        }

    models.update(self.explicit_response_types)
    models.update(self.fields_serializers)
    return models
def _get_method_serializer(self, method_inspector):
    """
    Return the serializer class used by the inspected method.

    The docstring (YAML) may override the discovered serializer: a
    custom response type or an explicit omission both yield None.
    """
    doc_parser = method_inspector.get_yaml_parser()
    # A custom response class declared in the docstring wins.
    if doc_parser.get_response_type() is not None:
        return None
    if doc_parser.should_omit_serializer():
        return None
    return method_inspector.get_response_serializer_class()
def _get_method_response_type(self, doc_parser, serializer,
                              view_inspector, method_inspector):
    """
    Return the Swagger response type for a method.

    This is either a custom `type` from the docstring (which gets
    registered in ``self.explicit_response_types`` under a name derived
    from the view and method) or the discovered serializer class name,
    falling back to 'object'.
    """
    response_type = doc_parser.get_response_type()
    if response_type is None:
        serializer_name = IntrospectorHelper.get_serializer_name(serializer)
        return serializer_name if serializer_name is not None else 'object'

    # Build a readable model name from the view class, stripping the
    # usual DRF suffixes in order.
    view_name = view_inspector.callback.__name__
    for suffix in ('ViewSet', 'APIView', 'View'):
        view_name = view_name.replace(suffix, '')
    response_type_name = "{view}{method}Response".format(
        view=view_name,
        method=method_inspector.method.title().replace('_', '')
    )
    # Register the custom type in scope so it appears among the models.
    self.explicit_response_types.update({
        response_type_name: {
            "id": response_type_name,
            "properties": response_type
        }
    })
    return response_type_name
def _get_serializer_set(self, apis):
    """
    Collect the set of serializer classes used by a list of APIs,
    including any extra serializers declared on individual methods.
    """
    serializers = set()
    for api in apis:
        introspector = self.get_introspector(api, apis)
        for method_introspector in introspector:
            serializer = self._get_method_serializer(method_introspector)
            if serializer is not None:
                serializers.add(serializer)
            extras = method_introspector.get_extra_serializer_classes()
            serializers.update(
                extra for extra in extras if extra is not None)
    return serializers
def _find_field_serializers(self, serializers, found_serializers=None):
    """
    Return the set of serializers discovered from serializer fields.

    Recurses into nested serializers; *found_serializers* carries the
    serializers already seen to avoid infinite recursion on cycles.

    :param serializers: iterable of serializer classes to inspect.
    :param found_serializers: optional set of already-visited fields.
    """
    # BUG FIX: the default used to be a shared mutable ``set()`` literal,
    # which persists across calls.  Use a None sentinel instead; passing
    # an explicit set still works as before.
    if found_serializers is None:
        found_serializers = set()

    def get_thing(field, key):
        # Under DRF 3.x a many=True serializer is wrapped in a
        # ListSerializer; unwrap to reach the child serializer.
        if rest_framework.VERSION >= '3.0.0':
            from rest_framework.serializers import ListSerializer
            if isinstance(field, ListSerializer):
                return key(field.child)
        return key(field)

    serializers_set = set()
    for serializer in serializers:
        fields = serializer().get_fields()
        for name, field in fields.items():
            if isinstance(field, BaseSerializer):
                serializers_set.add(get_thing(field, lambda f: f))
                if field not in found_serializers:
                    serializers_set.update(
                        self._find_field_serializers(
                            (get_thing(field, lambda f: f.__class__),),
                            serializers_set))
    return serializers_set
def _get_serializer_fields(self, serializer):
    """
    Return serializer fields in the Swagger MODEL format.

    :param serializer: a serializer class or instance (or None).
    :returns: ``OrderedDict`` with keys:
        - 'fields': field name -> Swagger property dict
        - 'required' / 'write_only' / 'read_only': lists of field names
        or None when *serializer* is None.
    """
    if serializer is None:
        return
    # Accept both serializer classes and pre-built instances.
    if hasattr(serializer, '__call__'):
        fields = serializer().get_fields()
    else:
        fields = serializer.get_fields()
    data = OrderedDict({
        'fields': OrderedDict(),
        'required': [],
        'write_only': [],
        'read_only': [],
    })
    for name, field in fields.items():
        if getattr(field, 'write_only', False):
            data['write_only'].append(name)
        if getattr(field, 'read_only', False):
            data['read_only'].append(name)
        if getattr(field, 'required', False):
            data['required'].append(name)

        data_type, data_format = get_data_type(field) or ('string', 'string')
        if data_type == 'hidden':
            continue

        # Empty/whitespace help_text is normalized to None so the key is
        # rendered as absent rather than as an empty string.
        description = getattr(field, 'help_text', '')
        if not description or description.strip() == '':
            description = None
        f = {
            'description': description,
            'type': data_type,
            'format': data_format,
            'required': getattr(field, 'required', False),
            'defaultValue': get_default_value(field),
            'readOnly': getattr(field, 'read_only', None),
        }

        # Swagger type is a primitive, format is more specific
        if f['type'] == f['format']:
            del f['format']

        # defaultValue of null is not allowed, it is specific to type
        if f['defaultValue'] is None:
            del f['defaultValue']

        # Min/Max values.
        # BUG FIX: 'minimum' used to be gated on ``max_value is not None``,
        # which silently dropped min-only constraints and emitted
        # ``'minimum': None`` for max-only fields.  Each bound is now
        # gated on its own value.
        max_value = getattr(field, 'max_value', None)
        min_value = getattr(field, 'min_value', None)
        if min_value is not None and data_type == 'integer':
            f['minimum'] = min_value
        if max_value is not None and data_type == 'integer':
            f['maximum'] = max_value

        # ENUM options: choices may come as a list of (value, label)
        # pairs or as a mapping of value -> label.
        if data_type in BaseMethodIntrospector.ENUMS:
            if isinstance(field.choices, list):
                f['enum'] = [k for k, v in field.choices]
            elif isinstance(field.choices, dict):
                f['enum'] = [k for k, v in field.choices.items()]

        # Support for complex (nested / many) types.
        if rest_framework.VERSION < '3.0.0':
            has_many = hasattr(field, 'many') and field.many
        else:
            from rest_framework.serializers import ListSerializer, ManyRelatedField
            has_many = isinstance(field, (ListSerializer, ManyRelatedField))

        if isinstance(field, BaseSerializer) or has_many:
            if isinstance(field, BaseSerializer):
                field_serializer = IntrospectorHelper.get_serializer_name(field)
                # Nested write-only serializers reference the Write model.
                if getattr(field, 'write_only', False):
                    field_serializer = "Write{}".format(field_serializer)
                f['type'] = field_serializer
            else:
                field_serializer = None
                data_type = 'string'
            if has_many:
                f['type'] = 'array'
                if field_serializer:
                    f['items'] = {'$ref': field_serializer}
                elif data_type in BaseMethodIntrospector.PRIMITIVES:
                    f['items'] = {'type': data_type}

        # memorize discovered field
        data['fields'][name] = f
    return data
| bsd-2-clause |
songmonit/CTTMSONLINE | addons/l10n_cl/__init__.py | 2120 | 1456 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Cubic ERP - Teradata SAC. (http://cubicerp.com).
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lnielsen/invenio | invenio/ext/sqlalchemy/types/legacymediuminteger.py | 2 | 1845 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011, 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Platform-independent MediumInteger type."""
from sqlalchemy.types import Integer, TypeDecorator
from sqlalchemy.dialects.mysql import MEDIUMINT
class LegacyMediumInteger(TypeDecorator):
    """Platform-independent MediumInteger type.

    On MySQL this maps to the native
    :class:`~sqlalchemy.dialects.mysql.MEDIUMINT` column type; every
    other dialect falls back to the generic
    :class:`~sqlalchemy.types.Integer`.
    """

    impl = Integer

    def __init__(self, display_width=9, unsigned=False, **kwargs):
        """Reserve special arguments only for MySQL Platform."""
        self.display_width = display_width
        self.unsigned = unsigned
        super(LegacyMediumInteger, self).__init__(**kwargs)

    def load_dialect_impl(self, dialect):
        """Load dialect dependent implementation."""
        if dialect.name == 'mysql':
            mysql_type = MEDIUMINT(self.display_width,
                                   unsigned=self.unsigned)
            return dialect.type_descriptor(mysql_type)
        return dialect.type_descriptor(Integer)
| gpl-2.0 |
ykaneko/neutron | neutron/openstack/common/db/exception.py | 13 | 1629 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""DB related custom exceptions."""
from neutron.openstack.common.gettextutils import _
class DBError(Exception):
    """Wraps an implementation specific exception.

    The original exception is kept on ``inner_exception`` for callers
    that need the driver-level details.
    """

    def __init__(self, inner_exception=None):
        self.inner_exception = inner_exception
        super(DBError, self).__init__(str(inner_exception))


class DBDuplicateEntry(DBError):
    """Wraps an implementation specific duplicate-entry exception.

    :param columns: names of the columns that violated the unique
        constraint (defaults to an empty list).
    """

    def __init__(self, columns=None, inner_exception=None):
        # BUG FIX: the default used to be a mutable ``[]`` literal shared
        # across every instance, so mutating one exception's ``columns``
        # leaked into all later ones.  Use a None sentinel instead;
        # passing an explicit list behaves as before.
        self.columns = columns if columns is not None else []
        super(DBDuplicateEntry, self).__init__(inner_exception)


class DBDeadlock(DBError):
    """Wraps an implementation specific deadlock exception."""

    def __init__(self, inner_exception=None):
        super(DBDeadlock, self).__init__(inner_exception)
class DBInvalidUnicodeParameter(Exception):
    """Raised when a unicode parameter is passed to a database that
    does not support unicode (per the message below)."""

    # `_` is the gettext translation marker imported at module level.
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.