data/Theano/Theano/theano/gof/cutils.py
from __future__ import absolute_import, print_function, division
import errno
import os
import sys
from theano.compat import PY3
from theano.gof.compilelock import get_lock, release_lock
from theano import config
from . import cmodule
if os.path.exists(os.path.join(config.compiledir, 'cutils_ext.so')):
os.remove(os.path.join(config.compiledir, 'cutils_ext.so'))
def compile_cutils_code():
types = ['npy_' + t for t in ['int8', 'int16', 'int32', 'int64', 'int128',
'int256', 'uint8', 'uint16', 'uint32',
'uint64', 'uint128', 'uint256',
'float16', 'float32', 'float64',
'float80', 'float96', 'float128',
'float256']]
complex_types = ['npy_' + t for t in ['complex32', 'complex64',
'complex128', 'complex160',
'complex192', 'complex512']]
    inplace_map_template = """
    #if defined(%(typen)s)
    static void %(type)s_inplace_add(PyArrayMapIterObject *mit,
                                     PyArrayIterObject *it, int inc_or_set)
    {
        int index = mit->size;
        while (index--) {
            %(op)s
            PyArray_MapIterNext(mit);
            PyArray_ITER_NEXT(it);
        }
    }
    #endif
    """
floatadd = ("((%(type)s*)mit->dataptr)[0] = "
"(inc_or_set ? ((%(type)s*)mit->dataptr)[0] : 0)"
" + ((%(type)s*)it->dataptr)[0];")
complexadd = """
((%(type)s*)mit->dataptr)[0].real =
(inc_or_set ? ((%(type)s*)mit->dataptr)[0].real : 0)
+ ((%(type)s*)it->dataptr)[0].real;
((%(type)s*)mit->dataptr)[0].imag =
(inc_or_set ? ((%(type)s*)mit->dataptr)[0].imag : 0)
+ ((%(type)s*)it->dataptr)[0].imag;
"""
fns = ''.join([inplace_map_template % {'type': t, 'typen': t.upper(),
'op': floatadd % {'type': t}}
for t in types] +
[inplace_map_template % {'type': t, 'typen': t.upper(),
'op': complexadd % {'type': t}}
for t in complex_types])
    def gen_binop(type, typen):
        return """
    #if defined(%(typen)s)
    %(type)s_inplace_add,
    #endif
    """ % dict(type=type, typen=typen)
fn_array = ("static inplace_map_binop addition_funcs[] = {" +
''.join([gen_binop(type=t, typen=t.upper())
for t in types + complex_types]) + "NULL};\n")
    def gen_num(typen):
        return """
    #if defined(%(typen)s)
    %(typen)s,
    #endif
    """ % dict(typen=typen)
type_number_array = ("static int type_numbers[] = {" +
''.join([gen_num(typen=t.upper())
for t in types + complex_types]) + "-1000};")
code = ("""
typedef void (*inplace_map_binop)(PyArrayMapIterObject *,
PyArrayIterObject *, int inc_or_set);
""" + fns + fn_array + type_number_array + """
static int
map_increment(PyArrayMapIterObject *mit, PyObject *op,
inplace_map_binop add_inplace, int inc_or_set)
{
PyArrayObject *arr = NULL;
PyArrayIterObject *it;
PyArray_Descr *descr;
if (mit->ait == NULL) {
return -1;
}
descr = PyArray_DESCR(mit->ait->ao);
Py_INCREF(descr);
arr = (PyArrayObject *)PyArray_FromAny(op, descr,
0, 0, NPY_ARRAY_FORCECAST, NULL);
if (arr == NULL) {
return -1;
}
if ((mit->subspace != NULL) && (mit->consec)) {
PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);
if (arr == NULL) {
return -1;
}
}
it = (PyArrayIterObject*)
PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);
if (it == NULL) {
Py_DECREF(arr);
return -1;
}
(*add_inplace)(mit, it, inc_or_set);
Py_DECREF(arr);
Py_DECREF(it);
return 0;
}
static PyObject *
inplace_increment(PyObject *dummy, PyObject *args)
{
PyObject *arg_a = NULL, *index=NULL, *inc=NULL;
int inc_or_set = 1;
PyArrayObject *a;
inplace_map_binop add_inplace = NULL;
int type_number = -1;
int i = 0;
PyArrayMapIterObject * mit;
if (!PyArg_ParseTuple(args, "OOO|i", &arg_a, &index,
&inc, &inc_or_set)) {
return NULL;
}
if (!PyArray_Check(arg_a)) {
PyErr_SetString(PyExc_ValueError,
"needs an ndarray as first argument");
return NULL;
}
a = (PyArrayObject *) arg_a;
if (PyArray_FailUnlessWriteable(a, "input/output array") < 0) {
return NULL;
}
if (PyArray_NDIM(a) == 0) {
PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed.");
return NULL;
}
type_number = PyArray_TYPE(a);
while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){
if (type_number == type_numbers[i]) {
add_inplace = addition_funcs[i];
break;
}
i++ ;
}
if (add_inplace == NULL) {
PyErr_SetString(PyExc_TypeError, "unsupported type for a");
return NULL;
}
mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);
if (mit == NULL) {
goto fail;
}
if (map_increment(mit, inc, add_inplace, inc_or_set) != 0) {
goto fail;
}
Py_DECREF(mit);
Py_INCREF(Py_None);
return Py_None;
fail:
Py_XDECREF(mit);
return NULL;
}
""")
return code
def compile_cutils():
"""
Do just the compilation of cutils_ext.
"""
code = ("""
extern "C"{
static PyObject *
run_cthunk(PyObject *self, PyObject *args)
{
PyObject *py_cthunk = NULL;
if(!PyArg_ParseTuple(args,"O",&py_cthunk))
return NULL;
if (!PyCObject_Check(py_cthunk)) {
PyErr_SetString(PyExc_ValueError,
"Argument to run_cthunk must be a PyCObject.");
return NULL;
}
void * ptr_addr = PyCObject_AsVoidPtr(py_cthunk);
int (*fn)(void*) = (int (*)(void*))(ptr_addr);
void* it = PyCObject_GetDesc(py_cthunk);
int failure = fn(it);
return Py_BuildValue("i", failure);
}""")
code += compile_cutils_code()
code += ("""static PyMethodDef CutilsExtMethods[] = {
{"run_cthunk", run_cthunk, METH_VARARGS|METH_KEYWORDS,
"Run a theano cthunk."},
{"inplace_increment", inplace_increment,
METH_VARARGS,
"increments a numpy array inplace at the passed indexes."},
{NULL, NULL, 0, NULL} /* Sentinel */
};""")
if PY3:
code = code.replace("<Python.h>", '"numpy/npy_3kcompat.h"', 1)
code = code.replace("PyCObject", "NpyCapsule")
code += """
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"cutils_ext",
NULL,
-1,
CutilsExtMethods,
};
PyMODINIT_FUNC
PyInit_cutils_ext(void) {
import_array();
return PyModule_Create(&moduledef);
}
}
"""
else:
code += """
PyMODINIT_FUNC
initcutils_ext(void)
{
import_array();
(void) Py_InitModule("cutils_ext", CutilsExtMethods);
}
} //extern C
"""
loc = os.path.join(config.compiledir, 'cutils_ext')
if not os.path.exists(loc):
try:
os.mkdir(loc)
except OSError as e:
assert e.errno == errno.EEXIST
assert os.path.exists(loc), loc
args = cmodule.GCC_compiler.compile_args()
cmodule.GCC_compiler.compile_str('cutils_ext', code, location=loc,
preargs=args)
try:
sys.path.insert(0, config.compiledir)
location = os.path.join(config.compiledir, 'cutils_ext')
if not os.path.exists(location):
try:
os.mkdir(location)
except OSError as e:
assert e.errno == errno.EEXIST
assert os.path.exists(location), location
if not os.path.exists(os.path.join(location, '__init__.py')):
open(os.path.join(location, '__init__.py'), 'w').close()
try:
from cutils_ext.cutils_ext import *
except ImportError:
get_lock()
try:
try:
from cutils_ext.cutils_ext import *
except ImportError:
compile_cutils()
from cutils_ext.cutils_ext import *
finally:
release_lock()
finally:
if sys.path[0] == config.compiledir:
del sys.path[0]
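# Illustrative usage sketch (not part of the original module): once the
# cutils_ext extension has been built, inplace_increment behaves like an
# accumulating fancy-index add, so repeated indices are applied repeatedly,
# unlike plain `a[idx] += v`:
#
#   import numpy as np
#   from cutils_ext.cutils_ext import inplace_increment
#   a = np.zeros(3)
#   inplace_increment(a, np.array([0, 0, 2]), 1.0)
#   # a is now [2., 0., 1.]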
data/IndicoDataSolutions/Passage/examples/load.py
import os
import numpy as np
def load_gender_data(ntrain=10000, ntest=10000):
import pandas as pd
file_loc = os.path.dirname(os.path.realpath(__file__))
relative_path = "blogger_data_2.csv"
fullpath = os.path.join(file_loc, relative_path)
data = pd.read_csv(fullpath, nrows=ntrain+ntest)
X = data['text'].values
X = [str(x) for x in X]
Y = data['gender'].values
trX = X[:-ntest]
teX = X[-ntest:]
trY = Y[:-ntest]
teY = Y[-ntest:]
return trX, teX, trY, teY
def load_mnist(data_dir=None):
if data_dir is None:
        try:
            from urllib.request import urlretrieve  # Python 3
        except ImportError:
            from urllib import urlretrieve  # Python 2
import gzip
url = 'http://yann.lecun.com/exdb/mnist/'
fnames = [
'train-images-idx3-ubyte.gz',
'train-labels-idx1-ubyte.gz',
't10k-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz'
]
for fname in fnames:
if not os.path.isfile(fname):
                print('data_dir not given and file not local - downloading mnist file: %s' % fname)
                urlretrieve(url + fname, fname)
data_dir = ''
fd = gzip.open(os.path.join(data_dir,'train-images-idx3-ubyte.gz'))
loaded = np.fromstring(fd.read(), dtype=np.uint8)
trX = loaded[16:].reshape((60000, -1))
fd = gzip.open(os.path.join(data_dir,'train-labels-idx1-ubyte.gz'))
loaded = np.fromstring(fd.read(), dtype=np.uint8)
trY = loaded[8:].reshape((60000))
fd = gzip.open(os.path.join(data_dir,'t10k-images-idx3-ubyte.gz'))
loaded = np.fromstring(fd.read(), dtype=np.uint8)
teX = loaded[16:].reshape((10000, -1))
fd = gzip.open(os.path.join(data_dir,'t10k-labels-idx1-ubyte.gz'))
loaded = np.fromstring(fd.read(), dtype=np.uint8)
teY = loaded[8:].reshape((10000))
trX = trX/255.
teX = teX/255.
trX = trX.reshape(-1, 28, 28)
teX = teX.reshape(-1, 28, 28)
return trX, teX, trY, teY
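# Usage sketch (illustrative): both loaders return (trX, teX, trY, teY).
#
#   trX, teX, trY, teY = load_mnist()  # downloads the files if data_dir is None
#   trX.shape   # (60000, 28, 28), floats scaled to [0, 1]
#   trX2, teX2, trY2, teY2 = load_gender_data(ntrain=1000, ntest=1000)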
data/Theano/Theano/theano/sandbox/rng_mrg.py
"""
Implementation of MRG31k3p random number generator for Theano.
Generator code in SSJ package (L'Ecuyer & Simard).
http://www.iro.umontreal.ca/~simardr/ssj/indexe.html
"""
from __future__ import absolute_import, print_function, division
import warnings
import numpy
from six import integer_types
from six.moves import xrange
from theano import Op, Apply, shared, config, Variable
from theano import gradient, function
from theano import tensor
from theano.tensor import (TensorType, as_tensor_variable, get_vector_length,
cast, opt, scal)
from theano.tensor import sqrt, log, sin, cos, join, prod
from theano.compile import optdb
from theano.gof import local_optimizer
from . import multinomial
import theano.sandbox.cuda
from theano.sandbox.cuda import GpuOp
from theano.sandbox.gpuarray.basic_ops import GpuKernelBase, Kernel
from theano.sandbox.gpuarray.type import GpuArrayType
from theano.sandbox.gpuarray.fp16_help import write_w
from theano.sandbox.gpuarray.opt import (register_opt as register_gpua,
host_from_gpu as host_from_gpua)
if theano.sandbox.cuda.cuda_available:
from theano.sandbox.cuda import (CudaNdarrayType,
float32_shared_constructor)
def matVecModM(A, s, m):
assert A.dtype == 'int64'
return numpy.int32(numpy.sum((A * s) % m, 1) % m)
def multMatVect(v, A, m1, B, m2):
"""
Multiply the first half of v by A with a modulo of m1 and the second half
by B with a modulo of m2.
Notes
-----
The parameters of dot_modulo are passed implicitly because passing them
explicitly takes more time than running the function's C-code.
"""
if multMatVect.dot_modulo is None:
A_sym = tensor.lmatrix('A')
s_sym = tensor.ivector('s')
m_sym = tensor.iscalar('m')
A2_sym = tensor.lmatrix('A2')
s2_sym = tensor.ivector('s2')
m2_sym = tensor.iscalar('m2')
o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
multMatVect.dot_modulo = function(
[A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o, profile=False)
f = multMatVect.dot_modulo
f.input_storage[0].storage[0] = A
f.input_storage[1].storage[0] = v[:3]
f.input_storage[2].storage[0] = m1
f.input_storage[3].storage[0] = B
f.input_storage[4].storage[0] = v[3:]
f.input_storage[5].storage[0] = m2
f.fn()
r = f.output_storage[0].storage[0]
return r
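# For reference, multMatVect is equivalent to this NumPy sketch; the cached
# DotModulo function exists only because the compiled op is faster on
# repeated calls (see the docstring above):
#
#   def mult_mat_vect_numpy(v, A, m1, B, m2):
#       return numpy.concatenate((matVecModM(A, v[:3], m1),
#                                 matVecModM(B, v[3:], m2)))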
multMatVect.dot_modulo = None
class DotModulo(Op):
"""
Efficient and numerically stable implementation of a dot product followed
by a modulo operation. This performs the same function as matVecModM.
    We do this twice, on two triples of inputs, and concatenate the outputs.
"""
__props__ = ()
def make_node(self, A, s, m, A2, s2, m2):
return Apply(self, [A, s, m, A2, s2, m2], [s.type()])
def perform(self, node, inputs, outputs):
(A, s, m, A2, s2, m2) = inputs
(out,) = outputs
o1 = matVecModM(A, s, m)
o2 = matVecModM(A2, s2, m2)
out[0] = numpy.concatenate((o1, o2))
def c_code_cache_version(self):
return (6,)
def c_code(self, node, name, inputs, outputs, sub):
(_A, _s, _m, _A2, _s2, _m2) = inputs
(_z,) = outputs
return """
int osize = -1;
if (PyArray_NDIM(%(_A)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(A) != 2"); %(fail)s;}
if (PyArray_NDIM(%(_s)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(v) != 1"); %(fail)s;}
if (PyArray_NDIM(%(_m)s) != 0) {PyErr_SetString(PyExc_NotImplementedError, "rank(m) != 0"); %(fail)s;}
if (PyArray_NDIM(%(_A2)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(A2) != 2"); %(fail)s;}
if (PyArray_NDIM(%(_s2)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(v2) != 1"); %(fail)s;}
if (PyArray_NDIM(%(_m2)s) != 0) {PyErr_SetString(PyExc_NotImplementedError, "rank(m2) != 0"); %(fail)s;}
if( PyArray_DIMS(%(_A)s)[1] != PyArray_DIMS(%(_s)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "A and s shapes don't agree."); %(fail)s;}
if( PyArray_DIMS(%(_A2)s)[1] != PyArray_DIMS(%(_s2)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "A2 and s2 shapes don't agree."); %(fail)s;}
osize = PyArray_DIMS(%(_A)s)[0] + PyArray_DIMS(%(_A2)s)[0];
if (!%(_z)s
|| (PyArray_DIMS(%(_z)s)[0] != osize))
{
{Py_XDECREF(%(_z)s);}
npy_intp dims[] = {0,};
dims[0] = osize;
%(_z)s = (PyArrayObject*) PyArray_SimpleNew(1, dims, PyArray_TYPE(%(_s)s));
}
if(!%(_z)s){%(fail)s;}
{ //makes it compile even though labels jump over variable definitions.
// A has size MxN, s has N, output M
npy_intp M = PyArray_DIMS(%(_A)s)[0];
npy_intp N = PyArray_DIMS(%(_A)s)[1];
const dtype_%(_A)s* __restrict__ DA = (dtype_%(_A)s*)PyArray_DATA(%(_A)s);
dtype_%(_s)s* __restrict__ Ds = (dtype_%(_s)s*)PyArray_DATA(%(_s)s);
dtype_%(_z)s* __restrict__ Dz = (dtype_%(_z)s*)PyArray_DATA(%(_z)s);
const dtype_%(_m)s m = ((dtype_%(_m)s*)PyArray_DATA(%(_m)s))[0];
npy_intp SA = PyArray_STRIDES(%(_A)s)[1] / PyArray_DESCR(%(_A)s)->elsize;
npy_intp Ss = PyArray_STRIDES(%(_s)s)[0] / PyArray_DESCR(%(_s)s)->elsize;
npy_intp Sz = PyArray_STRIDES(%(_z)s)[0] / PyArray_DESCR(%(_z)s)->elsize;
for (npy_int32 i = 0; i < M; ++i)
{
const dtype_%(_A)s* __restrict__ Ak = (dtype_%(_A)s*)(PyArray_BYTES(%(_A)s) + PyArray_STRIDES(%(_A)s)[0] * i);
npy_int64 r = 0;
for (npy_int32 j = 0; j < N; ++j)
{
r += (npy_int64)(Ds[j * Ss] * (npy_int64)(Ak[j * SA])) %% m;
}
Dz[i * Sz] = r %% m;
}
}
//redo it with the second triple of inputs
{
// A has size MxN, s has N, output M
npy_intp M = PyArray_DIMS(%(_A2)s)[0];
npy_intp N = PyArray_DIMS(%(_A2)s)[1];
const dtype_%(_A2)s* __restrict__ DA = (dtype_%(_A2)s*)PyArray_DATA(%(_A2)s);
dtype_%(_s2)s* __restrict__ Ds = (dtype_%(_s2)s*)PyArray_DATA(%(_s2)s);
const dtype_%(_m2)s m = ((dtype_%(_m2)s*)PyArray_DATA(%(_m2)s))[0];
npy_intp SA = PyArray_STRIDES(%(_A2)s)[1] / PyArray_DESCR(%(_A2)s)->elsize;
npy_intp Ss = PyArray_STRIDES(%(_s2)s)[0] / PyArray_DESCR(%(_s2)s)->elsize;
npy_intp Sz = PyArray_STRIDES(%(_z)s)[0] / PyArray_DESCR(%(_z)s)->elsize;
dtype_%(_z)s* __restrict__ Dz = (dtype_%(_z)s*)PyArray_DATA(%(_z)s) + PyArray_DIMS(%(_A)s)[0] * Sz;
for (npy_int32 i = 0; i < M; ++i)
{
const dtype_%(_A2)s* __restrict__ Ak = (dtype_%(_A2)s*)(PyArray_BYTES(%(_A2)s) + PyArray_STRIDES(%(_A2)s)[0] * i);
npy_int64 r = 0;
for (npy_int32 j = 0; j < N; ++j)
{
r += (npy_int64)(Ds[j * Ss] * (npy_int64)(Ak[j * SA])) %% m;
}
Dz[i * Sz] = r %% m;
}
}
""" % dict(locals(), **sub)
M1 = numpy.asarray(numpy.int32(2147483647))
M2 = numpy.asarray(numpy.int32(2147462579))
MASK12 = numpy.int32(511)
MASK13 = numpy.int32(16777215)
MASK2 = numpy.int32(65535)
MULT2 = numpy.int32(21069)
NORM = 4.656612873077392578125e-10
A1p72 = numpy.asarray([[1516919229, 758510237, 499121365],
[1884998244, 1516919229, 335398200],
[601897748, 1884998244, 358115744]],
dtype='int64')
A2p72 = numpy.asarray([[1228857673, 1496414766, 954677935],
[1133297478, 1407477216, 1496414766],
[2002613992, 1639496704, 1407477216]],
dtype='int64')
A1p134 = numpy.asarray(
[[1702500920, 1849582496, 1656874625],
[828554832, 1702500920, 1512419905],
[1143731069, 828554832, 102237247]],
dtype='int64')
A2p134 = numpy.asarray(
[[796789021, 1464208080, 607337906],
[1241679051, 1431130166, 1464208080],
[1401213391, 1178684362, 1431130166]],
dtype='int64')
np_int32_vals = [numpy.int32(i) for i in (0, 7, 9, 15, 16, 22, 24)]
def ff_2p134(rstate):
return multMatVect(rstate, A1p134, M1, A2p134, M2)
def ff_2p72(rstate):
return multMatVect(rstate, A1p72, M1, A2p72, M2)
def mrg_next_value(rstate, new_rstate):
x11, x12, x13, x21, x22, x23 = rstate
assert type(x11) == numpy.int32
i0, i7, i9, i15, i16, i22, i24 = np_int32_vals
y1 = (((x12 & MASK12) << i22) + (x12 >> i9) +
((x13 & MASK13) << i7) + (x13 >> i24))
assert type(y1) == numpy.int32
if (y1 < 0 or y1 >= M1):
y1 -= M1
y1 += x13
if (y1 < 0 or y1 >= M1):
y1 -= M1
x13 = x12
x12 = x11
x11 = y1
y1 = ((x21 & MASK2) << i15) + (MULT2 * (x21 >> i16))
assert type(y1) == numpy.int32
if (y1 < 0 or y1 >= M2):
y1 -= M2
y2 = ((x23 & MASK2) << i15) + (MULT2 * (x23 >> i16))
assert type(y2) == numpy.int32
if (y2 < 0 or y2 >= M2):
y2 -= M2
y2 += x23
if (y2 < 0 or y2 >= M2):
y2 -= M2
y2 += y1
if (y2 < 0 or y2 >= M2):
y2 -= M2
x23 = x22
x22 = x21
x21 = y2
new_rstate[...] = [x11, x12, x13, x21, x22, x23]
assert new_rstate.dtype == numpy.int32
if (x11 <= x21):
return (x11 - x21 + M1) * NORM
else:
return (x11 - x21) * NORM
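# The shift-and-mask arithmetic above implements the MRG31k3p recurrences
# (L'Ecuyer & Touzin):
#   x1_n = (2**22 * x1_{n-2} + (2**7 + 1) * x1_{n-3}) % (2**31 - 1)
#   x2_n = (2**15 * x2_{n-1} + (2**15 + 1) * x2_{n-3}) % (2**31 - 21069)
# and the returned sample is ((x1_n - x2_n) % M1) * NORM, a float in (0, 1].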
class mrg_uniform_base(Op):
__props__ = ("output_type", "inplace")
def __init__(self, output_type, inplace=False):
Op.__init__(self)
self.output_type = output_type
self.inplace = inplace
if inplace:
self.destroy_map = {0: [0]}
self.warned_numpy_version = False
def __str__(self):
if self.inplace:
s = "inplace"
else:
s = "no_inplace"
return self.__class__.__name__ + "{%s,%s}" % (self.output_type, s)
def make_node(self, rstate, size):
return Apply(self,
[rstate, size],
[rstate.type(), self.output_type()])
def grad(self, inputs, ograd):
return [gradient.grad_undefined(self, k, inp,
'No gradient defined through '
'random sampling op')
for k, inp in enumerate(inputs)]
def R_op(self, inputs, eval_points):
return [None for i in eval_points]
class mrg_uniform(mrg_uniform_base):
@classmethod
def new(cls, rstate, ndim, dtype, size):
v_size = as_tensor_variable(size)
if ndim is None:
ndim = get_vector_length(v_size)
op = cls(TensorType(dtype, (False,) * ndim))
return op(rstate, v_size)
def perform(self, node, inp, out):
rstate, size = inp
o_rstate, o_sample = out
n_elements = 1
for s in size:
n_elements *= s
if n_elements > M1:
raise ValueError("rng_mrg does not support more then (2**31 -1) samples")
rstate = numpy.asarray(rstate)
if not self.inplace:
rstate = rstate.copy()
n_streams, _ = rstate.shape
rval = numpy.zeros(n_elements, dtype=self.output_type.dtype)
err_orig = numpy.seterr(over='ignore')
try:
for i in xrange(n_elements):
sample = mrg_next_value(rstate[i % n_streams],
rstate[i % n_streams])
rval[i] = sample
finally:
numpy.seterr(**err_orig)
o_rstate[0] = node.outputs[0].type.filter(rstate)
o_sample[0] = node.outputs[1].type.filter(rval.reshape(size))
def c_code(self, node, name, inp, out, sub):
rstate, size = inp
assert isinstance(node.inputs[0].type, TensorType)
o_rstate, o_sample = out
if self.inplace:
o_rstate_requirement = (
'NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_ALIGNED')
else:
o_rstate_requirement = (
'NPY_ARRAY_ENSURECOPY|NPY_ARRAY_C_CONTIGUOUS|'
'NPY_ARRAY_ALIGNED')
ndim = self.output_type.ndim
o_type_num = numpy.asarray(0, dtype=self.output_type.dtype).dtype.num
fail = sub['fail']
if self.output_type.dtype == 'float32':
otype = 'float'
NORM = '4.6566126e-10f'
else:
otype = 'double'
NORM = '4.656612873077392578125e-10'
return """
//////// <code generated by mrg_uniform>
// The +1 is to avoid odims[0] which fails on windows
// We have to read size[i] as an int64, but odims has to be intp*
// for NumPy on 32-bit platforms.
npy_intp odims[%(ndim)s+1];
npy_int64 odims_i;
npy_int64 n_elements = 1;
int n_streams = 0;
int must_alloc_sample = ((NULL == %(o_sample)s)
|| (PyArray_NDIM(%(o_sample)s) != %(ndim)s)
|| !(PyArray_ISCONTIGUOUS(%(o_sample)s)));
%(otype)s * sample_data;
npy_int32 * state_data;
const npy_int32 i0 = 0;
const npy_int32 i7 = 7;
const npy_int32 i9 = 9;
const npy_int32 i15 = 15;
const npy_int32 i16 = 16;
const npy_int32 i22 = 22;
const npy_int32 i24 = 24;
const npy_int32 M1 = 2147483647; //2^31 - 1
const npy_int32 M2 = 2147462579; //2^31 - 21069
const npy_int32 MASK12 = 511; //2^9 - 1
const npy_int32 MASK13 = 16777215; //2^24 - 1
const npy_int32 MASK2 = 65535; //2^16 - 1
const npy_int32 MULT2 = 21069;
if (PyArray_NDIM(%(size)s) != 1)
{
PyErr_SetString(PyExc_ValueError, "size must be vector");
%(fail)s
}
if (PyArray_DIMS(%(size)s)[0] != %(ndim)s)
{
PyErr_Format(PyExc_ValueError, "size must have length %%i (not %%i)",
%(ndim)s, int(PyArray_DIMS(%(size)s)[0]));
%(fail)s
}
for (int i = 0; i < %(ndim)s; ++i)
{
odims_i = *(dtype_%(size)s *)PyArray_GETPTR1(%(size)s, i);
odims[i] = odims_i;
n_elements *= odims_i;
must_alloc_sample = must_alloc_sample || (PyArray_DIMS(%(o_sample)s)[i] != odims[i]);
//fprintf(stderr, "size %%i %%i\\n", i, (int)odims[i]);
//printf("%%li", n_elements);
}
//fprintf(stderr, "n_elements %%lld\\n", (long long)n_elements);
if (n_elements > M1)
{
PyErr_SetString(
PyExc_ValueError,
"rng_mrg cpu-implementation does not support more than (2**31 -1) samples");
%(fail)s
}
if (must_alloc_sample)
{
Py_XDECREF(%(o_sample)s);
%(o_sample)s = (PyArrayObject*)PyArray_SimpleNew(%(ndim)s, odims, %(o_type_num)s);
if(!%(o_sample)s) {
PyErr_SetString(PyExc_MemoryError, "failed to alloc mrg_uniform output");
%(fail)s
}
}
Py_XDECREF(%(o_rstate)s);
%(o_rstate)s = (PyArrayObject*)PyArray_FromAny(
(PyObject*)%(rstate)s,
NULL, 0, 0, %(o_rstate_requirement)s,NULL);
if (PyArray_NDIM(%(o_rstate)s) != 2)
{
PyErr_SetString(PyExc_ValueError, "rstate must be matrix");
%(fail)s
}
if (PyArray_DIMS(%(o_rstate)s)[1] != 6)
{
PyErr_Format(PyExc_ValueError, "rstate must have 6 columns");
%(fail)s
}
if (PyArray_DESCR(%(o_rstate)s)->type_num != NPY_INT32)
{
PyErr_SetString(PyExc_ValueError, "rstate must be int32");
%(fail)s
}
n_streams = PyArray_DIMS(%(o_rstate)s)[0];
sample_data = (%(otype)s *) PyArray_DATA(%(o_sample)s);
state_data = (npy_int32 *) PyArray_DATA(%(o_rstate)s);
for (int i = 0; i < n_elements; ++i)
{
npy_int32 * state_data_i = state_data + (i%%n_streams)*6;
npy_int32 y1, y2, x11, x12, x13, x21, x22, x23;
x11 = state_data_i[0];
x12 = state_data_i[1];
x13 = state_data_i[2];
x21 = state_data_i[3];
x22 = state_data_i[4];
x23 = state_data_i[5];
y1 = ((x12 & MASK12) << i22) + (x12 >> i9) + ((x13 & MASK13) << i7) + (x13 >> i24);
if ((y1 < 0 || y1 >= M1)) //must also check overflow
y1 -= M1;
y1 += x13;
if ((y1 < 0 or y1 >= M1))
y1 -= M1;
x13 = x12;
x12 = x11;
x11 = y1;
y1 = ((x21 & MASK2) << i15) + (MULT2 * (x21 >> i16));
if (y1 < 0 || y1 >= M2)
y1 -= M2;
y2 = ((x23 & MASK2) << i15) + (MULT2 * (x23 >> i16));
if (y2 < 0 || y2 >= M2)
y2 -= M2;
y2 += x23;
if (y2 < 0 || y2 >= M2)
y2 -= M2;
y2 += y1;
if (y2 < 0 or y2 >= M2)
y2 -= M2;
x23 = x22;
x22 = x21;
x21 = y2;
if (x11 <= x21) {
assert((x11 - x21 + M1) <= M1);
sample_data[i] = (x11 - x21 + M1) * %(NORM)s;
}
else
{
assert(x11 - x21 <= M1);
sample_data[i] = (x11 - x21) * %(NORM)s;
}
state_data_i[0]= x11;
state_data_i[1]= x12;
state_data_i[2]= x13;
state_data_i[3]= x21;
state_data_i[4]= x22;
state_data_i[5]= x23;
}
//////// </ code generated by mrg_uniform>
""" % locals()
def c_code_cache_version(self):
return (8, )
class GPU_mrg_uniform(mrg_uniform_base, GpuOp):
@classmethod
def new(cls, rstate, ndim, dtype, size):
v_size = as_tensor_variable(size)
if ndim is None:
ndim = get_vector_length(v_size)
op = cls(CudaNdarrayType((False,) * ndim))
return op(rstate, v_size)
def c_support_code_apply(self, node, nodename):
if self.output_type.dtype == 'float32':
otype = 'float'
NORM = '4.6566126e-10f'
else:
otype = 'double'
NORM = '4.656612873077392578125e-10'
return """
        // FB: I disabled the printing of this warning, as we received
        // too much email about it and it doesn't help people. I'm not
        // even sure the "fix" of giving the shape information statically
        // yields a speed up, so I consider the warning useless until it
        // is shown to speed up user code.
static int %(nodename)s_printed_warning = 1;
static __global__ void %(nodename)s_mrg_uniform(
%(otype)s*sample_data,
npy_int32*state_data,
const int Nsamples,
const int Nstreams_used)
{
const npy_int32 i0 = 0;
const npy_int32 i7 = 7;
const npy_int32 i9 = 9;
const npy_int32 i15 = 15;
const npy_int32 i16 = 16;
const npy_int32 i22 = 22;
const npy_int32 i24 = 24;
const npy_int32 M1 = 2147483647; //2^31 - 1
const npy_int32 M2 = 2147462579; //2^31 - 21069
const npy_int32 MASK12 = 511; //2^9 - 1
const npy_int32 MASK13 = 16777215; //2^24 - 1
const npy_int32 MASK2 = 65535; //2^16 - 1
const npy_int32 MULT2 = 21069;
const unsigned int numThreads = blockDim.x * gridDim.x;
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
npy_int32 y1, y2, x11, x12, x13, x21, x22, x23;
if (idx < Nstreams_used)
{
x11 = state_data[idx*6+0];
x12 = state_data[idx*6+1];
x13 = state_data[idx*6+2];
x21 = state_data[idx*6+3];
x22 = state_data[idx*6+4];
x23 = state_data[idx*6+5];
for (int i = idx; i < Nsamples; i += Nstreams_used)
{
y1 = ((x12 & MASK12) << i22) + (x12 >> i9) + ((x13 & MASK13) << i7) + (x13 >> i24);
y1 -= (y1 < 0 || y1 >= M1) ? M1 : 0;
y1 += x13;
y1 -= (y1 < 0 || y1 >= M1) ? M1 : 0;
x13 = x12;
x12 = x11;
x11 = y1;
y1 = ((x21 & MASK2) << i15) + (MULT2 * (x21 >> i16));
y1 -= (y1 < 0 || y1 >= M2) ? M2 : 0;
y2 = ((x23 & MASK2) << i15) + (MULT2 * (x23 >> i16));
y2 -= (y2 < 0 || y2 >= M2) ? M2 : 0;
y2 += x23;
y2 -= (y2 < 0 || y2 >= M2) ? M2 : 0;
y2 += y1;
y2 -= (y2 < 0 || y2 >= M2) ? M2 : 0;
x23 = x22;
x22 = x21;
x21 = y2;
if (x11 <= x21) {
sample_data[i] = (x11 - x21 + M1) * %(NORM)s;
}
else
{
sample_data[i] = (x11 - x21) * %(NORM)s;
}
}
state_data[idx*6+0]= x11;
state_data[idx*6+1]= x12;
state_data[idx*6+2]= x13;
state_data[idx*6+3]= x21;
state_data[idx*6+4]= x22;
state_data[idx*6+5]= x23;
}
}
""" % locals()
def c_code(self, node, nodename, inp, out, sub):
rstate, size = inp
o_rstate, o_sample = out
inplace = int(self.inplace)
ndim = self.output_type.ndim
o_type_num = numpy.asarray(0, dtype=self.output_type.dtype).dtype.num
fail = sub['fail']
if self.output_type.dtype == 'float32':
otype = 'float'
else:
otype = 'double'
SYNC = "CNDA_THREAD_SYNC"
return """
//////// <code generated by mrg_uniform>
npy_int64 M1 = 2147483647; //2^31 - 1
// The +1 is to avoid odims[0] which fails on windows
npy_int64 odims[%(ndim)s+1];
npy_int64 n_elements = 1;
int n_streams, n_streams_used_in_this_call;
int must_alloc_sample = ((NULL == %(o_sample)s)
|| !CudaNdarray_Check((PyObject*)%(o_sample)s)
|| !CudaNdarray_is_c_contiguous(%(o_sample)s)
|| (CudaNdarray_NDIM(%(o_sample)s) != %(ndim)s));
if (PyArray_NDIM(%(size)s) != 1)
{
PyErr_SetString(PyExc_ValueError, "size must be vector");
%(fail)s
}
if (PyArray_DIMS(%(size)s)[0] != %(ndim)s)
{
PyErr_Format(PyExc_ValueError, "size must have length %%i (not %%i)",
%(ndim)s, PyArray_DIMS(%(size)s)[0]);
%(fail)s
}
for (int i = 0; i < %(ndim)s; ++i)
{
odims[i] = *(dtype_%(size)s *)PyArray_GETPTR1(%(size)s, i);
n_elements *= odims[i];
must_alloc_sample = (must_alloc_sample
|| CudaNdarray_HOST_DIMS(%(o_sample)s)[i] != odims[i]);
}
if (n_elements > M1)
{
PyErr_SetString(
PyExc_ValueError,
"rng_mrg gpu implementation does not support more than (2**31 -1) samples");
%(fail)s
}
if (must_alloc_sample)
{
Py_XDECREF(%(o_sample)s);
%(o_sample)s = (CudaNdarray*)CudaNdarray_NewDims(%(ndim)s, odims);
if(!%(o_sample)s)
{
%(fail)s;
}
}
if (!CudaNdarray_Check((PyObject*)%(rstate)s))
{
PyErr_Format(PyExc_ValueError, "rstate must be cudandarray");
%(fail)s;
}
Py_XDECREF(%(o_rstate)s);
if (%(inplace)s)
{
Py_INCREF(%(rstate)s);
%(o_rstate)s = %(rstate)s;
}
else
{
%(o_rstate)s = (CudaNdarray*)CudaNdarray_Copy(%(rstate)s);
if (!%(o_rstate)s) {
PyErr_SetString(PyExc_RuntimeError, "GPU_mrg_uniform: "
"could not copy rstate");
%(fail)s
}
}
if (CudaNdarray_NDIM(%(o_rstate)s) != 1)
{
PyErr_SetString(PyExc_ValueError, "rstate must be vector");
%(fail)s;
}
if (CudaNdarray_HOST_DIMS(%(o_rstate)s)[0] %% 6)
{
PyErr_Format(PyExc_ValueError, "rstate len must be multiple of 6");
%(fail)s;
}
n_streams = CudaNdarray_HOST_DIMS(%(o_rstate)s)[0]/6;
n_streams_used_in_this_call = std::min(n_streams, (int)n_elements);
{
unsigned int threads_per_block = std::min((unsigned int)n_streams_used_in_this_call, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
unsigned int n_blocks = std::min(ceil_intdiv((unsigned int)n_streams_used_in_this_call, threads_per_block), (unsigned int)NUM_VECTOR_OP_BLOCKS);
if (n_streams > (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK * (unsigned int)NUM_VECTOR_OP_BLOCKS)
{
PyErr_Format(PyExc_ValueError, "On GPU, n_streams should be at most %%u",
(unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK * (unsigned int)NUM_VECTOR_OP_BLOCKS);
%(fail)s;
}
if (threads_per_block * n_blocks < n_streams)
{
if (! %(nodename)s_printed_warning)
fprintf(stderr, "WARNING: unused streams above %%i (Tune GPU_mrg get_n_streams)\\n", threads_per_block * n_blocks );
%(nodename)s_printed_warning = 1;
}
%(nodename)s_mrg_uniform<<<n_blocks,threads_per_block>>>(
CudaNdarray_DEV_DATA(%(o_sample)s),
(npy_int32*)CudaNdarray_DEV_DATA(%(o_rstate)s),
n_elements, n_streams_used_in_this_call);
}
%(SYNC)s;
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %%s: %%s.\\n", "mrg_uniform", cudaGetErrorString(err));
%(fail)s;
}
}
//////// </ code generated by mrg_uniform>
""" % locals()
def c_code_cache_version(self):
return (12,)
class GPUA_mrg_uniform(GpuKernelBase, mrg_uniform_base):
_f16_ok = True
def get_params(self, node):
return node.inputs[0].type.context
@classmethod
def new(cls, rstate, ndim, dtype, size):
v_size = as_tensor_variable(size)
if ndim is None:
ndim = get_vector_length(v_size)
op = cls(GpuArrayType(dtype, (False,) * ndim))
return op(rstate, v_size)
def c_headers(self):
return super(GPUA_mrg_uniform, self).c_headers() + ['numpy_compat.h']
def gpu_kernels(self, node, name):
write = write_w(self.output_type.dtype)
if self.output_type.dtype == 'float16':
otype = 'ga_half'
mask = '& 0x7fff'
NORM = '3.0518e-05f'
elif self.output_type.dtype == 'float32':
otype = 'float'
mask = ''
NORM = '4.6566126e-10f'
elif self.output_type.dtype == 'float64':
otype = 'double'
mask = ''
NORM = '4.656612873077392578125e-10'
else:
raise ValueError('Unsupported data type for output',
self.output_type.dtype)
code = """
KERNEL void mrg_uniform(
GLOBAL_MEM %(otype)s *sample_data,
GLOBAL_MEM ga_int *state_data,
const ga_uint Nsamples,
const ga_uint Nstreams_used)
{
/*
* The cluda backend makes sure that ga_int corresponds to
* a 32 bit signed type on the target device. It is not a
* variable width type.
*/
const ga_int i7 = 7;
const ga_int i9 = 9;
const ga_int i15 = 15;
const ga_int i16 = 16;
const ga_int i22 = 22;
const ga_int i24 = 24;
const ga_int M1 = 2147483647; //2^31 - 1
const ga_int M2 = 2147462579; //2^31 - 21069
const ga_int MASK12 = 511; //2^9 - 1
const ga_int MASK13 = 16777215; //2^24 - 1
const ga_int MASK2 = 65535; //2^16 - 1
const ga_int MULT2 = 21069;
const ga_uint idx = GID_0 * LDIM_0 + LID_0;
ga_int y1, y2, x11, x12, x13, x21, x22, x23;
if (idx < Nstreams_used)
{
x11 = state_data[idx*6+0];
x12 = state_data[idx*6+1];
x13 = state_data[idx*6+2];
x21 = state_data[idx*6+3];
x22 = state_data[idx*6+4];
x23 = state_data[idx*6+5];
for (ga_uint i = idx; i < Nsamples; i += Nstreams_used)
{
y1 = ((x12 & MASK12) << i22) + (x12 >> i9) + ((x13 & MASK13) << i7) + (x13 >> i24);
y1 -= (y1 < 0 || y1 >= M1) ? M1 : 0;
y1 += x13;
y1 -= (y1 < 0 || y1 >= M1) ? M1 : 0;
x13 = x12;
x12 = x11;
x11 = y1;
y1 = ((x21 & MASK2) << i15) + (MULT2 * (x21 >> i16));
y1 -= (y1 < 0 || y1 >= M2) ? M2 : 0;
y2 = ((x23 & MASK2) << i15) + (MULT2 * (x23 >> i16));
y2 -= (y2 < 0 || y2 >= M2) ? M2 : 0;
y2 += x23;
y2 -= (y2 < 0 || y2 >= M2) ? M2 : 0;
y2 += y1;
y2 -= (y2 < 0 || y2 >= M2) ? M2 : 0;
x23 = x22;
x22 = x21;
x21 = y2;
if (x11 <= x21) {
sample_data[i] = %(write)s(((x11 - x21 + M1) %(mask)s) * %(NORM)s);
}
else
{
sample_data[i] = %(write)s(((x11 - x21) %(mask)s) * %(NORM)s);
}
}
state_data[idx*6+0]= x11;
state_data[idx*6+1]= x12;
state_data[idx*6+2]= x13;
state_data[idx*6+3]= x21;
state_data[idx*6+4]= x22;
state_data[idx*6+5]= x23;
}
}
""" % locals()
from pygpu import gpuarray
return [Kernel(code=code, name="mrg_uniform",
params=[gpuarray.GpuArray, gpuarray.GpuArray,
'uint32', 'uint32'],
flags=Kernel.get_flags(self.output_type.dtype, 'int32'))
]
def c_code(self, node, nodename, inp, out, sub):
rstate, size = inp
o_rstate, o_sample = out
inplace = int(self.inplace)
ndim = self.output_type.ndim
o_type_num = numpy.asarray(0, dtype=self.output_type.dtype).dtype.num
fail = sub['fail']
ctx = sub['params']
kname = self.gpu_kernels(node, nodename)[0].objvar
otypecode = str(self.output_type.typecode)
return """
npy_int64 M1 = 2147483647; //2^31 - 1
// The +1 is to avoid odims[0] which fails on windows
size_t odims[%(ndim)s+1];
size_t n_elements = 1;
unsigned int n_streams;
int must_alloc_sample = ((NULL == %(o_sample)s)
|| !pygpu_GpuArray_Check((PyObject*)%(o_sample)s)
|| !(%(o_sample)s->ga.flags & GA_C_CONTIGUOUS)
|| (PyGpuArray_NDIM(%(o_sample)s) != %(ndim)s));
if (PyArray_NDIM(%(size)s) != 1)
{
PyErr_SetString(PyExc_ValueError, "size must be vector");
%(fail)s
}
if (PyArray_DIMS(%(size)s)[0] != %(ndim)s)
{
PyErr_Format(PyExc_ValueError, "size must have length %%i (not %%li)",
%(ndim)s, PyArray_DIMS(%(size)s)[0]);
%(fail)s
}
for (int i = 0; i < %(ndim)s; ++i)
{
odims[i] = *(dtype_%(size)s *)PyArray_GETPTR1(%(size)s, i);
n_elements *= odims[i];
must_alloc_sample = (must_alloc_sample
|| PyGpuArray_DIMS(%(o_sample)s)[i] != odims[i]);
}
if (n_elements > M1)
{
PyErr_SetString(
PyExc_ValueError,
"rng_mrg gpu implementation does not support more than (2**31 -1) samples");
%(fail)s
}
if (must_alloc_sample)
{
Py_XDECREF(%(o_sample)s);
%(o_sample)s = pygpu_empty(%(ndim)s, odims, %(otypecode)s, GA_C_ORDER,
%(ctx)s, Py_None);
if(!%(o_sample)s)
{
%(fail)s;
}
}
if (!pygpu_GpuArray_Check((PyObject*)%(rstate)s))
{
PyErr_Format(PyExc_ValueError, "rstate must be gpuarray");
%(fail)s;
}
Py_XDECREF(%(o_rstate)s);
if (%(inplace)s)
{
Py_INCREF(%(rstate)s);
%(o_rstate)s = %(rstate)s;
}
else
{
%(o_rstate)s = pygpu_copy(%(rstate)s, GA_ANY_ORDER);
if (!%(o_rstate)s) {
%(fail)s
}
}
if (PyGpuArray_NDIM(%(o_rstate)s) != 2)
{
PyErr_SetString(PyExc_ValueError, "rstate must be a matrix");
%(fail)s
}
if (PyGpuArray_DIMS(%(o_rstate)s)[1] != 6)
{
PyErr_Format(PyExc_ValueError, "rstate must have 6 columns");
%(fail)s
}
if (%(o_rstate)s->ga.typecode != GA_INT) {
PyErr_Format(PyExc_ValueError, "rstate must be int32");
%(fail)s
}
if (!GpuArray_CHKFLAGS(&%(o_rstate)s->ga, GA_C_CONTIGUOUS)) {
PyErr_Format(PyExc_ValueError, "rstate must be C contiguous");
%(fail)s
}
n_streams = PyGpuArray_DIMS(%(o_rstate)s)[0];
if (n_streams > n_elements)
n_streams = n_elements;
{
void *args[4];
size_t ls = 0, gs = 0;
args[0] = %(o_sample)s->ga.data;
args[1] = %(o_rstate)s->ga.data;
args[2] = &n_elements;
args[3] = &n_streams;
int err = GpuKernel_sched(&%(kname)s, n_elements, &ls, &gs);
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError, "GpuKernel_sched: %%s\\n",
GpuKernel_error(&%(kname)s, err));
%(fail)s
}
err = GpuKernel_call(&%(kname)s, 1, &ls, &gs, 0, args);
if (err != GA_NO_ERROR) {
PyErr_Format(PyExc_RuntimeError, "GpuKernel_call: %%s\\n",
GpuKernel_error(&%(kname)s, err));
%(fail)s
}
}
""" % locals()
def c_code_cache_version(self):
return (11,)
def guess_n_streams(size, warn=False):
"""
Return a guess at a good number of streams.
Parameters
----------
warn : bool, optional
If True, warn when a guess cannot be made (in which case we
return 60 * 256).
"""
if (isinstance(size, (tuple, list)) and
all([isinstance(i, integer_types) for i in size])):
r = 1
for s in size:
r *= s
if r > 6:
r = r // 6
return min(r, 60 * 256)
else:
if warn:
            warnings.warn(
                ("MRG_RandomStreams Can't determine #streams "
                 "from size (%s), guessing 60*256") % str(size),
                stacklevel=3)
return 60 * 256
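# Worked example: guess_n_streams((50, 50)) computes r = 2500 // 6 = 416 and
# returns min(416, 60 * 256) = 416, while a symbolic or otherwise unknown
# size falls back to the 60 * 256 = 15360 default.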
class MRG_RandomStreams(object):
"""
Module component with similar interface to numpy.random
(numpy.random.RandomState).
Parameters
----------
seed : int or list of 6 int
A default seed to initialize the random state.
If a single int is given, it will be replicated 6 times.
The first 3 values of the seed must all be less than M1 = 2147483647,
and not all 0; and the last 3 values must all be less than
M2 = 2147462579, and not all 0.
"""
def updates(self):
return list(self.state_updates)
def __init__(self, seed=12345, use_cuda=None):
self.state_updates = []
super(MRG_RandomStreams, self).__init__()
self.default_instance_seed = seed
self.set_rstate(seed)
if use_cuda is None:
self.use_cuda = theano.sandbox.cuda.cuda_enabled
else:
self.use_cuda = use_cuda
def set_rstate(self, seed):
if isinstance(seed, integer_types):
if seed == 0:
raise ValueError('seed should not be 0', seed)
elif seed >= M2:
raise ValueError('seed should be less than %i' % M2, seed)
self.rstate = numpy.asarray([seed] * 6, dtype='int32')
elif len(seed) == 6:
if seed[0] == 0 and seed[1] == 0 and seed[2] == 0:
raise ValueError(
'The first 3 values of seed should not be all 0', seed)
if seed[3] == 0 and seed[4] == 0 and seed[5] == 0:
raise ValueError(
'The last 3 values of seed should not be all 0', seed)
if seed[0] >= M1 or seed[1] >= M1 or seed[2] >= M1:
raise ValueError(
'The first 3 values of seed should be less than %i' % M1,
seed)
if seed[3] >= M2 or seed[4] >= M2 or seed[5] >= M2:
raise ValueError(
'The last 3 values of seed should be less than %i' % M2,
seed)
self.rstate = numpy.asarray(seed, dtype='int32')
else:
raise TypeError("seed should be 1 integer or 6 integers")
def seed(self, seed=None):
"""
Re-initialize each random stream.
Parameters
----------
seed : None or integer in range 0 to 2**30
Each random stream will be assigned a unique state that depends
deterministically on this value.
Returns
-------
None
"""
if seed is None:
seed = self.default_instance_seed
self.set_rstate(seed)
for old_r, new_r, size, nstreams in self.state_updates:
if nstreams is None:
nstreams = self.n_streams(size)
rstates = self.get_substream_rstates(nstreams,
new_r.owner.outputs[1].dtype)
assert (old_r.get_value(borrow=True,
return_internal_type=True).shape ==
rstates.shape)
assert rstates.dtype == old_r.dtype
old_r.set_value(rstates, borrow=True)
def inc_rstate(self):
"""
        Skip self.rstate 2**134 steps forward, to the start of the next
        stream.
"""
self.rstate = multMatVect(self.rstate, A1p134, M1, A2p134, M2)
assert self.rstate.dtype == numpy.int32
@theano.configparser.change_flags(compute_test_value='off')
def get_substream_rstates(self, n_streams, dtype, inc_rstate=True):
"""
Initialize a matrix in which each row is a MRG stream state,
and they are spaced by 2**72 samples.
"""
assert isinstance(dtype, str)
assert n_streams < 2**72
assert n_streams > 0
rval = numpy.zeros((n_streams, 6), dtype='int32')
rval[0] = self.rstate
if multMatVect.dot_modulo is None:
multMatVect(rval[0], A1p72, M1, A2p72, M2)
f = multMatVect.dot_modulo
f.input_storage[0].storage[0] = A1p72
f.input_storage[2].storage[0] = M1
f.input_storage[3].storage[0] = A2p72
f.input_storage[5].storage[0] = M2
for i in xrange(1, n_streams):
v = rval[i - 1]
f.input_storage[1].storage[0] = v[:3]
f.input_storage[4].storage[0] = v[3:]
f.fn()
rval[i] = f.output_storage[0].storage[0]
if inc_rstate:
self.inc_rstate()
if self.use_cuda and dtype == 'float32':
rval = rval.flatten()
tmp_float_buf = numpy.frombuffer(rval.data, dtype='float32')
assert tmp_float_buf.shape == rval.shape
assert (tmp_float_buf.view('int32') == rval).all()
rval = tmp_float_buf
return rval
def n_streams(self, size):
return guess_n_streams(size)
def pretty_return(self, node_rstate, new_rstate, sample, size, nstreams):
sample.rstate = node_rstate
sample.update = (node_rstate, new_rstate)
self.state_updates.append((node_rstate, new_rstate, size, nstreams))
node_rstate.default_update = new_rstate
return sample
def uniform(self, size, low=0.0, high=1.0, ndim=None, dtype=None,
nstreams=None):
"""
        Sample a tensor of the given size whose elements are drawn from a
        uniform distribution between low and high.
If the size argument is ambiguous on the number of dimensions,
ndim may be a plain integer to supplement the missing information.
Parameters
----------
low
Lower bound of the interval on which values are sampled.
If the ``dtype`` arg is provided, ``low`` will be cast into
dtype. This bound is excluded.
high
Higher bound of the interval on which values are sampled.
If the ``dtype`` arg is provided, ``high`` will be cast into
dtype. This bound is excluded.
size
Can be a list of integer or Theano variable (ex: the shape
of other Theano Variable).
dtype
The output data type. If dtype is not specified, it will be
inferred from the dtype of low and high, but will be at
least as precise as floatX.
"""
low = as_tensor_variable(low)
high = as_tensor_variable(high)
if dtype is None:
dtype = scal.upcast(config.floatX, low.dtype, high.dtype)
low = cast(low, dtype=dtype)
high = cast(high, dtype=dtype)
if isinstance(size, tuple):
msg = "size must be a tuple of int or a Theano variable"
assert all([isinstance(i, (numpy.integer, integer_types, Variable))
for i in size]), msg
if any([isinstance(i, (numpy.integer, integer_types)) and i <= 0
for i in size]):
raise ValueError(
"The specified size contains a dimension with value <= 0",
size)
else:
if not (isinstance(size, Variable) and size.ndim == 1):
raise TypeError("size must be a tuple of int or a Theano "
"Variable with 1 dimension, got " + str(size) +
" of type " + str(type(size)))
orig_nstreams = nstreams
if nstreams is None:
nstreams = self.n_streams(size)
rstates = self.get_substream_rstates(nstreams, dtype)
if self.use_cuda and dtype == 'float32':
node_rstate = float32_shared_constructor(rstates)
assert isinstance(node_rstate.type, CudaNdarrayType)
u = self.pretty_return(node_rstate,
*GPU_mrg_uniform.new(node_rstate,
ndim, dtype, size),
size=size, nstreams=orig_nstreams)
else:
node_rstate = shared(rstates)
u = self.pretty_return(node_rstate,
*mrg_uniform.new(node_rstate,
ndim, dtype, size),
size=size, nstreams=orig_nstreams)
node_rstate.tag.is_rng = True
r = u * (high - low) + low
if u.type.broadcastable != r.type.broadcastable:
raise NotImplementedError(
'Increase the size to match the broadcasting pattern of '
'`low` and `high` arguments')
assert r.dtype == dtype
return r
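    # Usage sketch (illustrative, assuming a working Theano install):
    #
    #   srng = MRG_RandomStreams(seed=234)
    #   u = srng.uniform(size=(2, 2))   # symbolic 2x2 sample
    #   f = theano.function([], u)
    #   f()  # a new draw on each call, via the rstate default_update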
def binomial(self, size=None, n=1, p=0.5, ndim=None, dtype='int64',
nstreams=None):
if n == 1:
if dtype == 'float32' and self.use_cuda:
x = self.uniform(size=size, dtype=dtype, nstreams=nstreams)
else:
x = self.uniform(size=size, nstreams=nstreams)
return cast(x < p, dtype)
else:
raise NotImplementedError("MRG_RandomStreams.binomial with n > 1")
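    # Sketch: with n == 1 each element is a Bernoulli draw, since
    # P(u < p) = p for u ~ Uniform(0, 1); e.g. p = 0.5 yields 1 with
    # probability 0.5 after the cast.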
def multinomial(self, size=None, n=1, pvals=None, ndim=None, dtype='int64',
nstreams=None):
"""
Sample `n` (`n` needs to be >= 1, default 1) times from a multinomial
distribution defined by probabilities pvals.
Example : pvals = [[.98, .01, .01], [.01, .49, .50]] and n=1 will
probably result in [[1,0,0],[0,0,1]]. When setting n=2, this
will probably result in [[2,0,0],[0,1,1]].
Notes
-----
    -`size` and `ndim` are only there to keep the same signature as other
    methods: uniform, binomial, normal, etc.
    TODO : adapt multinomial to take that into account
    -Does not do any value checking on pvals, i.e. there is no
    check that the elements are non-negative, less than 1, or
    sum to 1. Passing pvals = [[-2., 2.]] will result in
    sampling [[0, 0]].
"""
if pvals is None:
raise TypeError("You have to specify pvals")
pvals = as_tensor_variable(pvals)
if size is not None:
if any([isinstance(i, integer_types) and i <= 0 for i in size]):
raise ValueError(
"The specified size contains a dimension with value <= 0",
size)
if size is not None:
raise ValueError(
"Provided a size argument to MRG_RandomStreams.multinomial, "
"which does not use the size argument.")
if ndim is not None:
raise ValueError(
"Provided an ndim argument to MRG_RandomStreams.multinomial, "
"which does not use the ndim argument.")
if pvals.ndim == 2:
size = pvals[:, 0].shape * n
unis = self.uniform(size=size, ndim=1, nstreams=nstreams)
op = multinomial.MultinomialFromUniform(dtype)
n_samples = as_tensor_variable(n)
return op(pvals, unis, n_samples)
else:
raise NotImplementedError(("MRG_RandomStreams.multinomial only"
" implemented for pvals.ndim = 2"))
def multinomial_wo_replacement(self, size=None, n=1, pvals=None,
ndim=None, dtype='int64', nstreams=None):
"""
Sample `n` times *WITHOUT replacement* from a multinomial distribution
defined by probabilities pvals, and returns the indices of the sampled
elements.
`n` needs to be in [1, m], where m is the number of elements to select
from, i.e. m == pvals.shape[1]. By default n = 1.
Example : pvals = [[.98, .01, .01], [.01, .49, .50]] and n=1 will
probably result in [[0],[2]]. When setting n=2, this
will probably result in [[0,1],[2,1]].
Notes
-----
    -`size` and `ndim` are only there to keep the same signature as other
    methods: uniform, binomial, normal, etc.
    TODO : adapt multinomial to take that into account
    -Does not do any value checking on pvals, i.e. there is no
    check that the elements are non-negative, less than 1, or
    sum to 1. Passing pvals = [[-2., 2.]] will result in
    sampling [[0, 0]].
"""
if pvals is None:
raise TypeError("You have to specify pvals")
pvals = as_tensor_variable(pvals)
if size is not None:
raise ValueError("Provided a size argument to "
"MRG_RandomStreams.multinomial_wo_replacement, "
"which does not use the size argument.")
if ndim is not None:
raise ValueError("Provided an ndim argument to "
"MRG_RandomStreams.multinomial_wo_replacement, "
"which does not use the ndim argument.")
if pvals.ndim == 2:
size = pvals[:, 0].shape * n
unis = self.uniform(size=size, ndim=1, nstreams=nstreams)
op = multinomial.MultinomialWOReplacementFromUniform(dtype)
n_samples = as_tensor_variable(n)
return op(pvals, unis, n_samples)
else:
raise NotImplementedError(
"MRG_RandomStreams.multinomial_wo_replacement only implemented"
" for pvals.ndim = 2")
def normal(self, size, avg=0.0, std=1.0, ndim=None,
dtype=None, nstreams=None):
"""
Parameters
----------
size
Can be a list of integers or Theano variables (ex: the shape
of another Theano Variable).
dtype
The output data type. If dtype is not specified, it will be
inferred from the dtype of low and high, but will be at
least as precise as floatX.
nstreams
Number of streams.
"""
avg = as_tensor_variable(avg)
std = as_tensor_variable(std)
if dtype is None:
dtype = scal.upcast(config.floatX, avg.dtype, std.dtype)
avg = cast(avg, dtype)
std = cast(std, dtype)
evened = False
constant = False
if (isinstance(size, tuple) and
all([isinstance(i, (numpy.integer, integer_types)) for i in size])):
constant = True
n_samples = numpy.prod(size, dtype='int64')
if n_samples % 2 == 1:
n_samples += 1
evened = True
else:
n_samples = prod(size) + (prod(size) % 2)
flattened = self.uniform(size=(n_samples,), dtype=dtype,
nstreams=nstreams)
if constant:
U1 = flattened[:n_samples // 2]
U2 = flattened[n_samples // 2:]
else:
U1 = flattened[:prod(flattened.shape) // 2]
U2 = flattened[prod(flattened.shape) // 2:]
sqrt_ln_U1 = sqrt(-2.0 * log(U1))
first_half = sqrt_ln_U1 * cos(
numpy.array(2.0 * numpy.pi, dtype=dtype) * U2)
second_half = sqrt_ln_U1 * sin(
numpy.array(2.0 * numpy.pi, dtype=dtype) * U2)
normal_samples = join(0, first_half, second_half)
final_samples = None
if evened:
final_samples = normal_samples[:-1]
elif constant:
final_samples = normal_samples
else:
final_samples = normal_samples[:prod(size)]
if not size:
size = tensor.constant(size, dtype='int64')
final_samples = final_samples.reshape(size)
final_samples = avg + std * final_samples
assert final_samples.dtype == dtype
return final_samples
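    # normal() above is the Box-Muller transform: for independent
    # U1, U2 ~ Uniform(0, 1),
    #   z0 = sqrt(-2 * ln(U1)) * cos(2 * pi * U2)
    #   z1 = sqrt(-2 * ln(U1)) * sin(2 * pi * U2)
    # are independent standard normals, which are then shifted and scaled
    # as avg + std * z.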
@register_gpua('fast_compile')
@local_optimizer([mrg_uniform])
def local_gpua_mrg(node):
if (type(node.op) == mrg_uniform and
isinstance(node.inputs[0].type, GpuArrayType)):
outs = GPUA_mrg_uniform.new(node.inputs[0],
node.op.output_type.ndim,
node.op.output_type.dtype,
node.inputs[1])
return [outs[0], host_from_gpua(outs[1])]
MRG_RNGs = (mrg_uniform, GPU_mrg_uniform, GPUA_mrg_uniform)
@local_optimizer(MRG_RNGs)
def mrg_random_make_inplace(node):
op = node.op
if isinstance(op, MRG_RNGs) and not op.inplace:
new_op = op.__class__(op.output_type, inplace=True)
return new_op.make_node(*node.inputs).outputs
return False
optdb.register('random_make_inplace_mrg',
opt.in2out(mrg_random_make_inplace, ignore_newtrees=True),
99, 'fast_run', 'inplace')
data/PyHDI/PyCoRAM/examples/app/stencil-9p/stencil-9p-separate/cthread_st.py
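# Note: this is a PyCoRAM control thread. CoramIoChannel, CoramMemory and
# CoramChannel are presumably provided by the PyCoRAM compiler environment,
# which would explain the absence of imports in this file.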
DSIZE = 4
SIZE = 512
a_offset = 1 * 1024 * 1024
b_offset = 2 * 1024 * 1024
iochannel = CoramIoChannel(idx=0, datawidth=32)
mem0 = CoramMemory(idx=0, datawidth=8*DSIZE, size=SIZE)
mem1 = CoramMemory(idx=1, datawidth=8*DSIZE, size=SIZE)
mem2 = CoramMemory(idx=2, datawidth=8*DSIZE, size=SIZE)
mem3 = CoramMemory(idx=3, datawidth=8*DSIZE, size=SIZE)
mem_d0 = CoramMemory(idx=4, datawidth=8*DSIZE, size=SIZE)
mem_d1 = CoramMemory(idx=5, datawidth=8*DSIZE, size=SIZE)
channel = CoramChannel(idx=0, datawidth=8*DSIZE)
def st_set_mesh_size(mesh_size):
channel.write(mesh_size)
def st_step(mesh_size, read_start, write_start):
read_page = 3
write_page = 0
read_addr = read_start
mem0.write(0, read_addr, mesh_size)
read_addr += mesh_size * DSIZE
mem1.write(0, read_addr, mesh_size)
read_addr += mesh_size * DSIZE
mem2.write(0, read_addr, mesh_size)
read_addr += mesh_size * DSIZE
write_addr = write_start + mesh_size * DSIZE + DSIZE
for i in range(mesh_size - 2):
hot_spot = 1 if i == 0 else 0
pos = ( (hot_spot << 6) |
((0x1 << write_page) << 4) |
(0x1 << read_page) )
mem0.wait()
mem1.wait()
mem2.wait()
mem3.wait()
channel.write(pos)
if read_page == 0:
mem0.write_nonblocking(0, read_addr, mesh_size)
elif read_page == 1:
mem1.write_nonblocking(0, read_addr, mesh_size)
elif read_page == 2:
mem2.write_nonblocking(0, read_addr, mesh_size)
elif read_page == 3:
mem3.write_nonblocking(0, read_addr, mesh_size)
read_page = 0 if read_page == 3 else read_page + 1
read_addr += mesh_size * DSIZE
channel.read()
mem_d0.wait()
mem_d1.wait()
if write_page == 0:
mem_d0.read_nonblocking(1, write_addr, mesh_size-2)
elif write_page == 1:
mem_d1.read_nonblocking(1, write_addr, mesh_size-2)
write_addr += mesh_size * DSIZE
write_page = 0 if write_page == 1 else write_page + 1
mem_d0.wait()
mem_d1.wait()
def st_computation(num_iter, mesh_size):
    for i in range(num_iter // 2):
st_step(mesh_size, a_offset, b_offset)
st_step(mesh_size, b_offset, a_offset)
def st_sum(mesh_size):
check_sum = 0
read_addr = a_offset
for i in range(mesh_size):
mem0.write(0, read_addr, mesh_size)
init_sum = 1 if i == 0 else 0
calc_sum = 1
pos = (init_sum << 8) | (calc_sum << 7)
channel.write(pos)
read_addr += mesh_size * DSIZE
check_sum = channel.read()
channel.write(0)
return check_sum
def st_main():
global a_offset
global b_offset
mesh_size = iochannel.read()
print("thread: mesh_size=%d" % mesh_size)
num_iter = iochannel.read()
print("thread: num_iter=%d" % num_iter)
a_offset = iochannel.read()
print("thread: a_offset=%d" % a_offset)
b_offset = iochannel.read()
print("thread: b_offset=%d" % b_offset)
print("thread: st_set_mesh_size")
st_set_mesh_size(mesh_size)
print("thread: st_computation")
st_computation(num_iter, mesh_size)
print("thread: st_sum")
check_sum = st_sum(mesh_size)
iochannel.write(check_sum)
while True:
st_main()
data/OpenMDAO/OpenMDAO/openmdao/surrogate_models/multifi_cokriging.py
"""
This module integrates the Multi-Fidelity Co-Kriging method described in
[LeGratiet2013].
(Author: Remi Vauclin <vauclin.remi@gmail.com>)
This code was implemented using the package scikit-learn as basis.
(Author: Vincent Dubourg <vincent.dubourg@gmail.com>)
OpenMDAO adaptation. Regression and correlation functions were directly copied
from scikit-learn package here to avoid scikit-learn dependency.
(Author: Remi Lafage <remi.lafage@onera.fr>)
ISAE/DMSM - ONERA/DCPS
"""
import numpy as np
from numpy import atleast_2d as array2d
from scipy import linalg
from scipy.optimize import minimize
from scipy.spatial.distance import squareform
from openmdao.surrogate_models.surrogate_model import MultiFiSurrogateModel
import logging
_logger = logging.getLogger()
MACHINE_EPSILON = np.finfo(np.double).eps
NUGGET = 10. * MACHINE_EPSILON
INITIAL_RANGE_DEFAULT = 0.3
TOLERANCE_DEFAULT = 1e-6
THETA0_DEFAULT = 0.5
THETAL_DEFAULT = 1e-5
THETAU_DEFAULT = 50
if hasattr(linalg, 'solve_triangular'):
solve_triangular = linalg.solve_triangular
else:
def solve_triangular(x, y, lower=True):
return linalg.solve(x, y)
def constant_regression(x):
"""
Zero order polynomial (constant, p = 1) regression model.
x --> f(x) = 1
"""
x = np.asarray(x, dtype=np.float)
n_eval = x.shape[0]
f = np.ones([n_eval, 1])
return f
def linear_regression(x):
"""
First order polynomial (linear, p = n+1) regression model.
x --> f(x) = [ 1, x_1, ..., x_n ].T
"""
x = np.asarray(x, dtype=np.float)
n_eval = x.shape[0]
f = np.hstack([np.ones([n_eval, 1]), x])
return f
def squared_exponential_correlation(theta, d):
"""
Squared exponential correlation model (Radial Basis Function).
(Infinitely differentiable stochastic process, very smooth)::
        theta, d --> r(theta, d) = exp( -sum_{i=1..n} theta_i * (d_i)^2 )
Args
----
theta: array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
    d: array_like
        An array with shape (n_eval, n_features) giving the componentwise
        distances between locations x and x' at which the correlation model
        should be evaluated.
Returns
-------
r: array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation model.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
return np.exp(-theta[0] * np.sum(d ** 2, axis=1))
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
return np.exp(-np.sum(theta.reshape(1, n_features) * d ** 2, axis=1))
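# Worked example (illustrative): with theta = [2.0] and componentwise
# distances d = np.array([[0.0], [0.5]]),
# squared_exponential_correlation([2.0], d) returns
# np.exp(-2.0 * np.sum(d ** 2, axis=1)) = [1.0, exp(-0.5)].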
def l1_cross_distances(X, Y=None):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X and Y.
Args
----
X: array_like
An array with shape (n_samples_X, n_features)
Y: array_like
An array with shape (n_samples_Y, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
"""
if Y is None:
X = array2d(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):])
return D
else:
X = array2d(X)
Y = array2d(Y)
n_samples_X, n_features_X = X.shape
n_samples_Y, n_features_Y = Y.shape
if n_features_X != n_features_Y:
raise ValueError("X and Y must have the same dimensions.")
n_features = n_features_X
n_nonzero_cross_dist = n_samples_X * n_samples_Y
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples_X):
ll_0 = ll_1
ll_1 = ll_0 + n_samples_Y
D[ll_0:ll_1] = np.abs(X[k] - Y)
return D
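# Shape sketch: for X with n_samples = 4, the one-argument form returns the
# 4 * 3 / 2 = 6 strictly-upper-triangular rows |x_i - x_j| (squareform can
# rebuild the full symmetric matrix); the two-argument form returns all
# n_samples_X * n_samples_Y pairwise rows.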
class MultiFiCoKriging(object):
"""
This class integrates the Multi-Fidelity Co-Kriging method described in
[LeGratiet2013]_.
Args
----
regr: string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis for Universal Kriging purpose.
regr is assumed to be the same for all levels of code.
Default assumes a simple constant regression trend.
Available built-in regression models are:
'constant', 'linear'
rho_regr: string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. Defines the regression function for the
autoregressive parameter rho.
rho_regr is assumed to be the same for all levels of code.
Default assumes a simple constant regression trend.
Available built-in regression models are:
'constant', 'linear'
theta: double, array_like or list, optional
Value of correlation parameters if they are known; no optimization is run.
Default is None, so that optimization is run.
if double: value is replicated for all features and all levels.
if array_like: an array with shape (n_features, ) for
isotropic calculation. It is replicated for all levels.
if list: a list of nlevel arrays specifying value for each level
theta0: double, array_like or list, optional
Starting point for the maximum likelihood estimation of the
best set of parameters.
        Default is None, meaning use of the default 0.5*np.ones(n_features).
if double: value is replicated for all features and all levels.
if array_like: an array with shape (n_features, ) for
isotropic calculation. It is replicated for all levels.
if list: a list of nlevel arrays specifying value for each level
thetaL: double, array_like or list, optional
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None meaning use of the default 1e-5*np.ones(n_features).
if double: value is replicated for all features and all levels.
if array_like: An array with shape matching theta0's. It is replicated
for all levels of code.
if list: a list of nlevel arrays specifying value for each level
thetaU: double, array_like or list, optional
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None meaning use of default value 50*np.ones(n_features).
if double: value is replicated for all features and all levels.
if array_like: An array with shape matching theta0's. It is replicated
for all levels of code.
if list: a list of nlevel arrays specifying value for each level
Attributes
----------
`theta`: list
Specified theta for each level OR the best set of autocorrelation parameters
(the sought maximizer of the reduced likelihood function).
`rlf_value`: list
The optimal negative concentrated reduced likelihood function value
for each level.
Examples
--------
>>> from openmdao.surrogate_models.multifi_cokriging import MultiFiCoKriging
>>> import numpy as np
>>> Xe = np.array([[0],[0.4],[1]])
>>> Xc = np.vstack((np.array([[0.1],[0.2],[0.3],[0.5],[0.6],[0.7],[0.8],[0.9]]),Xe))
>>> ye = ((Xe*6-2)**2)*np.sin((Xe*6-2)*2)
>>> yc = 0.5*((Xc*6-2)**2)*np.sin((Xc*6-2)*2)+(Xc-0.5)*10. - 5
>>> model = MultiFiCoKriging(theta0=1, thetaL=1e-5, thetaU=50.)
>>> model.fit([Xc, Xe], [yc, ye])
>>> np.abs(float(model.predict([0.05])[0])- ((0.05*6-2)**2)*np.sin((0.05*6-2)*2)) < 0.05
True
Notes
-----
    Implementation is based on the scikit-learn package
    (author: Vincent Dubourg <vincent.dubourg@gmail.com>), which translates
    the DACE MATLAB toolbox; see [NLNS2002]_.
References
----------
.. [NLNS2002] H. B. Nielsen, S. N. Lophaven, and J. Sondergaard.
`DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] W. J. Welch, R. J. Buck, J. Sacks, H. P. Wynn, T. J. Mitchell,
and M. D. Morris (1992). "Screening, predicting, and computer experiments."
`Technometrics,` 34(1) 15--25.
http://www.jstor.org/pss/1269548
.. [LeGratiet2013] L. Le Gratiet (2013). "Multi-fidelity Gaussian process
regression for computer experiments."
PhD thesis, Universite Paris-Diderot-Paris VII.
.. [TBKH2011] Toal, D. J., Bressloff, N. W., Keane, A. J., & Holden, C. M. E. (2011).
"The development of a hybridized particle swarm for kriging hyperparameter
tuning." `Engineering optimization`, 43(6), 675-699.
"""
_regression_types = {
'constant': constant_regression,
'linear': linear_regression}
def __init__(self, regr='constant', rho_regr='constant',
theta=None, theta0=None, thetaL=None, thetaU=None):
self.corr = squared_exponential_correlation
self.regr = regr
self.rho_regr = rho_regr
self.theta = theta
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self._nfev = 0
def _build_R(self, lvl, theta):
"""
Builds the correlation matrix with given theta for the specified level.
"""
D = self.D[lvl]
n_samples = self.n_samples[lvl]
R = np.eye(n_samples) * (1. + NUGGET)
corr = squareform(self.corr(theta, D))
R = R + corr
return R
def fit(self, X, y,
initial_range=INITIAL_RANGE_DEFAULT, tol=TOLERANCE_DEFAULT):
"""
The Multi-Fidelity co-kriging model fitting method.
Args
----
X: list of double array_like elements
A list of arrays with the input at which observations were made, from lowest
            fidelity to highest fidelity. Designs must be nested
            with X[i] = np.vstack([..., X[i+1]])
y: list of double array_like elements
A list of arrays with the observations of the scalar output to be predicted,
from lowest fidelity to highest fidelity.
initial_range: float
Initial range for the optimizer.
tol: float
Optimizer terminates when the tolerance tol is reached.
"""
self._check_list_structure(X, y)
self._check_params()
X = self.X
y = self.y
nlevel = self.nlevel
n_samples = self.n_samples
self.beta = nlevel*[0]
self.beta_rho = nlevel*[None]
self.beta_regr = nlevel*[None]
self.C = nlevel*[0]
self.D = nlevel*[0]
self.F = nlevel*[0]
self.p = nlevel*[0]
self.q = nlevel*[0]
self.G = nlevel*[0]
self.sigma2 = nlevel*[0]
self._R_adj = nlevel*[None]
y_best = y[nlevel-1]
        for i in reversed(range(nlevel - 1)):
            y_best = np.concatenate((y[i][:-n_samples[i + 1]], y_best))
self.y_best = y_best
self.y_mean = np.zeros(1)
self.y_std = np.ones(1)
self.X_mean = np.zeros(1)
self.X_std = np.ones(1)
for lvl in range(nlevel):
self.D[lvl] = l1_cross_distances(X[lvl])
if (np.min(np.sum(self.D[lvl], axis=1)) == 0.):
raise Exception("Multiple input features cannot have the same"
" value.")
self.F[lvl] = self.regr(X[lvl])
self.p[lvl] = self.F[lvl].shape[1]
if lvl > 0:
F_rho = self.rho_regr(X[lvl])
self.q[lvl] = F_rho.shape[1]
self.F[lvl] = np.hstack((F_rho*np.dot((self.y[lvl-1])[-n_samples[lvl]:],
np.ones((1,self.q[lvl]))), self.F[lvl]))
else:
self.q[lvl] = 0
n_samples_F_i = self.F[lvl].shape[0]
if n_samples_F_i != n_samples[lvl]:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if int(self.p[lvl] + self.q[lvl]) >= n_samples_F_i:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the regression"
" model size p+q=%d.")
% (n_samples[i], self.p[lvl]+self.q[lvl]))
self.X = X
self.y = y
self.rlf_value = np.zeros(nlevel)
for lvl in range(nlevel):
if self.theta[lvl] is None:
sol = self._max_rlf(lvl=lvl, initial_range=initial_range, tol=tol)
self.theta[lvl] = sol['theta']
self.rlf_value[lvl] = sol['rlf_value']
if np.isinf(self.rlf_value[lvl]):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
self.rlf_value[lvl] = self.rlf(lvl=lvl)
if np.isinf(self.rlf_value[lvl]):
raise Exception("Bad point. Try increasing theta0.")
return
def rlf(self, lvl, theta=None):
"""
This function determines the BLUP parameters and evaluates the negative reduced
likelihood function for the given autocorrelation parameters theta.
        Minimizing this function with respect to the autocorrelation
        parameters theta is equivalent to maximizing the likelihood of the
        assumed joint Gaussian distribution of the observations y evaluated
        onto the design of experiments X.
Args
----
self: Multi-Fidelity Co-Kriging object
lvl: Integer
Level of fidelity
theta: array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta``).
Returns
-------
rlf_value: double
The value of the negative concentrated reduced likelihood function
associated to the given autocorrelation parameters theta.
"""
if theta is None:
theta = self.theta[lvl]
rlf_value = 1e20
n_samples = self.n_samples[lvl]
y = self.y[lvl]
F = self.F[lvl]
p = self.p[lvl]
q = self.q[lvl]
R = self._build_R(lvl, theta)
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
            _logger.warning('Cholesky decomposition of R at level %i failed'
                            ' with theta=%s', lvl, theta)
return rlf_value
Ft = solve_triangular(C, F, lower=True)
Yt = solve_triangular(C, y, lower=True)
        try:
            Q, G = linalg.qr(Ft, econ=True)
        except TypeError:
            Q, G = linalg.qr(Ft, mode='economic')
beta = solve_triangular(G, np.dot(Q.T, Yt))
err = Yt - np.dot(Ft,beta)
err2 = np.dot(err.T, err)[0,0]
self._err = err
sigma2 = err2 /(n_samples - p - q)
detR = ((np.diag(C))**(2./n_samples)).prod()
rlf_value = (n_samples - p - q)*np.log10(sigma2) \
+ n_samples*np.log10(detR)
self.beta_rho[lvl] = beta[:q]
self.beta_regr[lvl] = beta[q:]
self.beta[lvl] = beta
self.sigma2[lvl] = sigma2
self.C[lvl] = C
self.G[lvl] = G
return rlf_value
def _max_rlf(self, lvl, initial_range, tol):
"""
This function estimates the autocorrelation parameter theta
as the maximizer of the reduced likelihood function of the given level (lvl).
(Minimization of the negative reduced likelihood function is used for convenience.)
Args
----
self: Most parameters are stored in the Gaussian Process model object.
lvl: integer
Level of fidelity
initial_range: float
Initial range of the optimizer
tol: float
Optimizer terminates when the tolerance tol is reached.
Returns
-------
optimal_theta: array_like
optimal_rlf_value: double
The optimal negative reduced likelihood function value.
res: dict
res['theta']: optimal theta
res['rlf_value']: optimal value for likelihood
"""
thetaL = self.thetaL[lvl]
thetaU = self.thetaU[lvl]
def rlf_transform(x):
return self.rlf(theta=10.**x, lvl=lvl)
theta0 = self.theta0[lvl]
x0 = np.log10(theta0[0])
constraints = []
for i in range(theta0.size):
constraints.append({'type': 'ineq', 'fun': lambda log10t,i=i:
log10t[i] - np.log10(thetaL[0][i])})
constraints.append({'type': 'ineq', 'fun': lambda log10t,i=i:
np.log10(thetaU[0][i]) - log10t[i]})
constraints = tuple(constraints)
sol = minimize(rlf_transform, x0, method='COBYLA',
constraints=constraints,
options={'rhobeg': initial_range,
'tol': tol, 'disp': 0})
log10_optimal_x = sol['x']
optimal_rlf_value = sol['fun']
self._nfev += sol['nfev']
optimal_theta = 10. ** log10_optimal_x
res = {}
res['theta'] = optimal_theta
res['rlf_value'] = optimal_rlf_value
return res
def predict(self, X, eval_MSE=True):
"""
This function performs the predictions of the kriging model on X.
Args
----
X: array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
        eval_MSE: boolean, optional
            A boolean specifying whether the Mean Squared Error should be
            evaluated or not. Default is True.
Returns
-------
        y: array_like
            An array with shape (n_eval, 1) with the Best Linear Unbiased
            Prediction at X for the highest level of fidelity.
        MSE: array_like, optional (if eval_MSE is True)
            An array with shape (n_eval, 1) with the Mean Squared Error at X
            for the highest level of fidelity.
"""
X = array2d(X)
nlevel = self.nlevel
n_eval, n_features_X = X.shape
mu = np.zeros((n_eval, nlevel))
f = self.regr(X)
f0 = self.regr(X)
dx = l1_cross_distances(X, Y=self.X[0])
F = self.F[0]
C = self.C[0]
beta = self.beta[0]
Ft = solve_triangular(C, F, lower=True)
yt = solve_triangular(C, self.y[0], lower=True)
r_ = self.corr(self.theta[0], dx).reshape(n_eval, self.n_samples[0])
gamma = solve_triangular(C.T, yt - np.dot(Ft,beta), lower=False)
mu[:,0]= (np.dot(f, beta) + np.dot(r_,gamma)).ravel()
if eval_MSE:
self.sigma2_rho = nlevel*[None]
MSE = np.zeros((n_eval,nlevel))
r_t = solve_triangular(C, r_.T, lower=True)
G = self.G[0]
u_ = solve_triangular(G.T, f.T - np.dot(Ft.T, r_t), lower=True)
MSE[:,0] = self.sigma2[0] * (1 \
- (r_t**2).sum(axis=0) + (u_**2).sum(axis=0))
for i in range(1,nlevel):
C = self.C[i]
F = self.F[i]
g = self.rho_regr(X)
dx = l1_cross_distances(X, Y=self.X[i])
r_ = self.corr(self.theta[i], dx).reshape(n_eval, self.n_samples[i])
f = np.vstack((g.T*mu[:,i-1], f0.T))
Ft = solve_triangular(C, F, lower=True)
yt = solve_triangular(C, self.y[i], lower=True)
r_t = solve_triangular(C,r_.T, lower=True)
G = self.G[i]
beta = self.beta[i]
mu[:,i] = (np.dot(f.T, beta) \
+ np.dot(r_t.T, yt - np.dot(Ft,beta))).ravel()
if eval_MSE:
Q_ = (np.dot((yt-np.dot(Ft,beta)).T, yt-np.dot(Ft,beta)))[0,0]
u_ = solve_triangular(G.T, f - np.dot(Ft.T, r_t), lower=True)
sigma2_rho = np.dot(g, \
self.sigma2[i]*linalg.inv(np.dot(G.T,G))[:self.q[i],:self.q[i]] \
+ np.dot(beta[:self.q[i]], beta[:self.q[i]].T))
sigma2_rho = (sigma2_rho * g).sum(axis=1)
MSE[:,i] = sigma2_rho * MSE[:,i-1] \
+ Q_/(2*(self.n_samples[i]-self.p[i]-self.q[i])) \
* (1 - (r_t**2).sum(axis=0)) \
+ self.sigma2[i] * (u_**2).sum(axis=0)
for i in range(nlevel):
mu[:,i] = self.y_mean + self.y_std * mu[:,i]
if eval_MSE:
MSE[:,i] = self.y_std**2 * MSE[:,i]
if eval_MSE:
return mu[:,-1].reshape((n_eval,1)), MSE[:,-1].reshape((n_eval,1))
else:
return mu[:,-1].reshape((n_eval,1))
def _check_list_structure(self, X, y):
if type(X) is not list:
nlevel = 1
X = [X]
else:
nlevel = len(X)
if type(y) is not list:
y = [y]
if len(X) != len(y):
raise ValueError("X and y must have the same length.")
        n_samples = np.zeros(nlevel, dtype=int)
        n_features = np.zeros(nlevel, dtype=int)
        n_samples_y = np.zeros(nlevel, dtype=int)
for i in range(nlevel):
n_samples[i], n_features[i] = X[i].shape
            if i > 0 and n_features[i] != n_features[i - 1]:
                raise ValueError("All X must have the same number of columns.")
y[i] = np.asarray(y[i]).ravel()[:, np.newaxis]
n_samples_y[i] = y[i].shape[0]
if n_samples[i] != n_samples_y[i]:
raise ValueError("X and y must have the same number of rows.")
self.n_features = n_features[0]
if type(self.theta) is not list:
self.theta = nlevel*[self.theta]
elif len(self.theta) != nlevel:
raise ValueError("theta must be a list of %d element(s)." % nlevel)
if type(self.theta0) is not list:
self.theta0 = nlevel*[self.theta0]
elif len(self.theta0) != nlevel:
raise ValueError("theta0 must be a list of %d elements." % nlevel)
if type(self.thetaL) is not list:
self.thetaL = nlevel*[self.thetaL]
elif len(self.thetaL) != nlevel:
raise ValueError("thetaL must be a list of %d elements." % nlevel)
if type(self.thetaU) is not list:
self.thetaU = nlevel*[self.thetaU]
elif len(self.thetaU) != nlevel:
raise ValueError("thetaU must be a list of %d elements." % nlevel)
self.nlevel = nlevel
self.X = X[:]
self.y = y[:]
self.n_samples = n_samples
return
def _check_params(self):
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
if not callable(self.rho_regr):
if self.rho_regr in self._regression_types:
self.rho_regr = self._regression_types[self.rho_regr]
else:
raise ValueError("rho_regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.rho_regr))
for i in range(self.nlevel):
if self.theta[i] is not None:
self.theta[i] = array2d(self.theta[i])
                if np.any(self.theta[i] <= 0):
                    raise ValueError("theta must be strictly positive.")
if self.theta0[i] is not None:
self.theta0[i] = array2d(self.theta0[i])
if np.any(self.theta0[i] <= 0):
raise ValueError("theta0 must be strictly positive.")
else:
self.theta0[i] = array2d(self.n_features*[THETA0_DEFAULT])
lth = self.theta0[i].size
if self.thetaL[i] is not None:
self.thetaL[i] = array2d(self.thetaL[i])
if self.thetaL[i].size != lth:
raise ValueError("theta0 and thetaL must have the "
"same length.")
else:
self.thetaL[i] = array2d(self.n_features*[THETAL_DEFAULT])
if self.thetaU[i] is not None:
self.thetaU[i] = array2d(self.thetaU[i])
if self.thetaU[i].size != lth:
raise ValueError("theta0 and thetaU must have the "
"same length.")
else:
self.thetaU[i] = array2d(self.n_features*[THETAU_DEFAULT])
            if np.any(self.thetaL[i] <= 0) or np.any(self.thetaU[i] < self.thetaL[i]):
                raise ValueError("The bounds must satisfy 0 < thetaL <= "
                                 "thetaU.")
return
class MultiFiCoKrigingSurrogate(MultiFiSurrogateModel):
"""
    OpenMDAO adapter of the multi-fidelity recursive co-kriging method
    described in [LeGratiet2013]_. See the MultiFiCoKriging class.
"""
def __init__(self, regr='constant', rho_regr='constant',
theta=None, theta0=None, thetaL=None, thetaU=None,
tolerance=TOLERANCE_DEFAULT, initial_range=INITIAL_RANGE_DEFAULT):
super(MultiFiCoKrigingSurrogate, self).__init__()
        self.tolerance = tolerance
        self.initial_range = initial_range
        self.model = MultiFiCoKriging(regr=regr, rho_regr=rho_regr, theta=theta,
                                      theta0=theta0, thetaL=thetaL, thetaU=thetaU)
def predict(self, new_x):
"""Calculates a predicted value of the response based on the current
trained model for the supplied list of inputs.
"""
Y_pred, MSE = self.model.predict([new_x])
return Y_pred, np.sqrt(np.abs(MSE))
    def train_multifi(self, X, Y):
        """Train the surrogate model with the given set of inputs and outputs.
        """
        X, Y = self._fit_adapter(X, Y)
        self.model.fit(X, Y, tol=self.tolerance, initial_range=self.initial_range)
def _fit_adapter(self, X, Y):
if len(np.shape(np.array(X[0]))) == 1:
X = [X]
Y = [Y]
X = [np.array(x) for x in reversed(X)]
Y = [np.array(y) for y in reversed(Y)]
return (X,Y)
class FloatMultiFiCoKrigingSurrogate(MultiFiCoKrigingSurrogate):
"""Predictions are returned as floats, which are the mean of the
NormalDistribution predicted by the base class model."""
def predict(self, new_x):
dist = super(FloatMultiFiCoKrigingSurrogate, self).predict(new_x)
return dist.mu
if __name__ == "__main__":
import doctest
doctest.testmod()
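# End-to-end sketch of the surrogate adapter (illustrative, reusing the
# doctest data above). Because _fit_adapter reverses its inputs before
# calling MultiFiCoKriging.fit (which expects lowest fidelity first),
# train_multifi is fed fidelities from highest to lowest:
#
#     surrogate = MultiFiCoKrigingSurrogate(theta0=1, thetaL=1e-5, thetaU=50.)
#     surrogate.train_multifi([Xe, Xc], [ye, yc])
#     mean, std = surrogate.predict(np.array([0.05]))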
|
data/SparkPost/python-sparkpost/sparkpost/tornado/transmissions.py
|
from .utils import wrap_future
from ..transmissions import Transmissions as SyncTransmissions
class Transmissions(SyncTransmissions):
def get(self, transmission_id):
results = self._fetch_get(transmission_id)
return wrap_future(results, lambda f: f["transmission"])
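# Usage sketch (hypothetical names; assumes a configured tornado SparkPost
# client exposing this class, called from inside a coroutine):
#
#     transmission = yield sp.transmissions.get('my-transmission-id')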
|
data/PyHDI/Pyverilog/examples/example_dataflow_analyzer.py
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
from optparse import OptionParser
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import pyverilog.utils.version
from pyverilog.dataflow.dataflow_analyzer import VerilogDataflowAnalyzer
def main():
INFO = "Verilog module signal/module dataflow analyzer"
VERSION = pyverilog.utils.version.VERSION
USAGE = "Usage: python example_dataflow_analyzer.py -t TOPMODULE file ..."
def showVersion():
print(INFO)
print(VERSION)
print(USAGE)
sys.exit()
optparser = OptionParser()
optparser.add_option("-v","--version",action="store_true",dest="showversion",
default=False,help="Show the version")
optparser.add_option("-I","--include",dest="include",action="append",
default=[],help="Include path")
optparser.add_option("-D",dest="define",action="append",
default=[],help="Macro Definition")
optparser.add_option("-t","--top",dest="topmodule",
default="TOP",help="Top module, Default=TOP")
optparser.add_option("--nobind",action="store_true",dest="nobind",
default=False,help="No binding traversal, Default=False")
optparser.add_option("--noreorder",action="store_true",dest="noreorder",
default=False,help="No reordering of binding dataflow, Default=False")
(options, args) = optparser.parse_args()
filelist = args
if options.showversion:
showVersion()
for f in filelist:
if not os.path.exists(f): raise IOError("file not found: " + f)
if len(filelist) == 0:
showVersion()
analyzer = VerilogDataflowAnalyzer(filelist, options.topmodule,
noreorder=options.noreorder,
nobind=options.nobind,
preprocess_include=options.include,
preprocess_define=options.define)
analyzer.generate()
directives = analyzer.get_directives()
print('Directive:')
for dr in sorted(directives, key=lambda x:str(x)):
print(dr)
instances = analyzer.getInstances()
print('Instance:')
for module, instname in sorted(instances, key=lambda x:str(x[1])):
print((module, instname))
if options.nobind:
print('Signal:')
signals = analyzer.getSignals()
for sig in signals:
print(sig)
print('Const:')
consts = analyzer.getConsts()
for con in consts:
print(con)
else:
terms = analyzer.getTerms()
print('Term:')
for tk, tv in sorted(terms.items(), key=lambda x:str(x[0])):
print(tv.tostr())
binddict = analyzer.getBinddict()
print('Bind:')
for bk, bv in sorted(binddict.items(), key=lambda x:str(x[0])):
for bvi in bv:
print(bvi.tostr())
if __name__ == '__main__':
main()
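# Example invocation, following the USAGE string above (assumes a Verilog
# source file with top module TOP):
#
#     python example_dataflow_analyzer.py -t TOP test.v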
|
data/adaptivdesign/django-sellmo/sellmo/contrib/payment/inject.py
|
from sellmo.core.loading import load
NO_MUTATION = object()
import sellmo.apps.checkout as _checkout
@load(before='finalize_checkout_Order')
def load_model():
class Order(_checkout.models.Order):
_mutated_payment = NO_MUTATION
def invalidate(self):
super(Order, self).invalidate()
self.payment_method = None
@property
def payment(self):
if self._mutated_payment is not NO_MUTATION:
return self._mutated_payment
return getattr(self, 'order_payment', None)
def get_payment_method(self):
if self.payment is not None:
from .method import PaymentMethod
return PaymentMethod.from_payment(self.payment)
def set_payment_method(self, value):
payment = None
if value is not None:
payment = value.make_payment(self)
self._mutated_payment = payment
payment_method = property(get_payment_method, set_payment_method)
def save(self, *args, **kwargs):
super(Order, self).save(*args, **kwargs)
if self._mutated_payment is not NO_MUTATION:
if hasattr(self, 'order_payment'):
self.order_payment.delete()
self.order_payment.order = None
if self._mutated_payment is not None:
self._mutated_payment.order = self
self._mutated_payment.save()
self._mutated_payment = NO_MUTATION
class Meta(_checkout.models.Order.Meta):
abstract = True
_checkout.models.Order = Order
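# Sketch of the staged-mutation flow implemented above (illustrative):
# assigning payment_method only stages a Payment on the instance via the
# NO_MUTATION sentinel; the database is untouched until save() runs.
#
#     order.payment_method = method    # method.make_payment(order) is staged
#     order.save()                     # old order_payment removed, staged
#                                      # payment linked to the order and saved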
|
data/adaptivdesign/django-sellmo/sellmo/core/registry/base.py
|
import sys
import traceback
from types import ModuleType
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.utils import six
from django.utils.functional import cached_property
from .exceptions import RegistryError
allowed = None
def should_fix_type(typ):
global allowed
if allowed is None:
allowed = ['sellmo']
for app in apps.get_app_configs():
allowed.append(app.name)
return any(typ.__module__.startswith(name) for name in allowed)
class ModuleAttribute(object):
def __init__(self, module_name, name):
self.module_name = module_name
self.name = name
self._accessed = False
self._access_traceback = None
self._injection_handlers = []
def inject(self, handler):
self._injection_handlers.insert(0, handler)
@property
    def is_accessed(self):
return self._accessed
@property
def is_assigned(self):
return hasattr(self, '_value')
def access(self):
if not self._accessed:
if getattr(settings, 'DEBUG', False):
self._access_traceback = traceback.extract_stack()
value = self._value
for handler in self._injection_handlers:
try:
value = handler(value)
except Exception as ex:
raise RegistryError, ex, sys.exc_info()[2]
if isinstance(value, type) and should_fix_type(value):
value.__module__ = self.module_name
self._accessed = True
self._value = value
return self._value
def assign(self, value):
        if self.is_accessed:
filename, line, name, text = '?', '?', '?', '?'
if self._access_traceback is not None:
filename, line, name, text = self._access_traceback[-3]
raise RegistryError(
"Cannot assign '%s'. "
"Was already accessed by "
"%s:%s %s '%s'" % (
self.name,
filename,
line,
name,
text))
self._value = value
class BaseModule(ModuleType):
_imports = None
_imported_attrs = None
_modules = {}
def __new__(cls, fullname):
if fullname in cls._modules:
            raise Exception("module %r is already registered" % fullname)
module = super(BaseModule, cls).__new__(cls, fullname)
cls._modules[fullname] = module
return module
def __init__(self, fullname):
super(BaseModule, self).__init__(fullname)
self._attrs = {}
def _import(self):
if self._imported_attrs is None:
self._imported_attrs = {}
if self._imports:
module = import_module(self._imports)
self._imported_attrs.update(
**{
name: value
for name, value in six.iteritems(vars(module))
if not name.startswith('_')
}
)
def __getattribute__(self, name):
if name.startswith('_'):
try:
return super(BaseModule, self).__getattribute__(name)
except AttributeError:
pass
elif not self[name].is_assigned:
try:
value = super(BaseModule, self).__getattribute__(name)
except AttributeError:
self._import()
if name in self._imported_attrs:
self[name].assign(self._imported_attrs[name])
else:
self[name].assign(value)
if self[name].is_assigned:
return self[name].access()
raise AttributeError(name)
@property
def __all__(self):
names = set()
for name in six.iterkeys(self._attrs):
names.add(name)
for name in six.iterkeys(self._base_attrs):
names.add(name)
return list(names)
def __setattr__(self, name, value):
if name.startswith('_'):
self.__dict__[name] = value
else:
self[name].assign(value)
def __getitem__(self, name):
if name not in self._attrs:
self._attrs[name] = ModuleAttribute(self.__name__, name)
return self._attrs[name]
@classmethod
def imports(cls, module):
return type('Module', (cls, ), {'_imports': module})
@classmethod
def find_module(cls, fullname, path=None):
if fullname in cls._modules:
return cls
@classmethod
def load_module(cls, fullname):
module = cls._modules[fullname]
module.__loader__ = cls
module.__file__ = "<%s>" % cls.__name__
module.__package__ = fullname.rpartition('.')[0]
module._import()
sys.modules.setdefault(fullname, module)
return module
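# Illustrative wiring (hypothetical; the actual registration lives elsewhere
# in sellmo): a module instance created here resolves through the
# find_module/load_module hooks above once the class sits on sys.meta_path.
#
#     Module = BaseModule.imports('myapp.defaults')
#     mod = Module('sellmo.apps.myapp')
#     sys.meta_path.append(Module)
#     import sellmo.apps.myapp         # import machinery returns `mod`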
|
data/Sandia-Labs/PVLIB_Python/pvlib/pvl_getaoi.py
|
import pandas as pd
import numpy as np
import pvl_tools
def pvl_getaoi(SurfTilt,SurfAz,SunZen,SunAz):
'''
Determine angle of incidence from surface tilt/azimuth and apparent sun zenith/azimuth
The surface is defined by its tilt angle from horizontal and its azimuth pointing angle.
    The sun position is defined by the apparent (refraction-corrected) sun
    zenith angle and the sun azimuth angle.
Parameters
----------
SurfTilt : scalar or DataFrame of surface tilt angles in decimal degrees
If SurfTilt is a DataFrame it must be of the same size as all other DataFrame
inputs. SurfTilt must be >=0 and <=180. The tilt angle is defined as
degrees from horizontal (e.g. surface facing up = 0, surface facing
horizon = 90)
SurfAz : scalar or DataFrame of the surface azimuth angles in decimal degrees
If SurfAz is a DataFrame it must be of the same size as all other DataFrame
inputs. SurfAz must be >=0 and <=360. The Azimuth convention is defined
as degrees east of north (e.g. North = 0, East = 90, West = 270).
SunZen : scalar or DataFrame of apparent (refraction-corrected) zenith angles in decimal degrees.
If SunZen is a DataFrame it must be of the same size as all other DataFrame
inputs. SunZen must be >=0 and <=180.
SunAz : scalar or DataFrame of sun azimuth angles in decimal degrees
If SunAz is a DataFrame it must be of the same size as all other DataFrame
inputs. SunAz must be >=0 and <=360. The Azimuth convention is defined
as degrees east of north (e.g. North = 0, East = 90, West = 270).
Returns
-------
AOI : DataFrame
The angle, in decimal degrees, between the surface normal DataFrame and the sun beam DataFrame.
References
----------
D.L. King, J.A. Kratochvil, W.E. Boyson. "Spectral and
Angle-of-Incidence Effects on Photovoltaic Modules and Solar Irradiance
Sensors". 26th IEEE Photovoltaic Specialists Conference. Sept. 1997.
See Also
--------
PVL_EPHEMERIS
'''
    Vars = locals()
Expect={'SurfTilt':('num','x>=0'),
'SurfAz':('num','x>=-180','x<=180'),
'SunZen':('x>=0'),
'SunAz':('x>=0')
}
    var = pvl_tools.Parse(Vars, Expect)
    AOI = np.degrees(np.arccos(
        np.cos(np.radians(var.SunZen)) * np.cos(np.radians(var.SurfTilt)) +
        np.sin(np.radians(var.SurfTilt)) * np.sin(np.radians(var.SunZen)) *
        np.cos(np.radians(var.SunAz) - np.radians(var.SurfAz))))
    return pd.DataFrame({'AOI': AOI})
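# Quick sanity check of the formula (scalar inputs, assuming pvl_tools.Parse
# accepts scalars as the docstring states): for a horizontal surface
# (SurfTilt=0) the expression reduces to arccos(cos(SunZen)), so AOI equals
# the sun zenith angle.
#
#     pvl_getaoi(SurfTilt=0, SurfAz=0, SunZen=30, SunAz=180)
#     # -> AOI == 30.0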
|
data/JT5D/Alfred-Popclip-Sublime/Sublime Text 2/JsFormat/libs/jsbeautifier/unpackers/packer.py
|
"""Unpacker for Dean Edward's p.a.c.k.e.r"""
import re
import string
from jsbeautifier.unpackers import UnpackingError
PRIORITY = 1
def detect(source):
"""Detects whether `source` is P.A.C.K.E.R. coded."""
return source.replace(' ', '').startswith('eval(function(p,a,c,k,e,r')
def unpack(source):
"""Unpacks P.A.C.K.E.R. packed js code."""
payload, symtab, radix, count = _filterargs(source)
if count != len(symtab):
raise UnpackingError('Malformed p.a.c.k.e.r. symtab.')
try:
unbase = Unbaser(radix)
except TypeError:
raise UnpackingError('Unknown p.a.c.k.e.r. encoding.')
def lookup(match):
"""Look up symbols in the synthetic symtab."""
word = match.group(0)
return symtab[unbase(word)] or word
source = re.sub(r'\b\w+\b', lookup, payload)
return _replacestrings(source)
def _filterargs(source):
"""Juice from a source file the four args needed by decoder."""
argsregex = (r"}\('(.*)', *(\d+), *(\d+), *'(.*)'\."
r"split\('\|'\), *(\d+), *(.*)\)\)")
args = re.search(argsregex, source, re.DOTALL).groups()
try:
return args[0], args[3].split('|'), int(args[1]), int(args[2])
except ValueError:
raise UnpackingError('Corrupted p.a.c.k.e.r. data.')
def _replacestrings(source):
"""Strip string lookup table (list) and replace values in source."""
match = re.search(r'var *(_\w+)\=\["(.*?)"\];', source, re.DOTALL)
if match:
varname, strings = match.groups()
startpoint = len(match.group(0))
lookup = strings.split('","')
variable = '%s[%%d]' % varname
for index, value in enumerate(lookup):
source = source.replace(variable % index, '"%s"' % value)
return source[startpoint:]
return source
class Unbaser(object):
"""Functor for a given base. Will efficiently convert
strings to natural numbers."""
ALPHABET = {
62 : '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ',
95 : (' !"
'[\]^_`abcdefghijklmnopqrstuvwxyz{|}~')
}
def __init__(self, base):
self.base = base
if 2 <= base <= 36:
self.unbase = lambda string: int(string, base)
else:
try:
self.dictionary = dict((cipher, index) for
index, cipher in enumerate(self.ALPHABET[base]))
except KeyError:
raise TypeError('Unsupported base encoding.')
self.unbase = self._dictunbaser
def __call__(self, string):
return self.unbase(string)
def _dictunbaser(self, string):
"""Decodes a value to an integer."""
ret = 0
for index, cipher in enumerate(string[::-1]):
ret += (self.base ** index) * self.dictionary[cipher]
return ret
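# Typical use of this module (a sketch; the unpackers registry normally
# drives these two calls):
#
#     if detect(source):
#         source = unpack(source)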
|
data/OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/container.py
|
"""
The Container class.
"""
import datetime
import copy
import pprint
import socket
import sys
import weakref
copy._copy_dispatch[weakref.ref] = copy._copy_immutable
copy._deepcopy_dispatch[weakref.ref] = copy._deepcopy_atomic
copy._deepcopy_dispatch[weakref.KeyedRef] = copy._deepcopy_atomic
from zope.interface import Interface, implements
from numpy import ndarray
from traits.api import HasTraits, Missing, Python, \
push_exception_handler, TraitType, CTrait
from traits.has_traits import FunctionType, _clone_trait, MetaHasTraits
from traits.trait_base import not_none
from multiprocessing import connection
from openmdao.main.datatypes.file import FileRef
from openmdao.main.datatypes.list import List
from openmdao.main.datatypes.slot import Slot
from openmdao.main.datatypes.vtree import VarTree
from openmdao.main.interfaces import ICaseIterator, IResourceAllocator, \
IContainer, \
IVariableTree, IContainerProxy, IOverrideSet
from openmdao.main.mp_support import ObjectManager, \
is_instance, CLASSES_TO_PROXY, \
has_interface
from openmdao.main.rbac import rbac
from openmdao.main.variable import Variable, is_legal_name, _missing
from openmdao.main.array_helpers import flattened_value, get_index
from openmdao.util.log import Logger, logger
from openmdao.util import eggloader, eggsaver, eggobserver
from openmdao.util.eggsaver import SAVE_CPICKLE
from openmdao.util.typegroups import int_types, complex_or_real_types
_copydict = {
'deep': copy.deepcopy,
'shallow': copy.copy
}
_iodict = {'out': 'output', 'in': 'input'}
__missing__ = object()
def get_closest_proxy(obj, pathname):
"""Returns a tuple of the form (val, restofpath), where val
is either the object specified by dotted name 'pathname'
within obj, or the closest in-process proxy object that can be
resolved. If val is a proxy, restofpath will contain the
remaining part of pathname needed to resolve the desired attribute
within the proxy. Otherwise, val is the actual desired attribute
and restofpath is the empty string.
"""
names = pathname.split('.')
i = 0
for name in names:
if IContainerProxy.providedBy(obj):
return (obj, '.'.join(names[i:]))
try:
obj = getattr(obj, name)
except AttributeError:
break
i += 1
return (obj, '.'.join(names[i:]))
def proxy_parent(obj, pathname):
"""Returns a tuple of the form (par, restofpath), where par
is either the parent of the object specified by dotted name 'pathname'
within obj, or the closest in-process proxy object that can be
resolved. restofpath will contain the
remaining part of pathname needed to resolve the desired attribute
within the parent or proxy object.
"""
names = pathname.split('.')
i = 0
for name in names[:-1]:
if IContainerProxy.providedBy(obj):
return (obj, '.'.join(names[i:]))
try:
obj = getattr(obj, name)
except AttributeError:
break
i += 1
return (obj, '.'.join(names[i:]))
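# Contract example for the two helpers above (no proxies involved): for a
# fully local path 'a.b.c', get_closest_proxy(obj, 'a.b.c') returns
# (obj.a.b.c, ''), while proxy_parent(obj, 'a.b.c') returns (obj.a.b, 'c').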
push_exception_handler(handler=lambda o, t, ov, nv: None,
reraise_exceptions=True,
main=True,
locked=True)
class _MetaSafe(MetaHasTraits):
""" Tries to keep users from shooting themselves in the foot. """
def __new__(mcs, class_name, bases, class_dict):
for name, obj in class_dict.items():
if isinstance(obj, Variable):
for base in bases:
if name in base.__dict__:
raise NameError('%s overrides attribute %r of %s'
% (class_name, name, base.__name__))
return super(_MetaSafe, mcs).__new__(mcs, class_name, bases, class_dict)
class SafeHasTraits(HasTraits):
"""
Special :class:`HasTraits` which is configured such that the class is
checked for any :class:`Variable` which might override an existing
attribute in any base class.
"""
__metaclass__ = _MetaSafe
def _check_bad_default(name, trait, obj=None):
if trait.vartypename not in ['Slot', 'VarTree'] and trait.required is True \
and not trait.assumed_default and trait._illegal_default_ is True:
msg = "variable '%s' is required and cannot have a default value" % name
if obj is None:
raise RuntimeError(msg)
else:
obj.raise_exception(msg, RuntimeError)
class Container(SafeHasTraits):
""" Base class for all objects having Traits that are visible
to the framework"""
implements(IContainer)
def __init__(self):
self._parent = None
self._name = None
super(Container, self).__init__()
self._call_cpath_updated = True
self._call_configure = True
self._managers = {}
self._added_traits = {}
self._getcache = {}
self._setcache = {}
self._copycache = {}
self._cached_traits_ = None
self._repair_trait_info = None
self._trait_metadata = {}
self._logger = Logger('')
for name, obj in self.items():
if isinstance(obj, FileRef):
setattr(self, name, obj.copy(owner=self))
for name, obj in self.__class__.__dict__['__class_traits__'].items():
ttype = obj.trait_type
if isinstance(ttype, VarTree):
variable_tree = getattr(self, name)
if not obj.required:
new_tree = variable_tree.copy()
setattr(self, name, new_tree)
if obj.required:
_check_bad_default(name, obj, self)
@property
def parent(self):
"""The parent Container of this Container."""
return self._parent
@parent.setter
def parent(self, value):
"""This is called when the parent attribute is changed."""
if self._parent is not value:
self._parent = value
self._fix_loggers(self, recurse=True)
self._branch_moved()
def _branch_moved(self):
self._call_cpath_updated = True
for n, cont in self.items():
if is_instance(cont, Container) and cont is not self._parent:
cont._branch_moved()
@property
def name(self):
"""The name of this Container."""
if self._name is None:
if self.parent:
self._name = find_name(self.parent, self)
self._fix_loggers(self, recurse=True)
elif self._call_cpath_updated is False:
self._name = ''
else:
return ''
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Container."""
if not is_legal_name(name):
raise NameError("name '%s' contains illegal characters" % name)
if self._name != name:
self._name = name
self._fix_loggers(self, recurse=True)
def _fix_loggers(self, container, recurse):
"""Fix loggers starting from `container`."""
container._logger.rename(container.get_pathname().replace('.', ','))
if recurse:
for name in container.list_containers():
obj = getattr(container, name)
self._fix_loggers(obj, recurse)
@rbac(('owner', 'user'))
def get_pathname(self, rel_to_scope=None):
""" Return full path name to this container, relative to scope
*rel_to_scope*. If *rel_to_scope* is *None*, return the full pathname.
"""
path = []
obj = self
name = obj.name
while obj is not rel_to_scope and name:
path.append(name)
obj = obj.parent
if obj is None:
break
name = obj.name
return '.'.join(path[::-1])
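    # Illustrative contract: for a hierarchy top.sub.comp,
    # comp.get_pathname() returns 'top.sub.comp' and
    # comp.get_pathname(rel_to_scope=top) returns 'sub.comp'.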
def get_trait(self, name, copy=False):
"""Returns the trait indicated by name, or None if not found. No
recursive search is performed if name contains dots. This is a
replacement for the trait() method on HasTraits objects because that
method can return traits that shouldn't exist. DO NOT use the trait()
function as a way to determine the existence of a trait.
"""
if self._cached_traits_ is None:
self._cached_traits_ = self.traits()
self._cached_traits_.update(self._instance_traits())
if copy:
if self._cached_traits_.get(name):
return self.trait(name, copy=copy)
else:
return None
else:
return self._cached_traits_.get(name)
def __deepcopy__(self, memo):
""" Overrides deepcopy for HasTraits because otherwise we lose instance
traits when we copy. :(
"""
id_self = id(self)
if id_self in memo:
return memo[id_self]
memo['traits_copy_mode'] = "deep"
saved_p = self._parent
saved_c = self._cached_traits_
saved_s = self._setcache
saved_g = self._getcache
self._parent = None
self._cached_traits_ = None
self._getcache = {}
self._setcache = {}
try:
result = super(Container, self).__deepcopy__(memo)
finally:
self._parent = saved_p
self._cached_traits_ = saved_c
self._getcache = saved_g
self._setcache = saved_s
olditraits = self._instance_traits()
for name, trait in olditraits.items():
            if trait.type != 'event' and name in self._added_traits:
if isinstance(trait.trait_type, VarTree):
if name not in result._added_traits:
result.add_trait(name, _clone_trait(trait))
else:
result.add_trait(name, _clone_trait(trait))
if name in self.__dict__:
result.__dict__[name] = copy.deepcopy(self.__dict__[name])
return result
def __getstate__(self):
"""Return dict representing this container's state."""
state = super(Container, self).__getstate__()
dct = {}
for name, trait in state['_added_traits'].items():
if trait.transient is not True:
dct[name] = trait
state['_added_traits'] = dct
state['_cached_traits_'] = None
state['_getcache'] = {}
state['_setcache'] = {}
return state
def __setstate__(self, state):
"""Restore this container's state. Components that need to do some
restore operations (such as connecting to a remote server or loading
a model file) before any get()/set() is attempted will generate
exceptions. However, the complete set of local traits must be available
since restore operations may depend on knowing what to restore.
Here we swallow errors from 'special' components and remember to retry
the operation in _repair_traits(), called by post_load(). Persistent
problems will be reported there.
"""
super(Container, self).__setstate__({})
self.__dict__.update(state)
self._repair_trait_info = {}
self._cached_traits_ = None
traits = self._alltraits()
for name, trait in self._added_traits.items():
if name not in traits:
self.add_trait(name, trait, refresh=False)
fixups = []
for name, trait in traits.items():
try:
get = trait.trait_type.get
except AttributeError:
continue
if get is not None:
if name not in self._added_traits:
try:
val = getattr(self, name)
self.remove_trait(name)
self.add_trait(name, trait)
setattr(self, name, val)
except Exception as exc:
self._logger.warning('Initial fix of %s (%s) failed: %s',
name, val, exc)
fixups.append((name, trait))
self._repair_trait_info['property'] = fixups
fixups = []
for name, val in self.__dict__.items():
if not name.startswith('__') and not self.get_trait(name):
try:
setattr(self, name, val)
except Exception as exc:
self._logger.warning('Initial fix of %s (%s) failed: %s',
name, val, exc)
fixups.append((name, val))
self._repair_trait_info['implicit'] = fixups
fixups = []
for name, trait in self._alltraits().items():
if isinstance(trait.trait_type, List):
try:
setattr(self, name, getattr(self, name))
except Exception as exc:
                    self._logger.warning('Initial fix of %s failed: %s',
                                         name, exc)
fixups.append(name)
self._repair_trait_info['list'] = fixups
self._cached_traits_ = None
def _repair_traits(self):
"""To be called after loading a pickled state, but *after* any
post_load() processing (to handle cases where a component needs
to do internal setup before being used to get/set a trait).
This retries failed operations recorded in __setstate__().
"""
if self._repair_trait_info is None:
return
for name, trait in self._repair_trait_info['property']:
val = getattr(self, name)
self.remove_trait(name)
self.add_trait(name, trait)
setattr(self, name, val)
for name, val in self._repair_trait_info['implicit']:
setattr(self, name, val)
for name in self._repair_trait_info['list']:
setattr(self, name, getattr(self, name))
self._repair_trait_info = None
@classmethod
def add_class_trait(cls, name, *trait):
"""Overrides HasTraits definition of *add_class_trait* to
try to keep from clobbering framework stuff.
"""
bases = [cls]
bases.extend(cls.__bases__)
for base in bases:
if name in base.__dict__:
raise NameError('Would override attribute %r of %s'
% (name, base.__name__))
for t in trait:
_check_bad_default(name, t)
break
if name in cls._trait_metadata:
del cls._trait_metadata[name]
return super(Container, cls).add_class_trait(name, *trait)
def add_trait(self, name, trait, refresh=True):
"""Overrides HasTraits definition of *add_trait* in order to
keep track of dynamically added traits for serialization.
"""
if name.endswith('_items') and trait.type == 'event':
super(Container, self).add_trait(name, trait)
return
bases = [self.__class__]
bases.extend(self.__class__.__bases__)
for base in bases:
if name in base.__dict__:
raise NameError('Would override attribute %r of %s'
% (name, base.__name__))
_check_bad_default(name, trait, self)
if name not in self._added_traits:
self._added_traits[name] = trait
super(Container, self).add_trait(name, trait)
if self._cached_traits_ is not None:
self._cached_traits_[name] = self.trait(name)
if name in self._trait_metadata:
del self._trait_metadata[name]
if refresh:
getattr(self, name)
def remove_trait(self, name):
"""Overrides HasTraits definition of remove_trait in order to
keep track of dynamically added traits for serialization.
"""
try:
del self._added_traits[name]
except KeyError:
pass
try:
del self._cached_traits_[name]
except (KeyError, TypeError):
pass
try:
del self._trait_metadata[name]
except KeyError:
pass
super(Container, self).remove_trait(name)
@rbac(('owner', 'user'))
def get_attr_w_copy(self, path):
"""Same as the 'get' method, except that the value will be copied
if the variable has a 'copy' metadata attribute that is not None.
Possible values for 'copy' are 'shallow' and 'deep'.
Raises an exception if the variable cannot be found.
"""
obj = self.get(path)
copy = self._copycache.get(path, _missing)
if copy is _missing:
copy = self.get_metadata(path.split('[',1)[0], 'copy')
self._copycache[path] = copy
if copy:
if isinstance(obj, Container):
obj = obj.copy()
else:
obj = _copydict[copy](obj)
return obj
def _add_after_parent_set(self, name, obj):
pass
def _prep_for_add(self, name, obj):
"""Check for illegal adds and update the new child
object in preparation for insertion into self.
"""
if '.' in name:
self.raise_exception(
'add does not allow dotted path names like %s' %
name, ValueError)
elif not is_legal_name(name):
self.raise_exception("'%s' is a reserved or invalid name" % name,
NameError)
removed = False
if has_interface(obj, IContainer):
if self.contains(name) and getattr(self, name):
self.remove(name)
removed = True
if has_interface(obj, IContainer):
self._check_recursion(obj)
if IContainerProxy.providedBy(obj):
obj.parent = self._get_proxy(obj)
else:
obj.parent = self
obj.name = name
self._add_after_parent_set(name, obj)
if self._call_cpath_updated is False:
obj.cpath_updated()
return removed
def _post_container_add(self, name, obj, removed):
pass
def add(self, name, obj):
"""Add an object to this Container.
Returns the added object.
"""
removed = self._prep_for_add(name, obj)
if has_interface(obj, IContainer):
setattr(self, name, obj)
if self._cached_traits_ is None:
self.get_trait(name)
else:
self._cached_traits_[name] = self.trait(name)
self._post_container_add(name, obj, removed)
elif is_instance(obj, TraitType):
self.add_trait(name, obj)
else:
setattr(self, name, obj)
return obj
def _check_recursion(self, obj):
""" Check if adding `obj` will cause container recursion. """
ancestor = self
while is_instance(ancestor, Container):
if obj is ancestor:
self.raise_exception('add would cause container recursion',
ValueError)
ancestor = ancestor.parent
def _get_proxy(self, proxy):
"""
Return :class:`OpenMDAO_Proxy` for self usable by `proxy`.
We create a manager for each access type.
In addition, to avoid having to (remotely) manipulate a server's
`allowed_hosts`, we use a separate manager for each client accessing
via AF_INET from a unique host.
"""
addr_type = connection.address_type(proxy._token.address)
addr = proxy._token.address[0] if addr_type == 'AF_INET' else None
key = (addr_type, addr, proxy._authkey)
try:
manager = self._managers[key]
except KeyError:
if addr_type == 'AF_INET':
ip_addr = socket.gethostbyname(socket.gethostname())
address = (ip_addr, 0)
allowed_hosts = [addr]
if addr == ip_addr:
allowed_hosts.append('127.0.0.1')
else:
address = None
allowed_hosts = None
name = self.name or 'parent'
access = addr if addr_type == 'AF_INET' else addr_type
name = '%s-cb-%s' % (name, access)
manager = ObjectManager(self, address, authkey=proxy._authkey,
name=name, allowed_hosts=allowed_hosts)
self._managers[key] = manager
return manager.proxy
def _check_rename(self, oldname, newname):
if '.' in oldname or '.' in newname:
self.raise_exception("can't rename '%s' to '%s': rename only works"
" within a single scope." %
(oldname, newname), RuntimeError)
if not self.contains(oldname):
self.raise_exception("can't rename '%s' to '%s': '%s' was not found." %
(oldname, newname, oldname), RuntimeError)
if self.contains(newname):
self.raise_exception("can't rename '%s' to '%s': '%s' already exists." %
(oldname, newname, newname), RuntimeError)
def rename(self, oldname, newname):
"""Renames a child of this object from oldname to newname."""
self._check_rename(oldname, newname)
obj = self.remove(oldname)
self.add(newname, obj)
def remove(self, name):
"""Remove the specified child from this container and remove any
public trait objects that reference that child. Notify any
observers.
"""
if '.' in name:
self.raise_exception(
'remove does not allow dotted path names like %s' %
name, NameError)
try:
obj = getattr(self, name)
except AttributeError:
return None
trait = self.get_trait(name)
if trait is None:
delattr(self, name)
else:
if trait.is_trait_type(Slot):
try:
setattr(self, name, None)
except TypeError as err:
self.raise_exception(str(err), RuntimeError)
else:
self.remove_trait(name)
return obj
@rbac(('owner', 'user'))
def configure(self):
pass
@rbac(('owner', 'user'))
def copy(self):
"""Returns a deep copy without deepcopying the parent.
"""
cp = copy.deepcopy(self)
cp._relink()
return cp
def _relink(self):
"""Restore parent links in copy."""
for name in self.list_containers():
container = getattr(self, name)
if container is not self._parent:
container._parent = self
container._relink()
@rbac(('owner', 'user'))
def cpath_updated(self):
"""Called after the hierarchy containing this Container has been
defined back to the root. This does not guarantee that all sibling
Containers have been defined. It also does not guarantee that this
component is fully configured to execute. Classes that override this
function must call their base class version.
This version calls cpath_updated() on all of its child Containers.
"""
self._fix_loggers(self, recurse=False)
self._call_cpath_updated = False
for cont in self.list_containers():
cont = getattr(self, cont)
if cont is not self._parent:
cont.cpath_updated()
def revert_to_defaults(self, recurse=True):
"""Sets the values of all of the inputs to their default values."""
self.reset_traits(iotype='in')
if recurse:
for cname in self.list_containers():
getattr(self, cname).revert_to_defaults(recurse)
def _items(self, visited, recurse=False, **metadata):
"""Return an iterator that returns a list of tuples of the form
(rel_pathname, obj) for each trait of this Container that matches
the given metadata. If recurse is True, also iterate through all
child Containers of each Container found.
"""
if id(self) not in visited:
visited.add(id(self))
match_dict = self._alltraits(**metadata)
if recurse:
for name in self.list_containers():
obj = getattr(self, name)
if name in match_dict and id(obj) not in visited:
yield(name, obj)
if obj:
for chname, child in obj._items(visited, recurse,
**metadata):
yield ('.'.join((name, chname)), child)
for name, trait in match_dict.items():
obj = getattr(self, name, Missing)
if obj is not Missing:
if is_instance(obj, (Container, VarTree)) and \
id(obj) not in visited:
if not recurse:
yield (name, obj)
elif trait.iotype is not None:
yield (name, obj)
def items(self, recurse=False, **metadata):
"""Return a list of tuples of the form (rel_pathname, obj) for each
trait of this Container that matches the given metadata. If recurse is
True, also iterate through all child Containers of each Container
found.
"""
return self._items(set([id(self.parent)]), recurse, **metadata)
def list_containers(self):
"""Return a list of names of child Containers."""
return [n for n, v in self.items() if is_instance(v, Container)]
def list_vars(self):
"""Return a list of Variables in this Container."""
return [k for k, v in self.items(iotype=not_none)]
@rbac(('owner', 'user'))
def _alltraits(self, traits=None, events=False, **metadata):
"""This returns a dict that contains traits (class and instance)
that match the given metadata. If the 'traits' argument is not
None, then it is assumed to be the dict of traits to be filtered.
"""
if traits is None:
if self._cached_traits_:
traits = self._cached_traits_
else:
traits = self.traits()
traits.update(self._instance_traits())
self._cached_traits_ = traits
result = {}
for name, trait in traits.items():
            if not events and trait.type == 'event':
continue
for meta_name, meta_eval in metadata.items():
if type(meta_eval) is FunctionType:
if not meta_eval(getattr(trait, meta_name)):
break
elif meta_eval != getattr(trait, meta_name):
break
else:
result[name] = trait
return result
@rbac(('owner', 'user'))
def contains(self, path):
"""Return True if the child specified by the given dotted path
name is contained in this Container.
"""
childname, _, restofpath = path.partition('.')
if restofpath:
obj = getattr(self, childname, Missing)
if obj is Missing:
return False
elif is_instance(obj, Container):
return obj.contains(restofpath)
else:
return hasattr(obj, restofpath)
return hasattr(self, path)
def _get_metadata_failed(self, traitpath, metaname):
self.raise_exception("Couldn't find metadata for trait %s" % traitpath,
AttributeError)
@rbac(('owner', 'user'))
def get_metadata(self, traitpath, metaname=None):
"""Retrieve the metadata associated with the trait found using
traitpath. If metaname is None, return the entire metadata dictionary
for the specified trait. Otherwise, just return the specified piece
of metadata. If the specified piece of metadata is not part of
the trait, None is returned.
"""
childname, _, restofpath = traitpath.partition('.')
if restofpath:
obj = getattr(self, childname, Missing)
if obj is Missing:
return self._get_metadata_failed(traitpath, metaname)
elif hasattr(obj, 'get_metadata'):
return obj.get_metadata(restofpath, metaname)
else:
t = self.get_trait(childname)
if t is not None and t.iotype and metaname == 'iotype':
return t.iotype
else:
self._get_metadata_failed(traitpath, metaname)
varname, _, _ = traitpath.partition('[')
try:
mdict = self._trait_metadata[varname]
except KeyError:
t = self.get_trait(varname)
if t:
t = t.trait_type
mdict = t._metadata.copy()
mdict.setdefault('vartypename', t.__class__.__name__)
else:
mdict = self._get_metadata_failed(traitpath, None)
self._trait_metadata[varname] = mdict
if metaname is None:
return mdict
else:
return mdict.get(metaname, None)
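    # Sketch of the contract above: get_metadata('x') returns the full
    # metadata dict for variable 'x', while get_metadata('x', 'iotype')
    # returns just that entry (e.g. 'in' for an input), or None if absent.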
@rbac(('owner', 'user'))
def set_metadata(self, traitpath, metaname, value):
"""Set the metadata associated with the trait found using traitpath."""
if metaname in ('iotype',):
self.raise_exception("Can't set %s on %s, read-only"
% (metaname, traitpath), TypeError)
self.get_metadata(traitpath)[metaname] = value
@rbac(('owner', 'user'), proxy_types=[FileRef])
def get(self, path):
"""Return the object specified by the given path, which may
contain '.' characters.
"""
expr = self._getcache.get(path)
if expr is not None:
return eval(expr, self.__dict__)
obj, restofpath = get_closest_proxy(self, path)
if restofpath and IContainerProxy.providedBy(obj):
return obj.get(restofpath)
expr = compile(path, path, mode='eval')
try:
val = eval(expr, self.__dict__)
except (AttributeError, NameError) as err:
if not restofpath:
return obj
self.raise_exception(str(err), AttributeError)
else:
self._getcache[path] = expr
return val
@rbac(('owner', 'user'), proxy_types=[FileRef])
def get_flattened_value(self, path):
"""Return the named value, which may include
an array index, as a flattened array of floats. If
the value is not flattenable into an array of floats,
raise a TypeError.
"""
return flattened_value(path, self.get(path))
@rbac(('owner', 'user'))
def set_flattened_value(self, path, value):
obj, restofpath = proxy_parent(self, path)
if restofpath and IContainerProxy.providedBy(obj):
obj.set_flattened_value(restofpath, value)
return
val = self.get(path)
if not isinstance(val, int_types) and isinstance(val, complex_or_real_types):
self.set(path, value[0])
return
elif hasattr(val, 'set_flattened_value'):
val.set_flattened_value(value)
return
elif isinstance(val, ndarray):
try:
newshape = value.shape
self.set(path, value.reshape(val.shape))
except Exception as err:
self.reraise_exception("ERROR setting value '%s.%s' shape: %s to shape %s"
% (self.get_pathname(), path, val.shape, newshape),
sys.exc_info())
return
val = self.get(path.split('[',1)[0])
idx = get_index(path)
if isinstance(val, int_types):
pass
elif hasattr(val, '__setitem__') and idx is not None:
if isinstance(val[idx], complex_or_real_types):
val[idx] = value[0]
else:
val[idx] = value
return
elif IVariableTree.providedBy(val):
raise NotImplementedError("no support for setting flattened values into vartrees")
elif hasattr(val, 'set_flattened_value'):
val.set_flattened_value(value)
return
self.raise_exception("Failed to set flattened value to variable %s" % path, TypeError)
def get_iotype(self, name):
return self.get_trait(name).iotype
@rbac(('owner', 'user'))
def set(self, path, value):
"""Set the value of the Variable specified by the given path, which
may contain '.' characters. The Variable will be set to the given
value, subject to validation and constraints.
"""
_local_setter_ = value
expr = self._setcache.get(path)
if expr is not None:
exec(expr)
return
obj, restofpath = proxy_parent(self, path)
if IOverrideSet.providedBy(obj) or (restofpath and IContainerProxy.providedBy(obj)):
obj.set(restofpath, value)
return
assign = "self.%s=_local_setter_" % path
expr = compile(assign, assign, mode='exec')
try:
exec(expr)
except Exception as err:
self.raise_exception(str(err), err.__class__)
else:
self._setcache[path] = expr
def save_to_egg(self, name, version, py_dir=None, src_dir=None,
src_files=None, child_objs=None, dst_dir=None,
observer=None, need_requirements=True):
"""Save state and other files to an egg. Typically used to copy all or
part of a simulation to another user or machine. By specifying child
containers in `child_objs`, it will be possible to create instances of
just those containers from the installed egg. Child container names
should be specified relative to this container.
name: string
Name for egg; must be an alphanumeric string.
version: string
Version for egg; must be an alphanumeric string.
py_dir: string
The (root) directory for local Python files. It defaults to
the current directory.
src_dir: string
The root of all (relative) `src_files`.
src_files: list
List of paths to files to be included in the egg.
child_objs: list
List of child objects for additional entry points.
dst_dir: string
The directory to write the egg in.
observer: callable
Will be called via an :class:`EggObserver`.
need_requirements: bool
Passed to :meth:`eggsaver.save_to_egg`.
After collecting entry point information, calls
:meth:`eggsaver.save_to_egg`.
Returns ``(egg_filename, required_distributions, orphan_modules)``.
"""
assert name and isinstance(name, basestring)
assert version and isinstance(version, basestring)
if not version.endswith('.'):
version += '.'
now = datetime.datetime.now()
tstamp = '%d.%02d.%02d.%02d.%02d' % \
(now.year, now.month, now.day, now.hour, now.minute)
version += tstamp
observer = eggobserver.EggObserver(observer, self._logger)
entry_pts = [(self, name, _get_entry_group(self))]
if child_objs is not None:
root_pathname = self.get_pathname()
root_start = root_pathname.rfind('.')
root_start = root_start+1 if root_start >= 0 else 0
root_pathname += '.'
for child in child_objs:
pathname = child.get_pathname()
if not pathname.startswith(root_pathname):
msg = '%s is not a child of %s' % (pathname, root_pathname)
observer.exception(msg)
self.raise_exception(msg, RuntimeError)
entry_pts.append((child, pathname[root_start:],
_get_entry_group(child)))
parent = self.parent
self.parent = None
try:
return eggsaver.save_to_egg(entry_pts, version, py_dir,
src_dir, src_files, dst_dir,
self._logger, observer.observer,
need_requirements)
except Exception:
self.reraise_exception(info=sys.exc_info())
finally:
self.parent = parent
def save(self, outstream, fmt=SAVE_CPICKLE, proto=-1):
"""Save the state of this object and its children to the given
output stream. Pure Python classes generally won't need to
override this because the base class version will suffice, but
Python extension classes will have to override. The format
can be supplied in case something other than cPickle is needed.
outstream: file or string
Stream to save to.
fmt: int
Format for saved data.
proto: int
Protocol used.
"""
parent = self.parent
self.parent = None
try:
eggsaver.save(self, outstream, fmt, proto, self._logger)
except Exception:
self.reraise_exception(info=sys.exc_info())
finally:
self.parent = parent
@staticmethod
def load_from_eggfile(filename, observer=None, log=None):
"""Extract files in egg to a subdirectory matching the saved object
name and then load object graph state.
filename: string
Name of egg file to be loaded.
observer: callable
Will be called via an :class:`EggObserver`.
log: :class:`logging.Logger`
Used for logging progress, default is root logger.
Returns the root object.
"""
entry_group = 'openmdao.top'
entry_name = 'top'
log = log or logger
return eggloader.load_from_eggfile(filename, entry_group, entry_name,
log, observer)
@staticmethod
def load_from_eggpkg(package, entry_name=None, instance_name=None,
observer=None):
"""Load object graph state by invoking the given package entry point.
If specified, the root object is renamed to `instance_name`.
package: string
Package name.
entry_name: string
Name of entry point.
instance_name: string
Name for root object.
observer: callable
Will be called via an :class:`EggObserver`.
Returns the root object.
"""
entry_group = 'openmdao.component'
if not entry_name:
entry_name = package
return eggloader.load_from_eggpkg(package, entry_group, entry_name,
instance_name, logger, observer)
@staticmethod
def load(instream, fmt=SAVE_CPICKLE, package=None, call_post_load=True,
name=None):
"""Load object(s) from the input stream. Pure Python classes generally
won't need to override this, but extensions will. The format can be
supplied in case something other than cPickle is needed.
instream: file or string
Stream to load from.
fmt: int
Format of state data.
package: string
Name of package to look for `instream`, if `instream` is a string
that is not an existing file.
call_post_load: bool
If True, call :meth:`post_load`.
name: string
Name for root object.
Returns the root object.
"""
top = eggloader.load(instream, fmt, package, logger)
top.cpath_updated()
if name:
top.name = name
if call_post_load:
top.parent = None
top.post_load()
return top
def post_load(self):
"""Perform any required operations after the model has been loaded.
At this point the local configuration of the Component is valid,
but 'remote' traits may need 'repairing' which can't be done until
the remote environment is ready. Components with remote environments
should override this and restore the remote environment first, then
call the superclass.
"""
self._repair_traits()
for name in self.list_containers():
getattr(self, name).post_load()
@rbac('owner')
def pre_delete(self):
"""Perform any required operations before the model is deleted."""
for name in self.list_containers():
getattr(self, name).pre_delete()
@rbac(('owner', 'user'), proxy_types=[CTrait])
def get_dyn_trait(self, pathname, iotype=None, trait=None):
"""Returns a trait if a trait with the given pathname exists, possibly
creating it "on-the-fly" and adding its Container. If an attribute exists
with the given pathname but no trait is found or can be created, or if
pathname references a trait in a parent scope, None will be returned.
If no attribute exists with the given pathname within this scope, an
AttributeError will be raised.
pathname: str
Pathname of the desired trait. May contain dots.
iotype: str (optional)
Expected iotype of the trait.
trait: TraitType (optional)
Trait to be used for validation.
"""
if pathname.startswith('parent.'):
return None
cname, _, restofpath = pathname.partition('.')
if restofpath:
child = getattr(self, cname)
if is_instance(child, Container):
return child.get_dyn_trait(restofpath, iotype, trait)
else:
if deep_hasattr(child, restofpath):
return None
else:
trait = self.get_trait(cname)
if trait is not None:
if iotype is not None:
if isinstance(trait.trait_type, Python):
obj = getattr(self, cname)
t_iotype = getattr(obj, 'iotype', None)
else:
t_iotype = self.get_iotype(cname)
if (iotype == 'in' and t_iotype not in ('in', 'state')) or \
(iotype == 'out' and t_iotype not in ('out', 'in', 'state', 'residual')):
self.raise_exception("'%s' must be an %s variable" %
(pathname, _iodict[iotype]),
RuntimeError)
return trait
elif trait is None and self.contains(cname):
return None
self.raise_exception("Cannot locate variable named '%s'" %
pathname, AttributeError)
@rbac(('owner', 'user'))
def get_trait_typenames(self, pathname, iotype=None):
"""Return names of the 'final' type (bypassing passthrough traits)
for `pathname` using :meth:`get_dyn_trait`. Used by dynamic wrappers
to determine the type of variable to wrap. The returned list is a
depth-first traversal of the class hierarchy.
pathname: str
Pathname of the desired trait. May contain dots.
iotype: str (optional)
Expected iotype of the trait.
"""
if not pathname:
obj = self
else:
trait = self.get_dyn_trait(pathname, iotype=iotype)
if trait is None:
return []
trait = trait.trait_type or trait.trait or trait
if trait.target:
trait = self.get_dyn_trait(trait.target)
try:
ttype = trait.trait_type
except AttributeError:
pass
else:
if ttype is not None:
trait = ttype
if isinstance(trait, Python):
obj = self.get(pathname)
else:
obj = trait
names = []
Container._bases(type(obj), names)
return names
@staticmethod
def _bases(cls, names):
""" Helper for :meth:`get_trait_typenames`. """
names.append('%s.%s' % (cls.__module__, cls.__name__))
for base in cls.__bases__:
Container._bases(base, names)
def raise_exception(self, msg, exception_class=Exception):
"""Raise an exception."""
coords = ''
obj = self
while obj is not None:
try:
coords = obj.get_itername()
except AttributeError:
try:
obj = obj.parent
except AttributeError:
break
else:
break
if coords:
full_msg = '%s (%s): %s' % (self.get_pathname(), coords, msg)
else:
full_msg = '%s: %s' % (self.get_pathname(), msg)
raise exception_class(full_msg)
def reraise_exception(self, msg='', info=None):
"""Re-raise an exception with updated message and original traceback."""
if info is None:
exc_type, exc_value, exc_traceback = sys.exc_info()
else:
exc_type, exc_value, exc_traceback = info
if msg:
msg = '%s: %s' % (msg, exc_value)
else:
msg = '%s' % exc_value
prefix = '%s: ' % self.get_pathname()
if not msg.startswith(prefix):
msg = prefix + msg
new_exc = exc_type(msg)
raise exc_type, new_exc, exc_traceback
def build_trait(self, ref_name, iotype=None, trait=None):
"""Build a trait referring to `ref_name`.
This is called by :meth:`create_io_traits`.
This must be overridden.
ref_name: str
    Name of the variable the built trait refers to.
iotype: str or dict
If `iotype` is a string it specifies the trait's iotype.
If it's a dictionary, it provides metadata.
trait: Trait
If `trait` is not None, use that trait rather than building one.
"""
self.raise_exception('build_trait()', NotImplementedError)
CLASSES_TO_PROXY.append(Container)
CLASSES_TO_PROXY.append(FileRef)
def _get_entry_group(obj):
"""Return entry point group for given object type."""
if _get_entry_group.group_map is None:
from openmdao.main.component import Component
from openmdao.main.driver import Driver
_get_entry_group.group_map = [
(Variable, 'openmdao.variable'),
(Driver, 'openmdao.driver'),
(ICaseIterator, 'openmdao.case_iterator'),
(IResourceAllocator, 'openmdao.resource_allocator'),
(Component, 'openmdao.component'),
(Container, 'openmdao.container'),
]
for cls, group in _get_entry_group.group_map:
if issubclass(cls, Interface):
if cls.providedBy(obj):
return group
else:
if isinstance(obj, cls):
return group
return None
_get_entry_group.group_map = None
def dump(cont, recurse=False, stream=None, **metadata):
"""Print all items having specified metadata and
their corresponding values to the given stream. If the stream
is not supplied, it defaults to *sys.stdout*.
"""
pprint.pprint(dict([(n, str(v))
for n, v in cont.items(recurse=recurse,
**metadata)]),
stream)
def find_name(parent, obj):
"""Find the given object in the specified parent and return its name
in the parent's `__dict__`. There could be multiple names bound to a
given object. Only the first name found is returned.
Return '' if not found.
"""
for name, val in parent.__dict__.items():
if val is obj:
return name
return ''
def get_default_name(obj, scope):
"""Return a unique name for the given object in the given scope."""
classname = obj.__class__.__name__.lower()
if scope is None:
sdict = {}
else:
sdict = scope.__dict__
ver = 1
while '%s%d' % (classname, ver) in sdict:
ver += 1
return '%s%d' % (classname, ver)
def find_trait_and_value(obj, pathname):
"""Return a tuple of the form (trait, value) for the given dotted
pathname. Raises an exception if the value indicated by the pathname
is not found in obj. If the value is found but has no trait, then
(None, value) is returned.
"""
names = pathname.split('.')
for name in names[:-1]:
obj = getattr(obj, name)
if is_instance(obj, Container):
objtrait = obj.get_trait(names[-1])
elif isinstance(obj, HasTraits):
objtrait = obj.trait(names[-1])
else:
objtrait = None
return (objtrait, getattr(obj, names[-1]))
def create_io_traits(cont, obj_info, iotype='in'):
"""Create io trait(s) specified by the contents of `obj_info`. Calls
:meth:`build_trait` on :class:`Container` `cont`, which can be overridden
by subclasses, to create each trait. One use of this is to provide traits
mapping to variables inside a :class:`Component` implemented as a Python
extension module.
`obj_info` is assumed to be either a string, a tuple, or a list
that contains strings and/or tuples. The information is used to specify
the "internal" and "external" names of the variable.
The "internal" name uses the naming scheme within the Container.
The "external" name is the one that will be used to access the trait
from outside the Container; it must not contain any '.' characters.
A string specifies the "internal" name for the variable. The "external"
name will be the "internal" name with any '.' characters replaced by '_'.
Tuples must contain the "internal" name followed by the "external" name
and may optionally contain an iotype and a validation trait. If the iotype
is a dictionary rather than a string, it is used for trait metadata (it may
include the ``iotype`` key but does not have to).
`iotype` is the default I/O type to be used.
The newly created traits are added to the specified Container.
For example, the following are valid calls::
create_io_traits(obj, 'foo')
create_io_traits(obj, 'inputs.foo')
create_io_traits(obj, ['foo','bar','baz'], iotype='out')
create_io_traits(obj, ('foo', 'foo_alias', 'in', Bool()), 'bar')
create_io_traits(obj, [('foo', 'fooa', {'low': -1, 'high': 10}),
('bar', 'barb', 'out'),
('baz', 'bazz')])
"""
if isinstance(obj_info, (basestring, tuple)):
it = [obj_info]
else:
it = obj_info
for entry in it:
iostat = iotype
trait = None
if isinstance(entry, basestring):
ref_name = entry
name = entry.replace('.', '_')
elif isinstance(entry, tuple):
ref_name = entry[0]
name = entry[1] or ref_name.replace('.', '_')
try:
iostat = entry[2]
trait = entry[3]
except IndexError:
pass
else:
cont.raise_exception('create_io_traits cannot add trait %s' % entry,
RuntimeError)
if '.' in name:
cont.raise_exception("Can't create '%s' because it's a"
" dotted pathname" % name, NameError)
newtrait = cont.get_trait(name)
if newtrait is not None:
cont.raise_exception(
"Can't create '%s' because it already exists." % name,
RuntimeError)
if not cont.contains(ref_name):
cont.raise_exception("Can't create trait for '%s' because it wasn't"
" found" % ref_name, AttributeError)
cont.add_trait(name, cont.build_trait(ref_name, iostat, trait))
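# A minimal usage sketch (hedged, illustrative only): 'ExtWrapper' is a
# hypothetical Container subclass that overrides build_trait() so that
# create_io_traits() can expose attributes of a wrapped extension object.
# The class and attribute names below are assumptions, not library API.
#
#     class ExtWrapper(Container):
#         def build_trait(self, ref_name, iotype=None, trait=None):
#             metadata = iotype if isinstance(iotype, dict) else {'iotype': iotype}
#             return trait if trait is not None else Python(**metadata)
#
#     w = ExtWrapper()
#     create_io_traits(w, [('inputs.x', 'x'), ('outputs.y', 'y', 'out')])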
|
data/Mouse-Imaging-Centre/pydpiper/pydpiper_testing/conftest.py
|
def pytest_funcarg__setupopts(request):
return OptsSetup(request)
def pytest_addoption(parser):
parser.addoption("--uri-file", dest="urifile",
type=str, default=None,
help="Location for uri file if NameServer is not used. If not specified, default is current working directory.")
parser.addoption("--use-ns", dest="use_ns",
action="store_true",
help="Use the Pyro NameServer to store object locations")
parser.addoption("--create-graph", dest="create_graph",
action="store_true",
help="Create a .dot file with graphical representation of pipeline relationships")
parser.addoption("--num-executors", dest="num_exec",
type=int, default=0,
help="Launch executors automatically without having to run pipeline_excutor.py independently.")
parser.addoption("--time", dest="time",
type=str, default="2:00:00:00",
help="Wall time to request for each executor in the format dd:hh:mm:ss")
parser.addoption("--proc", dest="proc",
type=int, default=8,
help="Number of processes per executor. Default is 8. Also sets max value for processor use per executor. Overridden if --num-executors not specified.")
parser.addoption("--mem", dest="mem",
type=float, default=16,
help="Total amount of requested memory. Default is 8G. Overridden if --num-executors not specified.")
parser.addoption("--ppn", dest="ppn",
type=int, default=8,
help="Number of processes per node. Default is 8. Used when --queue=pbs")
parser.addoption("--queue", dest="queue",
type=str, default=None,
help="Use specified queueing system to submit jobs. Default is None.")
parser.addoption("--restart", dest="restart",
action="store_true",
help="Restart pipeline using backup files.")
parser.addoption("--backup-dir", dest="backup_directory",
type=str, default=".pipeline-backup",
help="Directory where this pipeline backup should be stored.")
class OptsSetup():
def __init__(self, request):
self.config = request.config
def returnAllOptions(self):
return self.config.option
def getNumExecutors(self):
return self.config.option.num_exec
def getTime(self):
return self.config.option.time
def getProc(self):
return self.config.option.proc
def getMem(self):
return self.config.option.mem
def getQueue(self):
return self.config.option.queue
def getPpn(self):
return self.config.option.ppn
def getRestart(self):
return self.config.option.restart
def getBackupDir(self):
return self.config.option.backup_directory
def returnSampleArgs(self):
sampleArgArray = ["TestProgName.py", "img_A.mnc", "img_B.mnc"]
return sampleArgArray
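# A minimal usage sketch (hedged): how a test module would consume the
# 'setupopts' funcarg defined above. The assertions mirror the defaults
# declared in pytest_addoption(); the function name is an assumption and
# it is illustrative only (pytest does not collect tests from conftest.py).
def _example_test_default_options(setupopts):
    assert setupopts.getProc() == 8
    assert setupopts.getNumExecutors() == 0
    assert setupopts.getBackupDir() == ".pipeline-backup"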
|
data/Knio/pynmea2/pynmea2/nmea_file.py
|
try:
basestring = basestring
except NameError:
basestring = str
from .nmea import NMEASentence
class NMEAFile(object):
"""
Reads NMEA sentences from a file similar to a standard python file object.
"""
def __init__(self, f, *args, **kwargs):
super(NMEAFile, self).__init__()
if isinstance(f, basestring) or args or kwargs:
self._file = self.open(f, *args, **kwargs)
else:
self._file = f
self._context = None
def open(self, fp, mode='r'):
"""
Open the NMEAFile.
"""
self._file = open(fp, mode=mode)
return self._file
def close(self):
"""
Close the NMEAFile.
"""
self._file.close()
def __iter__(self):
"""
Iterate through the file yielding NMEASentences
:return:
"""
for line in self._file:
yield self.parse(line)
def __enter__(self):
if hasattr(self._file, '__enter__'):
self._context = self._file.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._context:
ctx = self._context
self._context = None
ctx.__exit__(exc_type, exc_val, exc_tb)
def next(self):
"""
Iterate through the file object returning NMEASentence objects
:return: NMEASentence
"""
data = self._file.readline()
return self.parse(data)
def parse(self, s):
return NMEASentence.parse(s)
def readline(self):
"""
Return the next NMEASentence in the file object
:return: NMEASentence
"""
data = self._file.readline()
s = self.parse(data)
return s
def read(self):
"""
Return a list of NMEASentence objects for each line in the file
:return: list of NMEASentence objects
"""
return [s for s in self]
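# A minimal usage sketch (hedged): 'sample.nmea' is an assumed text file with
# one NMEA sentence per line; NMEAFile wraps the open file object and yields
# parsed NMEASentence objects.
if __name__ == '__main__':
    with NMEAFile(open('sample.nmea')) as nmea_file:
        for sentence in nmea_file:
            print(sentence)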
|
data/YelpArchive/pushmanager/pushmanager/servlets/livepush.py
|
import time
import sqlalchemy as SA
import pushmanager.core.db as db
import pushmanager.core.util
from pushmanager.core.mail import MailQueue
from pushmanager.core.rb import RBQueue
from pushmanager.core.requesthandler import RequestHandler
class LivePushServlet(RequestHandler):
def _arg(self, key):
return pushmanager.core.util.get_str_arg(self.request, key, '')
def post(self):
if not self.current_user:
return self.send_error(403)
self.pushid = pushmanager.core.util.get_int_arg(self.request, 'id')
push_query = db.push_pushes.update().where(db.push_pushes.c.id == self.pushid).values({
'state': 'live',
'modified': time.time(),
})
request_query = db.push_requests.update().where(SA.and_(
db.push_requests.c.state == 'blessed',
SA.exists(
[1],
SA.and_(
db.push_pushcontents.c.push == self.pushid,
db.push_pushcontents.c.request == db.push_requests.c.id,
)
))).values({
'state': 'live',
'modified': time.time(),
})
reset_query = db.push_requests.update().where(
SA.exists(
[1],
SA.and_(
db.push_requests.c.state == 'pickme',
db.push_pushcontents.c.push == self.pushid,
db.push_pushcontents.c.request == db.push_requests.c.id,
)
)).values({
'state': 'requested',
})
delete_query = db.push_pushcontents.delete().where(
SA.exists([1], SA.and_(
db.push_pushcontents.c.push == self.pushid,
db.push_pushcontents.c.request == db.push_requests.c.id,
db.push_requests.c.state == 'requested',
)))
live_query = db.push_requests.select().where(
SA.and_(db.push_requests.c.state == 'live',
db.push_pushcontents.c.push == self.pushid,
db.push_pushcontents.c.request == db.push_requests.c.id)
)
db.execute_transaction_cb(
[push_query, request_query, reset_query, delete_query, live_query],
self.on_db_complete,
)
def on_db_complete(self, success, db_results):
self.check_db_results(success, db_results)
_, _, _, _, live_requests = db_results
for req in live_requests:
if req['reviewid']:
review_id = int(req['reviewid'])
RBQueue.enqueue_review(review_id)
if req['watchers']:
user_string = '%s (%s)' % (req['user'], req['watchers'])
users = [req['user']] + req['watchers'].split(',')
else:
user_string = req['user']
users = [req['user']]
msg = (
"""
<p>
%(pushmaster)s has certified the request for %(user)s as stable in production:
</p>
<p>
<strong>%(user)s - %(title)s</strong><br />
<em>%(repo)s/%(branch)s</em>
</p>
<p>
Regards,<br />
PushManager
</p>"""
) % pushmanager.core.util.EscapedDict({
'pushmaster': self.current_user,
'user': user_string,
'title': req['title'],
'repo': req['repo'],
'branch': req['branch'],
})
subject = "[push] %s - %s" % (user_string, req['title'])
MailQueue.enqueue_user_email(users, msg, subject)
|
data/QuantSoftware/QuantSoftwareToolkit/QSTK/qstktest/testDataAccess.py
|
'''
Created on Jun 1, 2010
@author: Shreyas Joshi
@summary: Just a quick way to test the DataAccess class... nothing more "I dare do all that may become a DataAccessTester. Who dares do more is none"
'''
import QSTK.qstkutil.DataAccess as da
import tables as pt
import numpy as np
from itertools import izip
import time
import dircache
def getStocks(listOfPaths):
listOfStocks=list()
print "Reading in all stock names..."
fileExtensionToRemove=".h5"
for path in listOfPaths:
stocksAtThisPath=list ()
stocksAtThisPath= dircache.listdir(str(path))
stocksAtThisPath = filter (lambda x:(str(x).find(str(fileExtensionToRemove)) > -1), stocksAtThisPath)
stocksAtThisPath = map(lambda x:(x.partition(str(fileExtensionToRemove))[0]),stocksAtThisPath)
for stock in stocksAtThisPath:
listOfStocks.append(stock)
return listOfStocks
if __name__ == '__main__':
print "Starting..."
dataItemsList=[]
dataItemsList.append('alphaValue')
listOfStocks= list()
listOfPaths=list()
listOfPaths.append("C:\\test\\temp\\")
listOfStocks= getStocks(listOfPaths)
alpha= da.DataAccess (True, listOfPaths, "/StrategyData", "StrategyData", True, listOfStocks)
tslist= list(alpha.getTimestampArray())
listOfTS= alpha.getTimestampArray()
for stock in ["AAPL"]:
alphaList= alpha.getStockDataList(stock, 'volume')
ctr=0
for val in alphaList:
print "stock: " + str(stock) + ", val: "+str(val) + ", ts: " + str(listOfTS[ctr])
ctr+=1
print "DONE!"
|
data/Suor/django-easymoney/tests/admin.py
|
from __future__ import absolute_import
from django.contrib import admin
from .models import Product, Option
admin.site.register(Product)
admin.site.register(Option)
|
data/JamesPHoughton/pysd/pysd/functions/__init__.py
|
from .functions import *
|
data/PythonJS/PythonJS/regtests/lang/if_not.py
|
"""if not"""
def main():
a = False
b = False
if not a:
b = True
TestError( b == True )
a = 0
b = False
if not a:
b = True
TestError( b == True )
a = 0.0
b = False
if not a:
b = True
TestError( b == True )
a = None
b = False
if not a:
b = True
TestError( b == True )
|
data/VisTrails/VisTrails/vistrails/db/versions/v0_9_0/persistence/__init__.py
|
from __future__ import division
from xml.auto_gen import XMLDAOListBase
from sql.auto_gen import SQLDAOListBase
from vistrails.core.system import get_elementtree_library
from vistrails.db import VistrailsDBException
from vistrails.db.versions.v0_9_0 import version as my_version
ElementTree = get_elementtree_library()
class DAOList(dict):
def __init__(self):
self['xml'] = XMLDAOListBase()
self['sql'] = SQLDAOListBase()
def parse_xml_file(self, filename):
return ElementTree.parse(filename)
def write_xml_file(self, filename, tree):
tree.write(filename)
def read_xml_object(self, vtType, node):
return self['xml'][vtType].fromXML(node)
def write_xml_object(self, obj, node=None):
res_node = self['xml'][obj.vtType].toXML(obj, node)
return res_node
def open_from_xml(self, filename, vtType, tree=None):
"""open_from_xml(filename) -> DBVistrail"""
if tree is None:
tree = self.parse_xml_file(filename)
vistrail = self.read_xml_object(vtType, tree.getroot())
return vistrail
def save_to_xml(self, obj, filename, tags):
"""save_to_xml(obj : object, filename: str, tags: dict) -> None
"""
root = self.write_xml_object(obj)
root.set('version', my_version)
for k, v in tags.iteritems():
root.set(k, v)
tree = ElementTree.ElementTree(root)
self.write_xml_file(filename, tree)
def open_from_db(self, db_connection, vtType, id, lock=False):
pass
def save_to_db(self, db_connection, obj, doCopy=False):
pass
def serialize(self, object):
root = self.write_xml_object(object)
return ElementTree.tostring(root)
def unserialize(self, str, obj_type):
try:
root = ElementTree.fromstring(str)
return self.read_xml_object(obj_type, root)
except SyntaxError, e:
msg = "Invalid VisTrails serialized object %s" % str
raise VistrailsDBException(msg)
return None
|
data/PyTables/PyTables/examples/undo-redo.py
|
"""Yet another couple of examples on do/undo feauture."""
import tables
def setUp(filename):
fileh = tables.open_file(filename, mode="w", title="Undo/Redo demo")
fileh.create_group("/", "agroup", "Group 1")
fileh.create_group("/agroup", "agroup2", "Group 2")
fileh.create_array("/", "anarray", [1, 2], "Array 1")
fileh.enable_undo()
return fileh
def tearDown(fileh):
fileh.disable_undo()
fileh.close()
def demo_6times3marks():
"""Checking with six ops and three marks."""
fileh = setUp("undo-redo-6times3marks.h5")
fileh.create_array('/', 'otherarray1', [3, 4], "Another array 1")
fileh.create_array('/', 'otherarray2', [4, 5], "Another array 2")
fileh.mark()
fileh.create_array('/', 'otherarray3', [5, 6], "Another array 3")
fileh.create_array('/', 'otherarray4', [6, 7], "Another array 4")
fileh.mark()
fileh.create_array('/', 'otherarray5', [7, 8], "Another array 5")
fileh.create_array('/', 'otherarray6', [8, 9], "Another array 6")
fileh.undo()
assert "/otherarray1" in fileh
assert "/otherarray2" in fileh
assert "/otherarray3" in fileh
assert "/otherarray4" in fileh
assert "/otherarray5" not in fileh
assert "/otherarray6" not in fileh
fileh.undo()
assert "/otherarray1" in fileh
assert "/otherarray2" in fileh
assert "/otherarray3" not in fileh
assert "/otherarray4" not in fileh
assert "/otherarray5" not in fileh
assert "/otherarray6" not in fileh
fileh.undo()
assert "/otherarray1" not in fileh
assert "/otherarray2" not in fileh
assert "/otherarray3" not in fileh
assert "/otherarray4" not in fileh
assert "/otherarray5" not in fileh
assert "/otherarray6" not in fileh
fileh.redo()
assert "/otherarray1" in fileh
assert "/otherarray2" in fileh
assert "/otherarray3" not in fileh
assert "/otherarray4" not in fileh
assert "/otherarray5" not in fileh
assert "/otherarray6" not in fileh
fileh.redo()
assert "/otherarray1" in fileh
assert "/otherarray2" in fileh
assert "/otherarray3" in fileh
assert "/otherarray4" in fileh
assert "/otherarray5" not in fileh
assert "/otherarray6" not in fileh
fileh.redo()
assert "/otherarray1" in fileh
assert "/otherarray2" in fileh
assert "/otherarray3" in fileh
assert "/otherarray4" in fileh
assert "/otherarray5" in fileh
assert "/otherarray6" in fileh
tearDown(fileh)
def demo_manyops():
"""Checking many operations together."""
fileh = setUp("undo-redo-manyops.h5")
fileh.create_array(fileh.root, 'anarray3', [3], "Array title 3")
fileh.create_group(fileh.root, 'agroup3', "Group title 3")
new_node = fileh.copy_node('/anarray3', '/agroup/agroup2')
new_node = fileh.copy_children('/agroup', '/agroup3', recursive=1)
fileh.rename_node('/anarray', 'anarray4')
new_node = fileh.copy_node('/anarray3', '/agroup')
fileh.remove_node('/anarray4')
fileh.undo()
assert '/anarray4' not in fileh
assert '/anarray3' not in fileh
assert '/agroup/agroup2/anarray3' not in fileh
assert '/agroup3' not in fileh
assert '/anarray4' not in fileh
assert '/anarray' in fileh
fileh.redo()
assert '/agroup/agroup2/anarray3' in fileh
assert '/agroup/anarray3' in fileh
assert '/agroup3/agroup2/anarray3' in fileh
assert '/agroup3/anarray3' not in fileh
assert fileh.root.agroup.anarray3 is new_node
assert '/anarray' not in fileh
assert '/anarray4' not in fileh
tearDown(fileh)
if __name__ == '__main__':
demo_6times3marks()
demo_manyops()
|
data/JetBrains/youtrack-rest-python-library/python/pyactiveresource/fake_connection.py
|
"""A fake HTTP connection for testing"""
__author__ = 'Mark Roach (mrroach@google.com)'
import urllib
from pyactiveresource import connection
from pyactiveresource import formats
class Error(Exception):
"""The base exception class for this module."""
class FakeConnection(object):
"""A fake HTTP connection for testing.
Inspired by ActiveResource's HttpMock class. This class is designed to
take a list of inputs and their corresponding outputs.
Inputs will be matched on the method, path, query, and data arguments.
Example:
>>> connection = FakeConnection()
>>> body = '<?xml ... />'
>>> connection.respond_to('get', '/foos/1.xml', None, None, body)
>>> class Foo(resource.Resource):
... _site = 'http://localhost/'
...
>>> Foo._connection_obj = connection
>>> Foo.find(1)
foo(1)
"""
def __init__(self, format=formats.XMLFormat):
"""Constructor for FakeConnection object."""
self.format = format
self._request_map = {}
self._debug_only = False
def _split_path(self, path):
"""Return the path and the query string as a dictionary."""
path_only, query_string = urllib.splitquery(path)
if query_string:
query_dict = dict([i.split('=') for i in query_string.split('&')])
else:
query_dict = {}
return path_only, query_dict
def debug_only(self, debug=True):
self._debug_only = debug
def respond_to(self, method, path, headers, data, body,
response_headers=None):
"""Set the response for a given request.
Args:
method: The http method (e.g. 'get', 'put' etc.).
path: The path being requested (e.g. '/collection/id.xml')
headers: Dictionary of headers passed along with the request.
data: The data being passed in as the request body.
body: The string that should be returned for a matching request.
response_headers: The headers returned for a matching request
Returns:
None
"""
path_only, query = self._split_path(path)
if response_headers is None:
response_headers = {}
self._request_map.setdefault(method, []).append(
((path_only, query, headers, data), (body, response_headers)))
def _lookup_response(self, method, path, headers, data):
path_only, query = self._split_path(path)
for key, value in self._request_map.get(method, {}):
if key == (path_only, query, headers, data):
response_body, response_headers = value
return connection.Response(200, response_body, response_headers)
raise Error('Invalid or unknown request: %s %s\n%s' %
(path, headers, data))
def get(self, path, headers=None):
"""Perform an HTTP get request."""
return self.format.decode(
self._lookup_response('get', path, headers, None).body)
def post(self, path, headers=None, data=None):
"""Perform an HTTP post request."""
return self._lookup_response('post', path, headers, data)
def put(self, path, headers=None, data=None):
"""Perform an HTTP post request."""
return self._lookup_response('put', path, headers, data)
def delete(self, path, headers=None):
"""Perform an HTTP delete request."""
return self._lookup_response('delete', path, headers, None)
|
data/ProgVal/Limnoria/plugins/Alias/config.py
|
import supybot.conf as conf
import supybot.registry as registry
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('Alias')
def configure(advanced):
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Alias', True)
Alias = conf.registerPlugin('Alias')
conf.registerGroup(Alias, 'aliases')
conf.registerGroup(Alias, 'escapedaliases')
conf.registerGlobalValue(Alias, 'validName',
registry.String(r'^[^\x00-\x20]+$', _("""Regex which alias names must match in order to be valid""")))
|
data/OpenMDAO/OpenMDAO-Framework/openmdao.devtools/src/openmdao/devtools/locdistbld.py
|
"""
This module builds a binary distribution from the specified source directory.
"""
import sys
import os
import shutil
import urllib2
import subprocess
import codecs
from optparse import OptionParser
def has_setuptools():
try:
import setuptools
except ImportError:
return False
return True
def make_new_setupfile(setupfile):
"""If setuptools is not installed, make a new setup file that will
bootstrap and use setuptools. The new file will be in the same location
as setupfile and will have '_new_' prepended to the name.
"""
setupfile = os.path.abspath(setupfile)
newsetupfile = os.path.join(os.path.dirname(setupfile),
'_new_'+os.path.basename(setupfile))
startdir = os.getcwd()
os.chdir(os.path.dirname(setupfile))
try:
print "setuptools is not installed."
if not os.path.isfile('ez_setup.py'):
print "Attempting to download ez_setup.py"
resp = urllib2.urlopen('http://peak.telecommunity.com/dist/ez_setup.py')
with open('ez_setup.py', 'wb') as easyf:
shutil.copyfileobj(resp.fp, easyf)
print 'successfully downloaded ez_setup.py'
print "Attempting to update %s to import from ez_setup" % setupfile
if not os.path.isfile(setupfile):
raise IOError("can't find setup file '%s'" % setupfile)
with open(setupfile, 'r') as setupf:
    setup_contents = setupf.read()
with open(newsetupfile, 'wb') as newf:
newf.write("from ez_setup import use_setuptools\n")
newf.write("use_setuptools(download_delay=0)\n\n")
newf.write(setup_contents)
finally:
os.chdir(startdir)
return newsetupfile
def build_dist(srcdir, destdir='.', build_type='bdist_egg'):
"""
Builds a distribution using the specified source directory and places
it in the specified destination directory.
srcdir: str
Source directory for the distribution to be built.
destdir: str
Directory where the built distribution file will be placed.
build_type: str
The type of distribution to be built. Default is 'bdist_egg'.
"""
startdir = os.getcwd()
destdir = os.path.abspath(os.path.expanduser(destdir)).replace('\\','/')
srcdir = os.path.abspath(os.path.expanduser(srcdir)).replace('\\','/')
setupname = os.path.join(srcdir, 'setup.py')
if not has_setuptools():
setupname = make_new_setupfile(setupname)
dirfiles = set(os.listdir(destdir))
print "building distribution in %s" % srcdir
cmd = [sys.executable.replace('\\','/'),
os.path.basename(setupname),
]
cmd.extend(build_type.split(' '))
cmd.extend(['-d', destdir])
os.chdir(srcdir)
out = codecs.open('_build_.out', 'wb',
encoding='ascii', errors='replace')
print 'running command: %s' % ' '.join(cmd)
try:
p = subprocess.Popen(' '.join(cmd),
stdout=out, stderr=subprocess.STDOUT,
shell=True)
p.wait()
finally:
out.close()
with open('_build_.out', 'r') as f:
print f.read()
os.chdir(startdir)
newfiles = set(os.listdir(destdir)) - dirfiles
if len(newfiles) != 1:
raise RuntimeError("expected one new file in in destination directory but found %s" %
list(newfiles))
if p.returncode != 0:
raise RuntimeError("problem building distribution in %s. (return code = %s)" %
(srcdir, p.returncode))
distfile = os.path.join(destdir, newfiles.pop())
print 'new distribution file is %s' % distfile
return distfile
if __name__ == '__main__':
parser = OptionParser(usage="%prog [OPTIONS]")
parser.add_option("-s","--src", action="store", type='string',
dest='srcdir',
help="name of directory where the distrib source files are located")
parser.add_option("-d","--dest", action="store", type='string',
dest='destdir', default='.',
help="name of directory where the build distrib will be placed")
parser.add_option("-b","--bldtype", action="store", type='string',
dest='buildtype', default='bdist_egg',
help="setup.py build command. Default is 'bdist_egg'")
(options, args) = parser.parse_args(sys.argv[1:])
retcode = -1
startdir = os.getcwd()
if not options.srcdir:
print "you must supply a source directory"
parser.print_help()
sys.exit(retcode)
srcdir = os.path.abspath(os.path.expanduser(options.srcdir))
destdir = os.path.abspath(os.path.expanduser(options.destdir))
if not os.path.exists(srcdir):
print "source directory %s not found" % srcdir
sys.exit(retcode)
try:
distfile = build_dist(srcdir, destdir, options.buildtype)
finally:
os.chdir(startdir)
|
data/ImageEngine/gaffer/python/GafferUITest/NumericSliderTest.py
|
import unittest
import GafferTest
import GafferUI
class NumericSliderTest( unittest.TestCase ) :
def testConstruction( self ) :
s = GafferUI.NumericSlider( value = 0, min = 0, max = 1 )
self.assertEqual( s.getPosition(), 0 )
self.assertEqual( s.getValue(), 0 )
self.assertEqual( s.getRange(), ( 0, 1, 0, 1 ) )
def testSetValue( self ) :
s = GafferUI.NumericSlider( value = 0, min = 0, max = 2 )
self.assertEqual( s.getPosition(), 0 )
self.assertEqual( s.getValue(), 0 )
s.setValue( 0.5 )
self.assertEqual( s.getPosition(), 0.25 )
self.assertEqual( s.getValue(), 0.5 )
def testSetRange( self ) :
s = GafferUI.NumericSlider( value = 1, min = 0, max = 2 )
self.assertEqual( s.getPosition(), 0.5 )
self.assertEqual( s.getValue(), 1 )
s.setRange( 0, 1 )
self.assertEqual( s.getPosition(), 1 )
self.assertEqual( s.getValue(), 1 )
def testSetZeroRange( self ) :
s = GafferUI.NumericSlider( value = 1, min = 1, max = 2 )
self.assertEqual( s.getPosition(), 0 )
self.assertEqual( s.getValue(), 1 )
s.setRange( 1, 1 )
self.assertEqual( s.getValue(), 1 )
def testSetPosition( self ) :
s = GafferUI.NumericSlider( value = 0, min = 0, max = 2 )
self.assertEqual( s.getPosition(), 0 )
self.assertEqual( s.getValue(), 0 )
s.setPosition( 0.5 )
self.assertEqual( s.getPosition(), 0.5 )
self.assertEqual( s.getValue(), 1 )
def testValuesOutsideRangeAreClamped( self ) :
s = GafferUI.NumericSlider( value = 0.1, min = 0, max = 2 )
cs = GafferTest.CapturingSlot( s.valueChangedSignal(), s.positionChangedSignal() )
s.setValue( 3 )
self.assertEqual( s.getValue(), 2 )
self.assertEqual( s.getPosition(), 1 )
self.assertEqual( len( cs ), 2 )
s.setValue( 3 )
self.assertEqual( s.getValue(), 2 )
self.assertEqual( s.getPosition(), 1 )
self.assertEqual( len( cs ), 2 )
def testPositionsOutsideRangeAreClamped( self ) :
s = GafferUI.NumericSlider( value = 0.1, min = 0, max = 2 )
cs = GafferTest.CapturingSlot( s.valueChangedSignal(), s.positionChangedSignal() )
s.setPosition( 2 )
self.assertEqual( s.getValue(), 2 )
self.assertEqual( s.getPosition(), 1 )
self.assertEqual( len( cs ), 2 )
s.setPosition( 2 )
self.assertEqual( s.getValue(), 2 )
self.assertEqual( s.getPosition(), 1 )
self.assertEqual( len( cs ), 2 )
def testHardRange( self ) :
s = GafferUI.NumericSlider( value = 0.1, min = 0, max = 2, hardMin=-1, hardMax=3 )
self.assertEqual( s.getRange(), ( 0, 2, -1, 3 ) )
cs = GafferTest.CapturingSlot( s.valueChangedSignal(), s.positionChangedSignal() )
s.setValue( 3 )
self.assertEqual( s.getValue(), 3 )
self.assertEqual( s.getPosition(), 1.5 )
self.assertEqual( len( cs ), 2 )
s.setValue( 3.5 )
self.assertEqual( s.getValue(), 3 )
self.assertEqual( s.getPosition(), 1.5 )
self.assertEqual( len( cs ), 2 )
s.setValue( -1 )
self.assertEqual( s.getValue(), -1 )
self.assertEqual( s.getPosition(), -0.5)
self.assertEqual( len( cs ), 4 )
s.setValue( -2 )
self.assertEqual( s.getValue(), -1 )
self.assertEqual( s.getPosition(), -0.5)
self.assertEqual( len( cs ), 4 )
def testSetRangeClampsValue( self ) :
s = GafferUI.NumericSlider( value = 0.5, min = 0, max = 2 )
self.assertEqual( s.getPosition(), 0.25 )
self.assertEqual( s.getValue(), 0.5 )
s.setRange( 1, 2 )
self.assertEqual( s.getPosition(), 0 )
self.assertEqual( s.getValue(), 1 )
def testMultipleValues( self ) :
self.assertRaises( Exception, GafferUI.NumericSlider, value = 0, values = [ 1, 2 ] )
s = GafferUI.NumericSlider( values = [ 1, 1.5 ], min = 0, max = 2 )
self.assertEqual( s.getValues(), [ 1, 1.5 ] )
self.assertEqual( s.getPositions(), [ 0.5, 0.75 ] )
self.assertRaises( ValueError, s.getValue )
if __name__ == "__main__":
unittest.main()
|
data/SEED-platform/seed/seed/celery.py
|
"""
:copyright (c) 2014 - 2016, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Department of Energy) and contributors. All rights reserved.
:author
"""
from __future__ import absolute_import
import os
import celery
import raven
from django.conf import settings
from raven.contrib.celery import register_signal, register_logger_signal
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.main')
class Celery(celery.Celery):
def on_configure(self):
try:
client = raven.Client(settings.RAVEN_CONFIG['dsn'])
register_logger_signal(client)
register_signal(client)
except AttributeError:
pass
app = Celery('seed')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.SEED_CORE_APPS)
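# A minimal usage sketch (hedged): an illustrative task registered on the
# 'app' instance above via the standard celery decorator API. The task name
# and body are assumptions, not part of SEED itself.
@app.task
def _example_add(x, y):
    return x + y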
if __name__ == '__main__':
app.start()
|
data/Theano/Theano/theano/sandbox/cuda/fftconv.py
|
from __future__ import absolute_import, print_function, division
import string
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda import cuda_available, GpuOp
from theano.ifelse import ifelse
from theano.misc.pycuda_init import pycuda_available
if cuda_available:
from theano.sandbox.cuda import (basic_ops, CudaNdarrayType,
CudaNdarray)
if pycuda_available:
import pycuda.gpuarray
try:
import scikits.cuda
from scikits.cuda import fft, cublas
scikits.cuda.misc.init()
scikits_cuda_available = True
except (ImportError, Exception):
scikits_cuda_available = False
class ScikitsCudaOp(GpuOp):
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
def __str__(self):
return self.__class__.__name__
def output_type(self, inp):
raise NotImplementedError
def make_node(self, inp):
inp = basic_ops.gpu_contiguous(
basic_ops.as_cuda_ndarray_variable(inp))
assert inp.dtype == "float32"
return theano.Apply(self, [inp], [self.output_type(inp)()])
def make_thunk(self, node, storage_map, _, _2):
if not scikits_cuda_available:
raise RuntimeError(
"scikits.cuda is needed for all GPU fft implementation,"
" including fftconv.")
class CuFFTOp(ScikitsCudaOp):
def output_type(self, inp):
return CudaNdarrayType(
broadcastable=[False] * (inp.type.ndim + 1))
def make_thunk(self, node, storage_map, _, _2):
super(CuFFTOp, self).make_thunk(node, storage_map, _, _2)
from theano.misc.pycuda_utils import to_gpuarray
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
plan_input_shape = [None]
plan = [None]
def thunk():
input_shape = inputs[0][0].shape
output_shape = list(input_shape)
output_shape[-1] = output_shape[-1] // 2 + 1
output_shape += [2]
output_shape = tuple(output_shape)
z = outputs[0]
if z[0] is None or z[0].shape != output_shape:
z[0] = CudaNdarray.zeros(output_shape)
input_pycuda = to_gpuarray(inputs[0][0])
output_pycuda = to_gpuarray(z[0])
if plan[0] is None or plan_input_shape[0] != input_shape:
plan_input_shape[0] = input_shape
plan[0] = fft.Plan(input_shape[1:], np.float32, np.complex64,
batch=input_shape[0])
fft.fft(input_pycuda, output_pycuda, plan[0])
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
class CuIFFTOp(ScikitsCudaOp):
def output_type(self, inp):
return CudaNdarrayType(
broadcastable=[False] * (inp.type.ndim - 1))
def make_thunk(self, node, storage_map, _, _2):
super(CuIFFTOp, self).make_thunk(node, storage_map, _, _2)
from theano.misc.pycuda_utils import to_gpuarray
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
plan_input_shape = [None]
plan = [None]
def thunk():
input_shape = inputs[0][0].shape
output_shape = list(input_shape[:-1])
output_shape[-1] = (output_shape[-1] - 1) * 2
output_shape = tuple(output_shape)
z = outputs[0]
if z[0] is None or z[0].shape != output_shape:
z[0] = CudaNdarray.zeros(output_shape)
input_pycuda = to_gpuarray(inputs[0][0])
output_pycuda = to_gpuarray(z[0])
if plan[0] is None or plan_input_shape[0] != input_shape:
plan_input_shape[0] = input_shape
plan[0] = fft.Plan(output_shape[1:], np.complex64, np.float32,
batch=output_shape[0])
fft.ifft(input_pycuda, output_pycuda, plan[0])
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
def to_complex_gpuarray(x, copyif=False):
"""
Adapted version of theano.misc.pycuda_utils.to_gpuarray that takes
an array with an extra trailing dimension of length 2 for
real/imaginary parts, and turns it into a complex64 PyCUDA
GPUArray.
"""
if not isinstance(x, CudaNdarray):
raise ValueError("We can transfer only CudaNdarray "
"to pycuda.gpuarray.GPUArray")
else:
assert x.shape[-1] == 2
assert x.dtype == 'float32'
size = 1
c_contiguous = True
for i in range(x.ndim - 1, -1, -1):
if x.shape[i] == 1:
continue
if x._strides[i] != size:
c_contiguous = False
break
size *= x.shape[i]
if not c_contiguous:
if copyif:
x = x.copy()
else:
raise ValueError("We were asked to not copy memory, "
"but the memory is not c contiguous.")
px = pycuda.gpuarray.GPUArray(x.shape[:-1], np.complex64, base=x,
gpudata=x.gpudata)
return px
def bptrs(a):
"""
Pointer array when input represents a batch of matrices.
Taken from scikits.cuda tests/test_cublas.py.
"""
return pycuda.gpuarray.arange(a.ptr, a.ptr + a.shape[0] * a.strides[0],
a.strides[0], dtype=cublas.ctypes.c_void_p)
def sc_complex_dot_batched(bx_gpu, by_gpu, bc_gpu, transa='N', transb='N',
handle=None):
"""
Uses cublasCgemmBatched to compute a bunch of complex dot products
in parallel.
"""
if handle is None:
handle = scikits.cuda.misc._global_cublas_handle
assert len(bx_gpu.shape) == 3
assert len(by_gpu.shape) == 3
assert len(bc_gpu.shape) == 3
assert bx_gpu.dtype == np.complex64
assert by_gpu.dtype == np.complex64
assert bc_gpu.dtype == np.complex64
bx_shape = bx_gpu.shape
by_shape = by_gpu.shape
alpha = np.complex64(1.0)
beta = np.complex64(0.0)
transa = string.lower(transa)
transb = string.lower(transb)
if transb in ['t', 'c']:
N, m, k = by_shape
elif transb in ['n']:
N, k, m = by_shape
else:
raise ValueError('invalid value for transb')
if transa in ['t', 'c']:
N2, l, n = bx_shape
elif transa in ['n']:
N2, n, l = bx_shape
else:
raise ValueError('invalid value for transa')
if l != k:
raise ValueError('objects are not aligned')
if N != N2:
raise ValueError('batch sizes are not the same')
if transb == 'n':
lda = max(1, m)
else:
lda = max(1, k)
if transa == 'n':
ldb = max(1, k)
else:
ldb = max(1, n)
ldc = max(1, m)
bx_arr = bptrs(bx_gpu)
by_arr = bptrs(by_gpu)
bc_arr = bptrs(bc_gpu)
cublas.cublasCgemmBatched(handle, transb, transa, m, n, k, alpha,
by_arr.gpudata, lda, bx_arr.gpudata, ldb,
beta, bc_arr.gpudata, ldc, N)
class BatchedComplexDotOp(ScikitsCudaOp):
"""
This version uses cublasCgemmBatched under the hood, instead of
doing multiple cublasCgemm calls.
"""
def make_node(self, inp1, inp2):
inp1 = basic_ops.gpu_contiguous(
basic_ops.as_cuda_ndarray_variable(inp1))
inp2 = basic_ops.gpu_contiguous(
basic_ops.as_cuda_ndarray_variable(inp2))
assert inp1.dtype == "float32"
assert inp2.dtype == "float32"
assert inp1.ndim == 4
assert inp2.ndim == 4
return theano.Apply(self, [inp1, inp2], [self.output_type(inp1)()])
def output_type(self, inp):
return CudaNdarrayType(broadcastable=[False] * inp.type.ndim)
def make_thunk(self, node, storage_map, _, _2):
super(BatchedComplexDotOp, self).make_thunk(node, storage_map, _, _2)
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
def thunk():
bx = inputs[0]
by = inputs[1]
input_shape_x = bx[0].shape
input_shape_y = by[0].shape
output_shape = (input_shape_x[0], input_shape_x[1],
input_shape_y[2], 2)
bz = outputs[0]
if bz[0] is None or bz[0].shape != output_shape:
bz[0] = CudaNdarray.zeros(output_shape)
input_bx_pycuda = to_complex_gpuarray(bx[0])
input_by_pycuda = to_complex_gpuarray(by[0])
output_b_pycuda = to_complex_gpuarray(bz[0])
sc_complex_dot_batched(input_bx_pycuda, input_by_pycuda,
output_b_pycuda)
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
cufft = CuFFTOp()
cuifft = CuIFFTOp()
batched_complex_dot = BatchedComplexDotOp()
def mult_and_reduce(input_fft_v, filters_fft_v, input_shape=None,
filter_shape=None):
"""
Parameters
----------
input_fft_v
It's (b, ic, i0, i1//2 + 1, 2).
filters_fft_v
It's (oc, ic, i0, i1//2 + 1, 2).
"""
if input_shape is None:
input_shape = input_fft_v.shape
if filter_shape is None:
filter_shape = filters_fft_v.shape
b, ic, i0, i1_f, _ = input_shape
oc = filter_shape[0]
input_r = input_fft_v.reshape((b, ic, i0 * i1_f, 2))
filters_r = filters_fft_v.reshape((oc, ic, i0 * i1_f, 2))
input_s = input_r.dimshuffle(2, 0, 1, 3)
filters_s = filters_r.dimshuffle(2, 1, 0, 3)
output_s = batched_complex_dot(input_s, filters_s)
output_r = output_s.dimshuffle(1, 2, 0, 3)
output = output_r.reshape((b, oc, i0, i1_f, 2))
return output
def conv2d_fft(input, filters, image_shape=None, filter_shape=None,
border_mode='valid', pad_last_dim=False):
"""
Perform a convolution through fft.
Only support input which will be even on the last dimension
(width). All other dimensions can be anything and the filters can
have an even or odd width.
If you must use input which has an odd width, you can either pad
it or use the `pad_last_dim` argument which will do it for you and
take care to strip the padding before returning. Don't use this
argument if you are not sure the input is odd since the padding is
unconditional and will make even input odd, thus leading to
problems.
On valid mode the filters must be smaller than the input.
Parameters
----------
input
(b, ic, i0, i1).
filters
(oc, ic, f0, f1).
border_mode : {'valid', 'full'}
pad_last_dim
Unconditionally pad the last dimension of the input
to turn it from odd to even. Will strip the
padding before returning the result.
"""
if image_shape is None:
image_shape = input.shape
if filter_shape is None:
filter_shape = filters.shape
b, ic, i0, i1 = image_shape
oc, ic_, f0, f1 = filter_shape
if border_mode == 'valid':
o0 = i0
if pad_last_dim:
o1 = i1 + 1
input_padded = T.zeros((b, ic, o0, o1), dtype='float32')
input_padded = T.set_subtensor(input_padded[:, :, :i0, :i1],
input)
else:
o1 = i1
input_padded = input
filters_padded = T.zeros((oc, ic, o0, o1), dtype='float32')
filters_padded = T.set_subtensor(filters_padded[:, :, :f0, :f1],
filters)
elif border_mode == 'full':
o0 = i0 + 2 * (f0 - 1)
o1 = i1 + 2 * (f1 - 1)
if pad_last_dim:
o1 = o1 + 1
filters_padded = T.zeros((oc, ic, o0, o1), dtype='float32')
filters_padded = T.set_subtensor(filters_padded[:, :, :f0, :f1],
filters)
input_padded = T.zeros((b, ic, o0, o1), dtype='float32')
input_padded = T.set_subtensor(input_padded[:, :, (f0 - 1):(f0 - 1 + i0), (f1 - 1):(f1 - 1 + i1)],
input)
else:
raise ValueError('invalid mode')
input_padded = T.opt.Assert("in conv2d_fft: width is not even")(
input_padded, T.eq(o1 % 2, 0))
input_flat = input_padded.reshape((b * ic, o0, o1))
filters_flat = filters_padded.reshape((oc * ic, o0, o1))
input_fft_flat = cufft(input_flat)
filters_fft_flat = cufft(filters_flat)
input_fft_v_shape = (b, ic, o0, o1 // 2 + 1, 2)
filters_fft_v_shape = (oc, ic, o0, o1 // 2 + 1, 2)
input_fft_v = input_fft_flat.reshape(input_fft_v_shape)
filters_fft_v = filters_fft_flat.reshape(filters_fft_v_shape)
output_fft_s = mult_and_reduce(input_fft_v, filters_fft_v,
input_shape=input_fft_v_shape,
filter_shape=filters_fft_v_shape)
output_fft_flat = output_fft_s.reshape((b * oc, o0, o1 // 2 + 1, 2))
output_flat = cuifft(output_fft_flat)
output_circ = output_flat.reshape((b, oc, o0, o1))
if border_mode == 'valid':
output = output_circ[:, :, (f0-1):(f0-1 + i0-f0+1), (f1-1):(f1-1 + i1-f1+1)]
elif border_mode == 'full':
output = output_circ[:, :, (f0-1):(f0-1 + i0+f0-1), (f1-1):(f1-1 + i1+f1-1)]
else:
raise ValueError('invalid mode')
output = (1.0 / T.cast(o0 * o1, 'float32')) * output
return basic_ops.as_cuda_ndarray_variable(output)
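def _example_conv2d_fft():
    """A minimal usage sketch (hedged): requires a CUDA-enabled Theano setup
    with scikits.cuda installed. The tensor names and shapes are illustrative
    assumptions, not part of this module's API."""
    x = T.tensor4('x')  # (batch, in_channels, height, width)
    w = T.tensor4('w')  # (out_channels, in_channels, f_height, f_width)
    # Compile a function computing the FFT-based valid convolution; an odd
    # last input dimension is handled by pad_last_dim.
    y = conv2d_fft(x, w, border_mode='valid', pad_last_dim=True)
    return theano.function([x, w], y)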
def conv3d_fft(input, filters, image_shape=None, filter_shape=None,
border_mode='valid', pad_last_dim=False):
"""
Perform a convolution through fft.
Only supports input whose shape is even on the last dimension.
All other dimensions can be anything and the filters can
have an even or odd last dimension.
The semantics associated with the last three dimensions
are not important as long as they are in the same order between
the inputs and the filters. For example, when the convolution
is done on a sequence of images, they could be either
(duration, height, width) or (height, width, duration).
If you must use input which has an odd width, you can either pad
it or use the `pad_last_dim` argument which will do it for you and
take care to strip the padding before returning. `pad_last_dim` checks
that the last dimension is odd before the actual padding.
On valid mode the filters must be smaller than the input.
Parameters
----------
input
(b, ic, i0, i1, i2).
filters
(oc, ic, f0, f1, i2).
border_mode : {'valid', 'full'}.
pad_last_dim
Unconditionally pad the last dimension of the input
to turn it from odd to even. Will strip the
padding before returning the result.
"""
if image_shape is None:
image_shape = input.shape
if filter_shape is None:
filter_shape = filters.shape
b, ic, i0, i1, i2 = image_shape
oc, ic_, f0, f1, f2 = filter_shape
is_odd = T.eq(T.mod(input.shape[4], 2), 1)
if border_mode == 'valid':
o0 = i0
o1 = i1
o2 = i2
input_padded = input
if pad_last_dim:
o2 = ifelse(is_odd, o2 + 1, o2)
input_padded = T.zeros((b, ic, o0, o1, o2), dtype='float32')
input_padded = T.set_subtensor(input_padded[:, :, :i0, :i1, :i2],
input)
filters_padded = T.zeros((oc, ic, o0, o1, o2), dtype='float32')
filters_padded = T.set_subtensor(filters_padded[:, :, :f0, :f1, :f2],
filters)
elif border_mode == 'full':
o0 = i0 + 2 * (f0 - 1)
o1 = i1 + 2 * (f1 - 1)
o2 = i2 + 2 * (f2 - 1)
if pad_last_dim:
o2 = ifelse(is_odd, o2 + 1, o2)
filters_padded = T.zeros((oc, ic, o0, o1, o2), dtype='float32')
filters_padded = T.set_subtensor(filters_padded[:, :, :f0, :f1, :f2],
filters)
input_padded = T.zeros((b, ic, o0, o1, o2), dtype='float32')
input_padded = T.set_subtensor(input_padded[:, :, (f0 - 1):(f0 - 1 + i0), (f1 - 1):(f1 - 1 + i1), (f2 - 1):(f2 - 1 + i2)],
input)
else:
raise ValueError('invalid mode')
input_flat = input_padded.reshape((b * ic, o0, o1, o2))
filters_flat = filters_padded.reshape((oc * ic, o0, o1, o2))
input_fft_flat = cufft(input_flat)
filters_fft_flat = cufft(filters_flat)
input_fft_v_shape = (b, ic, o0 * o1, o2 // 2 + 1, 2)
filters_fft_v_shape = (oc, ic, o0 * o1, o2 // 2 + 1, 2)
input_fft_v = input_fft_flat.reshape(input_fft_v_shape)
filters_fft_v = filters_fft_flat.reshape(filters_fft_v_shape)
output_fft_s = mult_and_reduce(input_fft_v, filters_fft_v,
input_shape=input_fft_v_shape,
filter_shape=filters_fft_v_shape)
output_fft_flat = output_fft_s.reshape((b * oc, o0, o1, o2 // 2 + 1, 2))
output_flat = cuifft(output_fft_flat)
output_circ = output_flat.reshape((b, oc, o0, o1, o2))
if border_mode == 'valid':
output = output_circ[:, :, (f0-1):(f0-1 + i0-f0+1), (f1-1):(f1-1 + i1-f1+1), (f2-1):(f2-1 + i2-f2+1)]
elif border_mode == 'full':
output = output_circ[:, :, (f0-1):(f0-1 + i0+f0-1), (f1-1):(f1-1 + i1+f1-1), (f2-1):(f2-1 + i2+f2-1)]
else:
raise ValueError('invalid mode')
output = (1.0 / T.cast(o0 * o1 * o2, 'float32')) * output
return basic_ops.as_cuda_ndarray_variable(output)
|
data/Stiivi/brewery/brewery/ds/elasticsearch_streams.py
|
import base
from brewery import dq
import time
from brewery.metadata import expand_record
try:
from pyes.es import ES
except ImportError:
from brewery.utils import MissingPackage
pyes = MissingPackage("pyes", "ElasticSearch streams", "http://www.elasticsearch.org/")
class ESDataSource(base.DataSource):
"""docstring for ClassName
"""
def __init__(self, document_type, database=None, host=None, port=None,
expand=False, **elasticsearch_args):
"""Creates a ElasticSearch data source stream.
:Attributes:
* document_type: elasticsearch document_type name
* database: database name
* host: elasticsearch database server host, default is ``localhost``
* port: elasticsearch port, default is ``9200``
* expand: expand dictionary values and treat children as top-level keys with dot '.'
separated key path to the child.
"""
self.document_type = document_type
self.database_name = database
self.host = host
self.port = port
self.elasticsearch_args = elasticsearch_args
self.expand = expand
self.connection = None
self._fields = None
def initialize(self):
"""Initialize ElasticSearch source stream:
"""
args = self.elasticsearch_args.copy()
server = ""
if self.host:
server = self.host
if self.port:
server += ":" + self.port
self.connection = ES(server, **args)
self.connection.default_indices = self.database_name
self.connection.default_types = self.document_type
def read_fields(self, limit=0):
keys = []
probes = {}
def probe_record(record, parent=None):
for key, value in record.items():
if parent:
full_key = parent + "." + key
else:
full_key = key
if self.expand and type(value) == dict:
probe_record(value, full_key)
continue
if not full_key in probes:
probe = dq.FieldTypeProbe(full_key)
probes[full_key] = probe
keys.append(full_key)
else:
probe = probes[full_key]
probe.probe(value)
for record in self.document_type.find(limit=limit):
probe_record(record)
fields = []
for key in keys:
probe = probes[key]
field = base.Field(probe.field)
storage_type = probe.unique_storage_type
if not storage_type:
field.storage_type = "unknown"
elif storage_type == "unicode":
field.storage_type = "string"
else:
field.storage_type = "unknown"
field.concrete_storage_type = storage_type
fields.append(field)
self.fields = list(fields)
return self.fields
def rows(self):
if not self.connection:
raise RuntimeError("Stream is not initialized")
from pyes.query import MatchAllQuery
fields = self.fields.names()
results = self.connection.search(MatchAllQuery(), search_type="scan", timeout="5m", size="200")
return ESRowIterator(results, fields)
def records(self):
if not self.connection:
raise RuntimeError("Stream is not initialized")
from pyes.query import MatchAllQuery
results = self.connection.search(MatchAllQuery(), search_type="scan", timeout="5m", size="200")
return ESRecordIterator(results, self.expand)
class ESRowIterator(object):
"""Wrapper for ElasticSearch ResultSet to be able to return rows() as tuples and records() as
dictionaries"""
def __init__(self, resultset, field_names):
self.resultset = resultset
self.field_names = field_names
def __getitem__(self, index):
record = self.resultset.__getitem__(index)
array = []
for field in self.field_names:
value = record
for key in field.split('.'):
if key in value:
value = value[key]
else:
break
array.append(value)
return tuple(array)
class ESRecordIterator(object):
"""Wrapper for ElasticSearch ResultSet to be able to return rows() as tuples and records() as
dictionaries"""
def __init__(self, resultset, expand=False):
self.resultset = resultset
self.expand = expand
def __getitem__(self, index):
def expand_record(record, parent=None):
ret = {}
for key, value in record.items():
if parent:
full_key = parent + "." + key
else:
full_key = key
if type(value) == dict:
expanded = expand_record(value, full_key)
ret.update(expanded)
else:
ret[full_key] = value
return ret
record = self.resultset.__getitem__(index)
if not self.expand:
return record
else:
return expand_record(record)
class ESDataTarget(base.DataTarget):
"""docstring for ClassName
"""
def __init__(self, document_type, database="test", host="127.0.0.1", port="9200",
truncate=False, expand=False, **elasticsearch_args):
"""Creates a ElasticSearch data target stream.
:Attributes:
* document_type: elasticsearch document_type name
* database: database name
* host: ElasticSearch database server host, default is ``127.0.0.1``
* port: ElasticSearch port, default is ``9200``
* expand: expand dictionary values and treat children as top-level keys with dot '.'
separated key path to the child..
* truncate: delete existing data in the document_type. Default: False
"""
self.document_type = document_type
self.database_name = database
self.host = host
self.port = port
self.elasticsearch_args = elasticsearch_args
self.expand = expand
self.truncate = truncate
self._fields = None
def initialize(self):
"""Initialize ElasticSearch source stream:
"""
        import time
        from pyes.es import ES
from pyes.exceptions import IndexAlreadyExistsException
args = self.elasticsearch_args.copy()
server = ""
if self.host:
server = self.host
if self.port:
server += ":" + self.port
create = args.pop("create", False)
replace = args.pop("replace", False)
self.connection = ES(server, **args)
self.connection.default_indices = self.database_name
self.connection.default_types = self.document_type
created = False
if create:
try:
self.connection.create_index(self.database_name)
self.connection.refresh(self.database_name)
created = True
except IndexAlreadyExistsException:
pass
if replace and not created:
self.connection.delete_index_if_exists(self.database_name)
time.sleep(2)
self.connection.create_index(self.database_name)
self.connection.refresh(self.database_name)
if self.truncate:
self.connection.delete_mapping(self.database_name, self.document_type)
self.connection.refresh(self.database_name)
def append(self, obj):
record = obj
if not isinstance(obj, dict):
record = dict(zip(self.fields.names(), obj))
if self.expand:
record = expand_record(record)
id = record.get('id') or record.get('_id')
self.connection.index(record, self.database_name, self.document_type, id, bulk=True)
def finalize(self):
self.connection.flush_bulk(forced=True)
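# Minimal usage sketch (illustrative, not part of the original module). It
# assumes pyes is installed and an ElasticSearch node is reachable at
# 127.0.0.1:9200; the records loaded here are hypothetical.
def _example_es_load():
    target = ESDataTarget("document", database="test", create=True)
    target.initialize()
    for record in [{"id": 1, "name": "a"}, {"id": 2, "name": "b"}]:
        target.append(record)
    target.finalize()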
|
data/StorjOld/upstream/tests/test_streamer.py
|
import os
import unittest
import mock
from upstream.shard import Shard
from upstream.streamer import Streamer
from upstream.exc import ConnectError, FileError, ShardError, ResponseError
class TestStreamer(unittest.TestCase):
def setUp(self):
self.stream = Streamer("http://node1.metadisk.org")
self.orig_hash = None
self.uploadfile = "tests/1k.testfile"
self.downloadfile = "download.testfile"
self.shard = Shard(
"2032e4fd19d4ab49a74ead0984a5f672c26e60da6e992eaf51f05dc874e94bd7",
"1b1f463cef1807a127af668f3a4fdcc7977c647bf2f357d9fa125f13548b1d14"
)
def tearDown(self):
del self.stream
del self.orig_hash
del self.uploadfile
try:
os.remove(self.downloadfile)
        except OSError:
pass
try:
os.remove(self.shard.filehash)
        except OSError:
pass
del self.downloadfile
del self.shard
def test_initialization(self):
self.assertEqual(self.stream.server, "http://node1.metadisk.org")
def test_check_connectivity(self):
def _failing_connection():
Streamer("http://does.not.exist")
self.assertRaises(ConnectError, _failing_connection)
@mock.patch('requests.post')
def test_upload_form_encoded(self, post):
pass
@mock.patch('requests.post')
def test_upload_sharded_encoded(self, post):
with self.assertRaises(NotImplementedError):
self.stream._upload_sharded_encoded('http://fake.url', 'fake.path')
@mock.patch('requests.post')
def test_filestream(self, post):
with self.assertRaises(NotImplementedError):
self.stream._filestream('fake.path')
def test_upload(self):
self.shard = self.stream.upload(self.uploadfile)
self.assertEqual(
self.shard.filehash,
"2032e4fd19d4ab49a74ead0984a5f672"
"c26e60da6e992eaf51f05dc874e94bd7")
self.assertEqual(
self.shard.decryptkey,
"1b1f463cef1807a127af668f3a4fdcc7"
"977c647bf2f357d9fa125f13548b1d14")
def _failing_upload():
self.stream.upload("not-a-real-file")
self.assertRaises(FileError, _failing_upload)
def test_upload_patched_404(self):
self.stream._upload_form_encoded = mock.MagicMock()
self.stream._upload_form_encoded.return_value()
self.stream._upload_form_encoded.return_value.status_code = 404
def _fourohfour():
self.stream.upload(self.uploadfile)
with self.assertRaises(ResponseError) as ex:
_fourohfour()
self.assertEqual(ex.message, "API call not found.")
def test_upload_patched_402(self):
self.stream._upload_form_encoded = mock.MagicMock()
self.stream._upload_form_encoded.return_value()
self.stream._upload_form_encoded.return_value.status_code = 402
def _fourohtwo():
self.stream.upload(self.uploadfile)
with self.assertRaises(ResponseError):
_fourohtwo()
def test_upload_patched_500(self):
self.stream._upload_form_encoded = mock.MagicMock()
self.stream._upload_form_encoded.return_value()
self.stream._upload_form_encoded.return_value.status_code = 500
def _fivehundred():
self.stream.upload(self.uploadfile)
with self.assertRaises(ResponseError) as ex:
_fivehundred()
self.assertEqual(ex.message, "Server error.")
def test_upload_patched_501(self):
self.stream._upload_form_encoded = mock.MagicMock()
self.stream._upload_form_encoded.return_value()
self.stream._upload_form_encoded.return_value.status_code = 501
self.stream._upload_form_encoded.return_value.reason =\
"Not Implemented"
def _fiveohone():
self.stream.upload(self.uploadfile)
with self.assertRaises(ResponseError) as ex:
_fiveohone()
        self.assertEqual(str(ex.exception),
                         "Received status code 501 Not Implemented")
def test_upload_check_path(self):
homedir = os.path.expanduser(self.uploadfile)
result = self.stream.check_path(self.uploadfile)
self.assertEqual(homedir, result)
with self.assertRaises(FileError) as ex:
self.stream.check_path('~/does-not-exist')
        self.assertEqual(
            str(ex.exception), '~/does-not-exist not a file or not found')
def test_download(self):
r = self.stream.download(self.shard)
self.assertTrue(r)
        self.assertEqual(r.status_code, 200)
self.assertEqual(len(r.content), 1024)
def test_download_exception(self):
self.shard.filehash = self.shard.filehash[:-5]
with self.assertRaises(ResponseError) as ex:
self.stream.download(self.shard)
self.assertEqual(ex.exception.response.status_code, 404)
def test_download_empty_shard(self):
shard = Shard()
with self.assertRaises(ShardError) as e:
self.stream.download(shard)
self.assertEqual(str(e.exception), "Shard missing filehash.")
|
data/Teradata/PyTd/teradata/pulljson.py
|
"""A pull parser for parsing JSON streams"""
import sys
import decimal
import re
import json
import logging
from . import util
if sys.version_info[0] == 2:
from StringIO import StringIO
else:
from io import StringIO
logger = logging.getLogger(__name__)
OBJECT = "OBJECT"
ARRAY = "ARRAY"
FIELD = "FIELD"
STRING = "STRING"
NUMBER = "NUMBER"
BOOLEAN = "BOOLEAN"
NULL = "null"
TRUE = "true"
FALSE = "false"
START_OBJECT = "START_OBJECT"
START_ARRAY = "START_ARRAY"
FIELD_NAME = "FIELD_NAME"
FIELD_VALUE = "FIELD_VALUE"
ARRAY_VALUE = "ARRAY_VALUE"
END_OBJECT = "END_OBJECT"
END_ARRAY = "END_ARRAY"
JSON_SYNTAX_ERROR = "JSON_SYNTAX_ERROR"
JSON_INCOMPLETE_ERROR = "JSON_INCOMPLETE_ERROR"
JSON_UNEXPECTED_ELEMENT_ERROR = "JSON_UNEXPECTED_ELEMENT_ERROR"
class JSONPullParser (object):
def __init__(self, stream, size=2 ** 16):
"""Initialize pull parser with a JSON stream."""
self.stream = stream
self.size = size
self.node = None
self.value = ""
self.valueType = None
self.tokens = []
self.tokenIndex = 0
self.halfToken = ""
        self.pattern = re.compile(r'([\[\]{}:\\",])')
def expectObject(self):
"""Raise JSONParseError if next event is not the start of an object."""
event = self.nextEvent()
if event.type != START_OBJECT:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR,
"Expected START_OBJECT but got: " + str(event))
def expectArray(self):
"""Raise JSONParseError if next event is not the start of an array."""
event = self.nextEvent()
if event.type != START_ARRAY:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR,
"Expected START_ARRAY but got: " + str(event))
return JSONArrayIterator(self)
def expectField(self, expectedName, expectedType=None, allowNull=False,
readAll=False):
"""Raise JSONParseError if next event is not the expected field with
expected type else return the field value. If the next field is
an OBJECT or ARRAY, only return whole object or array if
readAll=True."""
event = self.nextEvent()
if event.type != FIELD_NAME:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR,
"Expected FIELD_NAME but got: " + str(event))
if event.value != expectedName:
raise JSONParseError(JSON_UNEXPECTED_ELEMENT_ERROR, "Expected " +
expectedName + " field but got " +
event.value + " instead.")
return self._expectValue(FIELD_VALUE, expectedType, allowNull, readAll)
def expectArrayValue(self, expectedType=None, allowNull=False,
readAll=False):
"""Raise JSONParseError if next event is not an array element with
the expected type else return the field value. If the next value
is an OBJECT or ARRAY, only return whole object or array if
readAll=True."""
return self._expectValue(ARRAY_VALUE, expectedType, allowNull, readAll)
def _expectValue(self, eventType, expectedType, allowNull, readAll):
event = self.nextEvent()
if event.type == eventType:
if allowNull and event.valueType == NULL:
return None
elif expectedType is not None and event.valueType != expectedType:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR, "Expected " + expectedType +
" but got " + event.valueType + " instead.")
else:
return event.value
else:
if eventType == ARRAY_VALUE:
if event.node.parent is None or event.node.parent != ARRAY:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR,
"Expected array element but not in an array.")
if event.type == START_OBJECT:
if expectedType is not None and expectedType != OBJECT:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR, "Expected " +
expectedType + " but got an object instead.")
elif expectedType is None or readAll:
return self.readObject(event)
elif event.type == START_ARRAY:
if expectedType is not None and expectedType != ARRAY:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR, "Expected " +
expectedType + " but got array instead.")
if expectedType is None or readAll:
return self.readArray(event)
else:
return JSONArrayIterator(self)
else:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR,
"Unexpected event: " + str(event))
def readObject(self, event=None):
"""Read and return a JSON object."""
if event is None:
event = self.nextEvent()
popRequired = False
else:
popRequired = True
if event is None:
return None
if event.type != START_OBJECT:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR,
"Expected START_OBJECT but got " + event.type + " instead.")
obj = self._load(event)
if popRequired:
self._pop()
return obj
def readArray(self, event=None):
"""Read and return a JSON array."""
if event is None:
event = self.nextEvent()
popRequired = False
else:
popRequired = True
if event is None:
return None
if event.type != START_ARRAY:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR,
"Expected START_ARRAY but got " + event.type + " instead.")
arr = self._load(event)
if popRequired:
self._pop()
return arr
def nextEvent(self):
"""Iterator method, return next JSON event from the stream, raises
StopIteration() when complete."""
try:
return self.__next__()
except StopIteration:
return None
def next(self):
"""Iterator method, return next JSON event from the stream, raises
StopIteration() when complete."""
return self.__next__()
def __next__(self):
"""Iterator method, return next JSON event from the stream, raises
StopIteration() when complete."""
while True:
try:
token = self.tokens[self.tokenIndex]
self.tokenIndex += 1
if token == "" or token.isspace():
pass
elif token == '{':
return self._push(OBJECT)
elif token == '}':
if self.node.type == FIELD:
self.tokenIndex -= 1
event = self._pop()
if event is not None:
return event
elif self.node.type == OBJECT:
return self._pop()
else:
raise JSONParseError(
JSON_SYNTAX_ERROR,
"A closing curly brace ('}') is only expected "
"at the end of an object.")
elif token == '[':
if self.node is not None and self.node.type == OBJECT:
raise JSONParseError(
JSON_SYNTAX_ERROR, "An array in an object must "
"be preceded by a field name.")
return self._push(ARRAY)
elif token == ']':
if self.valueType is not None:
self.tokenIndex -= 1
event = self._arrayValue()
if event is not None:
return event
elif self.node.type == ARRAY:
if self.node.lastIndex == self.node.arrayLength:
self.node.arrayLength += 1
return self._pop()
else:
raise JSONParseError(
JSON_SYNTAX_ERROR, "A closing bracket (']') "
"is only expected at the end of an array.")
elif token == ':':
if self.node.type == OBJECT:
if self.value != "" and self.valueType == STRING:
event = self._push(FIELD, self.value)
self.value = ""
self.valueType = None
return event
else:
raise JSONParseError(
JSON_SYNTAX_ERROR,
"Name for name/value pairs cannot be empty.")
else:
raise JSONParseError(
JSON_SYNTAX_ERROR,
"A colon (':') can only following a field "
"name within an object.")
elif token == ',':
if self.node.type == ARRAY:
event = self._arrayValue()
self.node.arrayLength += 1
elif self.node.type == FIELD:
event = self._pop()
else:
raise JSONParseError(
JSON_SYNTAX_ERROR,
"A comma (',') is only expected between fields "
"in objects or elements of an array.")
if event is not None:
return event
else:
if self.valueType is not None:
raise JSONParseError(
JSON_SYNTAX_ERROR, "Extra name or value found "
"following: " + str(self.value))
elif self.node is None:
raise JSONParseError(
JSON_SYNTAX_ERROR,
"Input must start with either an "
"OBJECT ('{') or ARRAY ('['), got '" + token +
"' instead.")
elif token == '"':
escape = False
while True:
try:
token = self.tokens[self.tokenIndex]
self.tokenIndex += 1
if token == "":
pass
elif escape:
escape = False
self.value += token
elif token == '"':
break
elif token == '\\':
escape = True
else:
self.value += token
except IndexError:
data = self.stream.read(self.size)
if data == "":
raise JSONParseError(
JSON_INCOMPLETE_ERROR,
"Reached end of input before " +
"reaching end of string.")
self.tokens = self.pattern.split(data)
self.tokenIndex = 0
self.valueType = STRING
else:
token = token.strip()
if self.tokenIndex == len(self.tokens):
self.halfToken = token
raise IndexError
elif token[0].isdigit() or token[0] == '-':
self.value = decimal.Decimal(token)
self.valueType = NUMBER
elif token == "null":
self.value = None
self.valueType = NULL
elif token == "true":
self.value = True
self.valueType = BOOLEAN
elif token == "false":
self.value = False
self.valueType = BOOLEAN
else:
raise JSONParseError(
JSON_SYNTAX_ERROR,
"Unexpected token: " + token)
except IndexError:
data = self.stream.read(self.size)
if data == "":
if self.node is not None:
raise JSONParseError(
JSON_INCOMPLETE_ERROR, "Reached end of input "
"before reaching end of JSON structures.")
else:
raise StopIteration()
logger.trace(data)
self.tokens = self.pattern.split(data)
self.tokenIndex = 0
if self.halfToken is not None:
self.tokens[0] = self.halfToken + self.tokens[0]
self.halfToken = None
def _load(self, event):
if event.type == START_OBJECT:
value = start = "{"
end = "}"
elif event.type == START_ARRAY:
value = start = "["
end = "]"
else:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR,
"Unexpected event: " + event.type)
count = 1
tokens = self.tokens
tokenIndex = self.tokenIndex
inString = False
inEscape = False
try:
while True:
startIndex = tokenIndex
for token in tokens[startIndex:]:
tokenIndex += 1
if token == "":
pass
elif inString:
if inEscape:
inEscape = False
elif token == '"':
inString = False
elif token == '\\':
inEscape = True
elif token == '"':
inString = True
elif token == start:
count += 1
elif token == end:
count -= 1
if count == 0:
value += "".join(tokens[startIndex:tokenIndex])
raise StopIteration()
value += "".join(tokens[startIndex:])
data = self.stream.read(self.size)
if data == "":
raise JSONParseError(
JSON_INCOMPLETE_ERROR, "Reached end of input before "
"reaching end of JSON structures.")
tokens = self.pattern.split(data)
tokenIndex = 0
except StopIteration:
pass
self.tokens = tokens
self.tokenIndex = tokenIndex
try:
return json.loads(value, parse_float=decimal.Decimal,
parse_int=decimal.Decimal)
except ValueError as e:
raise JSONParseError(JSON_SYNTAX_ERROR, "".join(e.args))
def _push(self, nodeType, value=None):
if self.node is not None and self.node.type == FIELD:
self.node.valueType = nodeType
self.node = JSONNode(self.node, nodeType, value)
if self.node.parent is not None and self.node.parent.type == ARRAY:
self.node.arrayIndex = self.node.parent.arrayLength
if self.node.parent.lastIndex == self.node.parent.arrayLength:
raise JSONParseError(
JSON_SYNTAX_ERROR,
"Missing comma separating array elements.")
self.node.parent.lastIndex = self.node.parent.arrayLength
return self.node.startEvent()
def _pop(self):
node = self.node
self.node = self.node.parent
if node.valueType is None:
node.valueType = self.valueType
node.value = self.value
self.value = ""
self.valueType = None
if node.type == FIELD and node.valueType is None:
raise JSONParseError(
JSON_SYNTAX_ERROR, "Expected value for field: " + node.name)
return node.endEvent()
def _arrayValue(self):
endOfArray = self.node.lastIndex == self.node.arrayLength
if self.valueType is None and endOfArray:
pass
elif self.valueType is None:
raise JSONParseError(
JSON_SYNTAX_ERROR,
"Expected value for array element at index: " +
str(self.node.arrayLength))
else:
event = JSONEvent(
self.node, ARRAY_VALUE, self.value, self.valueType,
self.node.arrayLength)
self.node.lastIndex = self.node.arrayLength
self.value = ""
self.valueType = None
return event
def __iter__(self):
return self
class JSONParseError(Exception):
def __init__(self, code, msg):
self.args = (code, msg)
self.code = code
self.msg = msg
class JSONNode (object):
def __init__(self, parent, nodeType, name=None, value=None,
valueType=None):
self.parent = parent
self.type = nodeType
self.name = name
self.value = value
self.valueType = valueType
self.arrayIndex = None
self.arrayLength = None
self.lastIndex = -1
if nodeType == ARRAY:
self.arrayLength = 0
def startEvent(self):
if self.type == ARRAY:
return JSONEvent(self, START_ARRAY, arrayIndex=self.arrayIndex)
elif self.type == OBJECT:
return JSONEvent(self, START_OBJECT, arrayIndex=self.arrayIndex)
elif self.type == FIELD:
return JSONEvent(self, FIELD_NAME, self.name)
def endEvent(self):
if self.type == ARRAY:
return JSONEvent(self, END_ARRAY, arrayIndex=self.arrayIndex,
arrayLength=self.arrayLength)
elif self.type == OBJECT:
return JSONEvent(self, END_OBJECT, arrayIndex=self.arrayIndex)
elif self.type == FIELD and self.valueType not in (OBJECT, ARRAY):
return JSONEvent(self, FIELD_VALUE, self.value, self.valueType)
class JSONEvent (object):
def __init__(self, node, eventType, value=None, valueType=None,
arrayIndex=None, arrayLength=None):
self.node = node
self.type = eventType
self.value = value
self.valueType = valueType
self.arrayIndex = arrayIndex
self.arrayLength = arrayLength
def __repr__(self):
text = "JSONEvent (type=" + self.type
if self.value is not None:
text += ", value=" + str(self.value)
if self.valueType is not None:
text += ", valueType=" + str(self.valueType)
if self.arrayIndex is not None:
text += ", arrayIndex=" + str(self.arrayIndex)
if self.arrayLength is not None:
text += ", arrayLength=" + str(self.arrayLength)
text += ")"
return text
class JSONArrayIterator (object):
def __init__(self, parser):
self.parser = parser
self.complete = False
def __iter__(self):
return self
def __next__(self):
if self.complete:
raise StopIteration()
else:
event = self.parser.nextEvent()
if event.type == START_OBJECT:
return self.parser.readObject(event)
elif event.type == START_ARRAY:
return self.parser.readArray(event)
elif event.type == ARRAY_VALUE:
return event.value
elif event.type == END_ARRAY:
self.complete = True
raise StopIteration()
else:
raise JSONParseError(
JSON_UNEXPECTED_ELEMENT_ERROR,
"Unexpected event: " + str(event))
def next(self):
return self.__next__()
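# Minimal usage sketch (illustrative, not part of the original module): pull
# a single field out of an in-memory JSON document. Note that numbers are
# returned as decimal.Decimal values.
def _example_pull():
    parser = JSONPullParser(StringIO('{"rows": [1, 2, 3]}'))
    parser.expectObject()
    return parser.expectField("rows", expectedType=ARRAY, readAll=True)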
|
data/Theano/Theano/theano/tensor/nnet/tests/test_neighbours.py
|
from __future__ import absolute_import, print_function, division
from nose.plugins.skip import SkipTest
import numpy
import unittest
import theano
from theano import shared, function
import theano.tensor as T
from theano.tensor.nnet.neighbours import images2neibs, neibs2images, Images2Neibs
from theano.tests import unittest_tools
mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')
class T_Images2Neibs(unittest_tools.InferShapeTester):
mode = mode_without_gpu
op = Images2Neibs
dtypes = ['int64', 'float32', 'float64']
def test_neibs(self):
for shape, pshape in [((10, 7, 18, 18), (2, 2)),
((10, 7, 6, 18), (3, 2)),
((5, 7, 66, 66), (33, 33)),
((5, 7, 68, 66), (34, 33))
]:
for border in ['valid', 'ignore_borders']:
for dtype in self.dtypes:
images = shared(
numpy.arange(numpy.prod(shape), dtype=dtype
).reshape(shape))
neib_shape = T.as_tensor_variable(pshape)
f = function([],
images2neibs(images, neib_shape, mode=border),
mode=self.mode)
neibs = f()
g = function([],
neibs2images(neibs, neib_shape, images.shape),
mode=self.mode)
assert any([isinstance(node.op, self.op)
for node in f.maker.fgraph.toposort()])
assert numpy.allclose(images.get_value(borrow=True), g())
def test_neibs_manual(self):
shape = (2, 3, 4, 4)
for dtype in self.dtypes:
images = shared(
numpy.arange(numpy.prod(shape), dtype=dtype
).reshape(shape))
neib_shape = T.as_tensor_variable((2, 2))
for border in ['valid', 'ignore_borders']:
f = function([], images2neibs(images, neib_shape, mode=border),
mode=self.mode)
assert any([isinstance(node.op, self.op)
for node in f.maker.fgraph.toposort()])
neibs = f()
assert numpy.allclose(neibs,
[[ 0, 1, 4, 5],
[ 2, 3, 6, 7],
[ 8, 9, 12, 13],
[10, 11, 14, 15],
[16, 17, 20, 21],
[18, 19, 22, 23],
[24, 25, 28, 29],
[26, 27, 30, 31],
[32, 33, 36, 37],
[34, 35, 38, 39],
[40, 41, 44, 45],
[42, 43, 46, 47],
[48, 49, 52, 53],
[50, 51, 54, 55],
[56, 57, 60, 61],
[58, 59, 62, 63],
[64, 65, 68, 69],
[66, 67, 70, 71],
[72, 73, 76, 77],
[74, 75, 78, 79],
[80, 81, 84, 85],
[82, 83, 86, 87],
[88, 89, 92, 93],
[90, 91, 94, 95]])
g = function([], neibs2images(neibs, neib_shape, images.shape),
mode=self.mode)
assert numpy.allclose(images.get_value(borrow=True), g())
def test_neibs_manual_step(self):
shape = (2, 3, 5, 5)
for dtype in self.dtypes:
images = shared(numpy.asarray(numpy.arange(numpy.prod(
shape)).reshape(shape), dtype=dtype))
neib_shape = T.as_tensor_variable((3, 3))
neib_step = T.as_tensor_variable((2, 2))
for border in ['valid', 'ignore_borders']:
f = function([],
images2neibs(images, neib_shape, neib_step,
mode=border),
mode=self.mode)
neibs = f()
assert self.op in [type(node.op)
for node in f.maker.fgraph.toposort()]
assert numpy.allclose(neibs,
[[ 0, 1, 2, 5, 6, 7, 10, 11, 12],
[ 2, 3, 4, 7, 8, 9, 12, 13, 14],
[ 10, 11, 12, 15, 16, 17, 20, 21, 22],
[ 12, 13, 14, 17, 18, 19, 22, 23, 24],
[ 25, 26, 27, 30, 31, 32, 35, 36, 37],
[ 27, 28, 29, 32, 33, 34, 37, 38, 39],
[ 35, 36, 37, 40, 41, 42, 45, 46, 47],
[ 37, 38, 39, 42, 43, 44, 47, 48, 49],
[ 50, 51, 52, 55, 56, 57, 60, 61, 62],
[ 52, 53, 54, 57, 58, 59, 62, 63, 64],
[ 60, 61, 62, 65, 66, 67, 70, 71, 72],
[ 62, 63, 64, 67, 68, 69, 72, 73, 74],
[ 75, 76, 77, 80, 81, 82, 85, 86, 87],
[ 77, 78, 79, 82, 83, 84, 87, 88, 89],
[ 85, 86, 87, 90, 91, 92, 95, 96, 97],
[ 87, 88, 89, 92, 93, 94, 97, 98, 99],
[100, 101, 102, 105, 106, 107, 110, 111, 112],
[102, 103, 104, 107, 108, 109, 112, 113, 114],
[110, 111, 112, 115, 116, 117, 120, 121, 122],
[112, 113, 114, 117, 118, 119, 122, 123, 124],
[125, 126, 127, 130, 131, 132, 135, 136, 137],
[127, 128, 129, 132, 133, 134, 137, 138, 139],
[135, 136, 137, 140, 141, 142, 145, 146, 147],
[137, 138, 139, 142, 143, 144, 147, 148, 149]])
def test_neibs_bad_shape(self):
shape = (2, 3, 10, 10)
for dtype in self.dtypes:
images = shared(numpy.arange(
numpy.prod(shape), dtype=dtype
).reshape(shape))
for neib_shape in [(3, 2), (2, 3)]:
neib_shape = T.as_tensor_variable(neib_shape)
f = function([], images2neibs(images, neib_shape),
mode=self.mode)
self.assertRaises(TypeError, f)
f = function([],
images2neibs(images, neib_shape,
mode='ignore_borders'),
mode=self.mode)
assert self.op in [type(node.op)
for node in f.maker.fgraph.toposort()]
f()
def test_neibs_wrap_centered_step_manual(self):
expected1 = [[24, 20, 21, 4, 0, 1, 9, 5, 6],
[21, 22, 23, 1, 2, 3, 6, 7, 8],
[23, 24, 20, 3, 4, 0, 8, 9, 5],
[ 9, 5, 6, 14, 10, 11, 19, 15, 16],
[ 6, 7, 8, 11, 12, 13, 16, 17, 18],
[ 8, 9, 5, 13, 14, 10, 18, 19, 15],
[19, 15, 16, 24, 20, 21, 4, 0, 1],
[16, 17, 18, 21, 22, 23, 1, 2, 3],
[18, 19, 15, 23, 24, 20, 3, 4, 0]]
expected2 = [[ 24, 20, 21, 4, 0, 1, 9, 5, 6],
[ 22, 23, 24, 2, 3, 4, 7, 8, 9],
[ 14, 10, 11, 19, 15, 16, 24, 20, 21],
[ 12, 13, 14, 17, 18, 19, 22, 23, 24]]
expected3 = [[19, 15, 16, 24, 20, 21, 4, 0, 1, 9, 5, 6, 14, 10, 11],
[17, 18, 19, 22, 23, 24, 2, 3, 4, 7, 8, 9, 12, 13, 14],
[ 9, 5, 6, 14, 10, 11, 19, 15, 16, 24, 20, 21, 4, 0, 1],
[ 7, 8, 9, 12, 13, 14, 17, 18, 19, 22, 23, 24, 2, 3, 4]]
expected4 = [[23, 24, 20, 21, 22, 3, 4, 0, 1, 2, 8, 9, 5, 6, 7],
[21, 22, 23, 24, 20, 1, 2, 3, 4, 0, 6, 7, 8, 9, 5],
[13, 14, 10, 11, 12, 18, 19, 15, 16, 17, 23, 24, 20, 21, 22],
[11, 12, 13, 14, 10, 16, 17, 18, 19, 15, 21, 22, 23, 24, 20]]
expected5 = [[24, 20, 21, 4, 0, 1, 9, 5, 6],
[22, 23, 24, 2, 3, 4, 7, 8, 9],
[ 9, 5, 6, 14, 10, 11, 19, 15, 16],
[ 7, 8, 9, 12, 13, 14, 17, 18, 19],
[19, 15, 16, 24, 20, 21, 4, 0, 1],
[17, 18, 19, 22, 23, 24, 2, 3, 4]]
expected6 = [[24, 20, 21, 4, 0, 1, 9, 5, 6],
[21, 22, 23, 1, 2, 3, 6, 7, 8],
[23, 24, 20, 3, 4, 0, 8, 9, 5],
[14, 10, 11, 19, 15, 16, 24, 20, 21],
[11, 12, 13, 16, 17, 18, 21, 22, 23],
[13, 14, 10, 18, 19, 15, 23, 24, 20]]
for shp_idx, (shape, neib_shape, neib_step, expected) in enumerate([
[(7, 8, 5, 5), (3, 3), (2, 2), expected1],
[(7, 8, 5, 5), (3, 3), (3, 3), expected2],
[(7, 8, 5, 5), (5, 3), (3, 3), expected3],
[(7, 8, 5, 5), (3, 5), (3, 3), expected4],
[(80, 90, 5, 5), (3, 3), (2, 3), expected5],
[(1025, 9, 5, 5), (3, 3), (3, 2), expected6],
[(1, 1, 5, 1035), (3, 3), (3, 3), None],
[(1, 1, 1045, 5), (3, 3), (3, 3), None],
]):
for dtype in self.dtypes:
images = shared(numpy.asarray(numpy.arange(numpy.prod(
shape)).reshape(shape), dtype=dtype))
neib_shape = T.as_tensor_variable(neib_shape)
neib_step = T.as_tensor_variable(neib_step)
expected = numpy.asarray(expected)
f = function([], images2neibs(images, neib_shape, neib_step,
mode="wrap_centered"),
mode=self.mode)
neibs = f()
if expected.size > 1:
for i in range(shape[0] * shape[1]):
assert numpy.allclose(
neibs[i * expected.shape[0]:
(i + 1) * expected.shape[0], :],
expected + 25 * i), "wrap_centered"
assert self.op in [type(node.op)
for node in f.maker.fgraph.toposort()]
def test_neibs_bad_shape_wrap_centered(self):
shape = (2, 3, 10, 10)
for dtype in self.dtypes:
images = shared(numpy.arange(
numpy.prod(shape), dtype=dtype
).reshape(shape))
for neib_shape in [(3, 2), (2, 3)]:
neib_shape = T.as_tensor_variable(neib_shape)
f = function([], images2neibs(images, neib_shape,
mode="wrap_centered"),
mode=self.mode)
self.assertRaises(TypeError, f)
for shape in [(2, 3, 2, 3), (2, 3, 3, 2)]:
images = shared(numpy.arange(numpy.prod(shape)).reshape(shape))
neib_shape = T.as_tensor_variable((3, 3))
f = function([], images2neibs(images, neib_shape,
mode="wrap_centered"),
mode=self.mode)
self.assertRaises(TypeError, f)
shape = (2, 3, 3, 3)
images = shared(numpy.arange(numpy.prod(shape)).reshape(shape))
neib_shape = T.as_tensor_variable((3, 3))
f = function([],
images2neibs(images, neib_shape, mode="wrap_centered"),
mode=self.mode)
f()
def test_grad_wrap_centered(self):
shape = (2, 3, 6, 6)
images_val = numpy.random.rand(*shape).astype('float32')
def fn(images):
return images2neibs(images, (3, 3), mode='wrap_centered')
self.assertRaises(TypeError, unittest_tools.verify_grad,
fn, [images_val], mode=self.mode)
def test_grad_valid(self):
shape = (2, 3, 6, 6)
images_val = numpy.random.rand(*shape).astype('float32')
def fn(images):
return images2neibs(images, (2, 2))
unittest_tools.verify_grad(fn, [images_val], mode=self.mode,
eps=0.1)
def fn(images):
return images2neibs(images, (3, 2), (1, 2))
unittest_tools.verify_grad(fn, [images_val], mode=self.mode,
eps=0.1)
def fn(images):
return images2neibs(images, (1, 2), (5, 2))
unittest_tools.verify_grad(fn, [images_val], mode=self.mode,
eps=0.1)
def test_grad_ignore_border(self):
shape = (2, 3, 5, 5)
images_val = numpy.random.rand(*shape).astype('float32')
def fn(images):
return images2neibs(images, (2, 2),
mode='ignore_borders')
unittest_tools.verify_grad(fn, [images_val], mode=self.mode,
eps=0.1)
def test_neibs2images_grad(self):
neibs_val = numpy.random.rand(150, 4)
def fn(neibs):
return neibs2images(neibs, (2, 2), (2, 3, 10, 10))
unittest_tools.verify_grad(fn, [neibs_val], mode=self.mode,
eps=0.1)
def test_neibs_valid_with_inconsistent_borders(self):
shape = (2, 3, 5, 5)
images = T.dtensor4()
images_val = numpy.arange(numpy.prod(shape),
dtype='float32').reshape(shape)
def fn(images):
return T.sum(T.sqr(images2neibs(images, (2, 2), mode='valid')),
axis=[0, 1])
f = theano.function([images],
T.sqr(images2neibs(images, (2, 2), mode='valid')),
mode=self.mode)
self.assertRaises(TypeError, f, images_val)
def speed_neibs(self):
shape = (100, 40, 18, 18)
images = shared(numpy.arange(numpy.prod(shape),
dtype='float32').reshape(shape))
neib_shape = T.as_tensor_variable((3, 3))
f = function([], images2neibs(images, neib_shape),
mode=self.mode)
for i in range(1000):
f()
def speed_neibs_wrap_centered(self):
shape = (100, 40, 18, 18)
images = shared(numpy.arange(numpy.prod(shape),
dtype='float32').reshape(shape))
neib_shape = T.as_tensor_variable((3, 3))
f = function([],
images2neibs(images, neib_shape, mode="wrap_centered"),
mode=self.mode)
for i in range(1000):
f()
def test_infer_shape(self):
shape = (100, 40, 6, 3)
images = numpy.ones(shape).astype('float32')
x = T.ftensor4()
f = self._compile_and_check([x],
[images2neibs(
x, neib_shape=(2, 1),
mode='valid')],
[images],
Images2Neibs
)
f = self._compile_and_check([x],
[images2neibs(
x, neib_shape=(2, 3),
mode='valid')],
[images],
Images2Neibs
)
shape = (100, 40, 5, 4)
images = numpy.ones(shape).astype('float32')
x = T.ftensor4()
f = self._compile_and_check([x],
[images2neibs(
x, neib_shape=(2, 1),
mode='ignore_borders')],
[images],
Images2Neibs
)
shape = (100, 40, 5, 3)
images = numpy.ones(shape).astype('float32')
x = T.ftensor4()
f = self._compile_and_check([x],
[images2neibs(
x, neib_shape=(2, 3),
mode='ignore_borders')],
[images],
Images2Neibs
)
shape = (100, 40, 6, 7)
images = numpy.ones(shape).astype('float32')
x = T.ftensor4()
f = self._compile_and_check([x],
[images2neibs(
x, neib_shape=(2, 2),
mode='ignore_borders')],
[images],
Images2Neibs
)
shape = (100, 40, 5, 10)
images = numpy.ones(shape).astype('float32')
x = T.ftensor4()
f = self._compile_and_check([x],
[images2neibs(
x, neib_shape=(3, 3),
mode='wrap_centered')],
[images],
Images2Neibs
)
if __name__ == '__main__':
unittest.main()
|
data/Lukasa/hyper/test/server.py
|
"""
test/server
~~~~~~~~~~~
This module defines some testing infrastructure that is very useful for
integration-type testing of hyper. It works by spinning up background threads
that run test-defined logic while listening on a background socket.
This very-clever idea and most of its implementation are ripped off from
Andrey Petrov's excellent urllib3 project. I owe him a substantial debt in
ingenuity and about a million beers. The license is available in NOTICES.
"""
import threading
import socket
import sys
from hyper import HTTP20Connection
from hyper.compat import ssl
from hyper.http11.connection import HTTP11Connection
from hpack.hpack import Encoder
from hpack.huffman import HuffmanEncoder
from hpack.huffman_constants import (
REQUEST_CODES, REQUEST_CODES_LENGTH
)
from hyper.tls import NPN_PROTOCOL
class SocketServerThread(threading.Thread):
"""
    This class was stolen wholesale from shazow/urllib3 under license. See
NOTICES.
:param socket_handler: Callable which receives a socket argument for one
request.
:param ready_event: Event which gets set when the socket handler is
ready to receive requests.
"""
def __init__(self,
socket_handler,
host='localhost',
ready_event=None,
h2=True,
secure=True):
threading.Thread.__init__(self)
self.socket_handler = socket_handler
self.host = host
self.secure = secure
self.ready_event = ready_event
self.daemon = True
if self.secure:
self.cxt = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
if ssl.HAS_NPN and h2:
self.cxt.set_npn_protocols([NPN_PROTOCOL])
self.cxt.load_cert_chain(certfile='test/certs/server.crt',
keyfile='test/certs/server.key')
def _start_server(self):
sock = socket.socket()
if sys.platform != 'win32':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if self.secure:
sock = self.cxt.wrap_socket(sock, server_side=True)
sock.bind((self.host, 0))
self.port = sock.getsockname()[1]
sock.listen(1)
if self.ready_event:
self.ready_event.set()
self.socket_handler(sock)
sock.close()
def _wrap_socket(self, sock):
raise NotImplementedError()
def run(self):
self.server = self._start_server()
class SocketLevelTest(object):
"""
A test-class that defines a few helper methods for running socket-level
tests.
"""
def set_up(self, secure=True, proxy=False):
self.host = None
self.port = None
self.secure = secure if not proxy else False
self.proxy = proxy
self.server_thread = None
def _start_server(self, socket_handler):
"""
Starts a background thread that runs the given socket handler.
"""
ready_event = threading.Event()
self.server_thread = SocketServerThread(
socket_handler=socket_handler,
ready_event=ready_event,
h2=self.h2,
secure=self.secure
)
self.server_thread.start()
ready_event.wait()
self.host = self.server_thread.host
self.port = self.server_thread.port
self.secure = self.server_thread.secure
def get_connection(self):
if self.h2:
if not self.proxy:
return HTTP20Connection(self.host, self.port, self.secure)
else:
return HTTP20Connection('http2bin.org', secure=self.secure,
proxy_host=self.host,
proxy_port=self.port)
else:
if not self.proxy:
return HTTP11Connection(self.host, self.port, self.secure)
else:
return HTTP11Connection('httpbin.org', secure=self.secure,
proxy_host=self.host,
proxy_port=self.port)
def get_encoder(self):
"""
Returns a HPACK encoder set up for responses.
"""
e = Encoder()
e.huffman_coder = HuffmanEncoder(REQUEST_CODES, REQUEST_CODES_LENGTH)
return e
def tear_down(self):
"""
Tears down the testing thread.
"""
self.server_thread.join(0.1)
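# Illustrative usage sketch (not part of the original module): a test class
# mixes SocketLevelTest in, supplies a socket handler, and then connects to
# the background server. The handler and test body here are hypothetical.
#
#   class TestEcho(SocketLevelTest):
#       h2 = False
#       def test_round_trip(self):
#           self.set_up(secure=False)
#           def handler(listener):
#               sock = listener.accept()[0]
#               sock.recv(65535)
#               sock.close()
#           self._start_server(handler)
#           conn = self.get_connection()
#           ...
#           self.tear_down()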
|
data/adaptivdesign/django-sellmo/sellmo/contrib/reporting/generators/weasyprint/generator.py
|
from weasyprint import HTML
from sellmo.contrib.reporting.generators import AbstractReportGenerator
import logging
logger = logging.getLogger('weasyprint')
logger.handlers = []
class WeasyPrintReportGenerator(AbstractReportGenerator):
@property
def input_formats(self):
return ['html']
@property
def output_formats(self):
return ['pdf']
def get_data(self, writer, frmt):
html = super(WeasyPrintReportGenerator, self).get_data(writer, frmt)
return HTML(string=html).write_pdf()
def get_extension(self, frmt):
return '.' + frmt
def get_mimetype(self, frmt):
if frmt == 'pdf':
return 'application/pdf'
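# Illustrative note (not part of the original module): the conversion above
# reduces to a single weasyprint call, e.g.
#   pdf_bytes = HTML(string="<h1>Report</h1>").write_pdf()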
|
data/SickRage/SickRage/lib/sqlalchemy/testing/pickleable.py
|
"""Classes used in pickling tests, need to be at the module level for
unpickling.
"""
from . import fixtures
class User(fixtures.ComparableEntity):
pass
class Order(fixtures.ComparableEntity):
pass
class Dingaling(fixtures.ComparableEntity):
pass
class EmailUser(User):
pass
class Address(fixtures.ComparableEntity):
pass
class Child1(fixtures.ComparableEntity):
pass
class Child2(fixtures.ComparableEntity):
pass
class Parent(fixtures.ComparableEntity):
pass
class Screen(object):
def __init__(self, obj, parent=None):
self.obj = obj
self.parent = parent
class Foo(object):
def __init__(self, moredata):
self.data = 'im data'
self.stuff = 'im stuff'
self.moredata = moredata
__hash__ = object.__hash__
def __eq__(self, other):
return other.data == self.data and \
other.stuff == self.stuff and \
other.moredata == self.moredata
class Bar(object):
def __init__(self, x, y):
self.x = x
self.y = y
__hash__ = object.__hash__
def __eq__(self, other):
return other.__class__ is self.__class__ and \
other.x == self.x and \
other.y == self.y
def __str__(self):
return "Bar(%d, %d)" % (self.x, self.y)
class OldSchool:
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, other):
return other.__class__ is self.__class__ and \
other.x == self.x and \
other.y == self.y
class OldSchoolWithoutCompare:
def __init__(self, x, y):
self.x = x
self.y = y
class BarWithoutCompare(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return "Bar(%d, %d)" % (self.x, self.y)
class NotComparable(object):
def __init__(self, data):
self.data = data
def __hash__(self):
return id(self)
def __eq__(self, other):
return NotImplemented
def __ne__(self, other):
return NotImplemented
class BrokenComparable(object):
def __init__(self, data):
self.data = data
def __hash__(self):
return id(self)
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
raise NotImplementedError
|
data/YelpArchive/pushmanager/pushmanager/tests/test_core_requesthandler.py
|
import mock
import testify as T
import tornado.httpserver
from pushmanager.core.requesthandler import RequestHandler
from pushmanager.core.requesthandler import get_base_url
from pushmanager.core.settings import Settings
from pushmanager.testing.mocksettings import MockedSettings
class RequestHandlerTest(T.TestCase):
def test_get_api_page(self):
MockedSettings['api_app'] = {'port': 8043, 'servername': 'push.test.com'}
with mock.patch.dict(Settings, MockedSettings):
T.assert_equal(
RequestHandler.get_api_page("pushes"),
"http://push.test.com:8043/api/pushes"
)
def test_get_base_url_empty_headers(self):
MockedSettings['main_app'] = {'port': 1111, 'servername': 'example.com'}
request = tornado.httpserver.HTTPRequest('GET', '')
request.protocol = 'https'
with mock.patch.dict(Settings, MockedSettings):
T.assert_equal(
get_base_url(request),
'https://example.com:1111'
)
Settings['main_app']['port'] = 443
T.assert_equal(
get_base_url(request),
'https://example.com'
)
def test_get_base_url_proto_header(self):
MockedSettings['main_app'] = {'port': 1111, 'servername': 'example.com'}
request = tornado.httpserver.HTTPRequest('GET', '')
request.protocol = 'https'
request.headers['X-Forwarded-Proto'] = 'http'
with mock.patch.dict(Settings, MockedSettings):
T.assert_equal(
get_base_url(request),
'http://example.com:1111'
)
Settings['main_app']['port'] = 80
T.assert_equal(
get_base_url(request),
'http://example.com'
)
def test_get_base_url_port_header(self):
MockedSettings['main_app'] = {'port': 1111, 'servername': 'example.com'}
request = tornado.httpserver.HTTPRequest('GET', '')
request.protocol = 'https'
request.headers['X-Forwarded-Port'] = '4321'
with mock.patch.dict(Settings, MockedSettings):
T.assert_equal(
get_base_url(request),
'https://example.com:4321'
)
request.headers['X-Forwarded-Port'] = 443
T.assert_equal(
get_base_url(request),
'https://example.com'
)
def test_RequestHandler_get_base_url(self):
MockedSettings['main_app'] = {'port': 1111, 'servername': 'example.com'}
request = tornado.httpserver.HTTPRequest('GET', '')
request.protocol = 'https'
class FakeRequest(object):
def __init__(self):
self.request = request
with mock.patch.dict(Settings, MockedSettings):
fake_requesthandler = FakeRequest()
T.assert_equal(
RequestHandler.get_base_url.__func__(fake_requesthandler),
'https://example.com:1111'
)
|
data/ImageEngine/gaffer/python/GafferSceneUI/SceneViewToolbar.py
|
import functools
import IECore
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
Gaffer.Metadata.registerNode(
GafferSceneUI.SceneView,
plugs = {
"shadingMode" : [
"toolbarLayout:index", 2,
"toolbarLayout:divider", True,
"plugValueWidget:type", "GafferSceneUI.SceneViewToolbar._ShadingModePlugValueWidget",
],
"minimumExpansionDepth" : [
"plugValueWidget:type", "GafferSceneUI.SceneViewToolbar._ExpansionPlugValueWidget",
"toolbarLayout:divider", True,
],
"lookThrough" : [
"plugValueWidget:type", "GafferSceneUI.SceneViewToolbar._LookThroughPlugValueWidget",
"toolbarLayout:divider", True,
"toolbarLayout:label", "",
],
"lookThrough.enabled" : [
"description",
"""
When enabled, locks the view to look through a specific camera in the scene.
By default, the current render camera is used, but this can be changed using the lookThrough.camera
setting.
""",
],
"lookThrough.camera" : [
"description",
"""
Specifies the camera to look through when lookThrough.enabled is on. The default value
means that the current render camera will be used - the paths to other cameras may be specified
to choose another camera."
""",
],
"grid" : [
"plugValueWidget:type", "GafferSceneUI.SceneViewToolbar._GridPlugValueWidget",
],
"gnomon" : [
"plugValueWidget:type", "",
],
}
)
class _ShadingModePlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plug, parenting = None ) :
menuButton = GafferUI.MenuButton(
image = "shading.png",
menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ) ),
hasFrame = False,
)
GafferUI.PlugValueWidget.__init__( self, menuButton, plug, parenting = parenting )
def hasLabel( self ) :
return True
def _updateFromPlug( self ) :
pass
def __menuDefinition( self ) :
m = IECore.MenuDefinition()
currentName = self.getPlug().getValue()
for name in [ "" ] + GafferSceneUI.SceneView.registeredShadingModes() :
m.append(
"/" + name if name else "Default",
{
"checkBox" : name == currentName,
"command" : functools.partial( Gaffer.WeakMethod( self.__setValue ), name if name != currentName else "" ),
}
)
if not name :
m.append( "/DefaultDivider", { "divider" : True } )
return m
def __setValue( self, value, *unused ) :
self.getPlug().setValue( value )
class _ExpansionPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plug, parenting = None ) :
menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ) )
menuButton = GafferUI.MenuButton( menu=menu, image = "expansion.png", hasFrame=False )
GafferUI.PlugValueWidget.__init__( self, menuButton, plug, parenting = parenting )
def hasLabel( self ) :
return True
def _updateFromPlug( self ) :
pass
def __menuDefinition( self ) :
expandAll = bool( self.getPlug().getValue() )
m = IECore.MenuDefinition()
m.append( "/Expand Selection", { "command" : self.getPlug().node().expandSelection, "active" : not expandAll, "shortCut" : "Down" } )
m.append( "/Expand Selection Fully", { "command" : IECore.curry( self.getPlug().node().expandSelection, depth = 999 ), "active" : not expandAll, "shortCut" : "Shift+Down" } )
m.append( "/Collapse Selection", { "command" : self.getPlug().node().collapseSelection, "active" : not expandAll, "shortCut" : "Up" } )
m.append( "/Expand All Divider", { "divider" : True } )
m.append( "/Expand All", { "checkBox" : expandAll, "command" : Gaffer.WeakMethod( self.__toggleMinimumExpansionDepth ) } )
return m
def __toggleMinimumExpansionDepth( self, *unused ) :
self.getPlug().setValue( 0 if self.getPlug().getValue() else 999 )
class _LookThroughPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plug, parenting = None ) :
row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal )
GafferUI.PlugValueWidget.__init__( self, row, plug, parenting = parenting )
with row :
self.__enabledWidget = GafferUI.BoolPlugValueWidget( plug["enabled"], displayMode=GafferUI.BoolWidget.DisplayMode.Switch )
self.__cameraWidget = GafferSceneUI.ScenePathPlugValueWidget(
plug["camera"],
path = GafferScene.ScenePath(
plug.node()["in"],
plug.node().getContext(),
"/",
filter = GafferScene.ScenePath.createStandardFilter( [ "__cameras" ], "Show only cameras" )
),
)
self.__cameraWidget.pathWidget().setFixedCharacterWidth( 13 )
if hasattr( self.__cameraWidget.pathWidget()._qtWidget(), "setPlaceholderText" ) :
self.__cameraWidget.pathWidget()._qtWidget().setPlaceholderText( "Render Camera" )
self._updateFromPlug()
def _updateFromPlug( self ) :
with self.getContext() :
self.__cameraWidget.setEnabled( self.getPlug()["enabled"].getValue() )
class _GridPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plug, parenting = None ) :
menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ) )
menuButton = GafferUI.MenuButton( menu=menu, image = "grid.png", hasFrame=False )
GafferUI.PlugValueWidget.__init__( self, menuButton, plug, parenting = parenting )
def hasLabel( self ) :
return True
def _updateFromPlug( self ) :
pass
def __menuDefinition( self ) :
m = IECore.MenuDefinition()
m.append(
"/Show Grid",
{
"checkBox" : self.getPlug()["visible"].getValue(),
"command" : self.getPlug()["visible"].setValue,
}
)
m.append(
"/Show Gnomon",
{
"checkBox" : self.getPlug().node()["gnomon"]["visible"].getValue(),
"command" : self.getPlug().node()["gnomon"]["visible"].setValue,
}
)
return m
|
data/PythonCharmers/python-future/src/libpasteurize/fixes/fix_future_builtins.py
|
"""
Adds this import line:
from builtins import XYZ
for each of the functions XYZ that is used in the module.
"""
from __future__ import unicode_literals
from lib2to3 import fixer_base
from lib2to3.pygram import python_symbols as syms
from lib2to3.fixer_util import Name, Call, in_special_context
from libfuturize.fixer_util import touch_import_top
replaced_builtins = '''filter map zip
ascii chr hex input next oct open round super
bytes dict int range str'''.split()
expression = '|'.join(["name='{0}'".format(name) for name in replaced_builtins])
class FixFutureBuiltins(fixer_base.BaseFix):
BM_compatible = True
run_order = 9
PATTERN = """
power<
({0}) trailer< '(' args=[any] ')' >
rest=any* >
""".format(expression)
def transform(self, node, results):
name = results["name"]
touch_import_top(u'builtins', name.value, node)
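# Example of the fixer's effect (illustrative):
#   before:  squares = map(lambda x: x * x, data)
#   after:   from builtins import map
#            squares = map(lambda x: x * x, data)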
|
data/adamchainz/django-mysql/django_mysql/models/fields/__init__.py
|
from django_mysql.models.fields.bit import (
Bit1BooleanField, NullBit1BooleanField
)
from django_mysql.models.fields.dynamic import DynamicField
from django_mysql.models.fields.enum import EnumField
from django_mysql.models.fields.json import JSONField
from django_mysql.models.fields.lists import (
ListCharField, ListTextField
)
from django_mysql.models.fields.sets import (
SetCharField, SetTextField
)
from django_mysql.models.fields.sizes import (
SizedBinaryField, SizedTextField
)
|
data/NVIDIA/DIGITS/digits/inference/__init__.py
|
from __future__ import absolute_import
from .images import *
from .job import InferenceJob
|
data/Piratenfraktion-Berlin/OwnTube/videoportal/migrations/0015_auto__add_field_collection_channel.py
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.add_column('videoportal_collection', 'channel',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['videoportal.Channel'], null=True, blank=True),
keep_default=False)
def backwards(self, orm):
db.delete_column('videoportal_collection', 'channel_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
},
'videoportal.channel': {
'Meta': {'object_name': 'Channel'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'})
},
'videoportal.collection': {
'Meta': {'object_name': 'Collection'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videoportal.Channel']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videoportal.Video']", 'symmetrical': 'False'})
},
'videoportal.comment': {
'Meta': {'object_name': 'Comment'},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'timecode': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videoportal.Video']"})
},
'videoportal.hotfolder': {
'Meta': {'object_name': 'Hotfolder'},
'activated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'autoPublish': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videoportal.Channel']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'defaultName': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'folderName': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.IntegerField', [], {'max_length': '1'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'videoportal.video': {
'Meta': {'object_name': 'Video'},
'assemblyid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'autoPublish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videoportal.Channel']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'duration': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'encodingDone': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.IntegerField', [], {'max_length': '1'}),
'linkURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'mp3Size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'mp3URL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mp4Size': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'mp4URL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'oggSize': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'oggURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'originalFile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'torrentDone': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'torrentURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'videoThumbURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'webmSize': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'webmURL': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['videoportal']
|
data/Toblerity/Shapely/shapely/examples/geoms.py
|
from numpy import asarray
import pylab
from shapely.geometry import Point, LineString, Polygon
polygon = Polygon(((-1.0, -1.0), (-1.0, 1.0), (1.0, 1.0), (1.0, -1.0)))
point_r = Point(-1.5, 1.2)
point_g = Point(-1.0, 1.0)
point_b = Point(-0.5, 0.5)
line_r = LineString(((-0.5, 0.5), (0.5, 0.5)))
line_g = LineString(((1.0, -1.0), (1.8, 0.5)))
line_b = LineString(((-1.8, -1.2), (1.8, 0.5)))
def plot_point(g, o, l):
pylab.plot([g.x], [g.y], o, label=l)
def plot_line(g, o):
a = asarray(g)
pylab.plot(a[:,0], a[:,1], o)
def fill_polygon(g, o):
a = asarray(g.exterior)
pylab.fill(a[:,0], a[:,1], o, alpha=0.5)
def fill_multipolygon(g, o):
for g in g.geoms:
fill_polygon(g, o)
if __name__ == "__main__":
from numpy import asarray
import pylab
fig = pylab.figure(1, figsize=(4, 3), dpi=150)
pylab.axis('tight')
a = asarray(polygon.exterior)
pylab.fill(a[:,0], a[:,1], 'c')
plot_point(point_r, 'ro', 'b')
plot_point(point_g, 'go', 'c')
plot_point(point_b, 'bo', 'd')
plot_line(line_r, 'r')
plot_line(line_g, 'g')
plot_line(line_b, 'b')
pylab.show()
|
data/a-tal/pyweet/pyweet/settings.py
|
"""Pyweet runtime settings."""
import os
class Settings(object):
"""Basic settings object for pyweet."""
API = "rgIYSFIeGBxVXOPy22QzA"
API_SECRET = "VX7ohOHpJm1mXlGX6XS08JcT4Vp8j83QhRNo1SVRevb"
AUTH_FILE = os.path.expanduser("~/.pyweet")
|
data/OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/moves/sys.py
|
from __future__ import absolute_import
from future.utils import PY2
from sys import *
if PY2:
from __builtin__ import intern
|
data/Smartling/api-sdk-python/setup.py
|
''' Copyright 2012 Smartling, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this work except in compliance with the License.
* You may obtain a copy of the License in the LICENSE file, or at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
'''
from setuptools import setup
setup(
name="SmartlingApiSdk",
version = "1.2.5",
author="Smartling, Inc.",
author_email="aartamonov@smartling.com",
description="Smartling python library for file translations",
license='Apache License v2.0',
keywords='translation localization internationalization',
url="https://docs.smartling.com/display/docs/Files+API",
long_description="python SDK to work with Smartling API for file translation",
packages=['smartlingApiSdk', "simplejson24", "example", "test"],
include_package_data = True,
package_data = {
'': ['*.properties', '*.xml'],
},
)
|
data/SpriteLink/NIPAP/nipap-www/nipapwww/config/middleware.py
|
"""Pylons middleware initialization"""
from beaker.middleware import SessionMiddleware
from paste.cascade import Cascade
from paste.registry import RegistryManager
from paste.urlparser import StaticURLParser
from paste.deploy.converters import asbool
from pylons.middleware import ErrorHandler, StatusCodeRedirect
from pylons.wsgiapp import PylonsApp
from routes.middleware import RoutesMiddleware
from nipapwww.config.environment import load_environment
def make_app(global_conf, full_stack=True, static_files=True, **app_conf):
"""Create a Pylons WSGI application and return it
``global_conf``
The inherited configuration for this application. Normally from
the [DEFAULT] section of the Paste ini file.
``full_stack``
Whether this application provides a full WSGI stack (by default,
meaning it handles its own exceptions and errors). Disable
full_stack when this application is "managed" by another WSGI
middleware.
``static_files``
Whether this application serves its own static files; disable
when another web server is responsible for serving them.
``app_conf``
The application's local configuration. Normally specified in
the [app:<name>] section of the Paste ini file (where <name>
defaults to main).
"""
config = load_environment(global_conf, app_conf)
app = PylonsApp(config=config)
app = RoutesMiddleware(app, config['routes.map'], singleton=False)
app = SessionMiddleware(app, config)
if asbool(full_stack):
app = ErrorHandler(app, global_conf, **config['pylons.errorware'])
if asbool(config['debug']):
app = StatusCodeRedirect(app)
else:
app = StatusCodeRedirect(app, [400, 401, 403, 404, 500])
app = RegistryManager(app)
if asbool(static_files):
static_app = StaticURLParser(config['pylons.paths']['static_files'])
app = Cascade([static_app, app])
app.config = config
return app
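# Illustrative deployment wiring (hypothetical values, including the egg name):
# under Paste Deploy this factory is normally referenced from the ini file
# rather than called directly, along the lines of:
#   [app:main]
#   use = egg:nipap-www
#   full_stack = true
#   static_files = true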
|
data/SEED-platform/seed/seed/tests/test_utils.py
|
"""
:copyright (c) 2014 - 2016, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from the U.S. Department of Energy) and contributors. All rights reserved.
:author
"""
from django.test import TestCase
from seed.utils.generic import split_model_fields
class DummyClass(object):
"A simple class that has two fields"
field_one = "field_one"
field_two = "field_two"
class TestGenericUtils(TestCase):
def test_split_model_fields(self):
"""
Tests splitting a list of field names based on what fields an
object has.
"""
f1 = 'field_one'
f2 = 'field_two'
f3 = 'no_field_three'
f4 = 'no_field_four'
obj = DummyClass()
fields_to_split = [f1, f2, f3, f4]
obj_fields, non_obj_fields = split_model_fields(obj, fields_to_split)
self.assertEqual(obj_fields, [f1, f2])
self.assertEqual(non_obj_fields, [f3, f4])
fields_to_split = [f1]
obj_fields, non_obj_fields = split_model_fields(obj, fields_to_split)
self.assertEqual(obj_fields, [f1])
self.assertEqual(non_obj_fields, [])
fields_to_split = [f4]
obj_fields, non_obj_fields = split_model_fields(obj, fields_to_split)
self.assertEqual(obj_fields, [])
self.assertEqual(non_obj_fields, [f4])
|
data/adieu/allbuttonspressed/pygments/lexers/_luabuiltins.py
|
"""
pygments.lexers._luabuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names and modules of lua functions
It is able to re-generate itself, but for adding new functions you
probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
MODULES = {'basic': ['_G',
'_VERSION',
'assert',
'collectgarbage',
'dofile',
'error',
'getfenv',
'getmetatable',
'ipairs',
'load',
'loadfile',
'loadstring',
'next',
'pairs',
'pcall',
'print',
'rawequal',
'rawget',
'rawset',
'select',
'setfenv',
'setmetatable',
'tonumber',
'tostring',
'type',
'unpack',
'xpcall'],
'coroutine': ['coroutine.create',
'coroutine.resume',
'coroutine.running',
'coroutine.status',
'coroutine.wrap',
'coroutine.yield'],
'debug': ['debug.debug',
'debug.getfenv',
'debug.gethook',
'debug.getinfo',
'debug.getlocal',
'debug.getmetatable',
'debug.getregistry',
'debug.getupvalue',
'debug.setfenv',
'debug.sethook',
'debug.setlocal',
'debug.setmetatable',
'debug.setupvalue',
'debug.traceback'],
'io': ['io.close',
'io.flush',
'io.input',
'io.lines',
'io.open',
'io.output',
'io.popen',
'io.read',
'io.tmpfile',
'io.type',
'io.write'],
'math': ['math.abs',
'math.acos',
'math.asin',
'math.atan2',
'math.atan',
'math.ceil',
'math.cosh',
'math.cos',
'math.deg',
'math.exp',
'math.floor',
'math.fmod',
'math.frexp',
'math.huge',
'math.ldexp',
'math.log10',
'math.log',
'math.max',
'math.min',
'math.modf',
'math.pi',
'math.pow',
'math.rad',
'math.random',
'math.randomseed',
'math.sinh',
'math.sin',
'math.sqrt',
'math.tanh',
'math.tan'],
'modules': ['module',
'require',
'package.cpath',
'package.loaded',
'package.loadlib',
'package.path',
'package.preload',
'package.seeall'],
'os': ['os.clock',
'os.date',
'os.difftime',
'os.execute',
'os.exit',
'os.getenv',
'os.remove',
'os.rename',
'os.setlocale',
'os.time',
'os.tmpname'],
'string': ['string.byte',
'string.char',
'string.dump',
'string.find',
'string.format',
'string.gmatch',
'string.gsub',
'string.len',
'string.lower',
'string.match',
'string.rep',
'string.reverse',
'string.sub',
'string.upper'],
'table': ['table.concat',
'table.insert',
'table.maxn',
'table.remove',
'table.sort']}
if __name__ == '__main__':
import re
import urllib
import pprint
def module_callbacks():
def is_in_coroutine_module(name):
return name.startswith('coroutine.')
def is_in_modules_module(name):
if name in ['require', 'module'] or name.startswith('package'):
return True
else:
return False
def is_in_string_module(name):
return name.startswith('string.')
def is_in_table_module(name):
return name.startswith('table.')
def is_in_math_module(name):
return name.startswith('math')
def is_in_io_module(name):
return name.startswith('io.')
def is_in_os_module(name):
return name.startswith('os.')
def is_in_debug_module(name):
return name.startswith('debug.')
return {'coroutine': is_in_coroutine_module,
'modules': is_in_modules_module,
'string': is_in_string_module,
'table': is_in_table_module,
'math': is_in_math_module,
'io': is_in_io_module,
'os': is_in_os_module,
'debug': is_in_debug_module}
def get_newest_version():
f = urllib.urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>')
for line in f:
m = r.match(line)
if m is not None:
return m.groups()[0]
def get_lua_functions(version):
f = urllib.urlopen('http://www.lua.org/manual/%s/' % version)
        r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>')
functions = []
for line in f:
m = r.match(line)
if m is not None:
functions.append(m.groups()[0])
return functions
def get_function_module(name):
for mod, cb in module_callbacks().iteritems():
if cb(name):
return mod
if '.' in name:
return name.split('.')[0]
else:
return 'basic'
def regenerate(filename, modules):
f = open(filename)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
f = open(filename, 'w')
f.write(header)
f.write('MODULES = %s\n\n' % pprint.pformat(modules))
f.write(footer)
f.close()
def run():
version = get_newest_version()
print '> Downloading function index for Lua %s' % version
functions = get_lua_functions(version)
print '> %d functions found:' % len(functions)
modules = {}
for full_function_name in functions:
print '>> %s' % full_function_name
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
regenerate(__file__, modules)
run()
|
data/MirantisWorkloadMobility/CloudFerry/cloudferry_devlab/cloudferry_devlab/tests/testcases/rollback_verification.py
|
"""
This module verifies that the rollback procedure was executed correctly.
Basically, two dictionaries are compared:
- pre_data: data collected from the SRC and DST clusters, stored in a file
whose name is given in the config file.
- data_after: data collected from the SRC and DST clusters using the
data_collector module, kept in memory as a dictionary.
"""
import os
import yaml
import cloudferry_devlab.tests.config as config
from cloudferry_devlab.tests.data_collector import DataCollector
from cloudferry_devlab.tests import functional_test
import cloudferry_devlab.tests.utils as utils
class RollbackVerification(functional_test.FunctionalTest):
def setUp(self):
data_collector = DataCollector(config=config)
self.data_after = utils.convert(data_collector.data_collector())
file_name = config.rollback_params['data_file_names']['PRE']
pre_file_path = os.path.join(self.cloudferry_dir, file_name)
with open(pre_file_path, "r") as f:
self.pre_data = yaml.load(f)
def test_verify_rollback(self):
"""Validate rollback actions run successfuly."""
self.maxDiff = None
msg = 'Comparing "{0}-{1}" resources...'
for cloud in self.data_after:
for service in self.data_after[cloud]:
for resource in self.data_after[cloud][service]:
print(msg.format(service.lower(), resource.lower()))
self.assertEqual(self.data_after[cloud][service][resource],
self.pre_data[cloud][service][resource])
|
data/Netflix/aminator/aminator/plugins/finalizer/tagging_ebs.py
|
"""
aminator.plugins.finalizer.tagging_ebs
======================================
ebs tagging image finalizer
"""
import logging
from os import environ
from aminator.config import conf_action
from aminator.plugins.finalizer.tagging_base import TaggingBaseFinalizerPlugin
from aminator.util.linux import sanitize_metadata
__all__ = ('TaggingEBSFinalizerPlugin',)
log = logging.getLogger(__name__)
class TaggingEBSFinalizerPlugin(TaggingBaseFinalizerPlugin):
_name = 'tagging_ebs'
def add_plugin_args(self):
tagging = super(TaggingEBSFinalizerPlugin, self).add_plugin_args()
context = self._config.context
        tagging.add_argument('-n', '--name', dest='name', action=conf_action(context.ami), help='name of resultant AMI (default: package_name-version-release-arch-yyyymmddHHMM-ebs)')
def _set_metadata(self):
super(TaggingEBSFinalizerPlugin, self)._set_metadata()
context = self._config.context
config = self._config.plugins[self.full_name]
metadata = context.package.attributes
ami_name = context.ami.get('name', None)
if not ami_name:
ami_name = config.name_format.format(**metadata)
context.ami.name = sanitize_metadata('{0}-ebs'.format(ami_name))
def _snapshot_volume(self):
log.info('Taking a snapshot of the target volume')
if not self._cloud.snapshot_volume():
return False
log.info('Snapshot success')
return True
def _register_image(self, block_device_map=None, root_device=None):
log.info('Registering image')
config = self._config.plugins[self.full_name]
if block_device_map is None:
block_device_map = config.default_block_device_map
if root_device is None:
root_device = config.default_root_device
if not self._cloud.register_image(block_device_map, root_device):
return False
log.info('Registration success')
return True
def finalize(self):
log.info('Finalizing image')
self._set_metadata()
if not self._snapshot_volume():
log.critical('Error snapshotting volume')
return False
if not self._register_image():
log.critical('Error registering image')
return False
if not self._add_tags(['snapshot', 'ami']):
log.critical('Error adding tags')
return False
log.info('Image registered and tagged')
self._log_ami_metadata()
return True
def __enter__(self):
context = self._config.context
environ["AMINATOR_STORE_TYPE"] = "ebs"
if context.ami.get("name", None):
environ["AMINATOR_AMI_NAME"] = context.ami.name
return super(TaggingEBSFinalizerPlugin, self).__enter__()
|
data/adaptivdesign/django-sellmo/sellmo/contrib/discount/constants.py
|
from django.utils.translation import ugettext_lazy as _
from .price import DiscountPriceComponent
DISCOUNT = DiscountPriceComponent()
APPLIES_TO_PRODUCT_PRICE = 'product'
APPLIES_TO_SHIPPING_COSTS = 'shipping'
APPLIES_TO_TOTAL = 'total'
APPLIES_TO_CHOICES = (
(APPLIES_TO_PRODUCT_PRICE, _("product price")),
(APPLIES_TO_SHIPPING_COSTS, _("shipping costs")),
(APPLIES_TO_TOTAL, _("order total")),
)
|
data/Shopify/shopify_django_app/settings.py
|
import os
from shopify_settings import *
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
try:
from djangoappengine.settings_base import *
USING_APP_ENGINE = True
except ImportError:
USING_APP_ENGINE = False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(SITE_ROOT, 'db-development.sqlite3'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = ''
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATICFILES_DIRS = (
os.path.join(SITE_ROOT, 'static'),
)
SECRET_KEY = ''
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'shopify_app.context_processors.current_shop',
)
if not USING_APP_ENGINE:
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.static',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'shopify_app.middleware.LoginProtection',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'home',
'shopify_app',
)
if USING_APP_ENGINE:
INSTALLED_APPS += (
'djangoappengine',
'djangotoolbox',
)
else:
INSTALLED_APPS += (
'django.contrib.sites',
'django.contrib.staticfiles',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
data/SALib/SALib/versioneer.py
|
"""
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
[Build Status](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
* `pip install versioneer` to somewhere in your $PATH
* run `versioneer-installer` in your source tree: this installs `versioneer.py`
* follow the instructions below (also in the `versioneer.py` docstring)
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example 'git describe --tags --dirty --always' reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time. However,
when you use "setup.py build" or "setup.py sdist", `_version.py` in the new
copy is replaced by a small static file that contains just the generated
version data.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the "git archive" command. As a result, generated tarballs will
contain enough information to get the proper version.
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file, so it can be imported at runtime. If your project uses
`src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
This file should be checked in to your VCS as usual: the copy created below
by `setup.py versioneer` will include code that parses expanded VCS
keywords in generated tarballs. The 'build' and 'sdist' commands will
replace it with a copy that has just the calculated version string.
This must be set even if your project does not have any modules (and will
therefore never import `_version.py`), since "setup.py sdist" -based trees
still need somewhere to record the pre-calculated version strings. Anywhere
in the source tree should do. If there is a `__init__.py` next to your
`_version.py`, the `setup.py versioneer` command (described below) will
append some `__version__`-setting assignments, if they aren't already
present.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
If this is set to None, then `setup.py build` will not attempt to rewrite
any `_version.py` in the built tree. If your project does not have any
libraries (e.g. if it only builds a script), then you should use
`versionfile_build = None` and override `distutils.command.build_scripts`
to explicitly insert a copy of `versioneer.get_version()` into your
generated script.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string.
* `parentdir_prefix`:
a string, frequently the same as tag_prefix, which appears at the start of
all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'.
This tool provides one script, named `versioneer-installer`. That script does
one thing: write a copy of `versioneer.py` into the current directory.
To versioneer-enable your project:
* 1: Run `versioneer-installer` to copy `versioneer.py` into the top of your
source tree.
* 2: add the following lines to the top of your `setup.py`, with the
configuration values you decided earlier:
import versioneer
versioneer.VCS = 'git'
versioneer.versionfile_source = 'src/myproject/_version.py'
versioneer.versionfile_build = 'myproject/_version.py'
versioneer.tag_prefix = ''
versioneer.parentdir_prefix = 'myproject-'
* 3: add the following arguments to the setup() call in your setup.py:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: now run `setup.py versioneer`, which will create `_version.py`, and will
modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`). It will also
modify your `MANIFEST.in` to include both `versioneer.py` and the generated
`_version.py` in sdist tarballs.
* 5: commit these changes to your VCS. To make sure you won't forget,
`setup.py versioneer` will mark everything it touched for addition.
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Currently, all version strings must be based upon a tag. Versioneer will
report "unknown" until your tree has at least one tag in its history. This
restriction will be fixed eventually (see the project's issue tracker).
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different keys for different flavors
of the version string:
* `['version']`: condensed tag+distance+shortid+dirty identifier. For git,
this uses the output of `git describe --tags --dirty --always` but strips
the tag_prefix. For example "0.11-2-g1076c97-dirty" indicates that the tree
is like the "1076c97" commit but has uncommitted changes ("-dirty"), and
that this commit is two revisions ("-2-") beyond the "0.11" tag. For
released software (exactly equal to a known tag), the identifier will only
contain the stripped tag, e.g. "0.11".
* `['full']`: detailed revision identifier. For Git, this is the full SHA1
commit id, followed by "-dirty" if the tree contains uncommitted changes,
e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac-dirty".
Some variants are more useful than others. Including `full` in a bug report
should allow developers to reconstruct the exact code being tested (or
indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
In the future, this will also include a
[PEP-0440](http://legacy.python.org/dev/peps/pep-0440/) -compatible flavor
(e.g. `1.2.post0.dev123`). This loses a lot of information (and has no room
for a hash-based revision id), but is safe to use in a `setup.py`
"`version=`" argument. It also enables tools like *pip* to compare version
strings and evaluate compatibility constraint declarations.
The `setup.py versioneer` command adds the following text to your
`__init__.py` to place a basic version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* re-run `versioneer-installer` in your source tree to replace your copy of
`versioneer.py`
* edit `setup.py`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `setup.py versioneer` to replace `SRC/_version.py`
* commit any changed files
You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
`setup.py versioneer`. This will enable the use of additional version-control
systems (SVN, etc) in the future.
Nothing special.
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
To make Versioneer easier to embed, all its code is hereby released into the
public domain. The `_version.py` that it creates is also in the public
domain.
"""
from distutils.command.build import build as _build
from distutils.command.sdist import sdist as _sdist
from distutils.core import Command
import os, sys, re, subprocess, errno
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
VCS = None
LONG_VERSION_PY = {}
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
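# Illustrative use of run_command (hypothetical invocation): it tries each
# candidate executable in turn and returns the captured stdout, or None if the
# command is missing or exits non-zero.
#   out = run_command(["git"], ["describe", "--tags", "--dirty", "--always"],
#                     cwd=".", verbose=True)
#   if out is not None:
#       print("git describes this tree as %s" % out)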
LONG_VERSION_PY['git'] = '''
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
import os, sys, re, subprocess, errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %%
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
def git_get_keywords(versionfile_abs):
keywords = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {}
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {}
refs = set([r.strip() for r in refnames.strip("()").split(",")])
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return { "version": r,
"full": keywords["full"].strip() }
if verbose:
print("no suitable tags, using full revision id")
return { "version": keywords["full"].strip(),
"full": keywords["full"].strip() }
def git_versions_from_vcs(tag_prefix, root, verbose=False):
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
keywords = { "refnames": git_refnames, "full": git_full }
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
for i in range(len(versionfile_source.split(os.sep))):
root = os.path.dirname(root)
except NameError:
return default
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
'''
def git_get_keywords(versionfile_abs):
keywords = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {}
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {}
refs = set([r.strip() for r in refnames.strip("()").split(",")])
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": keywords["full"].strip() }
if verbose:
print("no suitable tags, using full revision id")
return { "version": keywords["full"].strip(),
"full": keywords["full"].strip() }
def git_versions_from_vcs(tag_prefix, root, verbose=False):
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def do_vcs_install(manifest_in, versionfile_source, ipy):
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
SHORT_VERSION_PY = """
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
versions = {}
try:
with open(filename) as f:
for line in f.readlines():
mo = re.match("version_version = '([^']+)'", line)
if mo:
versions["version"] = mo.group(1)
mo = re.match("version_full = '([^']+)'", line)
if mo:
versions["full"] = mo.group(1)
except EnvironmentError:
return {}
return versions
def write_to_version_file(filename, versions):
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % versions)
print("set %s to '%s'" % (filename, versions["version"]))
def get_root():
try:
return os.path.dirname(os.path.abspath(__file__))
except NameError:
return os.path.dirname(os.path.abspath(sys.argv[0]))
def vcs_function(vcs, suffix):
return getattr(sys.modules[__name__], '%s_%s' % (vcs, suffix), None)
def get_versions(default=DEFAULT, verbose=False):
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
assert VCS is not None, "please set versioneer.VCS"
root = get_root()
versionfile_abs = os.path.join(root, versionfile_source)
get_keywords_f = vcs_function(VCS, "get_keywords")
versions_from_keywords_f = vcs_function(VCS, "versions_from_keywords")
if get_keywords_f and versions_from_keywords_f:
vcs_keywords = get_keywords_f(versionfile_abs)
ver = versions_from_keywords_f(vcs_keywords, tag_prefix)
if ver:
if verbose: print("got version from expanded keyword %s" % ver)
return ver
ver = versions_from_file(versionfile_abs)
if ver:
if verbose: print("got version from file %s %s" % (versionfile_abs,ver))
return ver
versions_from_vcs_f = vcs_function(VCS, "versions_from_vcs")
if versions_from_vcs_f:
ver = versions_from_vcs_f(tag_prefix, root, verbose)
if ver:
if verbose: print("got version from VCS %s" % ver)
return ver
ver = versions_from_parentdir(parentdir_prefix, root, verbose)
if ver:
if verbose: print("got version from parentdir %s" % ver)
return ver
if verbose: print("got version from default %s" % default)
return default
def get_version(verbose=False):
return get_versions(verbose=verbose)["version"]
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ver = get_version(verbose=True)
print("Version is currently: %s" % ver)
class cmd_build(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
if versionfile_build:
target_versionfile = os.path.join(self.build_lib, versionfile_build)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % versions)
if 'cx_Freeze' in sys.modules:
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
versions = get_versions(verbose=True)
target_versionfile = versionfile_source
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(versionfile_source, "w") as f:
assert VCS is not None, "please set versioneer.VCS"
LONG = LONG_VERSION_PY[VCS]
f.write(LONG % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
class cmd_sdist(_sdist):
def run(self):
versions = get_versions(verbose=True)
self._versioneer_generated_versions = versions
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
target_versionfile = os.path.join(base_dir, versionfile_source)
print("UPDATING %s" % target_versionfile)
os.unlink(target_versionfile)
with open(target_versionfile, "w") as f:
f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
description = "install/upgrade Versioneer files: __init__.py SRC/_version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print(" creating %s" % versionfile_source)
with open(versionfile_source, "w") as f:
assert VCS is not None, "please set versioneer.VCS"
LONG = LONG_VERSION_PY[VCS]
f.write(LONG % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
manifest_in = os.path.join(get_root(), "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
do_vcs_install(manifest_in, versionfile_source, ipy)
def get_cmdclass():
cmds = {'version': cmd_version,
'versioneer': cmd_update_files,
'build': cmd_build,
'sdist': cmd_sdist,
}
if 'cx_Freeze' in sys.modules:
cmds['build_exe'] = cmd_build_exe
del cmds['build']
return cmds
|
data/NeuroVault/NeuroVault/neurovault/apps/statmaps/tests/test_counter.py
|
import os.path
from django.contrib.auth.models import User
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase, Client
from neurovault.apps.statmaps.forms import NIDMResultsForm
from neurovault.apps.statmaps.models import Collection, StatisticMap, Comparison
from neurovault.apps.statmaps.utils import count_processing_comparisons,count_existing_comparisons
from .utils import clearDB
class Test_Counter(TestCase):
def setUp(self):
print "\n\n
self.test_path = os.path.abspath(os.path.dirname(__file__))
self.user = User.objects.create(username='neurovault')
self.client = Client()
self.client.login(username=self.user)
self.Collection1 = Collection(name='Collection1', owner=self.user,
DOI='10.3389/fninf.2015.00008')
self.Collection1.save()
self.Collection2 = Collection(name='Collection2', owner=self.user,
DOI='10.3389/fninf.2015.00009')
self.Collection2.save()
self.Collection3 = Collection(name='Collection3', owner=self.user,
DOI='10.3389/fninf.2015.00011')
self.Collection3.save()
def tearDown(self):
clearDB()
def test_statmaps_processing(self):
print "\nTesting Counter - added statistic maps
Image1 = StatisticMap(name='Image1', collection=self.Collection1, file='motor_lips.nii.gz', map_type="Z")
Image1.file = SimpleUploadedFile('motor_lips.nii.gz', file(os.path.join(self.test_path,'test_data/statmaps/motor_lips.nii.gz')).read())
Image1.save()
images_processing = count_processing_comparisons(Image1.pk)
print "%s images processing [should be 0]" %(images_processing)
self.assertEqual(images_processing,0)
Image2 = StatisticMap(name='Image2', collection=self.Collection2, file='beta_0001.nii.gz', map_type="Other")
Image2.file = SimpleUploadedFile('beta_0001.nii.gz', file(os.path.join(self.test_path,'test_data/statmaps/beta_0001.nii.gz')).read())
Image2.save()
images_processing = count_processing_comparisons(Image1.pk)
print "%s images processing [should be 0]" %(images_processing)
self.assertEqual(images_processing,0)
total_comparisons = count_existing_comparisons(Image1.pk)
self.assertEqual(total_comparisons,1)
def test_adding_nidm(self):
Image2 = StatisticMap(name='Image2', collection=self.Collection1, file='beta_0001.nii.gz', map_type="Other")
Image2.file = SimpleUploadedFile('beta_0001.nii.gz', file(os.path.join(self.test_path,'test_data/statmaps/beta_0001.nii.gz')).read())
Image2.save()
zip_file = open(os.path.join(self.test_path,'test_data/nidm/spm_example.nidm.zip'), 'rb')
post_dict = {
'name': 'spm_nidm',
'description':'{0} upload test'.format('spm_example'),
'collection':self.Collection2.pk}
fname = os.path.basename(os.path.join(self.test_path,'test_data/nidm/spm_example.nidm.zip'))
file_dict = {'zip_file': SimpleUploadedFile(fname, zip_file.read())}
zip_file.close()
form = NIDMResultsForm(post_dict, file_dict)
nidm = form.save()
print "\nTesting Counter - added nidm result
total_comparisons = count_existing_comparisons(Image2.pk)
self.assertEqual(total_comparisons,1)
Image2ss = StatisticMap(name='Image2 - single subject', collection=self.Collection3, file='beta_0001.nii.gz', map_type="Other", analysis_level='S')
Image2ss.file = SimpleUploadedFile('beta_0001.nii.gz', file(os.path.join(self.test_path,'test_data/statmaps/beta_0001.nii.gz')).read())
Image2ss.save()
total_comparisons = count_existing_comparisons(Image2ss.pk)
self.assertEqual(total_comparisons,0)
number_comparisons = len(Comparison.objects.all())
print "\n %s comparisons exist after adding NIDM `[should not be 0]" %(number_comparisons)
self.assertEqual(number_comparisons>0,True)
|
data/RobotLocomotion/director/src/python/tests/testLoadUrdf.py
|
from director.consoleapp import ConsoleApp
from director import visualization as vis
from director import roboturdf
from director import jointcontrol
import argparse
def getArgs():
parser = argparse.ArgumentParser()
parser.add_argument('--urdf', type=str, default=None, help='urdf filename to load')
args, unknown = parser.parse_known_args()
return args
app = ConsoleApp()
view = app.createView()
args = getArgs()
if args.urdf:
robotModel = roboturdf.openUrdf(args.urdf, view)
jointNames = robotModel.model.getJointNames()
jointController = jointcontrol.JointController([robotModel], jointNames=jointNames)
else:
robotModel, jointController = roboturdf.loadRobotModel('robot model', view)
print 'urdf file:', robotModel.getProperty('Filename')
for joint in robotModel.model.getJointNames():
print 'joint:', joint
for link in robotModel.model.getLinkNames():
print 'link:', link
robotModel.getLinkFrame(link)
if app.getTestingInteractiveEnabled():
view.show()
app.start()
|
data/Lasagne/Lasagne/lasagne/tests/layers/test_merge.py
|
from mock import Mock
import numpy
import pytest
import theano
class TestAutocrop:
def test_autocrop_array_shapes(self):
from lasagne.layers.merge import autocrop_array_shapes
crop0 = None
crop1 = [None, 'lower', 'center', 'upper']
crop2 = ['lower', 'upper']
crop_bad = ['lower', 'upper', 'bad', 'worse']
assert autocrop_array_shapes(
[(1, 2, 3, 4), (5, 6, 7, 8), (5, 4, 3, 2)], crop0) == \
[(1, 2, 3, 4), (5, 6, 7, 8), (5, 4, 3, 2)]
assert autocrop_array_shapes(
[(1, 2, 3, 4), (5, 6, 7, 8), (5, 4, 3, 2)], crop1) == \
[(1, 2, 3, 2), (5, 2, 3, 2), (5, 2, 3, 2)]
assert autocrop_array_shapes(
[(1, 2, 3, 4), (5, 6, 7, 8), (5, 4, 3, 2)], crop2) == \
[(1, 2, 3, 4), (1, 2, 7, 8), (1, 2, 3, 2)]
with pytest.raises(ValueError):
autocrop_array_shapes(
[(1, 2, 3, 4), (5, 6, 7, 8), (5, 4, 3, 2)], crop_bad)
with pytest.raises(ValueError):
autocrop_array_shapes(
[(1, 2, 3, 4), (5, 6, 7), (5, 4, 3, 2, 10)], crop1)
def test_crop_inputs(self):
from lasagne.layers.merge import autocrop
from numpy.testing import assert_array_equal
crop_0 = None
crop_1 = [None, 'lower', 'center', 'upper']
crop_l = ['lower', 'lower', 'lower', 'lower']
crop_c = ['center', 'center', 'center', 'center']
crop_u = ['upper', 'upper', 'upper', 'upper']
crop_x = ['lower', 'lower']
crop_bad = ['lower', 'lower', 'bad', 'worse']
x0 = numpy.random.random((2, 3, 5, 7))
x1 = numpy.random.random((1, 2, 3, 4))
x2 = numpy.random.random((6, 3, 4, 2))
def crop_test(cropping, inputs, expected):
inputs = [theano.shared(x) for x in inputs]
outs = autocrop(inputs, cropping)
outs = [o.eval() for o in outs]
assert len(outs) == len(expected)
for o, e in zip(outs, expected):
assert_array_equal(o, e)
crop_test(crop_0, [x0, x1],
[x0, x1])
crop_test(crop_1, [x0, x1],
[x0[:, :2, 1:4, 3:], x1[:, :, :, :]])
crop_test(crop_l, [x0, x1],
[x0[:1, :2, :3, :4], x1[:, :, :, :]])
crop_test(crop_c, [x0, x1],
[x0[:1, :2, 1:4, 1:5], x1[:, :, :, :]])
crop_test(crop_u, [x0, x1],
[x0[1:, 1:, 2:, 3:], x1[:, :, :, :]])
crop_test(crop_0, [x0, x2],
[x0, x2])
crop_test(crop_1, [x0, x2],
[x0[:, :, :4, 5:], x2[:, :, :, :]])
crop_test(crop_l, [x0, x2],
[x0[:, :, :4, :2], x2[:2, :, :, :]])
crop_test(crop_c, [x0, x2],
[x0[:, :, :4, 2:4], x2[2:4, :, :, :]])
crop_test(crop_u, [x0, x2],
[x0[:, :, 1:, 5:], x2[4:, :, :, :]])
crop_test(crop_0, [x0, x1, x2],
[x0, x1, x2])
crop_test(crop_1, [x0, x1, x2],
[x0[:, :2, 1:4, 5:], x1[:, :, :, 2:], x2[:, :2, :3, :]])
crop_test(crop_l, [x0, x1, x2],
[x0[:1, :2, :3, :2], x1[:, :, :, :2], x2[:1, :2, :3, :]])
crop_test(crop_c, [x0, x1, x2],
[x0[:1, :2, 1:4, 2:4], x1[:, :, :, 1:3], x2[2:3, :2, :3, :]])
crop_test(crop_u, [x0, x1, x2],
[x0[1:, 1:, 2:, 5:], x1[:, :, :, 2:], x2[5:, 1:, 1:, :]])
crop_test(crop_x, [x0, x1, x2],
[x0[:1, :2, :, :], x1[:1, :2, :, :], x2[:1, :2, :, :]])
crop_test(crop_x, [x0, x1, x2, x0, x1, x2],
[x0[:1, :2, :, :], x1[:1, :2, :, :], x2[:1, :2, :, :],
x0[:1, :2, :, :], x1[:1, :2, :, :], x2[:1, :2, :, :]])
with pytest.raises(ValueError):
crop_test(crop_bad, [x0, x1, x2],
[x0[:1, :2, :, :], x1[:1, :2, :, :], x2[:1, :2, :, :]])
with pytest.raises(ValueError):
crop_test(crop_bad, [x0[:, :, :, 0], x1, x2[:, :, :, :, None]],
[x0[:1, :2, :, :], x1[:1, :2, :, :], x2[:1, :2, :, :]])
class TestConcatLayer:
@pytest.fixture
def layer(self):
from lasagne.layers.merge import ConcatLayer
return ConcatLayer([Mock(), Mock()], axis=1)
@pytest.fixture
def crop_layer_0(self):
from lasagne.layers.merge import ConcatLayer
return ConcatLayer([Mock(), Mock()], axis=0,
cropping=['lower'] * 2)
@pytest.fixture
def crop_layer_1(self):
from lasagne.layers.merge import ConcatLayer
return ConcatLayer([Mock(), Mock()], axis=1,
cropping=['lower'] * 2)
def test_get_output_shape_for(self, layer):
assert layer.get_output_shape_for([(3, 2), (3, 5)]) == (3, 7)
assert layer.get_output_shape_for([(3, 2), (3, None)]) == (3, None)
assert layer.get_output_shape_for([(None, 2), (3, 5)]) == (3, 7)
assert layer.get_output_shape_for([(None, 2), (None, 5)]) == (None, 7)
with pytest.raises(ValueError):
layer.get_output_shape_for([(4, None), (3, 5)])
with pytest.raises(ValueError):
layer.get_output_shape_for([(3, 2), (4, None)])
with pytest.raises(ValueError):
layer.get_output_shape_for([(None, 2), (3, 5), (4, 5)])
def test_get_output_shape_for_cropped(self, crop_layer_0, crop_layer_1):
input_shapes = [(3, 2), (4, 5)]
result_0 = crop_layer_0.get_output_shape_for(input_shapes)
result_1 = crop_layer_1.get_output_shape_for(input_shapes)
assert result_0 == (7, 2)
assert result_1 == (3, 7)
def test_get_output_for(self, layer):
inputs = [theano.shared(numpy.ones((3, 3))),
theano.shared(numpy.ones((3, 2)))]
result = layer.get_output_for(inputs)
result_eval = result.eval()
desired_result = numpy.hstack([input.get_value() for input in inputs])
assert (result_eval == desired_result).all()
def test_get_output_for_cropped(self, crop_layer_0, crop_layer_1):
x0 = numpy.random.random((5, 3))
x1 = numpy.random.random((4, 2))
inputs = [theano.shared(x0),
theano.shared(x1)]
result_0 = crop_layer_0.get_output_for(inputs).eval()
result_1 = crop_layer_1.get_output_for(inputs).eval()
desired_result_0 = numpy.concatenate([x0[:, :2], x1[:, :2]], axis=0)
desired_result_1 = numpy.concatenate([x0[:4, :], x1[:4, :]], axis=1)
assert (result_0 == desired_result_0).all()
assert (result_1 == desired_result_1).all()
class TestElemwiseSumLayer:
@pytest.fixture
def layer(self):
from lasagne.layers.merge import ElemwiseSumLayer
return ElemwiseSumLayer([Mock(), Mock()], coeffs=[2, -1])
@pytest.fixture
def crop_layer(self):
from lasagne.layers.merge import ElemwiseSumLayer
return ElemwiseSumLayer([Mock(), Mock()], coeffs=[2, -1],
cropping=['lower'] * 2)
def test_get_output_shape_for(self, layer):
assert layer.get_output_shape_for([(3, 2), (3, 2)]) == (3, 2)
assert layer.get_output_shape_for([(3, 2), (3, None)]) == (3, 2)
assert layer.get_output_shape_for([(None, 2), (3, 2)]) == (3, 2)
assert layer.get_output_shape_for([(None, 2), (None, 2)]) == (None, 2)
with pytest.raises(ValueError):
layer.get_output_shape_for([(3, None), (4, 2)])
with pytest.raises(ValueError):
layer.get_output_shape_for([(3, 2), (4, None)])
with pytest.raises(ValueError):
layer.get_output_shape_for([(None, 2), (3, 2), (4, 2)])
def test_get_output_for(self, layer):
a = numpy.array([[0, 1], [2, 3]])
b = numpy.array([[1, 2], [4, 5]])
inputs = [theano.shared(a),
theano.shared(b)]
result = layer.get_output_for(inputs)
result_eval = result.eval()
desired_result = 2*a - b
assert (result_eval == desired_result).all()
def test_get_output_for_cropped(self, crop_layer):
from numpy.testing import assert_array_almost_equal as aeq
x0 = numpy.random.random((5, 3))
x1 = numpy.random.random((4, 2))
inputs = [theano.shared(x0),
theano.shared(x1)]
result = crop_layer.get_output_for(inputs).eval()
desired_result = 2*x0[:4, :2] - x1[:4, :2]
aeq(result, desired_result)
def test_bad_coeffs_fails(self, layer):
from lasagne.layers.merge import ElemwiseSumLayer
with pytest.raises(ValueError):
ElemwiseSumLayer([Mock(), Mock()], coeffs=[2, 3, -1])
class TestElemwiseMergeLayerMul:
@pytest.fixture
def layer(self):
import theano.tensor as T
from lasagne.layers.merge import ElemwiseMergeLayer
return ElemwiseMergeLayer([Mock(), Mock()], merge_function=T.mul)
def test_get_output_for(self, layer):
a = numpy.array([[0, 1], [2, 3]])
b = numpy.array([[1, 2], [4, 5]])
inputs = [theano.shared(a),
theano.shared(b)]
result = layer.get_output_for(inputs)
result_eval = result.eval()
desired_result = a*b
assert (result_eval == desired_result).all()
class TestElemwiseMergeLayerMaximum:
@pytest.fixture
def layer(self):
import theano.tensor as T
from lasagne.layers.merge import ElemwiseMergeLayer
return ElemwiseMergeLayer([Mock(), Mock()], merge_function=T.maximum)
def test_get_output_for(self, layer):
a = numpy.array([[0, 1], [2, 3]])
b = numpy.array([[1, 2], [4, 5]])
inputs = [theano.shared(a),
theano.shared(b)]
result = layer.get_output_for(inputs)
result_eval = result.eval()
desired_result = numpy.maximum(a, b)
assert (result_eval == desired_result).all()
|
data/ReactiveX/RxPY/rx/linq/observable/toasync.py
|
from rx.observable import Observable
from rx.concurrency import timeout_scheduler
from rx.subjects import AsyncSubject
from rx.internal import extensionclassmethod
@extensionclassmethod(Observable)
def to_async(cls, func, scheduler=None):
"""Converts the function into an asynchronous function. Each invocation
of the resulting asynchronous function causes an invocation of the
original synchronous function on the specified scheduler.
Example:
res = Observable.to_async(lambda x, y: x + y)(4, 3)
res = Observable.to_async(lambda x, y: x + y, Scheduler.timeout)(4, 3)
res = Observable.to_async(lambda x: log.debug(x),
Scheduler.timeout)('hello')
func -- {Function} Function to convert to an asynchronous function.
scheduler -- {Scheduler} [Optional] Scheduler to run the function on. If
not specified, defaults to Scheduler.timeout.
Returns {Function} Asynchronous function.
"""
scheduler = scheduler or timeout_scheduler
def wrapper(*args):
subject = AsyncSubject()
def action(scheduler, state):
try:
result = func(*args)
except Exception as ex:
subject.on_error(ex)
return
subject.on_next(result)
subject.on_completed()
scheduler.schedule(action)
return subject.as_observable()
return wrapper
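# Illustrative subscription (hypothetical values, assuming the RxPY 1.x
# subscribe API): the wrapped function returns an AsyncSubject-backed
# observable that emits the single result and then completes.
#   add_async = Observable.to_async(lambda x, y: x + y)
#   add_async(4, 3).subscribe(lambda result: print(result))  # prints 7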
|
data/StackStorm/st2contrib/packs/docker/actions/pull_image.py
|
from lib.base import DockerBasePythonAction
__all__ = [
'DockerPullImageAction'
]
class DockerPullImageAction(DockerBasePythonAction):
def run(self, repo, tag=None, insecure_registry=False,
auth_username_override=None, auth_password_override=None):
auth_override = (auth_username_override and auth_password_override)
if auth_override:
auth_config = {}
auth_config['username'] = auth_username_override
auth_config['password'] = auth_password_override
return self.wrapper.pull(repo=repo, tag=tag, insecure_registry=insecure_registry,
auth_config=auth_config)
else:
return self.wrapper.pull(repo=repo, tag=tag, insecure_registry=insecure_registry)
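# Usage sketch (hypothetical parameter values): StackStorm invokes run() with
# the action parameters, so a pull with both override credentials reduces to
#   self.wrapper.pull(repo='busybox', tag='latest', insecure_registry=False,
#                     auth_config={'username': 'user', 'password': 'secret'})
# and to a plain self.wrapper.pull(repo='busybox', ...) when either override
# is missing.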
|
data/aerospike/aerospike-client-python/test/old_tests/test_udf_put.py
|
import pytest
import sys
import time
from .test_base_class import TestBaseClass
from aerospike import exception as e
aerospike = pytest.importorskip("aerospike")
try:
import aerospike
except ImportError:
print("Please install aerospike python client.")
sys.exit(1)
class TestUdfPut(TestBaseClass):
def setup_class(cls):
"""
Setup class
"""
hostlist, user, password = TestBaseClass.get_hosts()
config = {'hosts': hostlist}
if user is None and password is None:
TestUdfPut.client = aerospike.client(config).connect()
else:
TestUdfPut.client = aerospike.client(
config).connect(user, password)
def teardown_class(cls):
TestUdfPut.client.close()
def setup_method(self, method):
"""
Setup method
"""
def teardown_method(self, method):
"""
Teardown method
"""
time.sleep(1)
udf_list = TestUdfPut.client.udf_list({'timeout': 100})
for udf in udf_list:
if udf['name'] == 'example.lua':
TestUdfPut.client.udf_remove("example.lua")
def test_udf_put_without_parameters(self):
with pytest.raises(TypeError) as typeError:
TestUdfPut.client.udf_put()
assert "Required argument 'filename' (pos 1) not found" in str(
typeError.value)
def test_udf_put_with_proper_parameters(self):
policy = {}
filename = "example.lua"
udf_type = 0
status = TestUdfPut.client.udf_put(filename, udf_type, policy)
assert status == 0
udf_list = TestUdfPut.client.udf_list({})
present = False
for udf in udf_list:
if 'example.lua' == udf['name']:
present = True
        assert present
def test_udf_put_with_invalid_timeout_policy_value(self):
policy = {'timeout': 0.1}
filename = "example.lua"
udf_type = 0
try:
TestUdfPut.client.udf_put(filename, udf_type, policy)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "timeout is invalid"
def test_udf_put_with_proper_timeout_policy_value(self):
policy = {'timeout': 1000}
filename = "example.lua"
udf_type = 0
status = TestUdfPut.client.udf_put(filename, udf_type, policy)
assert status == 0
        udf_list = TestUdfPut.client.udf_list({})
        present = False
        for udf in udf_list:
            if 'example.lua' == udf['name']:
                present = True
        assert present
def test_udf_put_with_non_existent_filename(self):
policy = {}
filename = "somefile"
udf_type = 0
try:
TestUdfPut.client.udf_put(filename, udf_type, policy)
except e.LuaFileNotFound as exception:
assert exception.code == 1302
def test_udf_put_with_non_lua_udf_type_and_lua_script_file(self):
policy = {'timeout': 0}
filename = "example.lua"
udf_type = 1
try:
TestUdfPut.client.udf_put(filename, udf_type, policy)
except e.ClientError as exception:
assert exception.code == -1
assert exception.msg == "Invalid UDF language"
def test_udf_put_with_all_none_parameters(self):
with pytest.raises(TypeError) as exception:
TestUdfPut.client.udf_put(None, None, None)
assert "an integer is required" in str(exception.value)
def test_udf_put_with_filename_unicode(self):
policy = {}
filename = u"example.lua"
udf_type = 0
status = TestUdfPut.client.udf_put(filename, udf_type, policy)
assert status == 0
time.sleep(2)
udf_list = TestUdfPut.client.udf_list({})
present = False
for udf in udf_list:
if 'example.lua' == udf['name']:
present = True
        assert present
def test_udf_put_with_proper_parameters_without_connection(self):
policy = {}
filename = "example.lua"
udf_type = 0
config = {'hosts': [('127.0.0.1', 3000)]}
client1 = aerospike.client(config)
try:
client1.udf_put(filename, udf_type, policy)
except e.ClusterError as exception:
assert exception.code == 11
assert exception.msg == 'No connection to aerospike cluster'
|
data/MirantisWorkloadMobility/CloudFerry/cloudferry/lib/os/actions/check_config_quota_neutron.py
|
from cloudferry.lib.base.action import action
from cloudferry.lib.utils import log
from cloudferry.lib.utils import utils as utl
LOG = log.getLogger(__name__)
class CheckConfigQuotaNeutron(action.Action):
"""
Checking config quotas between src and dst clouds.
    If all tenants have custom quotas, then differing default configurations
    do not matter.
"""
def run(self, **kwargs):
src_cloud = self.src_cloud
dst_cloud = self.dst_cloud
network_src = src_cloud.resources[utl.NETWORK_RESOURCE]
identity_dst = dst_cloud.resources[utl.IDENTITY_RESOURCE]
network_dst = dst_cloud.resources[utl.NETWORK_RESOURCE]
search_opts_tenant = kwargs.get('search_opts_tenant', {})
tenants_src = self.get_src_tenants(search_opts_tenant)
list_quotas = network_src.list_quotas()
tenants_without_quotas = self.get_tenants_without_quotas(tenants_src,
list_quotas)
if not tenants_without_quotas:
LOG.info("On SRC cloud all tenants "
"have custom quotas for network")
LOG.info("Difference between clouds configs "
"default quotas will not calculated")
LOG.info("Migration can proceed")
return
LOG.info("Checking default quota "
"configuration on source and destination cloud")
quot = network_src.show_quota(tenants_without_quotas[0])
dst_temp_tenant = identity_dst.create_tenant("Test Tenant For Quotas")
quot_default_dst = network_dst.show_quota(dst_temp_tenant.id)
is_configs_different = False
identity_dst.delete_tenant(dst_temp_tenant)
for item_quot, val_quot in quot.iteritems():
if val_quot != quot_default_dst[item_quot]:
is_configs_different = True
LOG.info("Item %s in quotas is different (SRC CLOUD: %s, "
"DST CLOUD: %s)", item_quot, val_quot,
quot_default_dst[item_quot])
if not is_configs_different:
LOG.info("Configs on clouds is equals")
@staticmethod
def get_tenants_without_quotas(tenants_src, list_quotas):
tenants_ids = tenants_src.keys()
quotas_ids_tenants = [quota["tenant_id"] for quota in list_quotas]
return list(set(tenants_ids) - set(quotas_ids_tenants))
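    # e.g. tenants_src = {'t1': 'a', 't2': 'b'} with
    # list_quotas = [{'tenant_id': 't1'}] returns ['t2']; the set
    # difference means the ordering of the result is not guaranteed.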
def get_src_tenants(self, search_opts):
identity_src = self.src_cloud.resources[utl.IDENTITY_RESOURCE]
if search_opts.get('tenant_id'):
filter_tenants_ids_list = search_opts['tenant_id']
tenants = [identity_src.keystone_client.tenants.find(id=tnt_id) for
tnt_id in filter_tenants_ids_list]
else:
tenants = identity_src.get_tenants_list()
tenants_dict = {tenant.id: tenant.name for tenant in tenants}
return tenants_dict
|
data/OpenMDAO/OpenMDAO/examples/intersect_parabola_line.py
|
from __future__ import print_function
from openmdao.api import Component, Group, Problem, Newton, ScipyGMRES
class Line(Component):
"""Evaluates y = -2x + 4."""
def __init__(self):
super(Line, self).__init__()
self.add_param('x', 1.0)
self.add_output('y', 0.0)
self.slope = -2.0
self.intercept = 4.0
def solve_nonlinear(self, params, unknowns, resids):
""" y = -2x + 4 """
x = params['x']
m = self.slope
b = self.intercept
unknowns['y'] = m*x + b
def linearize(self, params, unknowns, resids):
""" Jacobian for our line."""
m = self.slope
J = {}
J['y', 'x'] = m
return J
class Parabola(Component):
"""Evaluates y = 3x^2 - 5"""
def __init__(self):
super(Parabola, self).__init__()
self.add_param('x', 1.0)
self.add_output('y', 0.0)
self.a = 3.0
self.b = 0.0
self.c = -5.0
def solve_nonlinear(self, params, unknowns, resids):
""" y = 3x^2 - 5 """
x = params['x']
a = self.a
b = self.b
c = self.c
unknowns['y'] = a*x**2 + b*x + c
def linearize(self, params, unknowns, resids):
""" Jacobian for our parabola."""
x = params['x']
a = self.a
b = self.b
J = {}
J['y', 'x'] = 2.0*a*x + b
return J
class Balance(Component):
"""Evaluates the residual y1-y2"""
def __init__(self):
super(Balance, self).__init__()
self.add_param('y1', 0.0)
self.add_param('y2', 0.0)
self.add_state('x', 5.0)
def solve_nonlinear(self, params, unknowns, resids):
"""This component does no calculation on its own. It mainly holds the
initial value of the state. An OpenMDAO solver outside of this
component varies it to drive the residual to zero."""
pass
def apply_nonlinear(self, params, unknowns, resids):
""" Report the residual y1-y2 """
y1 = params['y1']
y2 = params['y2']
resids['x'] = y1 - y2
def linearize(self, params, unknowns, resids):
""" Jacobian for our parabola."""
J = {}
J['x', 'y1'] = 1.0
J['x', 'y2'] = -1.0
return J
if __name__ == '__main__':
top = Problem()
root = top.root = Group()
root.add('line', Line())
root.add('parabola', Parabola())
root.add('bal', Balance())
root.connect('line.y', 'bal.y1')
root.connect('parabola.y', 'bal.y2')
root.connect('bal.x', 'line.x')
root.connect('bal.x', 'parabola.x')
root.nl_solver = Newton()
root.ln_solver = ScipyGMRES()
top.setup()
top['bal.x'] = 7.0
root.list_states()
top.run()
print('Positive Solution x=%f, line.y=%f, parabola.y=%f' % (top['bal.x'], top['line.y'], top['parabola.y']))
top['bal.x'] = -7.0
root.list_states()
top.run()
print('Negative Solution x=%f, line.y=%f, parabola.y=%f' % (top['bal.x'], top['line.y'], top['parabola.y']))
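    # Sanity check: setting -2x + 4 equal to 3x**2 - 5 gives
    # 3x**2 + 2x - 9 = 0, whose roots are x = (-1 +/- sqrt(28))/3,
    # i.e. roughly 1.4305 and -2.0972. The two Newton runs above, started
    # from +7 and -7, should converge to these two values.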
|
data/IvanMalison/okcupyd/tests/photo_test.py
|
from . import util
from okcupyd import User, photo
@util.use_cassette(path='photo_upload',
match_on=util.match_on_no_body)
def test_photo_upload():
uploader = photo.PhotoUploader()
upload_response_dict = uploader.upload_and_confirm('fixtures/image.jpg')
assert int(upload_response_dict['id']) > 0
@util.use_cassette(path='test_photo_delete', match_on=util.match_on_no_body)
def test_photo_delete():
user = User()
response_dict = user.photo.upload_and_confirm(user.quickmatch().photo_infos[0])
before_delete_photos = user.profile.photo_infos
user.photo.delete(response_dict['id'])
user.profile.refresh()
assert len(before_delete_photos) - 1 == len(user.profile.photo_infos)
def test_make_photo_uri_from_https_link():
photo_info = photo.Info.from_cdn_uri(
'https://k0.okccdn.com/php/load_okc_image'
'.php/images/150x150/558x800/0x21/400x421/0'
'/2254475731855279447.webp?v=2'
)
assert photo_info.id == 2254475731855279447
assert photo_info.thumb_nail_top == 21
@util.use_cassette
def test_photo_info_upload(vcr_live_sleep):
user = User()
response = user.photo.upload_and_confirm(user.quickmatch().photo_infos[0])
vcr_live_sleep(2)
assert int(response['id']) in [pi.id for pi in user.profile.photo_infos]
|
data/Schwanksta/python-arcgis-rest-query/setup.py
|
import sys
from setuptools import setup
install_requires = [
"argparse>=1.2.1",
"requests>=2.4.3"
]
setup(
name='arcgis-rest-query',
version='0.14',
description='A tool to download a layer from an ArcGIS web service as GeoJSON',
author='Ken Schwencke',
author_email='schwank@gmail.com',
url='https://github.com/Schwanksta/python-arcgis-rest-query',
license='MIT',
packages=('arcgis',),
scripts=(
'bin/arcgis-get',
),
install_requires=install_requires,
)
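# Standard installation from a checkout of this repo:
#
#     pip install .            # install the package and its dependencies
#     python setup.py sdist    # build a source distribution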
|
data/StackStorm/st2/st2common/tests/unit/test_aliasesregistrar.py
|
import os
from st2common.bootstrap import aliasesregistrar
from st2tests import DbTestCase, fixturesloader
ALIASES_FIXTURE_PACK_PATH = os.path.join(fixturesloader.get_fixtures_base_path(), 'dummy_pack_1')
ALIASES_FIXTURE_PATH = os.path.join(ALIASES_FIXTURE_PACK_PATH, 'aliases')
class TestAliasRegistrar(DbTestCase):
def test_alias_registration(self):
count = aliasesregistrar.register_aliases(pack_dir=ALIASES_FIXTURE_PACK_PATH)
self.assertEqual(count, len(os.listdir(ALIASES_FIXTURE_PATH)))
|
data/RoseOu/flasky/venv/lib/python2.7/site-packages/pygments/lexers/_stan_builtins.py
|
"""
pygments.lexers._stan_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names of functions for Stan used by
    ``pygments.lexers.math.StanLexer``.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
CONSTANTS=[ 'e',
'epsilon',
'log10',
'log2',
'negative_epsilon',
'negative_infinity',
'not_a_number',
'pi',
'positive_infinity',
'sqrt2']
FUNCTIONS=[ 'Phi',
'abs',
'acos',
'acosh',
'asin',
'asinh',
'atan',
'atan2',
'atanh',
'bernoulli_log',
'beta_binomial_log',
'beta_log',
'binary_log_loss',
'binomial_coefficient_log',
'categorical_log',
'cauchy_log',
'cbrt',
'ceil',
'chi_square_log',
'cholesky_decompose',
'col',
'cols',
'cos',
'cosh',
'determinant',
'diag_matrix',
'diagonal',
'dirichlet_log',
'dot_product',
'dot_self',
'double_exponential_log',
'eigenvalues',
'eigenvalues_sym',
'erf',
'erfc',
'exp',
'exp2',
'expm1',
'exponential_cdf',
'exponential_log',
'fabs',
'fdim',
'floor',
'fma',
'fmax',
'fmin',
'fmod',
'gamma_log',
'hypergeometric_log',
'hypot',
'if_else',
'int_step',
'inv_chi_square_log',
'inv_cloglog',
'inv_gamma_log',
'inv_logit',
'inv_wishart_log',
'inverse',
'lbeta',
'lgamma',
'lkj_corr_cholesky_log',
'lkj_corr_log',
'lkj_cov_log',
'lmgamma',
'log',
'log10',
'log1m',
'log1p',
'log1p_exp',
'log2',
'log_sum_exp',
'logistic_log',
'logit',
'lognormal_cdf',
'lognormal_log',
'max',
'mean',
'min',
'multi_normal_cholesky_log',
'multi_normal_log',
'multi_student_t_log',
'multinomial_log',
'multiply_log',
'multiply_lower_tri_self_transpose',
'neg_binomial_log',
'normal_cdf',
'normal_log',
'ordered_logistic_log',
'pareto_log',
'poisson_log',
'pow',
'prod',
'round',
'row',
'rows',
'scaled_inv_chi_square_log',
'sd',
'sin',
'singular_values',
'sinh',
'softmax',
'sqrt',
'square',
'step',
'student_t_log',
'sum',
'tan',
'tanh',
'tgamma',
'trace',
'trunc',
'uniform_log',
'variance',
'weibull_cdf',
'weibull_log',
'wishart_log']
DISTRIBUTIONS=[ 'bernoulli',
'beta',
'beta_binomial',
'categorical',
'cauchy',
'chi_square',
'dirichlet',
'double_exponential',
'exponential',
'gamma',
'hypergeometric',
'inv_chi_square',
'inv_gamma',
'inv_wishart',
'lkj_corr',
'lkj_corr_cholesky',
'lkj_cov',
'logistic',
'lognormal',
'multi_normal',
'multi_normal_cholesky',
'multi_student_t',
'multinomial',
'neg_binomial',
'normal',
'ordered_logistic',
'pareto',
'poisson',
'scaled_inv_chi_square',
'student_t',
'uniform',
'weibull',
'wishart']
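# These name lists are data consumed by pygments.lexers.math.StanLexer (see
# the module docstring): for example, because 'normal' appears in
# DISTRIBUTIONS, a sampling statement such as "y ~ normal(mu, sigma);" can
# be tokenized with 'normal' highlighted as a builtin.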
|
data/ZoomerAnalytics/xlwings/xlwings/conversion/numpy_conv.py
|
try:
import numpy as np
except ImportError:
np = None
if np:
try:
import pandas as pd
except ImportError:
pd = None
from . import Converter, Options
class NumpyArrayConverter(Converter):
writes_types = np.ndarray
@classmethod
def base_reader(cls, options):
return (
super(NumpyArrayConverter, cls).base_reader(
Options(options)
.defaults(empty=np.nan)
)
)
@classmethod
def read_value(cls, value, options):
dtype = options.get('dtype', None)
copy = options.get('copy', True)
order = options.get('order', None)
ndim = options.get('ndim', None) or 0
return np.array(value, dtype=dtype, copy=copy, order=order, ndmin=ndim)
@classmethod
def write_value(cls, value, options):
try:
value = np.where(np.isnan(value), None, value)
value = value.tolist()
except TypeError:
if pd:
value[pd.isnull(value)] = None
value = value.tolist()
else:
value = [[None if isinstance(c, float) and np.isnan(c) else c for c in row] for row in value]
return value
NumpyArrayConverter.register(np.array, np.ndarray)
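# Hedged usage sketch (assumes xlwings is installed and a workbook is open;
# the sheet and range names are illustrative): reading a range through this
# converter yields an ndarray, with empty cells mapped to np.nan.
#
#     import xlwings as xw
#     sheet = xw.Book().sheets[0]
#     arr = sheet.range('A1:C3').options(np.array, ndim=2).value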
|
data/StackStorm/st2contrib/packs/nest/actions/get_mode.py
|
from lib import actions
class GetModeAction(actions.BaseAction):
def run(self, structure=None, device=None):
if structure and device:
nest = self._get_device(structure, device)
else:
nest = self._get_default_device()
return nest.mode
|
data/Netflix/security_monkey/env-config/config-deploy.py
|
LOG_LEVEL = "DEBUG"
LOG_FILE = "/var/log/security_monkey/security_monkey-deploy.log"
SQLALCHEMY_DATABASE_URI = 'postgresql://postgres:securitymonkeypassword@localhost:5432/secmonkey'
SQLALCHEMY_POOL_SIZE = 50
SQLALCHEMY_MAX_OVERFLOW = 15
ENVIRONMENT = 'ec2'
USE_ROUTE53 = False
FQDN = 'ec2-XX-XXX-XXX-XXX.compute-1.amazonaws.com'
API_PORT = '5000'
WEB_PORT = '443'
WEB_PATH = '/static/ui.html'
FRONTED_BY_NGINX = True
NGINX_PORT = '443'
BASE_URL = 'https://{}/'.format(FQDN)
SECRET_KEY = '<INSERT_RANDOM_STRING_HERE>'
MAIL_DEFAULT_SENDER = 'securitymonkey@example.com'
SECURITY_REGISTERABLE = True
SECURITY_CONFIRMABLE = False
SECURITY_RECOVERABLE = False
SECURITY_PASSWORD_HASH = 'bcrypt'
SECURITY_PASSWORD_SALT = '<INSERT_RANDOM_STRING_HERE>'
SECURITY_TRACKABLE = True
SECURITY_POST_LOGIN_VIEW = BASE_URL
SECURITY_POST_REGISTER_VIEW = BASE_URL
SECURITY_POST_CONFIRM_VIEW = BASE_URL
SECURITY_POST_RESET_VIEW = BASE_URL
SECURITY_POST_CHANGE_VIEW = BASE_URL
SECURITY_TEAM_EMAIL = []
EMAILS_USE_SMTP = False
SES_REGION = 'us-east-1'
MAIL_SERVER = 'smtp.example.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USERNAME = 'username'
MAIL_PASSWORD = 'password'
WTF_CSRF_ENABLED = True
WTF_CSRF_SSL_STRICT = True
WTF_CSRF_METHODS = ['DELETE', 'POST', 'PUT', 'PATCH']
SECURITYGROUP_INSTANCE_DETAIL = 'FULL'
CORE_THREADS = 25
MAX_THREADS = 30
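# This module is typically consumed by pointing the SECURITY_MONKEY_SETTINGS
# environment variable at it (an assumption based on the Flask-style config):
#
#     export SECURITY_MONKEY_SETTINGS=/path/to/env-config/config-deploy.py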
|
data/ReactiveX/RxPY/tests/test_observable/test_takeuntil.py
|
import unittest
from rx import Observable
from rx.testing import TestScheduler, ReactiveTest, is_prime, MockDisposable
from rx.disposables import Disposable, SerialDisposable
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
pass
def _raise(ex):
raise RxException(ex)
class TestTakeUntil(unittest.TestCase):
def test_take_until_preempt_somedata_next(self):
scheduler = TestScheduler()
l_msgs = [on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_completed(250)]
r_msgs = [on_next(150, 1), on_next(225, 99), on_completed(230)]
l = scheduler.create_hot_observable(l_msgs)
r = scheduler.create_hot_observable(r_msgs)
def create():
return l.take_until(r)
results = scheduler.start(create)
results.messages.assert_equal(on_next(210, 2), on_next(220, 3), on_completed(225))
def test_take_until_preempt_somedata_error(self):
ex = 'ex'
scheduler = TestScheduler()
l_msgs = [on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_completed(250)]
r_msgs = [on_next(150, 1), on_error(225, ex)]
l = scheduler.create_hot_observable(l_msgs)
r = scheduler.create_hot_observable(r_msgs)
def create():
return l.take_until(r)
results = scheduler.start(create)
results.messages.assert_equal(on_next(210, 2), on_next(220, 3), on_error(225, ex))
def test_take_until_nopreempt_somedata_empty(self):
scheduler = TestScheduler()
l_msgs = [on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_completed(250)]
r_msgs = [on_next(150, 1), on_completed(225)]
l = scheduler.create_hot_observable(l_msgs)
r = scheduler.create_hot_observable(r_msgs)
def create():
return l.take_until(r)
results = scheduler.start(create)
results.messages.assert_equal(on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_completed(250))
def test_take_until_nopreempt_somedata_never(self):
scheduler = TestScheduler()
l_msgs = [on_next(150, 1), on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_completed(250)]
l = scheduler.create_hot_observable(l_msgs)
r = Observable.never()
def create():
return l.take_until(r)
results = scheduler.start(create)
results.messages.assert_equal(on_next(210, 2), on_next(220, 3), on_next(230, 4), on_next(240, 5), on_completed(250))
def test_take_until_preempt_never_next(self):
scheduler = TestScheduler()
r_msgs = [on_next(150, 1), on_next(225, 2), on_completed(250)]
l = Observable.never()
r = scheduler.create_hot_observable(r_msgs)
def create():
return l.take_until(r)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(225))
def test_take_until_preempt_never_error(self):
ex = 'ex'
scheduler = TestScheduler()
r_msgs = [on_next(150, 1), on_error(225, ex)]
l = Observable.never()
r = scheduler.create_hot_observable(r_msgs)
def create():
return l.take_until(r)
results = scheduler.start(create)
results.messages.assert_equal(on_error(225, ex))
def test_take_until_nopreempt_never_empty(self):
scheduler = TestScheduler()
r_msgs = [on_next(150, 1), on_completed(225)]
l = Observable.never()
r = scheduler.create_hot_observable(r_msgs)
def create():
return l.take_until(r)
results = scheduler.start(create)
results.messages.assert_equal()
def test_take_until_nopreempt_never_never(self):
scheduler = TestScheduler()
l = Observable.never()
r = Observable.never()
def create():
return l.take_until(r)
results = scheduler.start(create)
results.messages.assert_equal()
def test_take_until_preempt_beforefirstproduced(self):
scheduler = TestScheduler()
l_msgs = [on_next(150, 1), on_next(230, 2), on_completed(240)]
r_msgs = [on_next(150, 1), on_next(210, 2), on_completed(220)]
l = scheduler.create_hot_observable(l_msgs)
r = scheduler.create_hot_observable(r_msgs)
def create():
return l.take_until(r)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(210))
def test_take_until_preempt_beforefirstproduced_remain_silent_and_proper_disposed(self):
scheduler = TestScheduler()
l_msgs = [on_next(150, 1), on_error(215, 'ex'), on_completed(240)]
r_msgs = [on_next(150, 1), on_next(210, 2), on_completed(220)]
source_not_disposed = [False]
def action():
source_not_disposed[0] = True
l = scheduler.create_hot_observable(l_msgs).do_action(on_next=action)
r = scheduler.create_hot_observable(r_msgs)
def create():
return l.take_until(r)
results = scheduler.start(create)
results.messages.assert_equal(on_completed(210))
assert(not source_not_disposed[0])
def test_take_until_nopreempt_afterlastproduced_proper_disposed_signal(self):
scheduler = TestScheduler()
l_msgs = [on_next(150, 1), on_next(230, 2), on_completed(240)]
r_msgs = [on_next(150, 1), on_next(250, 2), on_completed(260)]
signal_not_disposed = [False]
l = scheduler.create_hot_observable(l_msgs)
def action():
signal_not_disposed[0] = True
r = scheduler.create_hot_observable(r_msgs).do_action(on_next=action)
def create():
return l.take_until(r)
results = scheduler.start(create)
results.messages.assert_equal(on_next(230, 2), on_completed(240))
assert(not signal_not_disposed[0])
|
data/PyTables/PyTables/bench/indexed_search.py
|
from __future__ import print_function
from time import time
import subprocess
import random
import numpy
STEP = 1000 * 100
SCALE = 0.1
NI_NTIMES = 1
MROW = 1000 * 1000.
COLDCACHE = 5
WARMCACHE = 5
READ_TIMES = 10
rdm_cod = ['lin', 'rnd']
prec = 6
def get_nrows(nrows_str):
if nrows_str.endswith("k"):
return int(float(nrows_str[:-1]) * 1000)
elif nrows_str.endswith("m"):
return int(float(nrows_str[:-1]) * 1000 * 1000)
elif nrows_str.endswith("g"):
return int(float(nrows_str[:-1]) * 1000 * 1000 * 1000)
else:
raise ValueError(
"value of nrows must end with either 'k', 'm' or 'g' suffixes.")
class DB(object):
def __init__(self, nrows, rng, userandom):
self.step = STEP
self.scale = SCALE
self.rng = rng
self.userandom = userandom
self.filename = '-'.join([rdm_cod[userandom], nrows])
self.nrows = get_nrows(nrows)
def get_db_size(self):
sout = subprocess.Popen("sync;du -s %s" % self.filename, shell=True,
stdout=subprocess.PIPE).stdout
line = [l for l in sout][0]
return int(line.split()[0])
def print_mtime(self, t1, explain):
mtime = time() - t1
print("%s:" % explain, round(mtime, 6))
print("Krows/s:", round((self.nrows / 1000.) / mtime, 6))
def print_qtime(self, colname, ltimes):
qtime1 = ltimes[0]
qtime2 = ltimes[-1]
print("Query time for %s:" % colname, round(qtime1, 6))
print("Mrows/s:", round((self.nrows / (MROW)) / qtime1, 6))
print("Query time for %s (cached):" % colname, round(qtime2, 6))
print("Mrows/s (cached):", round((self.nrows / (MROW)) / qtime2, 6))
def norm_times(self, ltimes):
"Get the mean and stddev of ltimes, avoiding the extreme values."
lmean = ltimes.mean()
lstd = ltimes.std()
ntimes = ltimes[ltimes < lmean + lstd]
nmean = ntimes.mean()
nstd = ntimes.std()
return nmean, nstd
def print_qtime_idx(self, colname, ltimes, repeated, verbose):
if repeated:
r = "[REP] "
else:
r = "[NOREP] "
ltimes = numpy.array(ltimes)
ntimes = len(ltimes)
qtime1 = ltimes[0]
ctimes = ltimes[1:COLDCACHE]
cmean, cstd = self.norm_times(ctimes)
wtimes = ltimes[WARMCACHE:]
wmean, wstd = self.norm_times(wtimes)
if verbose:
print("Times for cold cache:\n", ctimes)
print("Histogram for warm cache: %s\n%s" %
numpy.histogram(wtimes))
print("%s1st query time for %s:" % (r, colname),
round(qtime1, prec))
print("%sQuery time for %s (cold cache):" % (r, colname),
round(cmean, prec), "+-", round(cstd, prec))
print("%sQuery time for %s (warm cache):" % (r, colname),
round(wmean, prec), "+-", round(wstd, prec))
def print_db_sizes(self, init, filled, indexed):
table_size = (filled - init) / 1024.
indexes_size = (indexed - filled) / 1024.
print("Table size (MB):", round(table_size, 3))
print("Indexes size (MB):", round(indexes_size, 3))
print("Full size (MB):", round(table_size + indexes_size, 3))
def fill_arrays(self, start, stop):
arr_f8 = numpy.arange(start, stop, dtype='float64')
arr_i4 = numpy.arange(start, stop, dtype='int32')
if self.userandom:
arr_f8 += numpy.random.normal(0, stop * self.scale,
size=stop - start)
arr_i4 = numpy.array(arr_f8, dtype='int32')
return arr_i4, arr_f8
def create_db(self, dtype, kind, optlevel, verbose):
self.con = self.open_db(remove=1)
self.create_table(self.con)
init_size = self.get_db_size()
t1 = time()
self.fill_table(self.con)
table_size = self.get_db_size()
self.print_mtime(t1, 'Insert time')
self.index_db(dtype, kind, optlevel, verbose)
indexes_size = self.get_db_size()
self.print_db_sizes(init_size, table_size, indexes_size)
self.close_db(self.con)
def index_db(self, dtype, kind, optlevel, verbose):
if dtype == "int":
idx_cols = ['col2']
elif dtype == "float":
idx_cols = ['col4']
else:
idx_cols = ['col2', 'col4']
for colname in idx_cols:
t1 = time()
self.index_col(self.con, colname, kind, optlevel, verbose)
self.print_mtime(t1, 'Index time (%s)' % colname)
def query_db(self, niter, dtype, onlyidxquery, onlynonidxquery,
avoidfscache, verbose, inkernel):
self.con = self.open_db()
if dtype == "int":
reg_cols = ['col1']
idx_cols = ['col2']
elif dtype == "float":
reg_cols = ['col3']
idx_cols = ['col4']
else:
reg_cols = ['col1', 'col3']
idx_cols = ['col2', 'col4']
if avoidfscache:
rseed = int(numpy.random.randint(self.nrows))
else:
rseed = 19
numpy.random.seed(rseed)
base = numpy.random.randint(self.nrows)
if not onlyidxquery:
for colname in reg_cols:
ltimes = []
random.seed(rseed)
for i in range(NI_NTIMES):
t1 = time()
results = self.do_query(self.con, colname, base, inkernel)
ltimes.append(time() - t1)
if verbose:
print("Results len:", results)
self.print_qtime(colname, ltimes)
self.close_db(self.con)
self.con = self.open_db()
if not onlynonidxquery:
for colname in idx_cols:
ltimes = []
numpy.random.seed(rseed)
rndbase = numpy.random.randint(self.nrows, size=niter)
for i in range(niter):
base = rndbase[i]
t1 = time()
results = self.do_query(self.con, colname, base, inkernel)
ltimes.append(time() - t1)
if verbose:
print("Results len:", results)
self.print_qtime_idx(colname, ltimes, False, verbose)
self.close_db(self.con)
self.con = self.open_db()
ltimes = []
self.close_db(self.con)
self.con = self.open_db()
self.close_db(self.con)
def close_db(self, con):
con.close()
if __name__ == "__main__":
import sys
import getopt
try:
import psyco
psyco_imported = 1
except:
psyco_imported = 0
usage = """usage: %s [-T] [-P] [-v] [-f] [-k] [-p] [-m] [-c] [-q] [-i] [-I] [-S] [-x] [-z complevel] [-l complib] [-R range] [-N niter] [-n nrows] [-d datadir] [-O level] [-t kind] [-s] col -Q [suplim]
-T use Pytables
-P use Postgres
-v verbose
-f do a profile of the run (only query functionality & Python 2.5)
-k do a profile for kcachegrind use (out file is 'indexed_search.kcg')
-p use "psyco" if available
-m use random values to fill the table
-q do a query (both indexed and non-indexed versions)
-i do a query (just indexed one)
-I do a query (just in-kernel one)
-S do a query (just standard one)
-x choose a different seed for random numbers (i.e. avoid FS cache)
-c create the database
-z compress with zlib (no compression by default)
-l use complib for compression (zlib used by default)
-R select a range in a field in the form "start,stop" (def "0,10")
-N number of iterations for reading
-n sets the number of rows (in krows) in each table
-d directory to save data (default: data.nobackup)
-O set the optimization level for PyTables indexes
-t select the index type: "medium" (default) or "full", "light", "ultralight"
-s select a type column for operations ('int' or 'float'. def all)
    -Q do a repeated query up to 10**value
\n""" % sys.argv[0]
try:
opts, pargs = getopt.getopt(
sys.argv[1:], 'TPvfkpmcqiISxz:l:R:N:n:d:O:t:s:Q:')
except:
sys.stderr.write(usage)
sys.exit(1)
usepytables = 0
usepostgres = 0
verbose = 0
doprofile = 0
dokprofile = 0
usepsyco = 0
userandom = 0
docreate = 0
optlevel = 0
kind = "medium"
docompress = 0
complib = "zlib"
doquery = False
onlyidxquery = False
onlynonidxquery = False
inkernel = True
avoidfscache = 0
rng = [-1000, -1000]
repeatquery = 0
repeatvalue = 0
krows = '1k'
niter = READ_TIMES
dtype = "all"
datadir = "data.nobackup"
for option in opts:
if option[0] == '-T':
usepytables = 1
elif option[0] == '-P':
usepostgres = 1
elif option[0] == '-v':
verbose = 1
elif option[0] == '-f':
doprofile = 1
elif option[0] == '-k':
dokprofile = 1
elif option[0] == '-p':
usepsyco = 1
elif option[0] == '-m':
userandom = 1
elif option[0] == '-c':
docreate = 1
elif option[0] == '-q':
doquery = True
elif option[0] == '-i':
doquery = True
onlyidxquery = True
elif option[0] == '-I':
doquery = True
onlynonidxquery = True
elif option[0] == '-S':
doquery = True
onlynonidxquery = True
inkernel = False
elif option[0] == '-x':
avoidfscache = 1
elif option[0] == '-z':
docompress = int(option[1])
elif option[0] == '-l':
complib = option[1]
elif option[0] == '-R':
rng = [int(i) for i in option[1].split(",")]
elif option[0] == '-N':
niter = int(option[1])
elif option[0] == '-n':
krows = option[1]
elif option[0] == '-d':
datadir = option[1]
elif option[0] == '-O':
optlevel = int(option[1])
elif option[0] == '-t':
if option[1] in ('full', 'medium', 'light', 'ultralight'):
kind = option[1]
else:
print("kind should be either 'full', 'medium', 'light' or "
"'ultralight'")
sys.exit(1)
elif option[0] == '-s':
if option[1] in ('int', 'float'):
dtype = option[1]
else:
print("column should be either 'int' or 'float'")
sys.exit(1)
elif option[0] == '-Q':
repeatquery = 1
repeatvalue = int(option[1])
if not usepytables and not usepostgres:
print("Please select a backend:")
print("PyTables: -T")
print("Postgres: -P")
sys.exit(1)
if usepytables:
from pytables_backend import PyTables_DB
db = PyTables_DB(krows, rng, userandom, datadir,
docompress, complib, kind, optlevel)
elif usepostgres:
from postgres_backend import Postgres_DB
db = Postgres_DB(krows, rng, userandom)
if not avoidfscache:
numpy.random.seed(20)
if verbose:
if userandom:
print("using random values")
if onlyidxquery:
print("doing indexed queries only")
if psyco_imported and usepsyco:
psyco.bind(db.create_db)
psyco.bind(db.query_db)
if docreate:
if verbose:
print("writing %s rows" % krows)
db.create_db(dtype, kind, optlevel, verbose)
if doquery:
print("Calling query_db() %s times" % niter)
if doprofile:
import pstats
import cProfile as prof
prof.run(
'db.query_db(niter, dtype, onlyidxquery, onlynonidxquery, '
'avoidfscache, verbose, inkernel)',
'indexed_search.prof')
stats = pstats.Stats('indexed_search.prof')
stats.strip_dirs()
stats.sort_stats('time', 'calls')
if verbose:
stats.print_stats()
else:
stats.print_stats(20)
elif dokprofile:
from cProfile import Profile
import lsprofcalltree
prof = Profile()
prof.run(
'db.query_db(niter, dtype, onlyidxquery, onlynonidxquery, '
'avoidfscache, verbose, inkernel)')
kcg = lsprofcalltree.KCacheGrind(prof)
ofile = open('indexed_search.kcg', 'w')
kcg.output(ofile)
ofile.close()
elif doprofile:
import hotshot
import hotshot.stats
prof = hotshot.Profile("indexed_search.prof")
benchtime, stones = prof.run(
'db.query_db(niter, dtype, onlyidxquery, onlynonidxquery, '
'avoidfscache, verbose, inkernel)')
prof.close()
stats = hotshot.stats.load("indexed_search.prof")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
else:
db.query_db(niter, dtype, onlyidxquery, onlynonidxquery,
avoidfscache, verbose, inkernel)
if repeatquery:
db.rng = [1, 1]
if verbose:
print("range:", db.rng)
db.query_db(niter, dtype, onlyidxquery, onlynonidxquery,
avoidfscache, verbose, inkernel)
for i in range(repeatvalue):
for j in (1, 2, 5):
rng = j * 10 ** i
db.rng = [-rng / 2, rng / 2]
if verbose:
print("range:", db.rng)
db.query_db(niter, dtype, onlyidxquery, onlynonidxquery,
avoidfscache, verbose, inkernel)
|
data/KunihikoKido/sublime-elasticsearch-client/lib/dateutil/easter.py
|
"""
This module offers a generic easter computing method for any given year, using
Western, Orthodox or Julian algorithms.
"""
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
def easter(year, method=EASTER_WESTERN):
"""
This method was ported from the work done by GM Arts,
on top of the algorithm by Claus Tondering, which was
    based in part on the algorithm of Oudin (1940), as
quoted in "Explanatory Supplement to the Astronomical
Almanac", P. Kenneth Seidelmann, editor.
This algorithm implements three different easter
calculation methods:
1 - Original calculation in Julian calendar, valid in
dates after 326 AD
2 - Original method, with date converted to Gregorian
calendar, valid in years 1583 to 4099
3 - Revised method, in Gregorian calendar, valid in
years 1583 to 4099 as well
These methods are represented by the constants:
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
The default method is method 3.
More about the algorithm may be found at:
http://users.chariot.net.au/~gmarts/eastalg.htm
and
http://www.tondering.dk/claus/calendar.html
"""
if not (1 <= method <= 3):
raise ValueError("invalid method")
y = year
g = y % 19
e = 0
if method < 3:
i = (19*g + 15) % 30
j = (y + y//4 + i) % 7
if method == 2:
e = 10
if y > 1600:
e = e + y//100 - 16 - (y//100 - 16)//4
else:
c = y//100
h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
j = (y + y//4 + i + 2 - c + c//4) % 7
p = i - j + e
d = 1 + (p + 27 + (p + 6)//40) % 31
m = 3 + (p + 26)//30
return datetime.date(int(y), int(m), int(d))
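# Example: Western (Gregorian) Easter for the year 2000:
#
#     easter(2000)  # -> datetime.date(2000, 4, 23)
#
# Passing EASTER_JULIAN or EASTER_ORTHODOX selects the other two methods
# described in the docstring.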
|
data/Impactstory/total-impact-core/totalimpact/cache.py
|
import os
import sys
import hashlib
import logging
import json
from cPickle import PicklingError
import redis
from totalimpact import REDIS_CACHE_DATABASE_NUMBER
logger = logging.getLogger("ti.cache")
cache_client = redis.from_url(os.getenv("REDIS_URL"), REDIS_CACHE_DATABASE_NUMBER)
MAX_PAYLOAD_SIZE_BYTES = 1000*1000
MAX_CACHE_SIZE_BYTES = 100*1000*1000
class CacheException(Exception):
pass
class Cache(object):
""" Maintains a cache of URL responses in memcached """
def _build_hash_key(self, key):
json_key = json.dumps(key)
hash_key = hashlib.md5(json_key.encode("utf-8")).hexdigest()
return hash_key
def _get_client(self):
return cache_client
def __init__(self, max_cache_age=60*60):
self.max_cache_age = max_cache_age
self.flush_cache()
    def flush_cache(self):
        """ Empty the cache database """
        mc = self._get_client()
        mc.flushdb()
def get_cache_entry(self, key):
""" Get an entry from the cache, returns None if not found """
mc = self._get_client()
hash_key = self._build_hash_key(key)
response = mc.get(hash_key)
if response:
response = json.loads(response)
return response
def set_cache_entry(self, key, data):
""" Store a cache entry """
if sys.getsizeof(data["text"]) > MAX_PAYLOAD_SIZE_BYTES:
logger.debug(u"Not caching because payload is too large")
return None
mc = self._get_client()
if mc.info()["used_memory"] >= MAX_CACHE_SIZE_BYTES:
logger.debug(u"Not caching because redis cache is too full")
return None
hash_key = self._build_hash_key(key)
set_response = mc.set(hash_key, json.dumps(data))
mc.expire(hash_key, self.max_cache_age)
if not set_response:
logger.warning("Unable to store into Redis. Make sure redis server is running.")
raise CacheException("Unable to store into Redis. Make sure redis server is running.")
return set_response
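# Hedged usage sketch (assumes REDIS_URL points at a running redis server;
# the key and payload below are illustrative):
#
#     cache = Cache(max_cache_age=60)
#     cache.set_cache_entry("http://example.org/api", {"text": "body"})
#     cache.get_cache_entry("http://example.org/api")  # -> {"text": "body"}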
|
data/TheTorProject/ooni-probe/ooni/nettests/experimental/script.py
|
from ooni import nettest
from ooni.utils import log
from twisted.internet import defer, protocol, reactor
from twisted.python import usage
import os
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
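# e.g. which('sh') typically returns '/bin/sh' on a POSIX system, and
# which('no-such-binary') returns None.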
class UsageOptions(usage.Options):
optParameters = [
['interpreter', 'i', '', 'The interpreter to use'],
['script', 's', '', 'The script to run']
]
class ScriptProcessProtocol(protocol.ProcessProtocol):
def __init__(self, test_case):
self.test_case = test_case
self.deferred = defer.Deferred()
def connectionMade(self):
log.debug("connectionMade")
self.transport.closeStdin()
self.test_case.report['lua_output'] = ""
def outReceived(self, data):
log.debug('outReceived: %s' % data)
self.test_case.report['lua_output'] += data
def errReceived(self, data):
log.err('Script error: %s' % data)
self.transport.signalProcess('KILL')
def processEnded(self, status):
rc = status.value.exitCode
log.debug('processEnded: %s, %s' % \
(rc, self.test_case.report['lua_output']))
if rc == 0:
self.deferred.callback(self)
else:
self.deferred.errback(rc)
class Script(nettest.NetTestCase):
name = "Script test"
version = "0.1"
authors = "Dominic Hamon"
usageOptions = UsageOptions
requiredOptions = ['interpreter', 'script']
requiresRoot = False
requiresTor = False
def test_run_script(self):
"""
We run the script specified in the usage options and take whatever
is printed to stdout as the results of the test.
"""
processProtocol = ScriptProcessProtocol(self)
interpreter = self.localOptions['interpreter']
if not which(interpreter):
log.err('Unable to find %s executable in PATH.' % interpreter)
return
reactor.spawnProcess(processProtocol,
interpreter,
args=[interpreter, self.localOptions['script']],
env={'HOME': os.environ['HOME']},
usePTY=True)
if not reactor.running:
reactor.run()
return processProtocol.deferred
|
data/adblockplus/gyp/test/lib/TestCmd.py
|
"""
TestCmd.py: a testing framework for commands and scripts.
The TestCmd module provides a framework for portable automated testing
of executable commands and scripts (in any language, not just Python),
especially commands and scripts that require file system interaction.
In addition to running tests and evaluating conditions, the TestCmd
module manages and cleans up one or more temporary workspace
directories, and provides methods for creating files and directories in
those workspace directories from in-line data (here-documents), allowing
tests to be completely self-contained.
A TestCmd environment object is created via the usual invocation:
import TestCmd
test = TestCmd.TestCmd()
There are a bunch of keyword arguments available at instantiation:
test = TestCmd.TestCmd(description = 'string',
program = 'program_or_script_to_test',
interpreter = 'script_interpreter',
workdir = 'prefix',
subdir = 'subdir',
verbose = Boolean,
match = default_match_function,
diff = default_diff_function,
combine = Boolean)
There are a bunch of methods that let you do different things:
test.verbose_set(1)
test.description_set('string')
test.program_set('program_or_script_to_test')
test.interpreter_set('script_interpreter')
test.interpreter_set(['script_interpreter', 'arg'])
test.workdir_set('prefix')
test.workdir_set('')
test.workpath('file')
test.workpath('subdir', 'file')
test.subdir('subdir', ...)
test.rmdir('subdir', ...)
test.write('file', "contents\n")
test.write(['subdir', 'file'], "contents\n")
test.read('file')
test.read(['subdir', 'file'])
test.read('file', mode)
test.read(['subdir', 'file'], mode)
test.writable('dir', 1)
test.writable('dir', None)
test.preserve(condition, ...)
test.cleanup(condition)
test.command_args(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program')
test.run(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
chdir = 'directory_to_chdir_to',
             stdin = 'input to feed to the program\n',
             universal_newlines = True)
p = test.start(program = 'program_or_script_to_run',
interpreter = 'script_interpreter',
arguments = 'arguments to pass to program',
universal_newlines = None)
test.finish(self, p)
test.pass_test()
test.pass_test(condition)
test.pass_test(condition, function)
test.fail_test()
test.fail_test(condition)
test.fail_test(condition, function)
test.fail_test(condition, function, skip)
test.no_result()
test.no_result(condition)
test.no_result(condition, function)
test.no_result(condition, function, skip)
test.stdout()
test.stdout(run)
test.stderr()
test.stderr(run)
test.symlink(target, link)
test.banner(string)
test.banner(string, width)
test.diff(actual, expected)
test.match(actual, expected)
test.match_exact("actual 1\nactual 2\n", "expected 1\nexpected 2\n")
test.match_exact(["actual 1\n", "actual 2\n"],
["expected 1\n", "expected 2\n"])
test.match_re("actual 1\nactual 2\n", regex_string)
test.match_re(["actual 1\n", "actual 2\n"], list_of_regexes)
test.match_re_dotall("actual 1\nactual 2\n", regex_string)
test.match_re_dotall(["actual 1\n", "actual 2\n"], list_of_regexes)
test.tempdir()
test.tempdir('temporary-directory')
test.sleep()
test.sleep(seconds)
test.where_is('foo')
test.where_is('foo', 'PATH1:PATH2')
test.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
test.unlink('file')
test.unlink('subdir', 'file')
The TestCmd module provides pass_test(), fail_test(), and no_result()
unbound functions that report test results for use with the Aegis change
management system. These methods terminate the test immediately,
reporting PASSED, FAILED, or NO RESULT respectively, and exiting with
status 0 (success), 1 or 2 respectively. This allows for a distinction
between an actual failed test and a test that could not be properly
evaluated because of an external condition (such as a full file system
or incorrect permissions).
import TestCmd
TestCmd.pass_test()
TestCmd.pass_test(condition)
TestCmd.pass_test(condition, function)
TestCmd.fail_test()
TestCmd.fail_test(condition)
TestCmd.fail_test(condition, function)
TestCmd.fail_test(condition, function, skip)
TestCmd.no_result()
TestCmd.no_result(condition)
TestCmd.no_result(condition, function)
TestCmd.no_result(condition, function, skip)
The TestCmd module also provides unbound functions that handle matching
in the same way as the match_*() methods described above.
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_exact)
test = TestCmd.TestCmd(match = TestCmd.match_re)
test = TestCmd.TestCmd(match = TestCmd.match_re_dotall)
The TestCmd module provides unbound functions that can be used for the
"diff" argument to TestCmd.TestCmd instantiation:
import TestCmd
test = TestCmd.TestCmd(match = TestCmd.match_re,
diff = TestCmd.diff_re)
test = TestCmd.TestCmd(diff = TestCmd.simple_diff)
The "diff" argument can also be used with standard difflib functions:
import difflib
test = TestCmd.TestCmd(diff = difflib.context_diff)
test = TestCmd.TestCmd(diff = difflib.unified_diff)
Lastly, the where_is() method also exists in an unbound function
version.
import TestCmd
TestCmd.where_is('foo')
TestCmd.where_is('foo', 'PATH1:PATH2')
TestCmd.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
"""
__author__ = "Steven Knight <knight at baldmt dot com>"
__revision__ = "TestCmd.py 0.37.D001 2010/01/11 16:55:50 knight"
__version__ = "0.37"
import errno
import os
import os.path
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import traceback
import types
import UserList
__all__ = [
'diff_re',
'fail_test',
'no_result',
'pass_test',
'match_exact',
'match_re',
'match_re_dotall',
'python_executable',
'TestCmd'
]
try:
import difflib
except ImportError:
__all__.append('simple_diff')
def is_List(e):
return type(e) is types.ListType \
or isinstance(e, UserList.UserList)
try:
from UserString import UserString
except ImportError:
class UserString:
pass
if hasattr(types, 'UnicodeType'):
def is_String(e):
return type(e) is types.StringType \
or type(e) is types.UnicodeType \
or isinstance(e, UserString)
else:
def is_String(e):
return type(e) is types.StringType or isinstance(e, UserString)
tempfile.template = 'testcmd.'
if os.name in ('posix', 'nt'):
tempfile.template = 'testcmd.' + str(os.getpid()) + '.'
else:
tempfile.template = 'testcmd.'
re_space = re.compile(r'\s')
_Cleanup = []
_chain_to_exitfunc = None
def _clean():
global _Cleanup
cleanlist = filter(None, _Cleanup)
del _Cleanup[:]
cleanlist.reverse()
for test in cleanlist:
test.cleanup()
if _chain_to_exitfunc:
_chain_to_exitfunc()
try:
import atexit
except ImportError:
try:
_chain_to_exitfunc = sys.exitfunc
except AttributeError:
pass
sys.exitfunc = _clean
else:
atexit.register(_clean)
try:
zip
except NameError:
def zip(*lists):
result = []
for i in xrange(min(map(len, lists))):
result.append(tuple(map(lambda l, i=i: l[i], lists)))
return result
class Collector:
def __init__(self, top):
self.entries = [top]
def __call__(self, arg, dirname, names):
pathjoin = lambda n, d=dirname: os.path.join(d, n)
self.entries.extend(map(pathjoin, names))
def _caller(tblist, skip):
string = ""
arr = []
for file, line, name, text in tblist:
if file[-10:] == "TestCmd.py":
break
arr = [(file, line, name, text)] + arr
atfrom = "at"
for file, line, name, text in arr[skip:]:
if name in ("?", "<module>"):
name = ""
else:
name = " (" + name + ")"
string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name))
atfrom = "\tfrom"
return string
def fail_test(self = None, condition = 1, function = None, skip = 0):
"""Cause the test to fail.
By default, the fail_test() method reports that the test FAILED
and exits with a status of 1. If a condition argument is supplied,
the test fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
at = _caller(traceback.extract_stack(), skip)
sys.stderr.write("FAILED test" + of + desc + sep + at)
sys.exit(1)
def no_result(self = None, condition = 1, function = None, skip = 0):
"""Causes a test to exit with no valid result.
By default, the no_result() method reports NO RESULT for the test
and exits with a status of 2. If a condition argument is supplied,
the test fails only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
of = ""
desc = ""
sep = " "
if not self is None:
if self.program:
of = " of " + self.program
sep = "\n\t"
if self.description:
desc = " [" + self.description + "]"
sep = "\n\t"
if os.environ.get('TESTCMD_DEBUG_SKIPS'):
at = _caller(traceback.extract_stack(), skip)
sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
else:
sys.stderr.write("NO RESULT\n")
sys.exit(2)
def pass_test(self = None, condition = 1, function = None):
"""Causes a test to pass.
By default, the pass_test() method reports PASSED for the test
and exits with a status of 0. If a condition argument is supplied,
the test passes only if the condition is true.
"""
if not condition:
return
if not function is None:
function()
sys.stderr.write("PASSED\n")
sys.exit(0)
def match_exact(lines = None, matches = None):
"""
"""
if not is_List(lines):
lines = string.split(lines, "\n")
if not is_List(matches):
matches = string.split(matches, "\n")
if len(lines) != len(matches):
return
for i in range(len(lines)):
if lines[i] != matches[i]:
return
return 1
def match_re(lines = None, res = None):
"""
"""
if not is_List(lines):
lines = string.split(lines, "\n")
if not is_List(res):
res = string.split(res, "\n")
if len(lines) != len(res):
return
for i in range(len(lines)):
s = "^" + res[i] + "$"
try:
expr = re.compile(s)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if not expr.search(lines[i]):
return
return 1
def match_re_dotall(lines = None, res = None):
"""
"""
if not type(lines) is type(""):
lines = string.join(lines, "\n")
if not type(res) is type(""):
res = string.join(res, "\n")
s = "^" + res + "$"
try:
expr = re.compile(s, re.DOTALL)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if expr.match(lines):
return 1
try:
import difflib
except ImportError:
pass
else:
def simple_diff(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A function with the same calling signature as difflib.context_diff
(diff -c) and difflib.unified_diff (diff -u) but which prints
output like the simple, unadorned 'diff" command.
"""
sm = difflib.SequenceMatcher(None, a, b)
def comma(x1, x2):
return x1+1 == x2 and str(x2) or '%s,%s' % (x1+1, x2)
result = []
for op, a1, a2, b1, b2 in sm.get_opcodes():
if op == 'delete':
result.append("%sd%d" % (comma(a1, a2), b1))
result.extend(map(lambda l: '< ' + l, a[a1:a2]))
elif op == 'insert':
result.append("%da%s" % (a1, comma(b1, b2)))
result.extend(map(lambda l: '> ' + l, b[b1:b2]))
elif op == 'replace':
result.append("%sc%s" % (comma(a1, a2), comma(b1, b2)))
result.extend(map(lambda l: '< ' + l, a[a1:a2]))
result.append('---')
result.extend(map(lambda l: '> ' + l, b[b1:b2]))
return result
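    # e.g. simple_diff(['a\n', 'b\n'], ['a\n', 'c\n']) returns
    # ['2c2', '< b\n', '---', '> c\n'], mimicking plain "diff" output.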
def diff_re(a, b, fromfile='', tofile='',
fromfiledate='', tofiledate='', n=3, lineterm='\n'):
"""
A simple "diff" of two sets of lines when the expected lines
are regular expressions. This is a really dumb thing that
just compares each line in turn, so it doesn't look for
chunks of matching lines and the like--but at least it lets
        you know exactly which line first didn't compare correctly.
"""
result = []
diff = len(a) - len(b)
if diff < 0:
a = a + ['']*(-diff)
elif diff > 0:
b = b + ['']*diff
i = 0
for aline, bline in zip(a, b):
s = "^" + aline + "$"
try:
expr = re.compile(s)
except re.error, e:
msg = "Regular expression error in %s: %s"
raise re.error, msg % (repr(s), e[0])
if not expr.search(bline):
result.append("%sc%s" % (i+1, i+1))
result.append('< ' + repr(a[i]))
result.append('---')
result.append('> ' + repr(b[i]))
i = i+1
return result
if os.name == 'java':
python_executable = os.path.join(sys.prefix, 'jython')
else:
python_executable = sys.executable
if sys.platform == 'win32':
default_sleep_seconds = 2
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = string.split(path, os.pathsep)
if pathext is None:
pathext = os.environ['PATHEXT']
if is_String(pathext):
pathext = string.split(pathext, os.pathsep)
for ext in pathext:
if string.lower(ext) == string.lower(file[-len(ext):]):
pathext = ['']
break
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
return fext
return None
else:
def where_is(file, path=None, pathext=None):
if path is None:
path = os.environ['PATH']
if is_String(path):
path = string.split(path, os.pathsep)
for dir in path:
f = os.path.join(dir, file)
if os.path.isfile(f):
try:
st = os.stat(f)
except OSError:
continue
if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
return f
return None
default_sleep_seconds = 1
try:
import subprocess
except ImportError:
import new
subprocess = new.module('subprocess')
subprocess.PIPE = 'PIPE'
subprocess.STDOUT = 'STDOUT'
subprocess.mswindows = (sys.platform == 'win32')
try:
import popen2
popen2.Popen3
except AttributeError:
class Popen3:
universal_newlines = 1
def __init__(self, command, **kw):
if sys.platform == 'win32' and command[0] == '"':
command = '"' + command + '"'
(stdin, stdout, stderr) = os.popen3(' ' + command)
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
def close_output(self):
self.stdout.close()
self.resultcode = self.stderr.close()
def wait(self):
resultcode = self.resultcode
if os.WIFEXITED(resultcode):
return os.WEXITSTATUS(resultcode)
elif os.WIFSIGNALED(resultcode):
return os.WTERMSIG(resultcode)
else:
return None
else:
try:
popen2.Popen4
except AttributeError:
class Popen4(popen2.Popen3):
childerr = None
def __init__(self, cmd, bufsize=-1):
p2cread, p2cwrite = os.pipe()
c2pread, c2pwrite = os.pipe()
self.pid = os.fork()
if self.pid == 0:
os.dup2(p2cread, 0)
os.dup2(c2pwrite, 1)
os.dup2(c2pwrite, 2)
for i in range(3, popen2.MAXFD):
try:
os.close(i)
except: pass
try:
os.execvp(cmd[0], cmd)
finally:
os._exit(1)
os._exit(1)
os.close(p2cread)
self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
os.close(c2pwrite)
self.fromchild = os.fdopen(c2pread, 'r', bufsize)
popen2._active.append(self)
popen2.Popen4 = Popen4
class Popen3(popen2.Popen3, popen2.Popen4):
universal_newlines = 1
def __init__(self, command, **kw):
if kw.get('stderr') == 'STDOUT':
apply(popen2.Popen4.__init__, (self, command, 1))
else:
apply(popen2.Popen3.__init__, (self, command, 1))
self.stdin = self.tochild
self.stdout = self.fromchild
self.stderr = self.childerr
def wait(self, *args, **kw):
resultcode = apply(popen2.Popen3.wait, (self,)+args, kw)
if os.WIFEXITED(resultcode):
return os.WEXITSTATUS(resultcode)
elif os.WIFSIGNALED(resultcode):
return os.WTERMSIG(resultcode)
else:
return None
subprocess.Popen = Popen3
PIPE = subprocess.PIPE
if subprocess.mswindows:
from win32file import ReadFile, WriteFile
from win32pipe import PeekNamedPipe
import msvcrt
else:
import select
import fcntl
try: fcntl.F_GETFL
except AttributeError: fcntl.F_GETFL = 3
try: fcntl.F_SETFL
except AttributeError: fcntl.F_SETFL = 4
class Popen(subprocess.Popen):
def recv(self, maxsize=None):
return self._recv('stdout', maxsize)
def recv_err(self, maxsize=None):
return self._recv('stderr', maxsize)
def send_recv(self, input='', maxsize=None):
return self.send(input), self.recv(maxsize), self.recv_err(maxsize)
def get_conn_maxsize(self, which, maxsize):
if maxsize is None:
maxsize = 1024
elif maxsize < 1:
maxsize = 1
return getattr(self, which), maxsize
def _close(self, which):
getattr(self, which).close()
setattr(self, which, None)
if subprocess.mswindows:
def send(self, input):
if not self.stdin:
return None
try:
x = msvcrt.get_osfhandle(self.stdin.fileno())
(errCode, written) = WriteFile(x, input)
except ValueError:
return self._close('stdin')
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
x = msvcrt.get_osfhandle(conn.fileno())
(read, nAvail, nMessage) = PeekNamedPipe(x, 0)
if maxsize < nAvail:
nAvail = maxsize
if nAvail > 0:
(errCode, read) = ReadFile(x, nAvail, None)
except ValueError:
return self._close(which)
except (subprocess.pywintypes.error, Exception), why:
if why[0] in (109, errno.ESHUTDOWN):
return self._close(which)
raise
return read
else:
def send(self, input):
if not self.stdin:
return None
if not select.select([], [self.stdin], [], 0)[1]:
return 0
try:
written = os.write(self.stdin.fileno(), input)
except OSError, why:
if why[0] == errno.EPIPE:
return self._close('stdin')
raise
return written
def _recv(self, which, maxsize):
conn, maxsize = self.get_conn_maxsize(which, maxsize)
if conn is None:
return None
try:
flags = fcntl.fcntl(conn, fcntl.F_GETFL)
except TypeError:
flags = None
else:
if not conn.closed:
fcntl.fcntl(conn, fcntl.F_SETFL, flags| os.O_NONBLOCK)
try:
if not select.select([conn], [], [], 0)[0]:
return ''
r = conn.read(maxsize)
if not r:
return self._close(which)
return r
finally:
if not conn.closed and not flags is None:
fcntl.fcntl(conn, fcntl.F_SETFL, flags)
disconnect_message = "Other end disconnected!"
def recv_some(p, t=.1, e=1, tr=5, stderr=0):
if tr < 1:
tr = 1
x = time.time()+t
y = []
r = ''
pr = p.recv
if stderr:
pr = p.recv_err
while time.time() < x or r:
r = pr()
if r is None:
if e:
raise Exception(disconnect_message)
else:
break
elif r:
y.append(r)
else:
time.sleep(max((x-time.time())/tr, 0))
return ''.join(y)
def send_all(p, data):
while len(data):
sent = p.send(data)
if sent is None:
raise Exception(disconnect_message)
data = buffer(data, sent)
try:
object
except NameError:
class object:
pass
class TestCmd(object):
"""Class TestCmd
"""
def __init__(self, description = None,
program = None,
interpreter = None,
workdir = None,
subdir = None,
verbose = None,
match = None,
diff = None,
combine = 0,
universal_newlines = 1):
self._cwd = os.getcwd()
self.description_set(description)
self.program_set(program)
self.interpreter_set(interpreter)
if verbose is None:
try:
verbose = max( 0, int(os.environ.get('TESTCMD_VERBOSE', 0)) )
except ValueError:
verbose = 0
self.verbose_set(verbose)
self.combine = combine
self.universal_newlines = universal_newlines
if match is not None:
self.match_function = match
else:
self.match_function = match_re
if diff is not None:
self.diff_function = diff
else:
try:
difflib
except NameError:
pass
else:
self.diff_function = simple_diff
self._dirlist = []
self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
        if os.environ.has_key('PRESERVE') and os.environ['PRESERVE'] != '':
self._preserve['pass_test'] = os.environ['PRESERVE']
self._preserve['fail_test'] = os.environ['PRESERVE']
self._preserve['no_result'] = os.environ['PRESERVE']
else:
try:
self._preserve['pass_test'] = os.environ['PRESERVE_PASS']
except KeyError:
pass
try:
self._preserve['fail_test'] = os.environ['PRESERVE_FAIL']
except KeyError:
pass
try:
self._preserve['no_result'] = os.environ['PRESERVE_NO_RESULT']
except KeyError:
pass
self._stdout = []
self._stderr = []
self.status = None
self.condition = 'no_result'
self.workdir_set(workdir)
self.subdir(subdir)
def __del__(self):
self.cleanup()
def __repr__(self):
return "%x" % id(self)
banner_char = '='
banner_width = 80
def banner(self, s, width=None):
if width is None:
width = self.banner_width
return s + self.banner_char * (width - len(s))
if os.name == 'posix':
def escape(self, arg):
"escape shell special characters"
slash = '\\'
special = '"$'
arg = string.replace(arg, slash, slash+slash)
for c in special:
arg = string.replace(arg, c, slash+c)
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
else:
def escape(self, arg):
if re_space.search(arg):
arg = '"' + arg + '"'
return arg
def canonicalize(self, path):
if is_List(path):
path = apply(os.path.join, tuple(path))
if not os.path.isabs(path):
path = os.path.join(self.workdir, path)
return path
def chmod(self, path, mode):
"""Changes permissions on the specified file or directory
path name."""
path = self.canonicalize(path)
os.chmod(path, mode)
def cleanup(self, condition = None):
"""Removes any temporary working directories for the specified
TestCmd environment. If the environment variable PRESERVE was
set when the TestCmd environment was created, temporary working
directories are not removed. If any of the environment variables
PRESERVE_PASS, PRESERVE_FAIL, or PRESERVE_NO_RESULT were set
when the TestCmd environment was created, then temporary working
directories are not removed if the test passed, failed, or had
no result, respectively. Temporary working directories are also
preserved for conditions specified via the preserve method.
Typically, this method is not called directly, but is used when
the script exits to clean up temporary working directories as
appropriate for the exit status.
"""
if not self._dirlist:
return
os.chdir(self._cwd)
self.workdir = None
if condition is None:
condition = self.condition
if self._preserve[condition]:
for dir in self._dirlist:
print "Preserved directory", dir
else:
list = self._dirlist[:]
list.reverse()
for dir in list:
self.writable(dir, 1)
shutil.rmtree(dir, ignore_errors = 1)
self._dirlist = []
try:
global _Cleanup
_Cleanup.remove(self)
except (AttributeError, ValueError):
pass
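# Example of the preservation behaviour described in the cleanup()
# docstring (hypothetical shell session and script, for illustration):
#
#   $ PRESERVE_FAIL=1 python my_test.py   # keep workdirs of failed runs
#
# or, equivalently, from inside a test script:
#
#   test = TestCmd(workdir='')
#   test.preserve('fail_test')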
def command_args(self, program = None,
interpreter = None,
arguments = None):
if program:
if type(program) == type('') and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
else:
program = self.program
if not interpreter:
interpreter = self.interpreter
if not type(program) in [type([]), type(())]:
program = [program]
cmd = list(program)
if interpreter:
if not type(interpreter) in [type([]), type(())]:
interpreter = [interpreter]
cmd = list(interpreter) + cmd
if arguments:
if type(arguments) == type(''):
arguments = string.split(arguments)
cmd.extend(arguments)
return cmd
def description_set(self, description):
"""Set the description of the functionality being tested.
"""
self.description = description
try:
difflib
except NameError:
def diff(self, a, b, name, *args, **kw):
print self.banner('Expected %s' % name)
print a
print self.banner('Actual %s' % name)
print b
else:
def diff(self, a, b, name, *args, **kw):
print self.banner(name)
args = (a.splitlines(), b.splitlines()) + args
lines = apply(self.diff_function, args, kw)
for l in lines:
print l
def fail_test(self, condition = 1, function = None, skip = 0):
"""Cause the test to fail.
"""
if not condition:
return
self.condition = 'fail_test'
fail_test(self = self,
condition = condition,
function = function,
skip = skip)
def interpreter_set(self, interpreter):
"""Set the program to be used to interpret the program
under test as a script.
"""
self.interpreter = interpreter
def match(self, lines, matches):
"""Compare actual and expected file contents.
"""
return self.match_function(lines, matches)
def match_exact(self, lines, matches):
"""Compare actual and expected file contents.
"""
return match_exact(lines, matches)
def match_re(self, lines, res):
"""Compare actual and expected file contents.
"""
return match_re(lines, res)
def match_re_dotall(self, lines, res):
"""Compare actual and expected file contents.
"""
return match_re_dotall(lines, res)
def no_result(self, condition = 1, function = None, skip = 0):
"""Report that the test could not be run.
"""
if not condition:
return
self.condition = 'no_result'
no_result(self = self,
condition = condition,
function = function,
skip = skip)
def pass_test(self, condition = 1, function = None):
"""Cause the test to pass.
"""
if not condition:
return
self.condition = 'pass_test'
pass_test(self = self, condition = condition, function = function)
def preserve(self, *conditions):
"""Arrange for the temporary working directories for the
specified TestCmd environment to be preserved for one or more
conditions. If no conditions are specified, arranges for
the temporary working directories to be preserved for all
conditions.
"""
if not conditions:
conditions = ('pass_test', 'fail_test', 'no_result')
for cond in conditions:
self._preserve[cond] = 1
def program_set(self, program):
"""Set the executable program or script to be tested.
"""
if program and not os.path.isabs(program):
program = os.path.join(self._cwd, program)
self.program = program
def read(self, file, mode = 'rb'):
"""Reads and returns the contents of the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name. The I/O mode for the file may
be specified; it must begin with an 'r'. The default is
'rb' (binary read).
"""
file = self.canonicalize(file)
if mode[0] != 'r':
raise ValueError, "mode must begin with 'r'"
with open(file, mode) as f:
result = f.read()
return result
def rmdir(self, dir):
"""Removes the specified dir name.
The dir name may be a list, in which case the elements are
concatenated with the os.path.join() method. The dir is
assumed to be under the temporary working directory unless it
is an absolute path name.
The dir must be empty.
"""
dir = self.canonicalize(dir)
os.rmdir(dir)
def start(self, program = None,
interpreter = None,
arguments = None,
universal_newlines = None,
**kw):
"""
Starts a program or script for the test environment.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
cmd = self.command_args(program, interpreter, arguments)
cmd_string = string.join(map(self.escape, cmd), ' ')
if self.verbose:
sys.stderr.write(cmd_string + "\n")
if universal_newlines is None:
universal_newlines = self.universal_newlines
stdin = kw.get('stdin', None)
if stdin is not None:
stdin = subprocess.PIPE
combine = kw.get('combine', self.combine)
if combine:
stderr_value = subprocess.STDOUT
else:
stderr_value = subprocess.PIPE
return Popen(cmd,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=stderr_value,
universal_newlines=universal_newlines)
def finish(self, popen, **kw):
"""
Finishes and waits for the process being run under control of
the specified popen argument, recording the exit status,
standard output and error output.
"""
popen.stdin.close()
self.status = popen.wait()
if not self.status:
self.status = 0
self._stdout.append(popen.stdout.read())
if popen.stderr:
stderr = popen.stderr.read()
else:
stderr = ''
self._stderr.append(stderr)
def run(self, program = None,
interpreter = None,
arguments = None,
chdir = None,
stdin = None,
universal_newlines = None):
"""Runs a test of the program or script for the test
environment. Standard output and error output are saved for
future retrieval via the stdout() and stderr() methods.
The specified program will have the original directory
prepended unless it is enclosed in a [list].
"""
if chdir:
oldcwd = os.getcwd()
if not os.path.isabs(chdir):
chdir = os.path.join(self.workpath(chdir))
if self.verbose:
sys.stderr.write("chdir(" + chdir + ")\n")
os.chdir(chdir)
p = self.start(program,
interpreter,
arguments,
universal_newlines,
stdin=stdin)
if stdin:
if is_List(stdin):
for line in stdin:
p.stdin.write(line)
else:
p.stdin.write(stdin)
p.stdin.close()
out = p.stdout.read()
if p.stderr is None:
err = ''
else:
err = p.stderr.read()
try:
close_output = p.close_output
except AttributeError:
p.stdout.close()
if p.stderr is not None:
p.stderr.close()
else:
close_output()
self._stdout.append(out)
self._stderr.append(err)
self.status = p.wait()
if not self.status:
self.status = 0
if chdir:
os.chdir(oldcwd)
if self.verbose >= 2:
write = sys.stdout.write
write('============ STATUS: %d\n' % self.status)
out = self.stdout()
if out or self.verbose >= 3:
write('============ BEGIN STDOUT (len=%d):\n' % len(out))
write(out)
write('============ END STDOUT\n')
err = self.stderr()
if err or self.verbose >= 3:
write('============ BEGIN STDERR (len=%d)\n' % len(err))
write(err)
write('============ END STDERR\n')
def sleep(self, seconds = default_sleep_seconds):
"""Sleeps at least the specified number of seconds. If no
number is specified, sleeps at least the minimum number of
seconds necessary to advance file time stamps on the current
system. Sleeping more seconds is all right.
"""
time.sleep(seconds)
def stderr(self, run = None):
"""Returns the error output from the specified run number.
If there is no specified run number, then returns the error
output of the last run. If the run number is less than zero,
then returns the error output from that many runs back from the
current run.
"""
if not run:
run = len(self._stderr)
elif run < 0:
run = len(self._stderr) + run
run = run - 1
return self._stderr[run]
def stdout(self, run = None):
"""Returns the standard output from the specified run number.
If there is no specified run number, then returns the standard
output of the last run. If the run number is less than zero,
then returns the standard output from that many runs back from
the current run.
"""
if not run:
run = len(self._stdout)
elif run < 0:
run = len(self._stdout) + run
run = run - 1
return self._stdout[run]
def subdir(self, *subdirs):
"""Create new subdirectories under the temporary working
directory, one for each argument. An argument may be a list,
in which case the list elements are concatenated using the
os.path.join() method. Subdirectories multiple levels deep
must be created using a separate argument for each level:
test.subdir('sub', ['sub', 'dir'], ['sub', 'dir', 'ectory'])
Returns the number of subdirectories actually created.
"""
count = 0
for sub in subdirs:
if sub is None:
continue
if is_List(sub):
sub = apply(os.path.join, tuple(sub))
new = os.path.join(self.workdir, sub)
try:
os.mkdir(new)
except OSError:
pass
else:
count = count + 1
return count
def symlink(self, target, link):
"""Creates a symlink to the specified target.
The link name may be a list, in which case the elements are
concatenated with the os.path.join() method. The link is
assumed to be under the temporary working directory unless it
is an absolute path name. The target is *not* assumed to be
under the temporary working directory.
"""
link = self.canonicalize(link)
os.symlink(target, link)
def tempdir(self, path=None):
"""Creates a temporary directory.
A unique directory name is generated if no path name is specified.
The directory is created, and will be removed when the TestCmd
object is destroyed.
"""
if path is None:
try:
path = tempfile.mktemp(prefix=tempfile.template)
except TypeError:
path = tempfile.mktemp()
os.mkdir(path)
cwd = os.getcwd()
try:
os.chdir(path)
path = os.getcwd()
finally:
os.chdir(cwd)
drive,rest = os.path.splitdrive(path)
if drive:
path = string.upper(drive) + rest
self._dirlist.append(path)
global _Cleanup
try:
_Cleanup.index(self)
except ValueError:
_Cleanup.append(self)
return path
def touch(self, path, mtime=None):
"""Updates the modification time on the specified file or
directory path name. The default is to update to the
current time if no explicit modification time is specified.
"""
path = self.canonicalize(path)
atime = os.path.getatime(path)
if mtime is None:
mtime = time.time()
os.utime(path, (atime, mtime))
def unlink(self, file):
"""Unlinks the specified file name.
The file name may be a list, in which case the elements are
concatenated with the os.path.join() method. The file is
assumed to be under the temporary working directory unless it
is an absolute path name.
"""
file = self.canonicalize(file)
os.unlink(file)
def verbose_set(self, verbose):
"""Set the verbose level.
"""
self.verbose = verbose
def where_is(self, file, path=None, pathext=None):
"""Find an executable file.
"""
if is_List(file):
file = apply(os.path.join, tuple(file))
if not os.path.isabs(file):
file = where_is(file, path, pathext)
return file
def workdir_set(self, path):
"""Creates a temporary working directory with the specified
path name. If the path is a null string (''), a unique
directory name is created.
"""
if path is not None:
if path == '':
path = None
path = self.tempdir(path)
self.workdir = path
def workpath(self, *args):
"""Returns the absolute path name to a subdirectory or file
within the current temporary working directory. Concatenates
the temporary working directory name with the specified
arguments using the os.path.join() method.
"""
return apply(os.path.join, (self.workdir,) + tuple(args))
def readable(self, top, read=1):
"""Make the specified directory tree readable (read == 1)
or not (read == None).
This method has no effect on Windows systems, which use a
completely different mechanism to control file readability.
"""
if sys.platform == 'win32':
return
if read:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IREAD))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IREAD))
if os.path.isfile(top):
do_chmod(top)
elif read:
do_chmod(top)
def chmod_entries(arg, dirname, names, do_chmod=do_chmod):
for n in names:
do_chmod(os.path.join(dirname, n))
os.path.walk(top, chmod_entries, None)
else:
col = Collector(top)
os.path.walk(top, col, None)
col.entries.reverse()
for d in col.entries: do_chmod(d)
def writable(self, top, write=1):
"""Make the specified directory tree writable (write == 1)
or not (write == None).
"""
if sys.platform == 'win32':
if write:
def do_chmod(fname):
try: os.chmod(fname, stat.S_IWRITE)
except OSError: pass
else:
def do_chmod(fname):
try: os.chmod(fname, stat.S_IREAD)
except OSError: pass
else:
if write:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|0200))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~0200))
if os.path.isfile(top):
do_chmod(top)
else:
col = Collector(top)
os.path.walk(top, col, None)
for d in col.entries: do_chmod(d)
def executable(self, top, execute=1):
"""Make the specified directory tree executable (execute == 1)
or not (execute == None).
This method has no effect on Windows systems, which use a
completely different mechanism to control file executability.
"""
if sys.platform == 'win32':
return
if execute:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IEXEC))
else:
def do_chmod(fname):
try: st = os.stat(fname)
except OSError: pass
else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IEXEC))
if os.path.isfile(top):
do_chmod(top)
elif execute:
do_chmod(top)
def chmod_entries(arg, dirname, names, do_chmod=do_chmod):
for n in names:
do_chmod(os.path.join(dirname, n))
os.path.walk(top, chmod_entries, None)
else:
col = Collector(top)
os.path.walk(top, col, None)
col.entries.reverse()
for d in col.entries: do_chmod(d)
def write(self, file, content, mode = 'wb'):
"""Writes the specified content text (second argument) to the
specified file name (first argument). The file name may be
a list, in which case the elements are concatenated with the
os.path.join() method. The file is created under the temporary
working directory. Any subdirectories in the path must already
exist. The I/O mode for the file may be specified; it must
begin with a 'w'. The default is 'wb' (binary write).
"""
file = self.canonicalize(file)
if mode[0] != 'w':
raise ValueError, "mode must begin with 'w'"
with open(file, mode) as f:
f.write(content)
|
data/adlnet/ADL_LRS/oauth_provider/managers.py
|
from django.db import models
class TokenManager(models.Manager):
def create_token(self, consumer, token_type, timestamp, scope,
is_approved=False, user=None, callback=None, callback_confirmed=False):
"""Shortcut to create a token with random key/secret."""
token, created = self.get_or_create(consumer=consumer,
token_type=token_type,
timestamp=timestamp,
scope=scope,
is_approved=is_approved,
user=user,
callback=callback,
callback_confirmed=callback_confirmed)
if created:
if consumer.rsa_signature:
token.secret = consumer.secret
token.save()
token.generate_random_codes()
return token
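# A minimal sketch of how create_token() might be called (the consumer,
# token_type constant and scope values are illustrative assumptions):
#
#   import time
#   token = Token.objects.create_token(
#       consumer=consumer,            # an existing Consumer instance
#       token_type=Token.REQUEST,     # assumed constant on the Token model
#       timestamp=int(time.time()),
#       scope=scope)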
|
data/MLWave/kepler-mapper/km.py
|
from __future__ import division
import numpy as np
from collections import defaultdict
import json
import itertools
from sklearn import cluster, preprocessing, manifold
from datetime import datetime
import sys
class KeplerMapper(object):
def __init__(self, verbose=2):
self.verbose = verbose
self.chunk_dist = []
self.overlap_dist = []
self.d = []
self.nr_cubes = 0
self.overlap_perc = 0
self.clusterer = False
def fit_transform(self, X, projection="sum", scaler=preprocessing.MinMaxScaler()):
self.scaler = scaler
self.projection = str(projection)
if str(type(projection))[1:6] == "class":
reducer = projection
if self.verbose > 0:
try:
projection.set_params(**{"verbose":self.verbose})
except Exception:
pass
print("\n..Projecting data using: \n\t%s\n"%str(projection))
X = reducer.fit_transform(X)
if isinstance(projection, str):
if self.verbose > 0:
print("\n..Projecting data using: %s"%(projection))
if projection == "sum":
X = np.sum(X, axis=1).reshape((X.shape[0],1))
if projection == "mean":
X = np.mean(X, axis=1).reshape((X.shape[0],1))
if projection == "median":
X = np.median(X, axis=1).reshape((X.shape[0],1))
if projection == "max":
X = np.max(X, axis=1).reshape((X.shape[0],1))
if projection == "min":
X = np.min(X, axis=1).reshape((X.shape[0],1))
if projection == "std":
X = np.std(X, axis=1).reshape((X.shape[0],1))
if projection == "dist_mean":
X_mean = np.mean(X, axis=0)
X = np.sum(np.sqrt((X - X_mean)**2), axis=1).reshape((X.shape[0],1))
if isinstance(projection, list):
if self.verbose > 0:
print("\n..Projecting data using: %s"%(str(projection)))
X = X[:,np.array(projection)]
if scaler is not None:
if self.verbose > 0:
print("\n..Scaling with: %s\n"%str(scaler))
X = scaler.fit_transform(X)
return X
def map(self, projected_X, inverse_X=None, clusterer=cluster.DBSCAN(eps=0.5,min_samples=3), nr_cubes=10, overlap_perc=0.1):
start = datetime.now()
def cube_coordinates_all(nr_cubes, nr_dimensions):
l = []
for x in range(nr_cubes):
l += [x] * nr_dimensions
return [np.array(list(f)) for f in sorted(set(itertools.permutations(l,nr_dimensions)))]
nodes = defaultdict(list)
links = defaultdict(list)
complex = {}
self.nr_cubes = nr_cubes
self.clusterer = clusterer
self.overlap_perc = overlap_perc
if self.verbose > 0:
print("Mapping on data shaped %s using dimensions\n"%(str(projected_X.shape)))
if inverse_X is None:
inverse_X = projected_X
self.chunk_dist = (np.max(projected_X, axis=0) - np.min(projected_X, axis=0))/nr_cubes
self.overlap_dist = self.overlap_perc * self.chunk_dist
self.d = np.min(projected_X, axis=0)
di = np.array([x for x in range(projected_X.shape[1])])
ids = np.array([x for x in range(projected_X.shape[0])])
projected_X = np.c_[ids,projected_X]
inverse_X = np.c_[ids,inverse_X]
if self.verbose > 0:
total_cubes = len(cube_coordinates_all(nr_cubes,projected_X.shape[1]))
print("Creating %s hypercubes."%total_cubes)
for i, coor in enumerate(cube_coordinates_all(nr_cubes,di.shape[0])):
hypercube = projected_X[ np.invert(np.any((projected_X[:,di+1] >= self.d[di] + (coor * self.chunk_dist[di])) &
(projected_X[:,di+1] < self.d[di] + (coor * self.chunk_dist[di]) + self.chunk_dist[di] + self.overlap_dist[di]) == False, axis=1 )) ]
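# A point belongs to this hypercube iff, in every projected dimension di,
# it lies in [d + coor*chunk, d + coor*chunk + chunk + overlap); the
# invert/any double negation above is an "all dimensions in range" test.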
if self.verbose > 1:
print("There are %s points in cube_%s / %s with starting range %s"%
(hypercube.shape[0],i,total_cubes,self.d[di] + (coor * self.chunk_dist[di])))
if hypercube.shape[0] > 0:
inverse_x = inverse_X[[int(nn) for nn in hypercube[:,0]]]
clusterer.fit(inverse_x[:,1:])
if self.verbose > 1:
print("Found %s clusters in cube_%s\n"%(np.unique(clusterer.labels_[clusterer.labels_ > -1]).shape[0],i))
for a in np.c_[hypercube[:,0],clusterer.labels_]:
if a[1] != -1:
cluster_id = str(coor[0])+"_"+str(i)+"_"+str(a[1])+"_"+str(coor)+"_"+str(self.d[di] + (coor * self.chunk_dist[di]))
nodes[cluster_id].append( int(a[0]) )
else:
if self.verbose > 1:
print("Cube_%s is empty.\n"%(i))
candidates = itertools.combinations(nodes.keys(),2)
for candidate in candidates:
if len(nodes[candidate[0]]+nodes[candidate[1]]) != len(set(nodes[candidate[0]]+nodes[candidate[1]])):
links[candidate[0]].append( candidate[1] )
if self.verbose > 0:
nr_links = 0
for k in links:
nr_links += len(links[k])
print("\ncreated %s edges and %s nodes in %s."%(nr_links,len(nodes),str(datetime.now()-start)))
complex["nodes"] = nodes
complex["links"] = links
complex["meta"] = self.projection
return complex
def visualize(self, complex, color_function="", path_html="mapper_visualization_output.html", title="My Data",
graph_link_distance=30, graph_gravity=0.1, graph_charge=-120, custom_tooltips=None, width_html=0,
height_html=0, show_tooltips=True, show_title=True, show_meta=True):
json_s = {}
json_s["nodes"] = []
json_s["links"] = []
k2e = {}
for e, k in enumerate(complex["nodes"]):
if custom_tooltips is not None:
tooltip_s = "<h2>Cluster %s</h2>"%k + " ".join([str(f) for f in custom_tooltips[complex["nodes"][k]]])
if color_function == "average_signal_cluster":
tooltip_i = int(((sum([f for f in custom_tooltips[complex["nodes"][k]]]) / len(custom_tooltips[complex["nodes"][k]])) * 30) )
json_s["nodes"].append({"name": str(k), "tooltip": tooltip_s, "group": 2 * int(np.log(len(complex["nodes"][k]))), "color": str(tooltip_i)})
else:
json_s["nodes"].append({"name": str(k), "tooltip": tooltip_s, "group": 2 * int(np.log(len(complex["nodes"][k]))), "color": str(k.split("_")[0])})
else:
tooltip_s = "<h2>Cluster %s</h2>Contains %s members."%(k,len(complex["nodes"][k]))
json_s["nodes"].append({"name": str(k), "tooltip": tooltip_s, "group": 2 * int(np.log(len(complex["nodes"][k]))), "color": str(k.split("_")[0])})
k2e[k] = e
for k in complex["links"]:
for link in complex["links"][k]:
json_s["links"].append({"source": k2e[k], "target":k2e[link],"value":1})
if width_html == 0:
width_css = "100%"
width_js = 'document.getElementById("holder").offsetWidth-20'
else:
width_css = "%spx" % width_html
width_js = "%s" % width_html
if height_html == 0:
height_css = "100%"
height_js = 'document.getElementById("holder").offsetHeight-20'
else:
height_css = "%spx" % height_html
height_js = "%s" % height_html
tooltips_display = "" if show_tooltips else "display: none;"
meta_display = "" if show_meta else "display: none;"
title_display = "" if show_title else "display: none;"
with open(path_html,"wb") as outfile:
html = """<!DOCTYPE html>
<meta charset="utf-8">
<meta name="generator" content="KeplerMapper">
<title>%s | KeplerMapper</title>
<link href='https://fonts.googleapis.com/css?family=Roboto:700,300' rel='stylesheet' type='text/css'>
<style>
* {margin: 0; padding: 0;}
html { height: 100%%;}
body {background:
.link { stroke:
.divs div { border-radius: 50%%; background: red; position: absolute; }
.divs { position: absolute; top: 0; left: 0; }
h1 { %s padding: 20px; color:
h2 { text-shadow: 0px 1px
.meta { position: absolute; opacity: 0.9; width: 220px; top: 80px; left: 20px; display: block; %s background:
div.tooltip { position: absolute; width: 380px; display: block; %s padding: 20px; background:
}
</style>
<body>
<div id="holder">
<h1>%s</h1>
<p class="meta">
<b>Lens</b><br>%s<br><br>
<b>Cubes per dimension</b><br>%s<br><br>
<b>Overlap percentage</b><br>%s%%<br><br>
<b>Color Function</b><br>%s( %s )<br><br>
<b>Clusterer</b><br>%s<br><br>
<b>Scaler</b><br>%s
</p>
</div>
<script src="https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"></script>
<script>
var width = %s,
height = %s;
var color = d3.scale.ordinal()
.domain(["0","1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13","14","15","16","17","18","19","20","21","22","23","24","25","26","27","28","29","30"])
.range(["
var force = d3.layout.force()
.charge(%s)
.linkDistance(%s)
.gravity(%s)
.size([width, height]);
var svg = d3.select("
.attr("width", width)
.attr("height", height);
var div = d3.select("
.attr("class", "tooltip")
.style("opacity", 0.0);
var divs = d3.select('#holder').append('div')
.attr('class', 'divs')
.attr('style', function(d) { return 'overflow: hidden; width: ' + width + 'px; height: ' + height + 'px;'; });
graph = %s;
force
.nodes(graph.nodes)
.links(graph.links)
.start();
var link = svg.selectAll(".link")
.data(graph.links)
.enter().append("line")
.attr("class", "link")
.style("stroke-width", function(d) { return Math.sqrt(d.value); });
var node = divs.selectAll('div')
.data(graph.nodes)
.enter().append('div')
.on("mouseover", function(d) {
div.transition()
.duration(200)
.style("opacity", .9);
div .html(d.tooltip + "<br/>")
.style("left", (d3.event.pageX + 100) + "px")
.style("top", (d3.event.pageY - 28) + "px");
})
.on("mouseout", function(d) {
div.transition()
.duration(500)
.style("opacity", 0);
})
.call(force.drag);
node.append("title")
.text(function(d) { return d.name; });
force.on("tick", function() {
link.attr("x1", function(d) { return d.source.x; })
.attr("y1", function(d) { return d.source.y; })
.attr("x2", function(d) { return d.target.x; })
.attr("y2", function(d) { return d.target.y; });
node.attr("cx", function(d) { return d.x; })
.attr("cy", function(d) { return d.y; })
.attr('style', function(d) { return 'width: ' + (d.group * 2) + 'px; height: ' + (d.group * 2) + 'px; ' + 'left: '+(d.x-(d.group))+'px; ' + 'top: '+(d.y-(d.group))+'px; background: '+color(d.color)+'; box-shadow: 0px 0px 3px
;
});
</script>"""%(title,width_css, height_css, title_display, meta_display, tooltips_display, title,complex["meta"],self.nr_cubes,self.overlap_perc*100,color_function,complex["meta"],str(self.clusterer),str(self.scaler),width_js,height_js,graph_charge,graph_link_distance,graph_gravity,json.dumps(json_s))
outfile.write(html.encode("utf-8"))
if self.verbose > 0:
print("\nWrote d3.js graph to '%s'"%path_html)
|
data/Toblerity/Shapely/tests/test_multi.py
|
from . import unittest, test_int_types
class MultiGeometryTestCase(unittest.TestCase):
def subgeom_access_test(self, cls, geoms):
geom = cls(geoms)
for t in test_int_types:
for i, g in enumerate(geoms):
self.assertEqual(geom[t(i)], geoms[i])
|
data/ReactiveX/RxPY/rx/subjects/innersubscription.py
|
from rx import Lock
class InnerSubscription(object):
def __init__(self, subject, observer):
self.subject = subject
self.observer = observer
self.lock = Lock()
def dispose(self):
with self.lock:
if not self.subject.is_disposed and self.observer:
if self.observer in self.subject.observers:
self.subject.observers.remove(self.observer)
self.observer = None
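# Usage sketch (assumes a Subject exposing `observers` and `is_disposed`,
# as referenced above):
#
#   subscription = InnerSubscription(subject, observer)
#   subscription.dispose()   # removes the observer under the lock
#   subscription.dispose()   # idempotent: self.observer is already None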
|
data/Toblerity/Shapely/docs/code/parallel_offset_mitre.py
|
from matplotlib import pyplot
from shapely.geometry import LineString
from descartes import PolygonPatch
from figures import SIZE, BLUE, GRAY
def plot_coords(ax, x, y, color='#999999', zorder=1):
ax.plot(x, y, 'o', color=color, zorder=zorder)
def plot_line(ax, ob, color=GRAY):
parts = ob if hasattr(ob, 'geoms') else [ob]
for part in parts:
x, y = part.xy
ax.plot(x, y, color=color, linewidth=3, solid_capstyle='round', zorder=1)
def set_limits(ax, x_range, y_range):
ax.set_xlim(*x_range)
ax.set_xticks(range(*x_range) + [x_range[-1]])
ax.set_ylim(*y_range)
ax.set_yticks(range(*y_range) + [y_range[-1]])
ax.set_aspect(1)
line = LineString([(0, 0), (1, 1), (0, 2), (2, 2), (3, 1), (1, 0)])
line_bounds = line.bounds
ax_range = [int(line_bounds[0] - 1.0), int(line_bounds[2] + 1.0)]
ay_range = [int(line_bounds[1] - 1.0), int(line_bounds[3] + 1.0)]
fig = pyplot.figure(1, figsize=(SIZE[0], 2 * SIZE[1]), dpi=90)
ax = fig.add_subplot(221)
plot_line(ax, line)
x, y = list(line.coords)[0]
plot_coords(ax, x, y)
offset = line.parallel_offset(0.5, 'left', join_style=2, mitre_limit=0.1)
plot_line(ax, offset, color=BLUE)
ax.set_title('a) left, limit=0.1')
set_limits(ax, ax_range, ay_range)
ax = fig.add_subplot(222)
plot_line(ax, line)
x, y = list(line.coords)[0]
plot_coords(ax, x, y)
offset = line.parallel_offset(0.5, 'left', join_style=2, mitre_limit=10.0)
plot_line(ax, offset, color=BLUE)
ax.set_title('b) left, limit=10.0')
set_limits(ax, ax_range, ay_range)
ax = fig.add_subplot(223)
plot_line(ax, line)
x, y = list(line.coords)[0]
plot_coords(ax, x, y)
offset = line.parallel_offset(0.5, 'right', join_style=2, mitre_limit=0.1)
plot_line(ax, offset, color=BLUE)
ax.set_title('c) right, limit=0.1')
set_limits(ax, ax_range, ay_range)
ax = fig.add_subplot(224)
plot_line(ax, line)
x, y = list(line.coords)[0]
plot_coords(ax, x, y)
offset = line.parallel_offset(0.5, 'right', join_style=2, mitre_limit=10.0)
plot_line(ax, offset, color=BLUE)
ax.set_title('d) right, limit=10.0')
set_limits(ax, ax_range, ay_range)
pyplot.show()
|
data/aerospike/aerospike-client-python/test/old_tests/_test_list_insert.py
|
import pytest
import sys
import random
from .test_base_class import TestBaseClass
from aerospike import exception as e
aerospike = pytest.importorskip("aerospike")
try:
import aerospike
except ImportError:
print("Please install aerospike python client.")
sys.exit(1)
class TestListInsert(object):
def setup_class(cls):
"""
Setup method.
"""
hostlist, user, password = TestBaseClass.get_hosts()
config = {'hosts': hostlist}
if user is None and password is None:
TestListInsert.client = aerospike.client(config).connect()
else:
TestListInsert.client = aerospike.client(
config).connect(user, password)
def teardown_class(cls):
TestListInsert.client.close()
def setup_method(self, method):
for i in range(5):
key = ('test', 'demo', i)
rec = {'name': 'name%s' %
(str(i)), 'age': [i, i + 1], 'city': ['Pune', 'Dehli']}
TestListInsert.client.put(key, rec)
key = ('test', 'demo', 'bytearray_key')
TestListInsert.client.put(
key, {"bytearray_bin": bytearray("asd;as[d'as;d", "utf-8")})
def teardown_method(self, method):
"""
Teardown method.
"""
for i in range(5):
key = ('test', 'demo', i)
TestListInsert.client.remove(key)
key = ('test', 'demo', 'bytearray_key')
TestListInsert.client.remove(key)
def test_list_insert_integer(self):
"""
Invoke list_insert() insert integer value with correct parameters
"""
key = ('test', 'demo', 1)
TestListInsert.client.list_insert(key, "age", 0, 999)
(key, _, bins) = TestListInsert.client.get(key)
assert bins == {
'age': [999, 1, 2], 'name': 'name1', 'city': ['Pune', 'Dehli']}
def test_list_insert_string(self):
"""
Invoke list_insert() inserts string with correct parameters
"""
key = ('test', 'demo', 1)
TestListInsert.client.list_insert(key, "city", 0, "Chennai")
(key, _, bins) = TestListInsert.client.get(key)
assert bins == {
'age': [1, 2], 'name': 'name1',
'city': ['Chennai', 'Pune', 'Dehli']}
def test_list_insert_unicode_string(self):
"""
Invoke list_insert() inserts unicode string
"""
key = ('test', 'demo', 1)
TestListInsert.client.list_insert(key, "city", 3, u"Mumbai")
key, _, bins = TestListInsert.client.get(key)
assert bins == {
'age': [1, 2], 'city': ['Pune', 'Dehli', None, u'Mumbai'],
'name': 'name1'}
def test_list_insert_list_with_correct_policy(self):
"""
Invoke list_insert() inserts list with correct policy
"""
key = ('test', 'demo', 2)
policy = {
'timeout': 1000,
'retry': aerospike.POLICY_RETRY_ONCE,
'commit_level': aerospike.POLICY_COMMIT_LEVEL_MASTER
}
TestListInsert.client.list_insert(
key, "age", 5, [45, 50, 80], {}, policy)
(key, _, bins) = TestListInsert.client.get(key)
assert bins == {'age': [2, 3, None, None, None, [45, 50, 80]],
'city': ['Pune', 'Dehli'], 'name': 'name2'}
def test_list_insert_float(self):
"""
Invoke list_insert() insert float into the list
"""
key = ('test', 'demo', 2)
TestListInsert.client.list_insert(key, "age", 7, 85.12)
(key, _, bins) = TestListInsert.client.get(key)
assert bins == {'age': [2, 3, None, None, None, None, None, 85.12],
'city': ['Pune', 'Dehli'], 'name': 'name2'}
def test_list_insert_map(self):
"""
Invoke list_insert() insert map into the list
"""
key = ('test', 'demo', 3)
TestListInsert.client.list_insert(key, "age", 1, {'k1': 29})
(key, _, bins) = TestListInsert.client.get(key)
assert bins == {
'age': [3, {'k1': 29}, 4], 'city': ['Pune', 'Dehli'],
'name': 'name3'}
def test_list_insert_bytearray(self):
"""
Invoke list_insert() insert bytearray into the list
"""
key = ('test', 'demo', 1)
TestListInsert.client.list_insert(
key, "age", 2, bytearray("asd;as[d'as;d", "utf-8"))
(key, _, bins) = TestListInsert.client.get(key)
assert bins == {'age': [
1, 2, bytearray(b"asd;as[d\'as;d")], 'city': ['Pune', 'Dehli'],
'name': 'name1'}
def test_list_insert_boolean(self):
"""
Invoke list_insert() insert boolean into the list
"""
key = ('test', 'demo', 1)
TestListInsert.client.list_insert(key, "age", 6, False)
(key, _, bins) = TestListInsert.client.get(key)
assert bins == {'age': [1, 2, None, None, None, None, 0], 'city': [
'Pune', 'Dehli'], 'name': 'name1'}
def test_list_insert_with_nonexistent_key(self):
"""
Invoke list_insert() with non-existent key
"""
charSet = 'abcdefghijklmnopqrstuvwxyz1234567890'
minLength = 5
maxLength = 30
length = random.randint(minLength, maxLength)
key = ('test', 'demo', ''.join(map(lambda unused:
random.choice(charSet),
range(length))) + ".com")
status = TestListInsert.client.list_insert(key, "abc", 2, 122)
assert status == 0
(key, _, bins) = TestListInsert.client.get(key)
assert status == 0
assert bins == {'abc': [None, None, 122]}
TestListInsert.client.remove(key)
def test_list_insert_with_nonexistent_bin(self):
"""
Invoke list_insert() with non-existent bin
"""
key = ('test', 'demo', 1)
charSet = 'abcdefghijklmnopqrstuvwxyz1234567890'
minLength = 5
maxLength = 10
length = random.randint(minLength, maxLength)
bin = ''.join(map(lambda unused:
random.choice(charSet), range(length))) + ".com"
status = TestListInsert.client.list_insert(key, bin, 3, 585)
assert status == 0
(key, _, bins) = TestListInsert.client.get(key)
assert status == 0
assert bins == {'age': [1, 2], 'name': 'name1',
'city': ['Pune', 'Dehli'],
bin: [None, None, None, 585]}
def test_list_insert_with_no_parameters(self):
"""
Invoke list_insert() without any mandatory parameters.
"""
with pytest.raises(TypeError) as typeError:
TestListInsert.client.list_insert()
assert "Required argument 'key' (pos 1) not found" in str(
typeError.value)
def test_list_insert_with_incorrect_policy(self):
"""
Invoke list_insert() with incorrect policy
"""
key = ('test', 'demo', 1)
policy = {
'timeout': 0.5
}
try:
TestListInsert.client.list_insert(key, "age", 6, "str", {}, policy)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "timeout is invalid"
def test_list_insert_with_extra_parameter(self):
"""
Invoke list_insert() with extra parameter.
"""
key = ('test', 'demo', 1)
policy = {'timeout': 1000}
with pytest.raises(TypeError) as typeError:
TestListInsert.client.list_insert(
key, "age", 3, 999, {}, policy, "")
assert "list_insert() takes at most 6 arguments (7 given)" in str(
typeError.value)
def test_list_insert_policy_is_string(self):
"""
Invoke list_insert() with policy is string
"""
key = ('test', 'demo', 1)
try:
TestListInsert.client.list_insert(key, "age", 1, 85, {}, "")
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "policy must be a dict"
def test_list_insert_key_is_none(self):
"""
Invoke list_insert() with key is none
"""
try:
TestListInsert.client.list_insert(None, "age", 1, 45)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "key is invalid"
def test_list_insert_bin_is_none(self):
"""
Invoke list_insert() with bin is none
"""
key = ('test', 'demo', 1)
try:
TestListInsert.client.list_insert(key, None, 2, "str")
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "Bin name should be of type string"
def test_list_insert_meta_type_integer(self):
"""
Invoke list_insert() with metadata input is of type integer
"""
key = ('test', 'demo', 1)
try:
TestListInsert.client.list_insert(key, "contact_no", 1, 85, 888)
except e.ParamError as exception:
assert exception.code == -2
assert exception.msg == "Metadata should be of type dictionary"
def test_list_insert_index_negative(self):
"""
Invoke list_insert() insert with index is negative integer
"""
key = ('test', 'demo', 1)
try:
TestListInsert.client.list_insert(key, "age", -6, False)
except e.InvalidRequest as exception:
assert exception.code == 4
assert exception.msg == 'AEROSPIKE_ERR_REQUEST_INVALID'
def test_list_insert_index_type_string(self):
"""
Invoke list_insert() insert with index is of type string
"""
key = ('test', 'demo', 1)
with pytest.raises(TypeError) as typeError:
TestListInsert.client.list_insert(key, "age", "Fifth", False)
assert "an integer is required" in str(typeError.value)
|
data/Newmu/Theano-Tutorials/4_modern_net.py
|
import theano
from theano import tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import numpy as np
from load import mnist
srng = RandomStreams()
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
def init_weights(shape):
return theano.shared(floatX(np.random.randn(*shape) * 0.01))
def rectify(X):
return T.maximum(X, 0.)
def softmax(X):
e_x = T.exp(X - X.max(axis=1).dimshuffle(0, 'x'))
return e_x / e_x.sum(axis=1).dimshuffle(0, 'x')
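# Subtracting the row-wise maximum before exponentiating is the standard
# numerical-stability trick: it prevents overflow in exp() and leaves the
# softmax value mathematically unchanged.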
def RMSprop(cost, params, lr=0.001, rho=0.9, epsilon=1e-6):
grads = T.grad(cost=cost, wrt=params)
updates = []
for p, g in zip(params, grads):
acc = theano.shared(p.get_value() * 0.)
acc_new = rho * acc + (1 - rho) * g ** 2
gradient_scaling = T.sqrt(acc_new + epsilon)
g = g / gradient_scaling
updates.append((acc, acc_new))
updates.append((p, p - lr * g))
return updates
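# The loop above implements the standard RMSprop update for each
# parameter p with gradient g:
#     acc_new = rho * acc + (1 - rho) * g**2
#     p      <-  p - lr * g / sqrt(acc_new + epsilon)
# so the step size shrinks for parameters whose squared gradients have
# recently been large.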
def dropout(X, p=0.):
if p > 0:
retain_prob = 1 - p
X *= srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
X /= retain_prob
return X
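# This is "inverted dropout": surviving activations are rescaled by
# 1/(1 - p) at training time, so no rescaling is needed at test time
# (the clean model below is built with p = 0).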
def model(X, w_h, w_h2, w_o, p_drop_input, p_drop_hidden):
X = dropout(X, p_drop_input)
h = rectify(T.dot(X, w_h))
h = dropout(h, p_drop_hidden)
h2 = rectify(T.dot(h, w_h2))
h2 = dropout(h2, p_drop_hidden)
py_x = softmax(T.dot(h2, w_o))
return h, h2, py_x
trX, teX, trY, teY = mnist(onehot=True)
X = T.fmatrix()
Y = T.fmatrix()
w_h = init_weights((784, 625))
w_h2 = init_weights((625, 625))
w_o = init_weights((625, 10))
noise_h, noise_h2, noise_py_x = model(X, w_h, w_h2, w_o, 0.2, 0.5)
h, h2, py_x = model(X, w_h, w_h2, w_o, 0., 0.)
y_x = T.argmax(py_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(noise_py_x, Y))
params = [w_h, w_h2, w_o]
updates = RMSprop(cost, params, lr=0.001)
train = theano.function(inputs=[X, Y], outputs=cost, updates=updates, allow_input_downcast=True)
predict = theano.function(inputs=[X], outputs=y_x, allow_input_downcast=True)
for i in range(100):
for start, end in zip(range(0, len(trX), 128), range(128, len(trX), 128)):
cost = train(trX[start:end], trY[start:end])
print np.mean(np.argmax(teY, axis=1) == predict(teX))
|
data/PyHDI/Pyverilog/pyverilog/dataflow/replace.py
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
from pyverilog.dataflow.dataflow import *
def replaceUndefined(tree, termname):
if tree is None: return DFTerminal(termname)
if isinstance(tree, DFUndefined): return DFTerminal(termname)
if isinstance(tree, DFConstant): return tree
if isinstance(tree, DFEvalValue): return tree
if isinstance(tree, DFTerminal): return tree
if isinstance(tree, DFBranch):
condnode = replaceUndefined(tree.condnode, termname)
truenode = replaceUndefined(tree.truenode, termname)
falsenode = replaceUndefined(tree.falsenode, termname)
return DFBranch(condnode, truenode, falsenode)
if isinstance(tree, DFOperator):
nextnodes = []
for n in tree.nextnodes:
nextnodes.append(replaceUndefined(n, termname))
return DFOperator(tuple(nextnodes), tree.operator)
if isinstance(tree, DFPartselect):
msb = replaceUndefined(tree.msb, termname)
lsb = replaceUndefined(tree.lsb, termname)
var = replaceUndefined(tree.var, termname)
return DFPartselect(var, msb, lsb)
if isinstance(tree, DFPointer):
ptr = replaceUndefined(tree.ptr, termname)
var = replaceUndefined(tree.var, termname)
return DFPointer(var, ptr)
if isinstance(tree, DFConcat):
nextnodes = []
for n in tree.nextnodes:
nextnodes.append(replaceUndefined(n, termname))
return DFConcat(tuple(nextnodes))
raise DefinitionError('Undefined DFNode type: %s %s' % (str(type(tree)), str(tree)))
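# A minimal sketch of the intended use: every DFUndefined leaf (or a
# None subtree) is replaced by a terminal reference to `termname`,
# recursing through branches, operators, selects and concatenations:
#
#   new_tree = replaceUndefined(old_tree, termname)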
|
data/JohnMaguire/Cardinal/cardinal/test_exceptions.py
|
import pytest
import exceptions
def test_exceptions():
with pytest.raises(Exception):
raise exceptions.CardinalException
with pytest.raises(exceptions.CardinalException):
raise exceptions.InternalError
with pytest.raises(exceptions.CardinalException):
raise exceptions.PluginError
with pytest.raises(exceptions.CardinalException):
raise exceptions.CommandNotFoundError
with pytest.raises(exceptions.CardinalException):
raise exceptions.ConfigNotFoundError
with pytest.raises(exceptions.CardinalException):
raise exceptions.AmbiguousConfigError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventAlreadyExistsError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventDoesNotExistError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventCallbackError
with pytest.raises(exceptions.CardinalException):
raise exceptions.EventRejectedMessage
|
data/OpenMDAO/OpenMDAO-Framework/examples/openmdao.examples.metamodel_tutorial/openmdao/examples/metamodel_tutorial/cokriging_forrester_example.py
|
"""
Cokriging example from [Forrester 2007] to show
MultiFiMetaModel and MultiFiCoKrigingSurrogate usage
"""
import numpy as np
from openmdao.main.api import Assembly, Component
from openmdao.lib.datatypes.api import Float
from openmdao.lib.drivers.api import CaseIteratorDriver
from openmdao.lib.components.api import MultiFiMetaModel
from openmdao.lib.surrogatemodels.api import MultiFiCoKrigingSurrogate, KrigingSurrogate
class Model(Component):
x = Float(0, iotype="in")
f_x = Float(0.0, iotype="out")
def execute(self):
x = self.x
self.f_x = ((6*x-2)**2)*np.sin((6*x-2)*2)
class LowFidelityModel(Component):
x = Float(0.0, iotype="in")
f_x = Float(0.0, iotype="out")
def execute(self):
x = self.x
self.f_x = 0.5*((6*x-2)**2)*np.sin((6*x-2)*2)+(x-0.5)*10. - 5
class HighFidelityModel(Model):
pass
class CasesBuilder(Assembly):
def __init__(self, model, cases):
self.instance = model
self.cases = cases
super(CasesBuilder, self).__init__()
def configure(self):
self.add("model", self.instance)
self.add("driver", CaseIteratorDriver())
self.driver.workflow.add('model')
self.driver.add_parameter("model.x", low=0, high=1)
self.driver.add_response("model.f_x")
self.driver.case_inputs.model.x = self.cases
self.create_passthrough('driver.case_inputs.model.x')
self.create_passthrough('driver.case_outputs.model.f_x')
class Simulation(Assembly):
def __init__(self, surrogate, nfi=1):
self.surrogate = surrogate
self.nfi = nfi
super(Simulation, self).__init__()
def configure(self):
doe_e = [0.0, 0.4, 0.6, 1.0]
doe_c = [0.1, 0.2, 0.3, 0.5, 0.7, 0.8, 0.9] + doe_e
self.add('hifi_cases', CasesBuilder(HighFidelityModel(), doe_e))
self.add('lofi_cases', CasesBuilder(LowFidelityModel(), doe_c))
self.add("meta_model", MultiFiMetaModel(params=('x', ),
responses=('f_x', ), nfi=self.nfi))
self.meta_model.default_surrogate = self.surrogate
self.connect('hifi_cases.x' , 'meta_model.params.x')
self.connect('hifi_cases.f_x', 'meta_model.responses.f_x')
if self.nfi > 1:
self.connect('lofi_cases.x' , 'meta_model.params.x_fi2')
self.connect('lofi_cases.f_x', 'meta_model.responses.f_x_fi2')
self.add('mm_checker', CaseIteratorDriver())
self.add('model', Model())
self.mm_checker.add_parameter("meta_model.x", low=0, high=1)
self.mm_checker.add_parameter("model.x", low=0, high=1)
self.mm_checker.add_response("model.f_x")
self.mm_checker.add_response("meta_model.f_x")
ngrid = 100
self.mm_checker.case_inputs.meta_model.x = np.linspace(0,1,ngrid)
self.mm_checker.case_inputs.model.x = np.linspace(0,1,ngrid)
self.driver.workflow.add('hifi_cases')
if self.nfi > 1:
self.driver.workflow.add('lofi_cases')
self.driver.workflow.add('mm_checker')
if __name__ == "__main__":
surrogate = MultiFiCoKrigingSurrogate()
sim_cok = Simulation(surrogate, nfi=2)
sim_cok.run()
predicted_cok = np.array([d.mu for d in sim_cok.mm_checker.case_outputs.meta_model.f_x])
sigma_cok = np.array([d.sigma for d in sim_cok.mm_checker.case_outputs.meta_model.f_x])
surrogate = KrigingSurrogate()
sim_k = Simulation(surrogate, nfi=1)
sim_k.run()
predicted_k = np.array([d.mu for d in sim_k.mm_checker.case_outputs.meta_model.f_x])
sigma_k = np.array([d.sigma for d in sim_k.mm_checker.case_outputs.meta_model.f_x])
actual = sim_k.mm_checker.case_outputs.model.f_x
check = sim_k.mm_checker.case_inputs.meta_model.x
import pylab as plt
plt.figure(2)
plt.ioff()
plt.plot(check, actual, 'k', label='True f')
plt.plot(sim_cok.hifi_cases.x, sim_cok.hifi_cases.f_x,'ok',label="High Fi")
plt.plot(sim_cok.lofi_cases.x, sim_cok.lofi_cases.f_x,'or',label="Low Fi")
plt.plot(check, predicted_cok, 'g', label='Co-kriging')
plt.plot(check, predicted_cok + 2*sigma_cok, 'g', alpha=0.5, label='I95%')
plt.plot(check, predicted_cok - 2*sigma_cok, 'g', alpha=0.5)
plt.fill_between(check, predicted_cok + 2*sigma_cok,
predicted_cok - 2*sigma_cok, facecolor='g', alpha=0.2)
plt.plot(check, predicted_k, 'b', label='Kriging')
plt.plot(check, predicted_k + 2*sigma_k, 'b', alpha=0.5, label='I95%')
plt.plot(check, predicted_k - 2*sigma_k, 'b', alpha=0.5)
plt.fill_between(check, predicted_k + 2*sigma_k,
predicted_k - 2*sigma_k, facecolor='b', alpha=0.2)
plt.legend(loc='best')
plt.show()
error = 0.
for a,p in zip(actual,predicted_cok):
error += (a-p)**2
error = (error/len(actual))
print "RMSE Cokriging = %g" % error
error = 0.
for a,p in zip(actual, predicted_k):
error += (a-p)**2
error = (error/len(actual))
print "RMSE Kriging = %g" % error
|
data/agiliq/django-datagrid/books/urls.py
|
from django.conf.urls import *
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^books/', include('mylibrary.urls')),
(r'^admin/', include(admin.site.urls)),
)
|
data/adblockplus/gyp/test/win/win-tool/gyptest-win-tool-handles-readonly-files.py
|
"""
Make sure overwriting read-only files works as expected (via win-tool).
"""
import TestGyp
import filecmp
import os
import stat
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['ninja'])
os.makedirs('subdir')
read_only_files = ['read-only-file', 'subdir/A', 'subdir/B', 'subdir/C']
for f in read_only_files:
test.write(f, 'source_contents')
test.chmod(f, stat.S_IREAD)
if os.access(f, os.W_OK):
test.fail_test()
os.makedirs(test.built_file_path('dest/subdir'))
for f in read_only_files:
f = os.path.join('dest', f)
test.write(test.built_file_path(f), 'SHOULD BE OVERWRITTEN')
test.chmod(test.built_file_path(f), stat.S_IREAD)
if os.access(test.built_file_path(f), os.W_OK):
test.fail_test()
test.run_gyp('copies_readonly_files.gyp')
test.build('copies_readonly_files.gyp')
for f in read_only_files:
f = os.path.join('dest', f)
test.must_contain(test.built_file_path(f), 'source_contents')
for f in read_only_files:
if not filecmp.cmp(f, test.built_file_path(os.path.join('dest', f))):
test.fail_test()
test.pass_test()
|
data/Mendeley/mrec/doc/conf.py
|
import sys, os
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
needs_sphinx = '1.0'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'sphinx.ext.autosummary', 'numpydoc']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'mrec'
copyright = u'2013, Mendeley Ltd.'
import pkg_resources
try:
release = pkg_resources.get_distribution('mrec').version
except pkg_resources.DistributionNotFound:
print 'To build the documentation, The distribution information of mrec'
print 'has to be available. Either install the package into your'
print 'development environment or run "python setup.py develop" to setup'
print 'the metadata.'
sys.exit(1)
del pkg_resources
version = '.'.join(release.split('.')[:2])
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'sphinxdoc'
html_static_path = ['_static']
html_use_smartypants = True
htmlhelp_basename = 'mrecdoc'
latex_elements = {
}
latex_documents = [
('index', 'mrec.tex', u'mrec Documentation',
u'Mark Levy, Mendeley Ltd.', 'manual'),
]
man_pages = [
('index', 'mrec', u'mrec Documentation',
[u'Mark Levy, Mendeley Ltd.'], 1)
]
texinfo_documents = [
('index', 'mrec', u'mrec Documentation',
u'Mark Levy, Mendeley Ltd.', 'mrec', 'One line description of project.',
'Miscellaneous'),
]
|
data/adaptivdesign/django-sellmo/sellmo/contrib/product/apps.py
|
from sellmo.core.apps import SellmoAppConfig
class DefaultConfig(SellmoAppConfig):
name = 'sellmo.contrib.product'
dependencies = ['sellmo.apps.product']
|
data/ImageEngine/gaffer/python/GafferSceneTest/OpenGLRenderTest.py
|
import os
import unittest
import IECore
import Gaffer
import GafferImage
import GafferScene
import GafferSceneTest
@unittest.skipIf( "TRAVIS" in os.environ, "OpenGL not set up on Travis" )
class OpenGLRenderTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/test.exr" ) )
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["plane"]["transform"]["translate"].setValue( IECore.V3f( 0, 0, -5 ) )
s["image"] = GafferImage.ImageReader()
s["image"]["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checker.exr" ) )
s["shader"] = GafferScene.OpenGLShader()
s["shader"].loadShader( "Texture" )
s["shader"]["parameters"]["texture"].setInput( s["image"]["out"] )
s["shader"]["parameters"]["mult"].setValue( 1 )
s["shader"]["parameters"]["tint"].setValue( IECore.Color4f( 1 ) )
s["assignment"] = GafferScene.ShaderAssignment()
s["assignment"]["in"].setInput( s["plane"]["out"] )
s["assignment"]["shader"].setInput( s["shader"]["out"] )
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECore.Display(
self.temporaryDirectory() + "/test.exr",
"exr",
"rgba",
{}
)
)
s["outputs"]["in"].setInput( s["assignment"]["out"] )
s["render"] = GafferScene.OpenGLRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
s["fileName"].setValue( self.temporaryDirectory() + "/test.gfr" )
s.save()
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/test.exr" ) )
i = IECore.EXRImageReader( self.temporaryDirectory() + "/test.exr" ).read()
e = IECore.ImagePrimitiveEvaluator( i )
r = e.createResult()
e.pointAtUV( IECore.V2f( 0.5 ), r )
self.assertAlmostEqual( r.floatPrimVar( e.R() ), 0.666666, 5 )
self.assertAlmostEqual( r.floatPrimVar( e.G() ), 0.666666, 5 )
self.assertEqual( r.floatPrimVar( e.B() ), 0 )
def testOutputDirectoryCreation( self ) :
s = Gaffer.ScriptNode()
s["variables"].addMember( "renderDirectory", self.temporaryDirectory() + "/openGLRenderTest" )
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["outputs"].addOutput(
"beauty",
IECore.Display(
"$renderDirectory/test.
"exr",
"rgba",
{}
)
)
s["render"] = GafferScene.OpenGLRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/openGLRenderTest" ) )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/openGLRenderTest/test.0001.exr" ) )
s["fileName"].setValue( "/tmp/test.gfr" )
with s.context() :
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/openGLRenderTest" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/openGLRenderTest/test.0001.exr" ) )
def testHash( self ) :
c = Gaffer.Context()
c.setFrame( 1 )
c2 = Gaffer.Context()
c2.setFrame( 2 )
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["outputs"].addOutput( "beauty", IECore.Display( "$renderDirectory/test.
s["render"] = GafferScene.OpenGLRender()
self.assertEqual( s["render"].hash( c ), IECore.MurmurHash() )
s["render"]["in"].setInput( s["outputs"]["out"] )
self.assertNotEqual( s["render"].hash( c ), IECore.MurmurHash() )
self.assertNotEqual( s["render"].hash( c ), s["render"].hash( c2 ) )
current = s["render"].hash( c )
c["renderDirectory"] = self.temporaryDirectory() + "/openGLRenderTest"
self.assertNotEqual( s["render"].hash( c ), current )
current = s["render"].hash( c )
c["renderDirectory"] = self.temporaryDirectory() + "/openGLRenderTest2"
self.assertNotEqual( s["render"].hash( c ), current )
current = s["render"].hash( c )
c["ui:something"] = "alterTheUI"
self.assertEqual( s["render"].hash( c ), current )
current = s["render"].hash( c )
s["render"]["in"].setInput( s["plane"]["out"] )
self.assertNotEqual( s["render"].hash( c ), current )
if __name__ == "__main__":
unittest.main()
|
data/abhik/pebl/src/pebl/test/test_network.py
|
import os
import numpy as N
from pebl import network, data, config
class TestEdgeSet:
def setUp(self):
self.edges = network.EdgeSet(num_nodes=6)
self.tuplelist = [(0,2), (0,5), (1,2)]
for edge in self.tuplelist:
self.edges.add(edge)
def test_add(self):
self.edges.add((5,1))
assert set(self.edges) == set(self.tuplelist + [(5,1)])
def test_add_many(self):
self.edges.add_many([(5,1), (5,2)])
assert set(self.edges) == set(self.tuplelist + [(5,1), (5,2)])
def test_remove_many(self):
self.edges.remove_many([(0,2), (0,5)])
assert set(self.edges) == set([(1,2)])
def test_edgeiter(self):
assert set(self.edges) == set(self.tuplelist), "Can use edgelist as an iterable object."
def test_len(self):
assert len(self.edges) == len(self.tuplelist), "Can determine number of edges"
def test_addedges1(self):
self.edges.add((0, 3))
assert (0,3) in self.edges, "Can add edges to edgelist."
def test_incoming(self):
assert set(self.edges.incoming(0)) == set([]), "Testing edgelist.incoming"
assert set(self.edges.incoming(2)) == set([0,1]), "Testing edgelist.incoming"
def test_outgoing(self):
assert set(self.edges.outgoing(2)) == set([]), "Testing edgelist.outgoing"
assert set(self.edges.outgoing(0)) == set([2,5]), "Testing edgelist.outgoing"
def test_parents(self):
assert set(self.edges.parents(0)) == set([]), "Testing edgelist.parents"
assert set(self.edges.parents(2)) == set([0,1]), "Testing edgelist.parents"
def test_children(self):
assert set(self.edges.children(2)) == set([]), "Testing edgelist.children"
assert set(self.edges.children(0)) == set([2,5]), "Testing edgelist.children"
def test_contains1(self):
assert (0,2) in self.edges, "Can check if edge in edgelist."
def test_contains2(self):
assert (99,99) not in self.edges , "Edge not in edgelist."
def test_remove(self):
self.edges.remove((0,2))
assert set(self.edges) == set([(0,5), (1,2)]), "Can remove an edge."
def test_clear(self):
self.edges.clear()
assert list(self.edges) == [], "Can clear edgelist."
def test_adjacency_matrix(self):
expected = [
[0,0,1,0,0,1],
[0,0,1,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
[0,0,0,0,0,0],
]
assert (self.edges.adjacency_matrix == N.array(expected, dtype=bool)).all(), "Testing boolean matrix representation."
class TestIsAcyclic:
def setUp(self):
self.net = network.Network([data.DiscreteVariable(i,3) for i in xrange(6)])
for edge in [(0,1), (0,3), (1,2)]:
self.net.edges.add(edge)
def test_loopchecking(self):
assert self.net.is_acyclic(), "Should be acyclic"
def test_loopchecking2(self):
self.net.edges.add((2,0))
assert not self.net.is_acyclic(), "Should not be acyclic"
class TestNetwork:
expected_dotstring = """digraph G {\n\t"0";\n\t"1";\n\t"2";\n\t"3";\n\t"4";\n\t"5";\n\t"0" -> "1";\n\t"0" -> "3";\n\t"1" -> "2";\n}"""
expected_string = '0,1;0,3;1,2'
def setUp(self):
self.net = network.Network([data.DiscreteVariable(i,3) for i in xrange(6)])
for edge in [(0,1), (0,3), (1,2)]:
self.net.edges.add(edge)
def test_as_pydot(self):
assert len(self.net.as_pydot().get_edges()) == 3, "Can convert to pydot graph instance."
def test_as_image(self):
filename = "testnet.png"
self.net.as_image(filename=filename)
file_exists = filename in os.listdir(".")
if file_exists:
os.remove("./" + filename)
assert file_exists, "Can create image file."
def test_as_dotstring(self):
assert self.net.as_dotstring() == self.expected_dotstring, "Create dot-formatted string"
def test_as_dotfile(self):
self.net.as_dotfile('testdotfile.txt')
assert open('testdotfile.txt').read() == self.expected_dotstring, "Create dotfile."
def test_as_string(self):
assert self.net.as_string() == self.expected_string, "Create string representation."
def test_layout(self):
self.net.layout()
assert hasattr(self.net, 'node_positions'), "Has node_positions"
assert len(self.net.node_positions[0]) == 2, "Node positions are 2 values (x and y)"
assert isinstance(self.net.node_positions[0][0], (int, float)), "Positions are in floats or ints"
class TestNetworkFromListOfEdges:
def setUp(self):
self.net = network.Network(
[data.DiscreteVariable(str(i),3) for i in xrange(6)],
[(0,1), (4,5), (2,3)]
)
def test_number_of_edges(self):
assert len(list(self.net.edges)) == 3
def test_edges_exist(self):
assert (0,1) in self.net.edges and \
(4,5) in self.net.edges and \
(2,3) in self.net.edges
class TestNetworkFromString(TestNetworkFromListOfEdges):
def setUp(self):
self.net = network.Network(
[data.DiscreteVariable(str(i),3) for i in xrange(6)],
"0,1;4,5;2,3"
)
class TestRandomNetwork:
def setUp(self):
self.nodes = [data.DiscreteVariable(str(i),3) for i in xrange(6)]
def test_acyclic(self):
net = network.random_network(self.nodes)
assert net.is_acyclic() == True, "Random network is acyclic."
def test_required_edges(self):
net = network.random_network(self.nodes, required_edges=[(0,1), (3,0)])
assert net.is_acyclic() == True and \
(0,1) in net.edges and \
(3,0) in net.edges
def test_prohibited_edges(self):
net = network.random_network(self.nodes, prohibited_edges=[(0,1), (3,0)])
assert net.is_acyclic() == True and \
(0,1) not in net.edges and \
(3,0) not in net.edges
def test_required_and_prohibited_edges(self):
net = network.random_network(self.nodes, required_edges=[(0,1), (3,0)],
prohibited_edges=[(2,3), (1,4)])
assert net.is_acyclic() == True and \
(0,1) in net.edges and \
(3,0) in net.edges and \
(2,3) not in net.edges and \
(1,4) not in net.edges
|
data/Theano/Theano/theano/compile/tests/test_debugmode.py
|
from __future__ import absolute_import, print_function, division
from nose.plugins.skip import SkipTest
import unittest
import numpy
from theano import config
from theano import gof
import theano
import theano.tensor
from theano.compat import exc_message
from theano.compile import debugmode
import theano.compile
from theano.tests import unittest_tools as utt
def test0():
x = theano.tensor.dvector()
f = theano.function([x], ((2. * x) + 7) / 2., mode=debugmode.DebugMode())
f([1, 2])
class BROKEN_ON_PURPOSE_Add(gof.Op):
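    # Deliberately inconsistent op for exercising DebugMode: the C code
    # below always adds 0.5 to a + b, while perform() adds the 0.5 only
    # when py_offset is True. With py_offset=False ('inconsistent') the
    # Python and C thunks disagree, so DebugMode raises BadThunkOutput;
    # with py_offset=True ('off_by_half') both thunks agree but compute a
    # wrong add, which the bad-optimization tests below rely on.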
__props__ = ("py_offset",)
def __init__(self, py_offset):
gof.Op.__init__(self)
self.py_offset = py_offset
def make_node(self, a, b):
a = theano.tensor.as_tensor_variable(a)
b = theano.tensor.as_tensor_variable(b)
assert a.type.dtype == 'float64'
assert a.type.dtype == b.type.dtype
assert a.type.ndim == 1
r = gof.Apply(self, [a, b], [a.type()])
return r
def perform(self, node, inp, out_):
a, b = inp
out, = out_
z = a + b
if self.py_offset:
out[0] = z + 0.5
else:
out[0] = z
def c_code_cache_version(self):
return (1,)
def c_code(self, node, name, inp, out, sub):
a, b = inp
z, = out
return """
if (PyArray_NDIM(%(a)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(a) != 1"); %(fail)s;}
if (PyArray_NDIM(%(b)s) != 1) {PyErr_SetString(PyExc_NotImplementedError, "rank(b) != 1"); %(fail)s;}
if (PyArray_DESCR(%(a)s)->type_num != NPY_DOUBLE)
{PyErr_SetString(PyExc_NotImplementedError, "a dtype not NPY_DOUBLE"); %(fail)s;}
if (PyArray_DESCR(%(b)s)->type_num != NPY_DOUBLE)
{PyErr_SetString(PyExc_NotImplementedError, "b's dtype not NPY_DOUBLE"); %(fail)s;}
if (PyArray_DIMS(%(a)s)[0] != PyArray_DIMS(%(b)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "a and b have different lengths"); %(fail)s;}
if ((!%(z)s)
|| (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(b)s)[0])
)
{
{Py_XDECREF(%(z)s);}
npy_intp dims[] = {0};
dims[0] = PyArray_DIMS(%(b)s)[0];
%(z)s = (PyArrayObject*) PyArray_SimpleNew(1, dims, PyArray_DESCR(%(b)s)->type_num);
}
{
for (npy_intp m = 0; m < PyArray_DIMS(%(z)s)[0]; ++m)
{
((double*)PyArray_GETPTR1(%(z)s, m))[0]
= 0.5
+ ((double*)PyArray_GETPTR1(%(a)s, m))[0]
+ ((double*)PyArray_GETPTR1(%(b)s, m))[0] ;
}
}
""" % dict(locals(), **sub)
inconsistent = BROKEN_ON_PURPOSE_Add(False)
off_by_half = BROKEN_ON_PURPOSE_Add(True)
class WeirdBrokenOp(gof.Op):
"""
    This op makes its output a view of its input if behaviour is
    'times1_inplace', and modifies its input in place if behaviour is
    'times2_inplace'. In both cases it fails to declare the corresponding
    view_map or destroy_map, so DebugMode should raise an error.
"""
__props__ = ("behaviour", )
def __init__(self, behaviour):
gof.Op.__init__(self)
self.behaviour = behaviour
def make_node(self, a):
a_ = theano.tensor.as_tensor_variable(a)
r = gof.Apply(self, [a_], [a_.type()])
return r
def dontuse_perform(self, node, inp, out_):
a, = inp
out, = out_
if self.behaviour == 'times2':
out[0] = a * 2
elif self.behaviour == 'times2_inplace':
out[0] = a
out[0] *= 2
elif self.behaviour == 'times1':
out[0] = a * 1
elif self.behaviour == 'times1_inplace':
out[0] = a
else:
raise ValueError(self.behaviour)
def c_code_cache_version(self):
return (1,)
def c_code(self, node, name, inp, out, sub):
a, = inp
z, = out
if "inplace" in self.behaviour:
z_code = """
{Py_XDECREF(%(z)s);}
Py_INCREF(%(a)s);
%(z)s = %(a)s;
"""
else:
z_code = """
{Py_XDECREF(%(z)s);}
%(z)s = (PyArrayObject*) PyArray_SimpleNew(1, PyArray_DIMS(%(a)s), PyArray_DESCR(%(a)s)->type_num);
"""
prep_vars = """
            //the 1-D arrays a and z both have length M
npy_intp M = PyArray_DIMS(%(a)s)[0];
npy_intp Sa = PyArray_STRIDES(%(a)s)[0] / PyArray_DESCR(%(a)s)->elsize;
npy_intp Sz = PyArray_STRIDES(%(z)s)[0] / PyArray_DESCR(%(z)s)->elsize;
npy_double * Da = (npy_double*)PyArray_BYTES(%(a)s);
npy_double * Dz = (npy_double*)PyArray_BYTES(%(z)s);
            //loop over the M elements, applying the behaviour
for (npy_intp m = 0; m < M; ++m)
{
"""
if self.behaviour == 'times2':
behaviour = " Dz[m * Sz] = 2 * Da[m * Sa]; "
elif self.behaviour == 'times2_inplace':
behaviour = " Dz[m * Sz] = 2 * Da[m * Sa]; "
elif self.behaviour == 'times1':
behaviour = " Dz[m * Sz] = Da[m * Sa]; "
elif self.behaviour == 'times1_inplace':
behaviour = ""
else:
raise ValueError(self.behaviour)
prep_vars2 = """
}
"""
total = ((z_code + prep_vars + behaviour + prep_vars2)
% dict(locals(), **sub))
return total
wb2i = WeirdBrokenOp('times2_inplace')
wb2 = WeirdBrokenOp('times2')
wb1i = WeirdBrokenOp('times1_inplace')
wb1 = WeirdBrokenOp('times1')
def test_badthunkoutput():
a = theano.tensor.dvector()
b = theano.tensor.dvector()
f_good = theano.function([a, b],
off_by_half(a, b),
mode=debugmode.DebugMode(check_c_code=theano.config.cxx))
f_inconsistent = theano.function([a, b],
inconsistent(a, b),
mode=debugmode.DebugMode(check_c_code=theano.config.cxx))
f_good([1.0, 2.0, 3.0], [2, 3, 4])
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
try:
f_inconsistent([1.0, 2.0, 3.0], [2, 3, 4])
except debugmode.BadThunkOutput as e:
assert e.r.owner.op is inconsistent
return
assert False
def test_badoptimization():
@gof.local_optimizer([theano.tensor.add])
def insert_broken_add(node):
if node.op == theano.tensor.add:
return [off_by_half(*node.inputs)]
return False
edb = gof.EquilibriumDB()
edb.register('insert_broken_add', insert_broken_add, 'all')
opt = edb.query('+all')
a = theano.tensor.dvector()
b = theano.tensor.dvector()
f = theano.function([a, b], a + b,
mode=debugmode.DebugMode(optimizer=opt))
try:
f([1.0, 2.0, 3.0], [2, 3, 4],)
except debugmode.BadOptimization as e:
assert str(e.reason) == 'insert_broken_add'
return
assert False
def test_badoptimization_opt_err():
"""This variant of test_badoptimization() replace the working code
with a new apply node that will raise an error.
"""
@gof.local_optimizer([theano.tensor.add])
def insert_bigger_b_add(node):
if node.op == theano.tensor.add:
inputs = list(node.inputs)
if inputs[-1].owner is None:
inputs[-1] = theano.tensor.concatenate((inputs[-1],
inputs[-1]))
return [node.op(*inputs)]
return False
edb = gof.EquilibriumDB()
edb.register('insert_bigger_b_add', insert_bigger_b_add, 'all')
opt = edb.query('+all')
a = theano.tensor.dvector()
b = theano.tensor.dvector()
f = theano.function([a, b], a + b,
mode=debugmode.DebugMode(optimizer=opt))
try:
f([1.0, 2.0, 3.0], [2, 3, 4],)
except Exception as e:
assert 'insert_bigger_b_add' in exc_message(e)
return
assert False
def test_stochasticoptimization():
last_time_replaced = [False]
@gof.local_optimizer([theano.tensor.add])
def insert_broken_add_sometimes(node):
if node.op == theano.tensor.add:
last_time_replaced[0] = not last_time_replaced[0]
if last_time_replaced[0]:
return [off_by_half(*node.inputs)]
return False
edb = gof.EquilibriumDB()
edb.register(
'insert_broken_add_sometimes',
insert_broken_add_sometimes,
'all')
opt = edb.query('+all')
a = theano.tensor.dvector()
b = theano.tensor.dvector()
try:
theano.function([a, b],
theano.tensor.add(a, b),
mode=debugmode.DebugMode(
optimizer=opt,
check_c_code=True,
stability_patience=max(2, config.DebugMode.patience)))
except debugmode.StochasticOrder:
return
assert False
def test_just_c_code():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
x = theano.tensor.dvector()
f = theano.function([x], wb2(x),
mode=debugmode.DebugMode(check_py_code=False))
assert numpy.all(f([1, 2]) == [2, 4])
def test_baddestroymap():
class BadAdd(gof.Op):
def make_node(self, a, b):
c = a.type()
return gof.Apply(self, [a, b], [c])
def perform(self, node, inp, out):
a, b = inp
c, = out
c[0] = a
c[0] += b
x = theano.tensor.dvector()
y = theano.tensor.dvector()
f = theano.function([x, y], BadAdd()(x, y), mode='DEBUG_MODE')
try:
f([1, 2], [3, 4])
assert False
except debugmode.BadDestroyMap:
pass
def test_baddestroymap_c():
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
x = theano.tensor.dvector()
f = theano.function([x], wb2i(x),
mode=debugmode.DebugMode(check_py_code=False))
try:
assert numpy.all(f([1, 2]) == [2, 4])
assert False
except debugmode.BadDestroyMap:
pass
class Test_ViewMap(unittest.TestCase):
class BadAddRef(gof.Op):
def make_node(self, a, b):
c = b.type()
return gof.Apply(self, [a, b], [c])
def perform(self, node, inp, out):
a, b = inp
c, = out
c[0] = b
class BadAddSlice(gof.Op):
def make_node(self, a, b):
c = b.type()
return gof.Apply(self, [a, b], [c])
def perform(self, node, inp, out):
a, b = inp
c, = out
c[0] = b[1:3]
def test_badviewmap_ref(self):
x = theano.tensor.dvector()
y = theano.tensor.dvector()
f = theano.function([x, y], self.BadAddRef()(x, y), mode='DEBUG_MODE')
try:
f([1, 2], [3, 4])
assert False
except debugmode.BadViewMap:
return
def test_badviewmap_slice(self):
x = theano.tensor.dvector()
y = theano.tensor.dvector()
f = theano.function([x, y], self.BadAddSlice()(x, y),
mode='DEBUG_MODE')
try:
f([1, 2], [3, 4])
assert False
except debugmode.BadViewMap:
return
def test_goodviewmap(self):
goodop = self.BadAddRef()
goodop.view_map = {0: [1]}
x = theano.tensor.dvector()
y = theano.tensor.dvector()
f = theano.function([x, y], goodop(x, y), mode='DEBUG_MODE')
try:
f([1, 5, 1], [3, 4, 2, 1, 4])
return
except debugmode.BadViewMap:
assert False
def test_badviewmap_c(self):
if not theano.config.cxx:
raise SkipTest("G++ not available, so we need to skip this test.")
x = theano.tensor.dvector()
f = theano.function([x], wb1i(x),
mode=debugmode.DebugMode(check_py_code=False))
try:
f([1, 2])
assert False
except debugmode.BadViewMap:
pass
def test_aliased_outputs_ok(self):
class CustomOp(gof.Op):
view_map = {0: [0], 1: [0]}
def make_node(self, a, b):
c = a.type()
d = a.type()
return gof.Apply(self, [a, b], [c, d])
def perform(self, node, inp, out):
a, b = inp
c, d = out
c[0] = a
d[0] = a[1:]
x = theano.tensor.dvector('x')
y = theano.tensor.dvector('y')
f = theano.function([x, y], CustomOp()(x, y), mode='DEBUG_MODE')
r0, r1 = f([1, 2, 3, 4], [5, 6, 7, 8])
assert numpy.all(r0 == [1, 2, 3, 4])
assert numpy.all(r1 == [2, 3, 4])
def test_aliased_outputs_ok_output(self):
class CustomOp(gof.Op):
def make_node(self, a, b):
c = a.type()
d = a.type()
return gof.Apply(self, [a, b], [c, d])
def perform(self, node, inp, out):
a, b = inp
c, d = out
r = a * 2
c[0] = r
d[0] = r[1:]
x = theano.tensor.dvector()
y = theano.tensor.dvector()
f = theano.function([x, y], CustomOp()(x, y), mode='DEBUG_MODE')
r0, r1 = f([1, 2, 3, 4], [5, 6, 7, 8])
assert numpy.all(r0 == [2, 4, 6, 8])
assert numpy.all(r1 == [4, 6, 8])
def test_aliased_outputs_ok_shadow(self):
class CustomOp(gof.Op):
def make_node(self, a, b):
c = a.type()
d = a.type()
return gof.Apply(self, [a, b], [c, d])
def perform(self, node, inp, out):
a, b = inp
c, d = out
r = a * 1
c[0] = r
d[0] = r[1:]
x = theano.tensor.dvector('x')
y = theano.tensor.dvector('y')
f = theano.function([x, y], CustomOp()(x, y)[0] * 2, mode='DEBUG_MODE')
r0 = f([1, 2, 3, 4], [5, 6, 7, 8])
assert numpy.all(r0 == [2, 4, 6, 8])
def test_aliased_outputs_bad(self):
class CustomOp(gof.Op):
def make_node(self, a, b):
c = a.type()
d = a.type()
return gof.Apply(self, [a, b], [c, d])
def perform(self, node, inp, out):
a, b = inp
c, d = out
r = a * 1
c[0] = r[:-1]
d[0] = r[1:]
custom_op = CustomOp()
x = theano.tensor.dvector()
y = theano.tensor.dvector()
bad_xy0, bad_xy1 = custom_op(x, y)
out = bad_xy0 * 2 + bad_xy1 * 2
f = theano.function([x, y], out, mode='DEBUG_MODE')
try:
f([1, 2, 3, 4], [5, 6, 7, 8])
assert False
except debugmode.BadViewMap:
pass
class Test_check_isfinite(unittest.TestCase):
def setUp(self):
self.old_ts = theano.tensor.TensorType.filter_checks_isfinite
self.old_dm = theano.compile.mode.predefined_modes[
'DEBUG_MODE'].check_isfinite
def tearDown(self):
theano.tensor.TensorType.filter_checks_isfinite = self.old_ts
theano.compile.mode.predefined_modes[
'DEBUG_MODE'].check_isfinite = self.old_dm
def test_check_isfinite(self):
x = theano.tensor.vector()
f = theano.function([x], (x + 2) * 5, mode='DEBUG_MODE')
g = theano.function([x], theano.tensor.log(x), mode='DEBUG_MODE')
f(numpy.log([3, 4, 5]).astype(config.floatX))
self.assertRaises(debugmode.InvalidValueError, f,
numpy.log([3, -4, 5]).astype(config.floatX))
self.assertRaises(debugmode.InvalidValueError, f,
(numpy.asarray([0, 1.0, 0]) / 0).astype(config.floatX))
self.assertRaises(debugmode.InvalidValueError, f,
(numpy.asarray([1.0, 1.0, 1.0]) / 0).astype(config.floatX))
self.assertRaises(debugmode.InvalidValueError, g,
numpy.asarray([3, -4, 5], dtype=config.floatX))
theano.tensor.TensorType.filter_checks_isfinite = False
theano.compile.mode.predefined_modes[
'DEBUG_MODE'].check_isfinite = False
f(numpy.asarray(numpy.asarray([1.0, 1.0, 1.0]) / 0,
dtype=config.floatX))
def test_check_isfinite_disabled(self):
x = theano.tensor.dvector()
f = theano.function([x], (x + 2) * 5,
mode=debugmode.DebugMode(check_isfinite=False))
f(numpy.log([3, -4, 5]))
infs = numpy.asarray([1.0, 1., 1.]) / 0
f(infs)
return
class BrokenCImplementationAdd(gof.Op):
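    # Broken on purpose: the C code assumes the (possibly preallocated)
    # output is C-contiguous and walks it with a raw pointer, so a
    # Fortran-contiguous preallocated output yields a result that differs
    # from perform(). Test_preallocated_output below expects DebugMode's
    # check_preallocated_output=['f_contiguous'] mode to catch this as a
    # BadThunkOutput when the C implementation is available.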
__props__ = ()
def make_node(self, a, b):
a = theano.tensor.as_tensor_variable(a)
b = theano.tensor.as_tensor_variable(b)
assert a.type.dtype == 'float32'
assert a.type.dtype == b.type.dtype
assert a.type.ndim == 2
r = gof.Apply(self, [a, b], [a.type()])
return r
def perform(self, node, inp, out_):
a, b = inp
out, = out_
z = a + b
out[0] = z
def c_code_cache_version(self):
return (1,)
def c_code(self, node, name, inp, out, sub):
a, b = inp
z, = out
debug = 0
return """
//printf("executing c_code\\n");
if (PyArray_NDIM(%(a)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(a) != 2"); %(fail)s;}
if (PyArray_NDIM(%(b)s) != 2) {PyErr_SetString(PyExc_NotImplementedError, "rank(b) != 2"); %(fail)s;}
if (PyArray_DESCR(%(a)s)->type_num != NPY_FLOAT)
{PyErr_SetString(PyExc_NotImplementedError, "a dtype not NPY_FLOAT"); %(fail)s;}
if (PyArray_DESCR(%(b)s)->type_num != NPY_FLOAT)
{PyErr_SetString(PyExc_NotImplementedError, "b's dtype not NPY_FLOAT"); %(fail)s;}
if (PyArray_DIMS(%(a)s)[0] != PyArray_DIMS(%(a)s)[1])
{PyErr_SetString(PyExc_NotImplementedError, "a is not square"); %(fail)s;}
if (PyArray_DIMS(%(b)s)[0] != PyArray_DIMS(%(b)s)[1])
{PyErr_SetString(PyExc_NotImplementedError, "b is not square"); %(fail)s;}
if (PyArray_DIMS(%(a)s)[0] != PyArray_DIMS(%(b)s)[0])
{PyErr_SetString(PyExc_NotImplementedError, "a and b have different dimensions"); %(fail)s;}
// We do not check for c_contiguous property here
if (%(debug)s)
{
if (!%(z)s)
printf("%(z)s is not there, %%p \\n", %(z)s);
else if (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(b)s)[0])
printf("Dimension 0 mismatch for %(z)s and %(b)s\\n");
else if (PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(b)s)[1])
printf("Dimension 1 mismatch for %(z)s and %(b)s\\n");
else
printf("Reusing %(z)s\\n");
}
if ((!%(z)s)
|| (PyArray_DIMS(%(z)s)[0] != PyArray_DIMS(%(b)s)[0])
|| (PyArray_DIMS(%(z)s)[1] != PyArray_DIMS(%(b)s)[1])
)
{
Py_XDECREF(%(z)s);
npy_intp dims[] = {0, 0};
dims[0] = PyArray_DIMS(%(b)s)[0];
dims[1] = PyArray_DIMS(%(b)s)[1];
%(z)s = (PyArrayObject*) PyArray_SimpleNew(2, dims, PyArray_DESCR(%(b)s)->type_num);
}
// Let us assume that %(z)s is c_contiguous
{
dtype_%(z)s * z = ((dtype_%(z)s*)(PyArray_GETPTR2(%(z)s,0,0)));
for (int i=0; i<PyArray_DIMS(%(b)s)[0]; i++)
{
for (int j=0; j<PyArray_DIMS(%(b)s)[1]; j++)
{
*z = ((float*)PyArray_GETPTR2(%(a)s, i, j))[0] +
((float*)PyArray_GETPTR2(%(b)s, i, j))[0] ;
z++;
}
}
}
""" % dict(locals(), **sub)
class VecAsRowAndCol(gof.Op):
"""
Transforms a vector into a row and a column.
This Op exists to check everything is correct when an Op has
two outputs with different broadcasting patterns.
"""
__props__ = ()
def make_node(self, v):
if not isinstance(v, gof.Variable):
v = theano.tensor.as_tensor_variable(v)
assert v.type.ndim == 1
type_class = type(v.type)
out_r_type = type_class(dtype=v.dtype, broadcastable=(True, False))
out_c_type = type_class(dtype=v.dtype, broadcastable=(False, True))
return gof.Apply(self, [v], [out_r_type(), out_c_type()])
def perform(self, node, inp, out):
v, = inp
r, c = out
lv = v.shape[0]
if (r[0] is None) or (r[0].shape != (1, lv)):
r[0] = node.outputs[0].type.value_zeros((1, lv))
if (c[0] is None) or (c[0].shape != (lv, 1)):
c[0] = node.outputs[1].type.value_zeros((lv, 1))
for i in range(lv):
r[0][0, i] = v[i]
c[0][i, 0] = v[i]
class Test_preallocated_output(unittest.TestCase):
def setUp(self):
self.rng = numpy.random.RandomState(seed=utt.fetch_seed())
def test_f_contiguous(self):
a = theano.tensor.fmatrix('a')
b = theano.tensor.fmatrix('b')
z = BrokenCImplementationAdd()(a, b)
out = theano.tensor.dot(z, numpy.eye(7))
a_val = self.rng.randn(7, 7).astype('float32')
b_val = self.rng.randn(7, 7).astype('float32')
mode = debugmode.DebugMode(
check_preallocated_output=['c_contiguous'])
f = theano.function([a, b], out, mode=mode)
f(a_val, b_val)
mode = debugmode.DebugMode(
check_preallocated_output=['f_contiguous'])
f = theano.function([a, b], out, mode=mode)
if theano.config.cxx:
self.assertRaises(debugmode.BadThunkOutput, f, a_val, b_val)
else:
f(a_val, b_val)
def test_f_contiguous_out(self):
a = theano.tensor.fmatrix('a')
b = theano.tensor.fmatrix('b')
out = BrokenCImplementationAdd()(a, b)
a_val = self.rng.randn(7, 7).astype('float32')
b_val = self.rng.randn(7, 7).astype('float32')
mode = debugmode.DebugMode(
check_preallocated_output=['c_contiguous'])
f = theano.function([a, b], out, mode=mode)
f(a_val, b_val)
mode = debugmode.DebugMode(
check_preallocated_output=['f_contiguous'])
f = theano.function([a, b], out, mode=mode)
if theano.config.cxx:
self.assertRaises(debugmode.BadThunkOutput, f, a_val, b_val)
else:
f(a_val, b_val)
def test_output_broadcast_tensor(self):
v = theano.tensor.fvector('v')
c, r = VecAsRowAndCol()(v)
f = theano.function([v], [c, r])
v_val = self.rng.randn(5).astype('float32')
f(v_val)
def test_output_broadcast_cuda(self):
from theano.sandbox import cuda
if not cuda.cuda_available:
raise SkipTest("Optional package Cuda disabled")
if cuda.use.device_number is None:
cuda.use("gpu",
force=True,
default_to_move_computation_to_gpu=False,
move_shared_float32_to_gpu=False,
enable_cuda=False)
v = cuda.fvector('v')
c, r = VecAsRowAndCol()(v)
f = theano.function([v], [c, r])
v_val = cuda.CudaNdarray(self.rng.randn(5).astype('float32'))
f(v_val)
|
data/Yelp/paasta/tests/cli/fsm/autosuggest_test.py
|
from contextlib import nested
import mock
from pytest import raises
from paasta_tools.cli.fsm import autosuggest
class TestGetSmartstackProxyPortFromFile:
def test_multiple_stanzas_per_file(self):
with nested(
mock.patch("__builtin__.open", autospec=True),
mock.patch("paasta_tools.cli.fsm.autosuggest.yaml", autospec=True),
) as (
mock_open,
mock_yaml,
):
mock_yaml.load.return_value = {
"main": {
"proxy_port": 1,
},
"foo": {
"proxy_port": 2,
},
}
actual = autosuggest._get_smartstack_proxy_port_from_file(
"fake_root",
"smartstack.yaml",
)
assert actual == 2
class TestSuggestSmartstackProxyPort:
def test_suggest_smartstack_proxy_port(self):
yelpsoa_config_root = "fake_yelpsoa_config_root"
walk_return = [
("fake_root1", "fake_dir1", ["service.yaml"]),
("fake_root2", "fake_dir2", ["smartstack.yaml"]),
("fake_root3", "fake_dir3", ["service.yaml"]),
]
mock_walk = mock.Mock(return_value=walk_return)
get_smartstack_proxy_port_from_file_returns = [
20001,
20002,
55555,
]
        def get_smartstack_proxy_port_from_file_side_effect(*args):
            return get_smartstack_proxy_port_from_file_returns.pop(0)
        mock_get_smartstack_proxy_port_from_file = mock.Mock(side_effect=get_smartstack_proxy_port_from_file_side_effect)
with nested(
mock.patch("os.walk", mock_walk),
mock.patch("paasta_tools.cli.fsm.autosuggest._get_smartstack_proxy_port_from_file",
mock_get_smartstack_proxy_port_from_file),
):
actual = autosuggest.suggest_smartstack_proxy_port(yelpsoa_config_root, range_min=20001, range_max=20003)
assert mock_get_smartstack_proxy_port_from_file.call_count == 3
assert actual == 20003
def test_suggest_smartstack_proxy_port_too_many_services(self):
"""If all the ports are taken, we should raise an error"""
yelpsoa_config_root = "fake_yelpsoa_config_root"
walk_return = [
("fake_root1", "fake_dir1", ["service.yaml"]),
("fake_root2", "fake_dir2", ["smartstack.yaml"]),
("fake_root3", "fake_dir3", ["service.yaml"]),
]
mock_walk = mock.Mock(return_value=walk_return)
get_smartstack_proxy_port_from_file_returns = [
20001,
20002,
55555,
]
        def get_smartstack_proxy_port_from_file_side_effect(*args):
            return get_smartstack_proxy_port_from_file_returns.pop(0)
        mock_get_smartstack_proxy_port_from_file = mock.Mock(side_effect=get_smartstack_proxy_port_from_file_side_effect)
with nested(
mock.patch("os.walk", mock_walk),
mock.patch("paasta_tools.cli.fsm.autosuggest._get_smartstack_proxy_port_from_file",
mock_get_smartstack_proxy_port_from_file),
):
with raises(Exception) as exc:
autosuggest.suggest_smartstack_proxy_port(yelpsoa_config_root, range_min=20001,
range_max=20002)
assert "There are no more ports available in the range [20001, 20002]" == str(exc.value)
|
data/SparkPost/python-sparkpost/examples/suppression_list/update_suppression_enty.py
|
from sparkpost import SparkPost
sp = SparkPost()
result = sp.suppression_list.update({
'email': 'test@test.com',
'transactional': False,
'non_transactional': True,
'description': 'Test description'
})
print(result)
|
data/UDST/activitysim/activitysim/defaults/tables/__init__.py
|
import households
import persons
import landuse
import skims
import accessibility
import tours
import size_terms
|
data/Pylons/pylons/tests/test_webapps/filestotest/helpers_sample.py
|
"""Helper functions
Consists of functions typically used within templates, but also
available to Controllers. This module is available to both as 'h'.
"""
|
data/Netflix/security_monkey/security_monkey/tests/test_elasticsearch_service.py
|
"""
.. module: security_monkey.tests.test_elasticsearch_service
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Mike Grima <mgrima@netflix.com>
"""
import json
from security_monkey.datastore import NetworkWhitelistEntry, Account
from security_monkey.tests import SecurityMonkeyTestCase
from security_monkey import db
from security_monkey.watchers.elasticsearch_service import ElasticSearchServiceItem
CONFIG_ONE = {
"name": "es_test",
"policy": json.loads(b"""{
"Statement": [
{
"Action": "es:*",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Resource": "arn:aws:es:us-east-1:012345678910:domain/es_test/*",
"Sid": ""
}
],
"Version": "2012-10-17"
}
""")
}
CONFIG_TWO = {
"name": "es_test_2",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": "*",
"Action": "es:*",
"Resource": "arn:aws:es:us-west-2:012345678910:domain/es_test_2/*"
}
]
}
""")
}
CONFIG_THREE = {
"name": "es_test_3",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::012345678910:root"
},
"Action": "es:*",
"Resource": "arn:aws:es:eu-west-1:012345678910:domain/es_test_3/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": "*",
"Action": "es:ESHttp*",
"Resource": "arn:aws:es:eu-west-1:012345678910:domain/es_test_3/*",
"Condition": {
"IpAddress": {
"aws:SourceIp": [
"192.168.1.1/32",
"10.0.0.1/8"
]
}
}
}
]
}
""")
}
CONFIG_FOUR = {
"name": "es_test_4",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::012345678910:root"
},
"Action": "es:*",
"Resource": "arn:aws:es:us-east-1:012345678910:domain/es_test_4/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": "*",
"Action": "es:ESHttp*",
"Resource": "arn:aws:es:us-east-1:012345678910:domain/es_test_4/*",
"Condition": {
"IpAddress": {
"aws:SourceIp": [
"0.0.0.0/0"
]
}
}
}
]
}
""")
}
CONFIG_FIVE = {
"name": "es_test_5",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::012345678910:root"
},
"Action": "es:*",
"Resource": "arn:aws:es:us-east-1:012345678910:domain/es_test_5/*"
},
{
"Sid": "",
"Effect": "Deny",
"Principal": {
"AWS": "arn:aws:iam::012345678910:role/not_this_role"
},
"Action": "es:*",
"Resource": "arn:aws:es:us-east-1:012345678910:domain/es_test_5/*"
}
]
}
""")
}
CONFIG_SIX = {
"name": "es_test_6",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::012345678910:role/a_good_role"
},
"Action": "es:*",
"Resource": "arn:aws:es:eu-west-1:012345678910:domain/es_test_6/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": "*",
"Action": "es:ESHttp*",
"Resource": "arn:aws:es:eu-west-1:012345678910:domain/es_test_6/*",
"Condition": {
"IpAddress": {
"aws:SourceIp": [
"192.168.1.1/32",
"100.0.0.1/16"
]
}
}
}
]
}
""")
}
CONFIG_SEVEN = {
"name": "es_test_7",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::012345678910:role/a_good_role"
},
"Action": "es:*",
"Resource": "arn:aws:es:eu-west-1:012345678910:domain/es_test_7/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": "*",
"Action": "es:ESHttp*",
"Resource": "arn:aws:es:eu-west-1:012345678910:domain/es_test_7/*",
"Condition": {
"IpAddress": {
"aws:SourceIp": [
"192.168.1.200/32",
"10.0.0.1/8"
]
}
}
}
]
}
""")
}
CONFIG_EIGHT = {
"name": "es_test_8",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "es:*",
"Resource": "arn:aws:es:eu-west-1:012345678910:domain/es_test_8/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": "*",
"Action": "es:ESHttp*",
"Resource": "arn:aws:es:eu-west-1:012345678910:domain/es_test_8/*",
"Condition": {
"IpAddress": {
"aws:SourceIp": [
"192.168.1.1/32",
"100.0.0.1/16"
]
}
}
}
]
}
""")
}
CONFIG_NINE = {
"name": "es_test_9",
"policy": json.loads(b"""{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::111111111111:root"
},
"Action": "es:*",
"Resource": "arn:aws:es:us-east-1:012345678910:domain/es_test_9/*"
}
]
}
""")
}
WHITELIST_CIDRS = [
("Test one", "192.168.1.1/32"),
("Test two", "100.0.0.1/16"),
]
class ElasticSearchServiceTestCase(SecurityMonkeyTestCase):
def setUp(self):
self.es_items = [
ElasticSearchServiceItem(region="us-east-1", account="TEST_ACCOUNT", name="es_test", config=CONFIG_ONE),
ElasticSearchServiceItem(region="us-west-2", account="TEST_ACCOUNT", name="es_test_2", config=CONFIG_TWO),
ElasticSearchServiceItem(region="eu-west-1", account="TEST_ACCOUNT", name="es_test_3", config=CONFIG_THREE),
ElasticSearchServiceItem(region="us-east-1", account="TEST_ACCOUNT", name="es_test_4", config=CONFIG_FOUR),
ElasticSearchServiceItem(region="us-east-1", account="TEST_ACCOUNT", name="es_test_5", config=CONFIG_FIVE),
ElasticSearchServiceItem(region="eu-west-1", account="TEST_ACCOUNT", name="es_test_6", config=CONFIG_SIX),
ElasticSearchServiceItem(region="eu-west-1", account="TEST_ACCOUNT", name="es_test_7", config=CONFIG_SEVEN),
ElasticSearchServiceItem(region="eu-west-1", account="TEST_ACCOUNT", name="es_test_8", config=CONFIG_EIGHT),
ElasticSearchServiceItem(region="us-east-1", account="TEST_ACCOUNT", name="es_test_9", config=CONFIG_NINE),
]
test_account = Account()
test_account.name = "TEST_ACCOUNT"
test_account.notes = "TEST ACCOUNT"
test_account.s3_name = "TEST_ACCOUNT"
test_account.number = "012345678910"
test_account.role_name = "TEST_ACCOUNT"
db.session.add(test_account)
db.session.commit()
def tearDown(self):
test_account = Account.query.filter(Account.number == "012345678910").first()
if test_account is not None:
db.session.delete(test_account)
db.session.commit()
def test_es_auditor(self):
from security_monkey.auditors.elasticsearch_service import ElasticSearchServiceAuditor
es_auditor = ElasticSearchServiceAuditor(accounts=["012345678910"])
es_auditor.network_whitelist = []
for cidr in WHITELIST_CIDRS:
whitelist_cidr = NetworkWhitelistEntry()
whitelist_cidr.cidr = cidr[1]
whitelist_cidr.name = cidr[0]
es_auditor.network_whitelist.append(whitelist_cidr)
for es_domain in self.es_items:
es_auditor.check_es_access_policy(es_domain)
self.assertEquals(len(self.es_items[0].audit_issues), 1)
self.assertEquals(self.es_items[0].audit_issues[0].score, 20)
self.assertEquals(len(self.es_items[1].audit_issues), 1)
self.assertEquals(self.es_items[1].audit_issues[0].score, 20)
self.assertEquals(len(self.es_items[2].audit_issues), 2)
self.assertEquals(self.es_items[2].audit_issues[0].score, 5)
self.assertEquals(self.es_items[2].audit_issues[1].score, 7)
self.assertEquals(len(self.es_items[3].audit_issues), 1)
self.assertEquals(self.es_items[3].audit_issues[0].score, 20)
self.assertEquals(len(self.es_items[4].audit_issues), 0)
self.assertEquals(len(self.es_items[5].audit_issues), 0)
self.assertEquals(len(self.es_items[6].audit_issues), 3)
self.assertEquals(self.es_items[6].audit_issues[0].score, 5)
self.assertEquals(self.es_items[6].audit_issues[1].score, 5)
self.assertEquals(self.es_items[6].audit_issues[2].score, 7)
self.assertEquals(len(self.es_items[7].audit_issues), 1)
self.assertEquals(self.es_items[7].audit_issues[0].score, 20)
self.assertEquals(len(self.es_items[8].audit_issues), 2)
self.assertEquals(self.es_items[8].audit_issues[0].score, 6)
self.assertEquals(self.es_items[8].audit_issues[1].score, 10)
|
data/adafruit/Adafruit_Python_CharLCD/examples/char_lcd.py
|
import time
import Adafruit_CharLCD as LCD
lcd_rs = 27
lcd_en = 22
lcd_d4 = 25
lcd_d5 = 24
lcd_d6 = 23
lcd_d7 = 18
lcd_backlight = 4
lcd_columns = 16
lcd_rows = 2
lcd = LCD.Adafruit_CharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
lcd_columns, lcd_rows, lcd_backlight)
lcd.message('Hello\nworld!')
time.sleep(5.0)
lcd.clear()
lcd.show_cursor(True)
lcd.message('Show cursor')
time.sleep(5.0)
lcd.clear()
lcd.blink(True)
lcd.message('Blink cursor')
time.sleep(5.0)
lcd.show_cursor(False)
lcd.blink(False)
lcd.clear()
message = 'Scroll'
lcd.message(message)
for i in range(lcd_columns-len(message)):
time.sleep(0.5)
lcd.move_right()
for i in range(lcd_columns-len(message)):
time.sleep(0.5)
lcd.move_left()
lcd.clear()
lcd.message('Flash backlight\nin 5 seconds...')
time.sleep(5.0)
lcd.set_backlight(0)
time.sleep(2.0)
lcd.clear()
lcd.message('Goodbye!')
lcd.set_backlight(1)
|
data/aarongarrett/inspyred/examples/standard/sa_example.py
|
from random import Random
from time import time
import inspyred
def main(prng=None, display=False):
if prng is None:
prng = Random()
prng.seed(time())
problem = inspyred.benchmarks.Sphere(2)
ea = inspyred.ec.SA(prng)
ea.terminator = inspyred.ec.terminators.evaluation_termination
final_pop = ea.evolve(evaluator=problem.evaluator,
generator=problem.generator,
maximize=problem.maximize,
bounder=problem.bounder,
max_evaluations=30000)
if display:
best = max(final_pop)
print('Best Solution: \n{0}'.format(str(best)))
return ea
if __name__ == '__main__':
main(display=True)
|
data/JelteF/PyLaTeX/tests/test_quantities.py
|
import quantities as pq
from pylatex.quantities import _dimensionality_to_siunitx, Quantity
def test_quantity():
v = 1 * pq.m/pq.s
q1 = Quantity(v)
assert q1.dumps() == r'\SI{1.0}{\meter\per\second}'
q2 = Quantity(v, format_cb=lambda x: str(int(x)))
assert q2.dumps() == r'\SI{1}{\meter\per\second}'
q3 = Quantity(v, options={'zero-decimal-to-integer': 'true'})
ref = r'\SI[zero-decimal-to-integer=true]{1.0}{\meter\per\second}'
assert q3.dumps() == ref
def test_quantity_float():
q1 = Quantity(42.0)
assert q1.dumps() == r'\num{42.0}'
def test_quantity_uncertain():
t = pq.UncertainQuantity(7., pq.second, 1.)
q1 = Quantity(t)
assert q1.dumps() == r'\SI{7.0 +- 1.0}{\second}'
def test_dimensionality_to_siunitx():
assert _dimensionality_to_siunitx((pq.volt/pq.kelvin).dimensionality) == \
r'\volt\per\Kelvin'
if __name__ == '__main__':
test_quantity()
test_dimensionality_to_siunitx()
|
data/KimiNewt/pyshark/tests/conftest.py
|
import os
import logbook
import pytest
import pyshark
@pytest.fixture
def caps_directory():
return os.path.join(os.path.dirname(__file__), 'caps')
@pytest.fixture
def lazy_simple_capture(request, caps_directory):
"""
Does not fill the cap with packets.
"""
cap_path = os.path.join(caps_directory, 'capture_test.pcapng')
cap = pyshark.FileCapture(cap_path)
cap.log.level = logbook.DEBUG
def finalizer():
cap.close()
cap.eventloop.stop()
request.addfinalizer(finalizer)
return cap
@pytest.fixture
def simple_capture(lazy_simple_capture):
"""
A capture already full of packets
"""
lazy_simple_capture.load_packets()
return lazy_simple_capture
|
data/adaptivdesign/django-sellmo/sellmo/contrib/shipping/methods/tiered_shipping/configure.py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from sellmo.contrib.settings import settings_manager
from .constants import MAX_TIER_ATTRIBUTES
group = _("Tiered Shipping")
for i in range(MAX_TIER_ATTRIBUTES):
settings_manager.add_setting(
'shipping_tier_attribute{0}'.format(i + 1),
models.ForeignKey(
'attribute.Attribute',
null=True,
blank=True,
related_name='+'
),
group
)
|