text stringlengths 0 1.05M | meta dict |
|---|---|
__all__ = ['Uniform']
class Uniform:
    '''
    A uniform is a global GLSL variable declared with the "uniform" storage qualifier.

    Uniforms act as parameters that the user of a shader program can pass
    to that program.  In ModernGL they are obtained from a program via
    :py:meth:`Program.__getitem__` or :py:meth:`Program.__iter__`;
    instances are never constructed directly.
    '''

    __slots__ = ['mglo', '_location', '_array_length', '_dimension', '_name', 'extra']

    def __init__(self):
        self.mglo = None  # Internal representation for debug purposes only.
        self._location = None
        self._array_length = None
        self._dimension = None
        self._name = None
        self.extra = None  # Any - Attribute for storing user defined objects
        # User code must not instantiate this class; the backend creates it.
        raise TypeError()

    def __repr__(self):
        return '<Uniform: %d>' % (self._location,)

    @property
    def location(self) -> int:
        '''
        int: The location of the uniform.

        Holds the value returned by glGetUniformLocation.  To set the
        value of the uniform use :py:attr:`value` instead.
        '''
        return self._location

    @property
    def dimension(self) -> int:
        '''
        int: The number of components of the uniform's GLSL type.

        Scalars and samplers (``bool``, ``int``, ``uint``, ``float``,
        ``double``, ``sampler2D``, ``sampler2DCube``, ``sampler2DShadow``)
        have dimension 1; vectors have 2, 3 or 4 (``vec2`` ... ``dvec4``);
        matrices have rows*columns (``mat2`` -> 4, ``mat2x3`` -> 6,
        ``mat3`` -> 9, ``mat3x4`` -> 12, ``mat4`` -> 16, and likewise for
        the ``dmat`` double-precision variants).
        '''
        return self._dimension

    @property
    def array_length(self) -> int:
        '''
        int: The length of the array of the uniform.

        Equals ``1`` for non-array uniforms.
        '''
        return self._array_length

    @property
    def name(self) -> str:
        '''
        str: The name of the uniform.

        The name does not contain a leading ``[0]``; it may contain
        ``[ ]`` when the uniform is part of a struct.
        '''
        return self._name

    @property
    def value(self):
        '''
        The value of the uniform.

        Reading the value may force the GPU to sync.  Assign a tuple for
        non-array uniforms and a list of tuples for array uniforms.
        '''
        return self.mglo.value

    @value.setter
    def value(self, value):
        self.mglo.value = value

    def read(self) -> bytes:
        '''
        Read the raw value of the uniform as bytes.
        '''
        return self.mglo.data

    def write(self, data) -> None:
        '''
        Write the raw value of the uniform from bytes.
        '''
        self.mglo.data = data
| {
"repo_name": "cprogrammer1994/ModernGL",
"path": "moderngl/program_members/uniform.py",
"copies": "1",
"size": "6211",
"license": "mit",
"hash": -2388784603019592700,
"line_mean": 33.893258427,
"line_max": 91,
"alpha_frac": 0.2680727741,
"autogenerated": false,
"ratio": 5.386816999132697,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6154889773232698,
"avg_score": null,
"num_lines": null
} |
__all__ = ['unravel_index',
'mgrid',
'ogrid',
'r_', 'c_', 's_',
'index_exp', 'ix_',
'ndenumerate','ndindex',
'fill_diagonal','diag_indices','diag_indices_from']
import sys
import numpy.core.numeric as _nx
from numpy.core.numeric import ( asarray, ScalarType, array, alltrue, cumprod,
arange )
from numpy.core.numerictypes import find_common_type
import math
import function_base
import numpy.matrixlib as matrix
from function_base import diff
makemat = matrix.matrix
# contributed by Stefan van der Walt
def unravel_index(x,dims):
    """
    Convert a flat index to an index tuple for an array of given shape.

    Parameters
    ----------
    x : int
        Flattened index.
    dims : tuple of ints
        Input shape, the shape of an array into which indexing is
        required.

    Returns
    -------
    idx : tuple of ints
        Tuple of the same shape as `dims`, containing the unraveled index.

    Raises
    ------
    ValueError
        If `x` is negative or does not fit in an array of shape `dims`.

    Notes
    -----
    In the Examples section, since ``arr.flat[x] == arr.max()`` it may be
    easier to use flattened indexing than to re-map the index to a tuple.

    Examples
    --------
    >>> arr = np.arange(20).reshape(5, 4)
    >>> x = arr.argmax()
    >>> x
    19
    >>> idx = np.unravel_index(x, arr.shape)
    >>> idx
    (4, 3)
    >>> arr[idx] == arr.max()
    True
    """
    if x > _nx.prod(dims)-1 or x < 0:
        raise ValueError("Invalid index, must be 0 <= x <= number of elements.")
    # C-order strides of a shape (a,b,c,d) array:
    # reverse, drop the first element, prepend 1, cumprod, reverse again:
    # [a,b,c,d] -> [d,c,b] -> [1,d,c,b] -> [1,d,dc,dcb] -> [dcb,dc,d,1]
    dim_prod = _nx.cumprod([1] + list(dims)[:0:-1])[::-1]
    # Indices become [x//dcb % a, x//dc % b, x//d % c, x//1 % d].
    # Floor division keeps the result integral: plain ``/`` is true
    # division under Python 3 (or ``from __future__ import division``)
    # and would yield floats such as (4.75, 3.0) instead of (4, 3).
    return tuple(x // dim_prod % dims)
def ix_(*args):
    """
    Construct an open mesh from multiple sequences.

    This function takes N 1-D sequences and returns N outputs with N
    dimensions each, such that the shape is 1 in all but one dimension
    and the dimension with the non-unit shape value cycles through all
    N dimensions.

    Using `ix_` one can quickly construct index arrays that will index
    the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
    ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.

    Parameters
    ----------
    args : 1-D sequences
        Each sequence selects indices along one dimension; boolean
        sequences are converted to the indices of their nonzero elements.

    Returns
    -------
    out : tuple of ndarrays
        N arrays with N dimensions each, with N the number of input
        sequences. Together these arrays form an open mesh.

    Raises
    ------
    ValueError
        If any input sequence is not 1-dimensional.

    See Also
    --------
    ogrid, mgrid, meshgrid

    Examples
    --------
    >>> a = np.arange(10).reshape(2, 5)
    >>> ixgrid = np.ix_([0,1], [2,4])
    >>> ixgrid[0].shape, ixgrid[1].shape
    ((2, 1), (1, 2))
    >>> a[ixgrid]
    array([[2, 4],
           [7, 9]])
    """
    out = []
    nd = len(args)
    baseshape = [1]*nd
    for k in range(nd):
        new = _nx.asarray(args[k])
        if (new.ndim != 1):
            # Call-form raise is valid on both Python 2 and 3; the old
            # ``raise ValueError, "..."`` statement is Python-2-only syntax.
            raise ValueError("Cross index must be 1 dimensional")
        if issubclass(new.dtype.type, _nx.bool_):
            # Boolean masks select by the positions of their True entries.
            new = new.nonzero()[0]
        baseshape[k] = len(new)
        # Shape is 1 everywhere except along this sequence's own axis.
        new = new.reshape(tuple(baseshape))
        out.append(new)
        baseshape[k] = 1
    return tuple(out)
class nd_grid(object):
    """
    Construct a multi-dimensional "meshgrid".

    ``grid = nd_grid()`` creates an instance which will return a mesh-grid
    when indexed.  The dimension and number of the output arrays are equal
    to the number of indexing dimensions.  If the step length is not a
    complex number, then the stop is not inclusive.

    However, if the step length is a **complex number** (e.g. 5j), then the
    integer part of its magnitude is interpreted as specifying the
    number of points to create between the start and stop values, where
    the stop value **is inclusive**.

    If instantiated with an argument of ``sparse=True``, the mesh-grid is
    open (or not fleshed out) so that only one-dimension of each returned
    argument is greater than 1.

    Parameters
    ----------
    sparse : bool, optional
        Whether the grid is sparse or not. Default is False.

    Notes
    -----
    Two instances of `nd_grid` are made available in the NumPy namespace,
    `mgrid` and `ogrid`::

        mgrid = nd_grid(sparse=False)
        ogrid = nd_grid(sparse=True)

    Users should use these pre-defined instances instead of using `nd_grid`
    directly.

    Examples
    --------
    >>> mgrid = np.lib.index_tricks.nd_grid()
    >>> mgrid[0:5,0:5]
    array([[[0, 0, 0, 0, 0],
            [1, 1, 1, 1, 1],
            [2, 2, 2, 2, 2],
            [3, 3, 3, 3, 3],
            [4, 4, 4, 4, 4]],
           [[0, 1, 2, 3, 4],
            [0, 1, 2, 3, 4],
            [0, 1, 2, 3, 4],
            [0, 1, 2, 3, 4],
            [0, 1, 2, 3, 4]]])
    >>> mgrid[-1:1:5j]
    array([-1. , -0.5,  0. ,  0.5,  1. ])
    >>> ogrid = np.lib.index_tricks.nd_grid(sparse=True)
    >>> ogrid[0:5,0:5]
    [array([[0],
            [1],
            [2],
            [3],
            [4]]), array([[0, 1, 2, 3, 4]])]
    """
    def __init__(self, sparse=False):
        # sparse=True yields an open (ogrid-style) grid; False a dense one.
        self.sparse = sparse
    def __getitem__(self,key):
        try:
            # Tuple-of-slices path: build an N-D grid.
            size = []
            typ = int
            for k in range(len(key)):
                step = key[k].step
                start = key[k].start
                if start is None: start=0
                if step is None: step=1
                if isinstance(step, complex):
                    # Complex step: |step| is the number of points
                    # (stop inclusive), which always implies floats.
                    size.append(int(abs(step)))
                    typ = float
                else:
                    size.append(math.ceil((key[k].stop - start)/(step*1.0)))
                    if isinstance(step, float) or \
                        isinstance(start, float) or \
                        isinstance(key[k].stop, float):
                        typ = float
            if self.sparse:
                # One 1-D arange per axis.
                # NOTE(review): relies on Python 2's map() returning a
                # list; under Python 3 this would be a one-shot iterator.
                nn = map(lambda x,t: _nx.arange(x, dtype=t), size, \
                                     (typ,)*len(size))
            else:
                nn = _nx.indices(size, typ)
            for k in range(len(size)):
                # Rescale raw 0..n-1 indices into the requested start/step.
                step = key[k].step
                start = key[k].start
                if start is None: start=0
                if step is None: step=1
                if isinstance(step, complex):
                    step = int(abs(step))
                    if step != 1:
                        # linspace-style spacing with inclusive stop.
                        step = (key[k].stop - start)/float(step-1)
                nn[k] = (nn[k]*step+start)
            if self.sparse:
                # Insert length-1 axes so the per-axis arrays broadcast
                # against each other.
                slobj = [_nx.newaxis]*len(size)
                for k in range(len(size)):
                    slobj[k] = slice(None,None)
                    nn[k] = nn[k][slobj]
                    slobj[k] = _nx.newaxis
            return nn
        except (IndexError, TypeError):
            # Single-slice path: return a 1-D range.
            step = key.step
            stop = key.stop
            start = key.start
            if start is None: start = 0
            if isinstance(step, complex):
                # Complex step: |step| points, stop inclusive.
                step = abs(step)
                length = int(step)
                if step != 1:
                    step = (key.stop-start)/float(step-1)
                stop = key.stop+step
                return _nx.arange(0, length,1, float)*step + start
            else:
                return _nx.arange(start, stop, step)
    def __getslice__(self,i,j):
        # Python 2 only: g[i:j] with plain integers arrives here instead
        # of __getitem__.
        return _nx.arange(i,j)
    def __len__(self):
        return 0
# Pre-built grid instances: mgrid returns dense ("fleshed out") mesh-grids,
# ogrid returns open (sparse, broadcastable) ones.
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
mgrid.__doc__ = None # set in numpy.add_newdocs
ogrid.__doc__ = None # set in numpy.add_newdocs
class AxisConcatenator(object):
    """
    Translates slice objects to concatenation along an axis.

    For detailed documentation on usage, see `r_`.
    """
    def _retval(self, res):
        # Post-process the concatenated result: optionally convert to a
        # matrix (transposing a 1-D result for column output), then reset
        # the per-call state that __getitem__ may have mutated.
        if self.matrix:
            oldndim = res.ndim
            res = makemat(res)
            if oldndim == 1 and self.col:
                res = res.T
        self.axis = self._axis
        self.matrix = self._matrix
        self.col = 0
        return res
    def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
        # axis: concatenation axis; matrix: return np.matrix; ndmin:
        # minimum dimensionality forced onto each entry; trans1d: where
        # upgraded entries keep their original dimension (-1 = last).
        self._axis = axis
        self._matrix = matrix
        self.axis = axis
        self.matrix = matrix
        self.col = 0
        self.trans1d = trans1d
        self.ndmin = ndmin
    def __getitem__(self,key):
        trans1d = self.trans1d
        ndmin = self.ndmin
        if isinstance(key, str):
            # A bare string is matrix syntax (e.g. '1 2; 3 4'), evaluated
            # in the caller's namespace via bmat.
            frame = sys._getframe().f_back
            mymat = matrix.bmat(key,frame.f_globals,frame.f_locals)
            return mymat
        if type(key) is not tuple:
            key = (key,)
        objs = []        # arrays to concatenate
        scalars = []     # positions of scalar entries (for dtype fixup)
        arraytypes = []  # dtypes contributed by array entries
        scalartypes = [] # dtypes contributed by scalar entries
        for k in range(len(key)):
            scalar = False
            if type(key[k]) is slice:
                # Slice entries expand to arange (real step) or
                # linspace (imaginary step = number of points).
                step = key[k].step
                start = key[k].start
                stop = key[k].stop
                if start is None: start = 0
                if step is None:
                    step = 1
                if isinstance(step, complex):
                    size = int(abs(step))
                    newobj = function_base.linspace(start, stop, num=size)
                else:
                    newobj = _nx.arange(start, stop, step)
                if ndmin > 1:
                    newobj = array(newobj,copy=False,ndmin=ndmin)
                    if trans1d != -1:
                        newobj = newobj.swapaxes(-1,trans1d)
            elif isinstance(key[k],str):
                # String directives ('r', 'c', or 'axis[,ndmin[,trans1d]]')
                # are only legal as the first entry.
                if k != 0:
                    raise ValueError, "special directives must be the"\
                          "first entry."
                key0 = key[0]
                if key0 in 'rc':
                    self.matrix = True
                    self.col = (key0 == 'c')
                    continue
                if ',' in key0:
                    vec = key0.split(',')
                    try:
                        self.axis, ndmin = \
                                   [int(x) for x in vec[:2]]
                        if len(vec) == 3:
                            trans1d = int(vec[2])
                        continue
                    except:
                        raise ValueError, "unknown special directive"
                try:
                    self.axis = int(key[k])
                    continue
                except (ValueError, TypeError):
                    raise ValueError, "unknown special directive"
            elif type(key[k]) in ScalarType:
                newobj = array(key[k],ndmin=ndmin)
                scalars.append(k)
                scalar = True
                scalartypes.append(newobj.dtype)
            else:
                # Anything else is treated as array-like.
                newobj = key[k]
                if ndmin > 1:
                    tempobj = array(newobj, copy=False, subok=True)
                    newobj = array(newobj, copy=False, subok=True,
                                   ndmin=ndmin)
                    if trans1d != -1 and tempobj.ndim < ndmin:
                        # Move the added length-1 axes so the original
                        # dimensions end up at position trans1d.
                        k2 = ndmin-tempobj.ndim
                        if (trans1d < 0):
                            trans1d += k2 + 1
                        defaxes = range(ndmin)
                        k1 = trans1d
                        axes = defaxes[:k1] + defaxes[k2:] + \
                               defaxes[k1:k2]
                        newobj = newobj.transpose(axes)
                    del tempobj
            objs.append(newobj)
            if not scalar and isinstance(newobj, _nx.ndarray):
                arraytypes.append(newobj.dtype)
        # Ensure that scalars won't up-cast unless warranted
        final_dtype = find_common_type(arraytypes, scalartypes)
        if final_dtype is not None:
            for k in scalars:
                objs[k] = objs[k].astype(final_dtype)
        res = _nx.concatenate(tuple(objs),axis=self.axis)
        return self._retval(res)
    def __getslice__(self,i,j):
        # Python 2 only: obj[i:j] with plain ints bypasses __getitem__.
        res = _nx.arange(i,j)
        return self._retval(res)
    def __len__(self):
        return 0
# separate classes are used here instead of just making r_ = concatentor(0),
# etc. because otherwise we couldn't get the doc string to come out right
# in help(r_)
class RClass(AxisConcatenator):
    """
    Translates slice objects to concatenation along the first axis.

    This is a simple way to build up arrays quickly.  There are two use
    cases:

    1. If the index expression contains comma separated arrays, they are
       stacked along their first axis.
    2. If the index expression contains slice notation or scalars, a 1-D
       array is created.  ``start:stop:step`` behaves like
       ``np.arange(start, stop, step)``; an imaginary ``step`` (e.g.
       ``100j``) gives the number of points instead, with both ``start``
       and ``stop`` inclusive, like
       ``np.linspace(start, stop, step, endpoint=1)``.

    After expansion of slice notation, all comma separated sequences are
    concatenated together.

    Optional leading strings change the output.  ``'r'`` or ``'c'``
    produce matrix output (a 1 x N row or N x 1 column matrix for 1-D
    results; identical 2-D matrices otherwise).  A string integer picks
    the axis to stack along.  Two comma-separated integers select the
    axis and the minimum number of dimensions forced onto each entry; an
    optional third integer says which axis of the upgraded entries holds
    the start of the original arrays — by default ('-1') the 1's are
    placed at the front of the shape, while '0' places them at the end.

    Parameters
    ----------
    Not a function, so takes no parameters

    Returns
    -------
    A concatenated ndarray or matrix.

    See Also
    --------
    concatenate : Join a sequence of arrays together.
    c_ : Translates slice objects to concatenation along the second axis.

    Examples
    --------
    >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
    array([1, 2, 3, 0, 0, 4, 5, 6])
    >>> np.r_[-1:1:6j, [0]*3, 5, 6]
    array([-1. , -0.6, -0.2,  0.2,  0.6,  1. ,  0. ,  0. ,  0. ,  5. ,  6. ])
    >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> np.r_['1,2,0', [1,2,3], [4,5,6]]
    array([[1, 4],
           [2, 5],
           [3, 6]])
    >>> np.r_['r',[1,2,3], [4,5,6]]
    matrix([[1, 2, 3, 4, 5, 6]])
    """
    def __init__(self):
        # Identical to the base class with the axis pinned to 0.
        super(RClass, self).__init__(0)
r_ = RClass()
class CClass(AxisConcatenator):
    """
    Translates slice objects to concatenation along the second axis.

    Short-hand for ``np.r_['-1,2,0', index expression]``, which is useful
    because of its common occurrence: entries are upgraded to at least
    2-D with 1's post-pended to their shape (so 1-D arrays become column
    vectors) and then stacked along their last axis.

    For detailed documentation, see `r_`.

    Examples
    --------
    >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
    array([[1, 2, 3, 0, 0, 4, 5, 6]])
    """
    def __init__(self):
        # axis=-1 stacks along the last axis; ndmin=2 with trans1d=0
        # first turns 1-D inputs into column vectors.
        super(CClass, self).__init__(-1, ndmin=2, trans1d=0)
c_ = CClass()
class ndenumerate(object):
    """
    Multidimensional index iterator.

    Return an iterator yielding pairs of array coordinates and values.

    Parameters
    ----------
    a : ndarray
        Input array (anything `asarray` accepts).

    See Also
    --------
    ndindex, flatiter

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> for index, x in np.ndenumerate(a):
    ...     print index, x
    (0, 0) 1
    (0, 1) 2
    (1, 0) 3
    (1, 1) 4
    """
    def __init__(self, arr):
        # The flat iterator keeps track of its own N-D coordinates.
        self.iter = asarray(arr).flat

    def next(self):
        """
        Standard iterator method, returns the index tuple and array value.

        Returns
        -------
        coords : tuple of ints
            The indices of the current iteration.
        val : scalar
            The array element of the current iteration.
        """
        # Read coords before advancing.  The builtin next() dispatches to
        # .next() on Python 2 and .__next__() on Python 3, so this works
        # on either version (the former ``self.iter.next()`` was 2-only).
        return self.iter.coords, next(self.iter)

    # Expose the Python 3 iterator protocol as well; ``next`` is kept for
    # backward compatibility with existing Python 2 callers.
    __next__ = next

    def __iter__(self):
        return self
class ndindex(object):
    """
    An N-dimensional iterator object to index arrays.

    Given the shape of an array, an `ndindex` instance iterates over
    the N-dimensional index of the array. At each iteration a tuple
    of indices is returned, the last dimension is iterated over first.

    Parameters
    ----------
    `*args` : ints
        The size of each dimension of the array.  A single tuple
        argument is also accepted.

    See Also
    --------
    ndenumerate, flatiter

    Examples
    --------
    >>> for index in np.ndindex(3, 2, 1):
    ...     print index
    (0, 0, 0)
    (0, 1, 0)
    (1, 0, 0)
    (1, 1, 0)
    (2, 0, 0)
    (2, 1, 0)
    """
    def __init__(self, *args):
        # Accept ndindex((a, b, c)) as well as ndindex(a, b, c).
        if len(args) == 1 and isinstance(args[0], tuple):
            args = args[0]
        self.nd = len(args)      # number of dimensions
        self.ind = [0]*self.nd   # current (mutable) index
        self.index = 0           # how many tuples have been produced
        self.maxvals = args      # per-axis bounds
        tot = 1
        for k in range(self.nd):
            tot *= args[k]
        self.total = tot         # total number of index tuples

    def _incrementone(self, axis):
        # Add one to the index at `axis`, carrying leftwards when the
        # axis wraps around.
        if (axis < 0): # base case
            return
        if (self.ind[axis] < self.maxvals[axis]-1):
            self.ind[axis] += 1
        else:
            self.ind[axis] = 0
            self._incrementone(axis-1)

    def ndincr(self):
        """
        Increment the multi-dimensional index by one.

        `ndincr` takes care of the "wrapping around" of the axes.
        It is called by `ndindex.next` and not normally used directly.
        """
        self._incrementone(self.nd-1)

    def next(self):
        """
        Standard iterator method, updates the index and returns the index tuple.

        Returns
        -------
        val : tuple of ints
            Returns a tuple containing the indices of the current iteration.
        """
        if (self.index >= self.total):
            raise StopIteration
        val = tuple(self.ind)
        self.index += 1
        self.ndincr()
        return val

    # Expose the Python 3 iterator protocol as well; ``next`` is kept for
    # backward compatibility with existing Python 2 callers.
    __next__ = next

    def __iter__(self):
        return self
# You can do all this with slice() plus a few special objects,
# but there's a lot to remember. This version is simpler because
# it uses the standard array indexing syntax.
#
# Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-23
#
# Cosmetic changes by T. Oliphant 2001
#
#
class IndexExpression(object):
    """
    A nicer way to build up index tuples for arrays.

    .. note::
       Use one of the two predefined instances `index_exp` or `s_`
       rather than directly using `IndexExpression`.

    For any index combination, including slicing and axis insertion,
    ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any
    array `a`. However, ``np.index_exp[indices]`` can be used anywhere
    in Python code and returns a tuple of slice objects that can be
    used in the construction of complex index expressions.

    Parameters
    ----------
    maketuple : bool
        If True, always returns a tuple.

    See Also
    --------
    index_exp : Predefined instance that always returns a tuple:
       `index_exp = IndexExpression(maketuple=True)`.
    s_ : Predefined instance without tuple conversion:
       `s_ = IndexExpression(maketuple=False)`.

    Notes
    -----
    You can do all this with `slice()` plus a few special objects,
    but there's a lot to remember and this version is simpler because
    it uses the standard array indexing syntax.

    Examples
    --------
    >>> np.s_[2::2]
    slice(2, None, 2)
    >>> np.index_exp[2::2]
    (slice(2, None, 2),)
    >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]
    array([2, 4])
    """
    # sys.maxint was removed in Python 3; fall back to sys.maxsize there
    # so the class can be defined on either major version.
    maxint = getattr(sys, 'maxint', sys.maxsize)
    def __init__(self, maketuple):
        self.maketuple = maketuple
    def __getitem__(self, item):
        # Wrap single (non-tuple) items when tuple output was requested.
        if self.maketuple and type(item) != type(()):
            return (item,)
        else:
            return item
    def __len__(self):
        # A huge fake length makes omitted slice bounds appear as maxint
        # in __getslice__ (Python 2), where they are mapped back to None.
        return self.maxint
    def __getslice__(self, start, stop):
        # Python 2 only: translate basic obj[i:j] into __getitem__ form.
        if stop == self.maxint:
            stop = None
        return self[start:stop:None]
index_exp = IndexExpression(maketuple=True)
s_ = IndexExpression(maketuple=False)
# End contribution from Konrad.
# The following functions complement those in twodim_base, but are
# applicable to N-dimensions.
def fill_diagonal(a, val):
    """
    Fill the main diagonal of the given array of any dimensionality.

    For an array `a` with ``a.ndim > 2``, the diagonal is the list of
    locations with indices ``a[i, i, ..., i]`` all identical. This function
    modifies the input array in-place, it does not return a value.

    Parameters
    ----------
    a : array, at least 2-D.
        Array whose diagonal is to be filled, it gets modified in-place.
        For ``a.ndim > 2`` all dimensions must be of equal length.
    val : scalar
        Value to be written on the diagonal, its type must be compatible
        with that of the array a.

    Raises
    ------
    ValueError
        If `a` is less than 2-D, or if ``a.ndim > 2`` and its dimensions
        are not all of equal length.

    See also
    --------
    diag_indices, diag_indices_from

    Notes
    -----
    .. versionadded:: 1.4.0

    This functionality can be obtained via `diag_indices`, but internally
    this version uses a much faster implementation that never constructs the
    indices and uses simple slicing.

    Examples
    --------
    >>> a = zeros((3, 3), int)
    >>> fill_diagonal(a, 5)
    >>> a
    array([[5, 0, 0],
           [0, 5, 0],
           [0, 0, 5]])
    """
    if a.ndim < 2:
        raise ValueError("array must be at least 2-d")
    if a.ndim == 2:
        # Explicit, fast formula for the common case. For 2-d arrays, we
        # accept rectangular ones.
        step = a.shape[1] + 1
    else:
        # For more than d=2, the strided formula is only valid for arrays with
        # all dimensions equal, so we check first.  ``.all()`` replaces the
        # ``alltrue`` alias, which is deprecated and removed in NumPy 2.0.
        if not (diff(a.shape) == 0).all():
            raise ValueError("All dimensions of input must be of equal length")
        step = 1 + (cumprod(a.shape[:-1])).sum()
    # Write the value out into the diagonal via the flat view.
    a.flat[::step] = val
def diag_indices(n, ndim=2):
    """
    Return the indices to access the main diagonal of an array.

    The result is a tuple of ``ndim`` references to a single index array
    ``[0, 1, ..., n-1]``; indexing an ``(n, n, ..., n)`` array of `ndim`
    dimensions with it selects the elements ``a[i, i, ..., i]`` for
    ``i = 0 .. n-1``.

    Parameters
    ----------
    n : int
        The size, along each dimension, of the arrays for which the
        returned indices can be used.
    ndim : int, optional
        The number of dimensions (default 2).

    See also
    --------
    diag_indices_from

    Notes
    -----
    .. versionadded:: 1.4.0

    Examples
    --------
    >>> di = np.diag_indices(4)
    >>> di
    (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
    >>> a = np.arange(16).reshape(4, 4)
    >>> a[di] = 100
    >>> d3 = np.diag_indices(2, 3)
    >>> d3
    (array([0, 1]), array([0, 1]), array([0, 1]))
    """
    diagonal = arange(n)
    # Every axis is indexed by the same array object, shared ndim times.
    return (diagonal,) * ndim
def diag_indices_from(arr):
    """
    Return the indices to access the main diagonal of an n-dimensional array.

    See `diag_indices` for full details.

    Parameters
    ----------
    arr : array, at least 2-D
        All dimensions must be of equal length.

    Raises
    ------
    ValueError
        If `arr` is less than 2-D or its dimensions are not all equal.

    See Also
    --------
    diag_indices

    Notes
    -----
    .. versionadded:: 1.4.0
    """
    if arr.ndim < 2:
        raise ValueError("input array must be at least 2-d")
    # For more than d=2, the strided formula is only valid for arrays with
    # all dimensions equal, so we check first.  ``.all()`` replaces the
    # ``alltrue`` alias, which is deprecated and removed in NumPy 2.0.
    if not (diff(arr.shape) == 0).all():
        raise ValueError("All dimensions of input must be of equal length")
    return diag_indices(arr.shape[0], arr.ndim)
| {
"repo_name": "plaes/numpy",
"path": "numpy/lib/index_tricks.py",
"copies": "2",
"size": "26790",
"license": "bsd-3-clause",
"hash": -6568486314197036000,
"line_mean": 28.7666666667,
"line_max": 80,
"alpha_frac": 0.5323628219,
"autogenerated": false,
"ratio": 3.7515754096064975,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001863226413509201,
"num_lines": 900
} |
__all__ = ['unravel_index',
'mgrid',
'ogrid',
'r_', 'c_', 's_',
'index_exp', 'ix_',
'ndenumerate','ndindex']
import sys
import numpy.core.numeric as _nx
from numpy.core.numeric import asarray, ScalarType, array, dtype
from numpy.core.numerictypes import find_common_type
import math
import function_base
import numpy.core.defmatrix as matrix
makemat = matrix.matrix
# contributed by Stefan van der Walt
def unravel_index(x,dims):
    """
    Convert a flat index into an index tuple for an array of given shape.

    Parameters
    ----------
    x : int
        Flattened index.
    dims : shape tuple
        Input shape.

    Raises
    ------
    ValueError
        If `x` is negative or does not fit in an array of shape `dims`.

    Notes
    -----
    In the Examples section, since ``arr.flat[x] == arr.max()`` it may be
    easier to use flattened indexing than to re-map the index to a tuple.

    Examples
    --------
    >>> arr = np.arange(20).reshape(5, 4)
    >>> x = arr.argmax()
    >>> x
    19
    >>> idx = np.unravel_index(x, arr.shape)
    >>> idx
    (4, 3)
    >>> arr[idx] == arr.max()
    True
    """
    if x > _nx.prod(dims)-1 or x < 0:
        raise ValueError("Invalid index, must be 0 <= x <= number of elements.")
    # C-order strides of a shape (a,b,c,d) array:
    # reverse, drop the first element, prepend 1, cumprod, reverse again:
    # [a,b,c,d] -> [d,c,b] -> [1,d,c,b] -> [1,d,dc,dcb] -> [dcb,dc,d,1]
    dim_prod = _nx.cumprod([1] + list(dims)[:0:-1])[::-1]
    # Indices become [x//dcb % a, x//dc % b, x//d % c, x//1 % d].
    # Floor division keeps the result integral: plain ``/`` is true
    # division under Python 3 (or ``from __future__ import division``)
    # and would yield floats such as (4.75, 3.0) instead of (4, 3).
    return tuple(x // dim_prod % dims)
def ix_(*args):
    """ Construct an open mesh from multiple sequences.

    This function takes n 1-d sequences and returns n outputs with n
    dimensions each such that the shape is 1 in all but one dimension and
    the dimension with the non-unit shape value cycles through all n
    dimensions.

    Using ix_() one can quickly construct index arrays that will index
    the cross product.

    a[ix_([1,3,7],[2,5,8])] returns the array

    a[1,2]  a[1,5]  a[1,8]
    a[3,2]  a[3,5]  a[3,8]
    a[7,2]  a[7,5]  a[7,8]

    Boolean input sequences are converted to the indices of their nonzero
    elements.  Raises ValueError if any input is not 1-dimensional.
    """
    out = []
    nd = len(args)
    baseshape = [1]*nd
    for k in range(nd):
        new = _nx.asarray(args[k])
        if (new.ndim != 1):
            # Call-form raise is valid on both Python 2 and 3; the old
            # ``raise ValueError, "..."`` statement is Python-2-only syntax.
            raise ValueError("Cross index must be 1 dimensional")
        if issubclass(new.dtype.type, _nx.bool_):
            new = new.nonzero()[0]
        baseshape[k] = len(new)
        # Shape is 1 everywhere except along this sequence's own axis.
        new = new.reshape(tuple(baseshape))
        out.append(new)
        baseshape[k] = 1
    return tuple(out)
class nd_grid(object):
    """
    Construct a multi-dimensional "meshgrid".

    grid = nd_grid() creates an instance which will return a mesh-grid
    when indexed.  The dimension and number of the output arrays are equal
    to the number of indexing dimensions.  If the step length is not a
    complex number, then the stop is not inclusive.

    However, if the step length is a **complex number** (e.g. 5j), then the
    integer part of its magnitude is interpreted as specifying the
    number of points to create between the start and stop values, where
    the stop value **is inclusive**.

    If instantiated with an argument of sparse=True, the mesh-grid is
    open (or not fleshed out) so that only one-dimension of each returned
    argument is greater than 1

    Examples
    --------
    >>> mgrid = np.lib.index_tricks.nd_grid()
    >>> mgrid[0:5,0:5]
    array([[[0, 0, 0, 0, 0],
            [1, 1, 1, 1, 1],
            [2, 2, 2, 2, 2],
            [3, 3, 3, 3, 3],
            [4, 4, 4, 4, 4]],
    <BLANKLINE>
           [[0, 1, 2, 3, 4],
            [0, 1, 2, 3, 4],
            [0, 1, 2, 3, 4],
            [0, 1, 2, 3, 4],
            [0, 1, 2, 3, 4]]])
    >>> mgrid[-1:1:5j]
    array([-1. , -0.5,  0. ,  0.5,  1. ])
    >>> ogrid = np.lib.index_tricks.nd_grid(sparse=True)
    >>> ogrid[0:5,0:5]
    [array([[0],
            [1],
            [2],
            [3],
            [4]]), array([[0, 1, 2, 3, 4]])]
    """
    def __init__(self, sparse=False):
        # sparse=True yields an open (ogrid-style) grid; False a dense one.
        self.sparse = sparse
    def __getitem__(self,key):
        try:
            # Tuple-of-slices path: build an N-D grid.
            size = []
            typ = int
            for k in range(len(key)):
                step = key[k].step
                start = key[k].start
                if start is None: start=0
                if step is None: step=1
                if isinstance(step, complex):
                    # Complex step: |step| is the number of points
                    # (stop inclusive), which always implies floats.
                    size.append(int(abs(step)))
                    typ = float
                else:
                    size.append(math.ceil((key[k].stop - start)/(step*1.0)))
                    if isinstance(step, float) or \
                        isinstance(start, float) or \
                        isinstance(key[k].stop, float):
                        typ = float
            if self.sparse:
                # One 1-D arange per axis.
                # NOTE(review): relies on Python 2's map() returning a
                # list; under Python 3 this would be a one-shot iterator.
                nn = map(lambda x,t: _nx.arange(x, dtype=t), size, \
                                     (typ,)*len(size))
            else:
                nn = _nx.indices(size, typ)
            for k in range(len(size)):
                # Rescale raw 0..n-1 indices into the requested start/step.
                step = key[k].step
                start = key[k].start
                if start is None: start=0
                if step is None: step=1
                if isinstance(step, complex):
                    step = int(abs(step))
                    if step != 1:
                        # linspace-style spacing with inclusive stop.
                        step = (key[k].stop - start)/float(step-1)
                nn[k] = (nn[k]*step+start)
            if self.sparse:
                # Insert length-1 axes so the per-axis arrays broadcast
                # against each other.
                slobj = [_nx.newaxis]*len(size)
                for k in range(len(size)):
                    slobj[k] = slice(None,None)
                    nn[k] = nn[k][slobj]
                    slobj[k] = _nx.newaxis
            return nn
        except (IndexError, TypeError):
            # Single-slice path: return a 1-D range.
            step = key.step
            stop = key.stop
            start = key.start
            if start is None: start = 0
            if isinstance(step, complex):
                # Complex step: |step| points, stop inclusive.
                step = abs(step)
                length = int(step)
                if step != 1:
                    step = (key.stop-start)/float(step-1)
                stop = key.stop+step
                return _nx.arange(0, length,1, float)*step + start
            else:
                return _nx.arange(start, stop, step)
    def __getslice__(self,i,j):
        # Python 2 only: g[i:j] with plain integers arrives here instead
        # of __getitem__.
        return _nx.arange(i,j)
    def __len__(self):
        return 0
# Pre-built grid instances: mgrid returns dense ("fleshed out") mesh-grids,
# ogrid returns open (sparse, broadcastable) ones.
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
class AxisConcatenator(object):
    """Translates slice objects to concatenation along an axis.

    Base implementation behind ``r_`` and ``c_``: items passed inside
    ``[...]`` (slices, scalars, arrays, special string directives) are
    turned into arrays and concatenated along ``self.axis``.
    """

    def _retval(self, res):
        # Post-process the concatenated result and reset per-call state,
        # since r_/c_ are module-level singletons reused between calls.
        if self.matrix:
            oldndim = res.ndim
            res = makemat(res)
            if oldndim == 1 and self.col:
                res = res.T
        self.axis = self._axis
        self.matrix = self._matrix
        self.col = 0
        return res

    def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
        # Remember the defaults so _retval can restore them after each use.
        self._axis = axis
        self._matrix = matrix
        self.axis = axis
        self.matrix = matrix
        self.col = 0
        self.trans1d = trans1d
        self.ndmin = ndmin

    def __getitem__(self, key):
        trans1d = self.trans1d
        ndmin = self.ndmin
        if isinstance(key, str):
            # A bare string is matrix syntax, e.g. r_['1 2; 3 4']; evaluate
            # it in the caller's namespace.
            frame = sys._getframe().f_back
            mymat = matrix.bmat(key, frame.f_globals, frame.f_locals)
            return mymat
        if type(key) is not tuple:
            key = (key,)
        objs = []
        scalars = []
        arraytypes = []
        scalartypes = []
        for k in range(len(key)):
            scalar = False
            if type(key[k]) is slice:
                step = key[k].step
                start = key[k].start
                stop = key[k].stop
                if start is None:
                    start = 0
                if step is None:
                    step = 1
                if isinstance(step, complex):
                    # Complex step: |step| equally spaced points incl. stop.
                    size = int(abs(step))
                    newobj = function_base.linspace(start, stop, num=size)
                else:
                    newobj = _nx.arange(start, stop, step)
                if ndmin > 1:
                    newobj = array(newobj, copy=False, ndmin=ndmin)
                    if trans1d != -1:
                        newobj = newobj.swapaxes(-1, trans1d)
            elif isinstance(key[k], str):
                if k != 0:
                    # Fixed: original concatenated "the"+"first" without a
                    # space, yielding "must be thefirst entry.".
                    raise ValueError("special directives must be the "
                                     "first entry.")
                key0 = key[0]
                if key0 in 'rc':
                    # 'r'/'c': return a matrix ('c' means column matrix).
                    self.matrix = True
                    self.col = (key0 == 'c')
                    continue
                if ',' in key0:
                    # 'axis,ndmin[,trans1d]' directive.
                    vec = key0.split(',')
                    try:
                        self.axis, ndmin = [int(x) for x in vec[:2]]
                        if len(vec) == 3:
                            trans1d = int(vec[2])
                        continue
                    # Narrowed from a bare except: only conversion/unpack
                    # failures mean a malformed directive.
                    except (ValueError, TypeError):
                        raise ValueError("unknown special directive")
                try:
                    self.axis = int(key[k])
                    continue
                except (ValueError, TypeError):
                    raise ValueError("unknown special directive")
            elif type(key[k]) in ScalarType:
                newobj = array(key[k], ndmin=ndmin)
                scalars.append(k)
                scalar = True
                scalartypes.append(newobj.dtype)
            else:
                newobj = key[k]
                if ndmin > 1:
                    tempobj = array(newobj, copy=False, subok=True)
                    newobj = array(newobj, copy=False, subok=True,
                                   ndmin=ndmin)
                    if trans1d != -1 and tempobj.ndim < ndmin:
                        k2 = ndmin - tempobj.ndim
                        if (trans1d < 0):
                            trans1d += k2 + 1
                        # list() so the slices below work on Python 3,
                        # where range() is not concatenable.
                        defaxes = list(range(ndmin))
                        k1 = trans1d
                        axes = defaxes[:k1] + defaxes[k2:] + \
                            defaxes[k1:k2]
                        newobj = newobj.transpose(axes)
                    del tempobj
            objs.append(newobj)
            if not scalar and isinstance(newobj, _nx.ndarray):
                arraytypes.append(newobj.dtype)
        # Ensure that scalars won't up-cast unless warranted
        final_dtype = find_common_type(arraytypes, scalartypes)
        if final_dtype is not None:
            for k in scalars:
                objs[k] = objs[k].astype(final_dtype)
        res = _nx.concatenate(tuple(objs), axis=self.axis)
        return self._retval(res)

    def __getslice__(self, i, j):
        # Python 2 slice-protocol fallback for plain obj[i:j].
        res = _nx.arange(i, j)
        return self._retval(res)

    def __len__(self):
        return 0
# separate classes are used here instead of just making r_ = concatenator(0),
# etc. because otherwise we couldn't get the doc string to come out right
# in help(r_)
class RClass(AxisConcatenator):
    """Translates slice objects to concatenation along the first axis.

    For example:
    >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
    array([1, 2, 3, 0, 0, 4, 5, 6])
    """

    def __init__(self):
        # Concatenate along axis 0; all other options keep their defaults.
        super(RClass, self).__init__(axis=0)
# Module-level singleton exposed as np.r_ (first-axis concatenation helper).
r_ = RClass()
class CClass(AxisConcatenator):
    """Translates slice objects to concatenation along the second axis.

    For example:
    >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
    array([1, 2, 3, 0, 0, 4, 5, 6])
    """

    def __init__(self):
        # Last axis, inputs promoted to at least 2-d, 1-d inputs become
        # columns (trans1d=0).
        super(CClass, self).__init__(axis=-1, ndmin=2, trans1d=0)
# Module-level singleton exposed as np.c_ (column/last-axis concatenation).
c_ = CClass()
class ndenumerate(object):
    """
    Multidimensional index iterator.

    Return an iterator yielding pairs of array coordinates and values.

    Parameters
    ----------
    arr : array_like
        Input array.

    Examples
    --------
    >>> a = np.array([[1,2],[3,4]])
    >>> for index, x in np.ndenumerate(a):
    ...     print(index, x)
    (0, 0) 1
    (0, 1) 2
    (1, 0) 3
    (1, 1) 4
    """

    def __init__(self, arr):
        # The flat iterator tracks multidimensional position via .coords.
        self.iter = asarray(arr).flat

    def next(self):
        """Return the next (coordinates, value) pair."""
        # Builtin next() instead of py2-only .next(); left-to-right
        # evaluation reads .coords before the iterator advances.
        return self.iter.coords, next(self.iter)

    # Python 3 iterator protocol; keeps .next() for Python 2 callers.
    __next__ = next

    def __iter__(self):
        return self
class ndindex(object):
    """Pass in a sequence of integers corresponding
    to the number of dimensions in the counter. This iterator
    will then return an N-dimensional counter.

    Example:
    >>> for index in np.ndindex(3,2,1):
    ...     print(index)
    (0, 0, 0)
    (0, 1, 0)
    (1, 0, 0)
    (1, 1, 0)
    (2, 0, 0)
    (2, 1, 0)
    """

    def __init__(self, *args):
        # Accept both ndindex(3, 2, 1) and ndindex((3, 2, 1)).
        if len(args) == 1 and isinstance(args[0], tuple):
            args = args[0]
        self.nd = len(args)
        self.ind = [0] * self.nd       # current counter value per axis
        self.index = 0                 # how many tuples yielded so far
        self.maxvals = args
        tot = 1
        for k in range(self.nd):
            tot *= args[k]
        self.total = tot               # total number of index tuples

    def _incrementone(self, axis):
        """Odometer-style increment with carry, starting at `axis`."""
        if (axis < 0):  # base case
            return
        if (self.ind[axis] < self.maxvals[axis] - 1):
            self.ind[axis] += 1
        else:
            self.ind[axis] = 0
            self._incrementone(axis - 1)

    def ndincr(self):
        """Increment the counter by one (last axis varies fastest)."""
        self._incrementone(self.nd - 1)

    def next(self):
        """Return the current index tuple and advance the counter."""
        if (self.index >= self.total):
            raise StopIteration
        val = tuple(self.ind)
        self.index += 1
        self.ndincr()
        return val

    # Python 3 iterator protocol; keeps .next() for Python 2 callers.
    __next__ = next

    def __iter__(self):
        return self
# You can do all this with slice() plus a few special objects,
# but there's a lot to remember. This version is simpler because
# it uses the standard array indexing syntax.
#
# Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-23
#
# Cosmetic changes by T. Oliphant 2001
#
#
class IndexExpression(object):
    """
    A nicer way to build up index tuples for arrays.

    For any index combination, including slicing and axis insertion,
    'a[indices]' is the same as 'a[index_exp[indices]]' for any
    array 'a'. However, 'index_exp[indices]' can be used anywhere
    in Python code and returns a tuple of slice objects that can be
    used in the construction of complex index expressions.
    """

    # sys.maxint only exists on Python 2; fall back to maxsize on Python 3.
    maxint = getattr(sys, 'maxint', sys.maxsize)

    def __init__(self, maketuple):
        # When True, wrap non-tuple items into a 1-tuple (index_exp);
        # when False, return the raw index expression (s_).
        self.maketuple = maketuple

    def __getitem__(self, item):
        if self.maketuple and type(item) != type(()):
            return (item,)
        else:
            return item

    def __len__(self):
        return self.maxint

    def __getslice__(self, start, stop):
        # Python 2 only: a bare a[i:j] arrives here with an omitted stop
        # clipped to maxint; translate that back to None.
        if stop == self.maxint:
            stop = None
        return self[start:stop:None]
# index_exp always returns a tuple of index objects; s_ returns the raw
# slice/index expression unchanged.
index_exp = IndexExpression(maketuple=True)
s_ = IndexExpression(maketuple=False)
# End contribution from Konrad.
| {
"repo_name": "houseind/robothon",
"path": "GlyphProofer/dist/GlyphProofer.app/Contents/Resources/lib/python2.6/numpy/lib/index_tricks.py",
"copies": "1",
"size": "14839",
"license": "mit",
"hash": 5153041969742579000,
"line_mean": 29.0993914807,
"line_max": 80,
"alpha_frac": 0.4986858953,
"autogenerated": false,
"ratio": 3.7023453093812377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47010312046812375,
"avg_score": null,
"num_lines": null
} |
__all__ = ['unsigned_volume','signed_volume']
from scipy import sqrt,inner,shape,asarray
from scipy.misc import factorial
from scipy.linalg import det
def unsigned_volume(pts):
    """Unsigned volume of a simplex.

    The M-simplex is described by the rows of ``pts``: an array with
    shape (M+1, N) holding the coordinates of its M+1 vertices embedded
    in N-dimensional space.  Zero-dimensional simplices (points) are
    assigned unit volume.

    Examples
    --------
    >>> unsigned_volume( [[0,0]] )
    1.0
    >>> unsigned_volume( [[0,0],[1,0]] )
    1.0
    >>> unsigned_volume( [[0,0,0],[0,1,0],[1,0,0]] )
    0.5

    References
    ----------
    [1] http://www.math.niu.edu/~rusin/known-math/97/volumes.polyh
    """
    pts = asarray(pts)
    num_vertices, N = pts.shape
    M = num_vertices - 1
    if not 0 <= M <= N:
        raise ValueError('array has invalid shape')
    if M == 0:
        # A single point: unit volume by convention.
        return 1.0
    edges = pts[1:] - pts[0]
    # Gram-determinant formula: vol = sqrt(det(E E^T)) / M!
    return sqrt(det(inner(edges, edges))) / factorial(M)
def signed_volume(pts):
    """Signed volume of a simplex.

    The M-simplex is described by the rows of ``pts``: an array with
    shape (M+1, M) holding the coordinates of its M+1 vertices embedded
    in M-dimensional space.  The sign reflects vertex orientation.

    Examples
    --------
    >>> signed_volume( [[0],[1]] )
    1.0
    >>> signed_volume( [[0,0],[1,0],[0,1]] )
    0.5

    References
    ----------
    [1] http://www.math.niu.edu/~rusin/known-math/97/volumes.polyh
    """
    pts = asarray(pts)
    num_vertices, N = pts.shape
    M = num_vertices - 1
    if M != N:
        raise ValueError('array has invalid shape')
    edges = pts[1:] - pts[0]
    # Orientation-aware volume: det(E) / M!
    return det(edges) / factorial(M)
| {
"repo_name": "DongliangGao/pydec",
"path": "pydec/math/volume.py",
"copies": "6",
"size": "2450",
"license": "bsd-3-clause",
"hash": -7365131900998904000,
"line_mean": 21.4770642202,
"line_max": 75,
"alpha_frac": 0.5326530612,
"autogenerated": false,
"ratio": 3.555878084179971,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02943839665502486,
"num_lines": 109
} |
__all__ = ('UpdatedPermissionChecksMixin',)
class UpdatedPermissionChecksMixin(object):
    """
    Updated permission checks mixin.

    Expects the host view to provide ``get_permissions()`` and
    ``permission_denied(request, message=...)`` (DRF view API).
    """

    @staticmethod
    def _denial_message(permission, has_perm):
        """Pick the message for a denied check.

        Prefer a ``message`` attached to the check's result object,
        falling back to the permission instance's ``message`` (or None).
        """
        if hasattr(has_perm, 'message') and has_perm.message:
            return has_perm.message
        return getattr(permission, 'message', None)

    def check_permissions(self, request):
        """
        Check if the request should be permitted.
        Raises an appropriate exception if the request is not permitted.
        """
        for permission in self.get_permissions():
            has_perm = permission.has_permission(request, self)
            if not has_perm:
                self.permission_denied(
                    request,
                    message=self._denial_message(permission, has_perm)
                )

    def check_object_permissions(self, request, obj):
        """
        Check if the request should be permitted for a given object.
        Raises an appropriate exception if the request is not permitted.
        """
        for permission in self.get_permissions():
            has_perm = permission.has_object_permission(request, self, obj)
            if not has_perm:
                self.permission_denied(
                    request,
                    message=self._denial_message(permission, has_perm)
                )
| {
"repo_name": "barseghyanartur/rest_condition",
"path": "rest_condition/mixins.py",
"copies": "1",
"size": "1420",
"license": "mit",
"hash": 8450159942854077000,
"line_mean": 39.5714285714,
"line_max": 75,
"alpha_frac": 0.5683098592,
"autogenerated": false,
"ratio": 4.982456140350878,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6050765999550878,
"avg_score": null,
"num_lines": null
} |
"""All uploaded files are directly send back to the client."""
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request
from werkzeug.wrappers import Response
from werkzeug.wsgi import wrap_file
def view_file(req):
    """Echo the uploaded file straight back to the client."""
    uploaded = req.files.get("uploaded_file")
    if uploaded is None:
        return Response("no file uploaded")
    # direct_passthrough lets the wrapped file object stream unmodified.
    return Response(
        wrap_file(req.environ, uploaded),
        mimetype=uploaded.content_type,
        direct_passthrough=True,
    )
def upload_file(req):
    # Render a minimal HTML form that POSTs a multipart file upload
    # back to this same URL.
    return Response(
        """<h1>Upload File</h1>
<form action="" method="post" enctype="multipart/form-data">
<input type="file" name="uploaded_file">
<input type="submit" value="Upload">
</form>""",
        mimetype="text/html",
    )
def application(environ, start_response):
    """WSGI entry point: POST echoes the upload, anything else shows the form."""
    request = Request(environ)
    handler = view_file if request.method == "POST" else upload_file
    response = handler(request)
    return response(environ, start_response)
# Serve the demo app with werkzeug's development server when run directly.
if __name__ == "__main__":
    run_simple("localhost", 5000, application, use_debugger=True)
| {
"repo_name": "pallets/werkzeug",
"path": "examples/upload.py",
"copies": "1",
"size": "1081",
"license": "bsd-3-clause",
"hash": 807087454680502400,
"line_mean": 27.4473684211,
"line_max": 83,
"alpha_frac": 0.6419981499,
"autogenerated": false,
"ratio": 3.8469750889679717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49889732388679714,
"avg_score": null,
"num_lines": null
} |
__all__ = ('UrlDispatcher', 'UrlMappingMatchInfo',
'Route', 'PlainRoute', 'DynamicRoute', 'StaticRoute')
import abc
import asyncio
import collections
import mimetypes
import re
import os
import inspect
from urllib.parse import urlencode
from . import hdrs
from .abc import AbstractRouter, AbstractMatchInfo
from .protocol import HttpVersion11
from .web_exceptions import HTTPMethodNotAllowed, HTTPNotFound
from aio2py.required.aiohttp.web_reqrep import StreamResponse
class UrlMappingMatchInfo(dict, AbstractMatchInfo):
    """Dict of extracted path variables plus the route that produced them."""

    def __init__(self, match_dict, route):
        super().__init__(match_dict)
        self._route = route

    @property
    def route(self):
        """Route instance that matched the request."""
        return self._route

    @property
    def handler(self):
        """Handler callable taken from the matched route."""
        return self._route.handler

    def __repr__(self):
        mapping = super().__repr__()
        return "<MatchInfo {}: {}>".format(mapping, self._route)
@asyncio.coroutine
def _defaultExpectHandler(request):
    """Default handler for the Expect: 100-continue request header."""
    # 100-continue is defined for HTTP/1.1 only; older versions get nothing.
    if request.version == HttpVersion11:
        request.transport.write(b"HTTP/1.1 100 Continue\r\n\r\n")
class Route(metaclass=abc.ABCMeta):
    """Abstract base for one routing-table entry.

    Holds the HTTP method, the handler and an optional name, plus a
    coroutine used to answer 'Expect: 100-continue' headers.
    """

    def __init__(self, method, handler, name, *, expect_handler=None):
        if expect_handler is None:
            expect_handler = _defaultExpectHandler
        # The expect handler is awaited later, so it must be a coroutine.
        assert asyncio.iscoroutinefunction(expect_handler), \
            'Coroutine is expected, got {!r}'.format(expect_handler)
        self._method = method
        self._handler = handler
        self._name = name
        self._expect_handler = expect_handler

    @property
    def method(self):
        # HTTP method this route answers to (e.g. 'GET').
        return self._method

    @property
    def handler(self):
        # Request handler callable.
        return self._handler

    @property
    def name(self):
        # Optional route name used for reverse lookup in the dispatcher.
        return self._name

    @abc.abstractmethod  # pragma: no branch
    def match(self, path):
        """Return dict with info for given path or
        None if route cannot process path."""

    @abc.abstractmethod  # pragma: no branch
    def url(self, **kwargs):
        """Construct url for route with additional params."""

    @asyncio.coroutine
    def handle_expect_header(self, request):
        # Delegate 'Expect: 100-continue' processing to the handler.
        return (yield from self._expect_handler(request))

    @staticmethod
    def _append_query(url, query):
        # Append an urlencoded query string when one is supplied.
        if query is not None:
            return url + "?" + urlencode(query)
        else:
            return url
class PlainRoute(Route):
    """Route matched by exact string comparison of the URL path."""

    def __init__(self, method, handler, name, path, *, expect_handler=None):
        super().__init__(method, handler, name, expect_handler=expect_handler)
        self._path = path

    def match(self, path):
        # Exact comparison beats regexp matching by about a factor of ten.
        return {} if path == self._path else None

    def url(self, *, query=None):
        return self._append_query(self._path, query)

    def __repr__(self):
        name = "" if self.name is None else "'" + self.name + "' "
        return "<PlainRoute {name}[{method}] {path} -> {handler!r}".format(
            name=name, method=self.method, path=self._path,
            handler=self.handler)
class DynamicRoute(Route):
    """Route matched against a compiled regex with named groups."""

    def __init__(self, method, handler, name, pattern, formatter, *,
                 expect_handler=None):
        super().__init__(method, handler, name, expect_handler=expect_handler)
        self._pattern = pattern
        self._formatter = formatter

    def match(self, path):
        found = self._pattern.match(path)
        return None if found is None else found.groupdict()

    def url(self, *, parts, query=None):
        # The formatter is the path template with {var} placeholders.
        return self._append_query(self._formatter.format_map(parts), query)

    def __repr__(self):
        name = "" if self.name is None else "'" + self.name + "' "
        return ("<DynamicRoute {name}[{method}] {formatter} -> {handler!r}"
                .format(name=name, method=self.method,
                        formatter=self._formatter, handler=self.handler))
class StaticRoute(Route):
    """GET route serving files from a directory under a URL prefix."""

    def __init__(self, name, prefix, directory, *,
                 expect_handler=None, chunk_size=256*1024):
        assert prefix.startswith('/'), prefix
        assert prefix.endswith('/'), prefix
        super().__init__(
            'GET', self.handle, name, expect_handler=expect_handler)
        self._prefix = prefix
        self._prefix_len = len(self._prefix)
        # Trailing os.sep makes the containment check in handle() safe.
        self._directory = os.path.abspath(directory) + os.sep
        self._chunk_size = chunk_size
        if not os.path.isdir(self._directory):
            raise ValueError(
                "No directory exists at '{}'".format(self._directory))

    def match(self, path):
        # Everything after the prefix is treated as the file name.
        if not path.startswith(self._prefix):
            return None
        return {'filename': path[self._prefix_len:]}

    def url(self, *, filename, query=None):
        # Strip leading slashes so the name appends cleanly to the prefix.
        while filename.startswith('/'):
            filename = filename[1:]
        url = self._prefix + filename
        return self._append_query(url, query)

    @asyncio.coroutine
    def handle(self, request):
        """Stream the requested file, or raise HTTPNotFound."""
        resp = StreamResponse()
        filename = request.match_info['filename']
        filepath = os.path.abspath(os.path.join(self._directory, filename))
        # Reject path-traversal attempts that escape the served directory.
        if not filepath.startswith(self._directory):
            raise HTTPNotFound()
        if not os.path.exists(filepath) or not os.path.isfile(filepath):
            raise HTTPNotFound()
        ct, encoding = mimetypes.guess_type(filepath)
        if not ct:
            ct = 'application/octet-stream'
        resp.content_type = ct
        if encoding:
            resp.headers['content-encoding'] = encoding
        file_size = os.stat(filepath).st_size
        single_chunk = file_size < self._chunk_size
        if single_chunk:
            # Small payload: advertise Content-Length up front.
            resp.content_length = file_size
        resp.start(request)
        with open(filepath, 'rb') as f:
            chunk = f.read(self._chunk_size)
            if single_chunk:
                resp.write(chunk)
            else:
                while chunk:
                    resp.write(chunk)
                    chunk = f.read(self._chunk_size)
        return resp

    def __repr__(self):
        name = "'" + self.name + "' " if self.name is not None else ""
        return "<StaticRoute {name}[{method}] {path} -> {directory!r}".format(
            name=name, method=self.method, path=self._prefix,
            directory=self._directory)
class SystemRoute(Route):
    """Synthetic route backing dispatcher-generated error responses."""

    def __init__(self, status, reason):
        super().__init__(hdrs.METH_ANY, None, None)
        self._status = status
        self._reason = reason

    @property
    def status(self):
        return self._status

    @property
    def reason(self):
        return self._reason

    def match(self, path):
        # A system route never matches a real request path.
        return None

    def url(self, **kwargs):
        raise RuntimeError(".url() is not allowed for SystemRoute")

    def __repr__(self):
        return "<SystemRoute {status}: {reason}>".format(
            status=self._status, reason=self._reason)
class _NotFoundMatchInfo(UrlMappingMatchInfo):
    """Match info returned when no route matches the request path."""

    route = SystemRoute(404, 'Not Found')

    def __init__(self):
        # Empty match dict, no real route behind it.
        super().__init__({}, None)

    @property
    def handler(self):
        return self._not_found

    @asyncio.coroutine
    def _not_found(self, request):
        raise HTTPNotFound()

    def __repr__(self):
        return "<MatchInfo: not found>"
class _MethodNotAllowedMatchInfo(UrlMappingMatchInfo):
    """Match info for a path that matched only with other HTTP methods."""

    route = SystemRoute(405, 'Method Not Allowed')

    def __init__(self, method, allowed_methods):
        super().__init__({}, None)
        self._method = method
        self._allowed_methods = allowed_methods

    @property
    def handler(self):
        return self._not_allowed

    @asyncio.coroutine
    def _not_allowed(self, request):
        raise HTTPMethodNotAllowed(self._method, self._allowed_methods)

    def __repr__(self):
        allowed = ', '.join(sorted(self._allowed_methods))
        return ("<MatchInfo: method {} is not allowed (allowed methods: {}>"
                .format(self._method, allowed))
class UrlDispatcher(AbstractRouter, collections.abc.Mapping):
    """Router mapping request paths onto registered routes.

    Also acts as a read-only mapping from route name to route.
    """

    # Bare dynamic path part: {var}
    DYN = re.compile(r'^\{(?P<var>[a-zA-Z][_a-zA-Z0-9]*)\}$')
    # Dynamic part with an inline regex: {var:regex}
    DYN_WITH_RE = re.compile(
        r'^\{(?P<var>[a-zA-Z][_a-zA-Z0-9]*):(?P<re>.+)\}$')
    # Default pattern for a bare dynamic part (no '/', no braces).
    GOOD = r'[^{}/]+'
    # Splits a templated path into literal and {...} pieces.
    ROUTE_RE = re.compile(r'(\{[_a-zA-Z][^{}]*(?:\{[^{}]*\}[^{}]*)*\})')
    METHODS = {hdrs.METH_ANY, hdrs.METH_POST,
               hdrs.METH_GET, hdrs.METH_PUT, hdrs.METH_DELETE,
               hdrs.METH_PATCH, hdrs.METH_HEAD, hdrs.METH_OPTIONS}

    def __init__(self):
        super().__init__()
        self._urls = []    # every route, in registration order
        self._routes = {}  # name -> route, named routes only

    @asyncio.coroutine
    def resolve(self, request):
        """Find the route for *request* and return a match-info object."""
        path = request.path
        method = request.method
        allowed_methods = set()
        for route in self._urls:
            match_dict = route.match(path)
            if match_dict is None:
                continue
            route_method = route.method
            if route_method == method or route_method == hdrs.METH_ANY:
                return UrlMappingMatchInfo(match_dict, route)
            # Path matched but the method did not: remember it for a 405.
            allowed_methods.add(route_method)
        else:
            if allowed_methods:
                return _MethodNotAllowedMatchInfo(method, allowed_methods)
            else:
                return _NotFoundMatchInfo()

    def __iter__(self):
        return iter(self._routes)

    def __len__(self):
        return len(self._routes)

    def __contains__(self, name):
        return name in self._routes

    def __getitem__(self, name):
        return self._routes[name]

    def register_route(self, route):
        """Add a route; named routes must have unique names."""
        assert isinstance(route, Route), 'Instance of Route class is required.'
        name = route.name
        if name is not None:
            if name in self._routes:
                raise ValueError('Duplicate {!r}, '
                                 'already handled by {!r}'
                                 .format(name, self._routes[name]))
            else:
                self._routes[name] = route
        self._urls.append(route)

    def add_route(self, method, path, handler,
                  *, name=None, expect_handler=None):
        """Register *handler*: plain paths get a PlainRoute,
        templated paths ({var} / {var:regex}) a DynamicRoute."""
        assert callable(handler), handler
        # Wrap plain callables so every handler can be awaited uniformly.
        if (not asyncio.iscoroutinefunction(handler) and
                not inspect.isgeneratorfunction(handler)):
            handler = asyncio.coroutine(handler)
        method = method.upper()
        assert method in self.METHODS, method
        if not ('{' in path or '}' in path or self.ROUTE_RE.search(path)):
            # No template markers at all: exact-match route.
            route = PlainRoute(
                method, handler, name, path, expect_handler=expect_handler)
            self.register_route(route)
            return route
        pattern = ''
        formatter = ''
        for part in self.ROUTE_RE.split(path):
            match = self.DYN.match(part)
            if match:
                pattern += '(?P<{}>{})'.format(match.group('var'), self.GOOD)
                formatter += '{' + match.group('var') + '}'
                continue
            match = self.DYN_WITH_RE.match(part)
            if match:
                pattern += '(?P<{var}>{re})'.format(**match.groupdict())
                formatter += '{' + match.group('var') + '}'
                continue
            if '{' in part or '}' in part:
                # Braces that survived the split are malformed markers.
                raise ValueError("Invalid path '{}'['{}']".format(path, part))
            formatter += part
            pattern += re.escape(part)
        try:
            compiled = re.compile('^' + pattern + '$')
        except re.error as exc:
            raise ValueError(
                "Bad pattern '{}': {}".format(pattern, exc)) from None
        route = DynamicRoute(
            method, handler, name, compiled,
            formatter, expect_handler=expect_handler)
        self.register_route(route)
        return route

    def add_static(self, prefix, path, *, name=None, expect_handler=None,
                   chunk_size=256*1024):
        """
        Adds static files view
        :param prefix - url prefix
        :param path - folder with files
        """
        assert prefix.startswith('/')
        if not prefix.endswith('/'):
            prefix += '/'
        route = StaticRoute(name, prefix, path,
                            expect_handler=expect_handler,
                            chunk_size=chunk_size)
        self.register_route(route)
        return route
| {
"repo_name": "lfblogs/aio2py",
"path": "aio2py/required/aiohttp/web_urldispatcher.py",
"copies": "1",
"size": "12462",
"license": "apache-2.0",
"hash": -730965548044607500,
"line_mean": 29.7703703704,
"line_max": 79,
"alpha_frac": 0.5656395442,
"autogenerated": false,
"ratio": 4.1875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 405
} |
__all__ = ('UrlDispatcher', 'UrlMappingMatchInfo',
'Route', 'PlainRoute', 'DynamicRoute', 'StaticRoute')
import abc
import asyncio
import collections
import mimetypes
import re
import os
import inspect
from urllib.parse import urlencode, unquote
from . import hdrs
from .abc import AbstractRouter, AbstractMatchInfo
from .protocol import HttpVersion11
from .web_exceptions import HTTPMethodNotAllowed, HTTPNotFound, HTTPNotModified
from .web_reqrep import StreamResponse
from .multidict import upstr
class UrlMappingMatchInfo(dict, AbstractMatchInfo):
    """Dict of extracted path variables plus the route that produced them."""

    def __init__(self, match_dict, route):
        super().__init__(match_dict)
        self._route = route

    @property
    def route(self):
        """Route instance that matched the request."""
        return self._route

    @property
    def handler(self):
        """Handler callable taken from the matched route."""
        return self._route.handler

    def __repr__(self):
        mapping = super().__repr__()
        return "<MatchInfo {}: {}>".format(mapping, self._route)
@asyncio.coroutine
def _defaultExpectHandler(request):
    """Default handler for the Expect: 100-continue request header."""
    # 100-continue is defined for HTTP/1.1 only; older versions get nothing.
    if request.version == HttpVersion11:
        request.transport.write(b"HTTP/1.1 100 Continue\r\n\r\n")
class Route(metaclass=abc.ABCMeta):
    """Abstract base for one routing-table entry.

    Holds the HTTP method, the handler and an optional name, plus a
    coroutine used to answer 'Expect: 100-continue' headers.
    """

    def __init__(self, method, handler, name, *, expect_handler=None):
        if expect_handler is None:
            expect_handler = _defaultExpectHandler
        # The expect handler is awaited later, so it must be a coroutine.
        assert asyncio.iscoroutinefunction(expect_handler), \
            'Coroutine is expected, got {!r}'.format(expect_handler)
        self._method = method
        self._handler = handler
        self._name = name
        self._expect_handler = expect_handler

    @property
    def method(self):
        # HTTP method this route answers to (e.g. 'GET').
        return self._method

    @property
    def handler(self):
        # Request handler callable.
        return self._handler

    @property
    def name(self):
        # Optional route name used for reverse lookup in the dispatcher.
        return self._name

    @abc.abstractmethod  # pragma: no branch
    def match(self, path):
        """Return dict with info for given path or
        None if route cannot process path."""

    @abc.abstractmethod  # pragma: no branch
    def url(self, **kwargs):
        """Construct url for route with additional params."""

    @asyncio.coroutine
    def handle_expect_header(self, request):
        # Delegate 'Expect: 100-continue' processing to the handler.
        return (yield from self._expect_handler(request))

    @staticmethod
    def _append_query(url, query):
        # Append an urlencoded query string when one is supplied.
        if query is not None:
            return url + "?" + urlencode(query)
        else:
            return url
class PlainRoute(Route):
    """Route matched by exact string comparison of the URL path."""

    def __init__(self, method, handler, name, path, *, expect_handler=None):
        super().__init__(method, handler, name, expect_handler=expect_handler)
        self._path = path

    def match(self, path):
        # Exact comparison beats regexp matching by about a factor of ten.
        return {} if path == self._path else None

    def url(self, *, query=None):
        return self._append_query(self._path, query)

    def __repr__(self):
        name = "" if self.name is None else "'" + self.name + "' "
        return "<PlainRoute {name}[{method}] {path} -> {handler!r}".format(
            name=name, method=self.method, path=self._path,
            handler=self.handler)
class DynamicRoute(Route):
    """Route matched against a compiled regex with named groups."""

    def __init__(self, method, handler, name, pattern, formatter, *,
                 expect_handler=None):
        super().__init__(method, handler, name, expect_handler=expect_handler)
        self._pattern = pattern
        self._formatter = formatter

    def match(self, path):
        found = self._pattern.match(path)
        return None if found is None else found.groupdict()

    def url(self, *, parts, query=None):
        # The formatter is the path template with {var} placeholders.
        return self._append_query(self._formatter.format_map(parts), query)

    def __repr__(self):
        name = "" if self.name is None else "'" + self.name + "' "
        return ("<DynamicRoute {name}[{method}] {formatter} -> {handler!r}"
                .format(name=name, method=self.method,
                        formatter=self._formatter, handler=self.handler))
class StaticRoute(Route):
    """GET route serving files from a directory under a URL prefix.

    Supports conditional requests (If-Modified-Since) and a pluggable
    response factory.
    """

    def __init__(self, name, prefix, directory, *,
                 expect_handler=None, chunk_size=256*1024,
                 response_factory=None):
        assert prefix.startswith('/'), prefix
        assert prefix.endswith('/'), prefix
        super().__init__(
            'GET', self.handle, name, expect_handler=expect_handler)
        self._prefix = prefix
        self._prefix_len = len(self._prefix)
        # Trailing os.sep makes the containment check in handle() safe.
        self._directory = os.path.abspath(directory) + os.sep
        self._chunk_size = chunk_size
        if response_factory is None:
            self._response_factory = StreamResponse
        else:
            self._response_factory = response_factory
        if not os.path.isdir(self._directory):
            raise ValueError(
                "No directory exists at '{}'".format(self._directory))

    def match(self, path):
        # Everything after the prefix is treated as the file name.
        if not path.startswith(self._prefix):
            return None
        return {'filename': path[self._prefix_len:]}

    def url(self, *, filename, query=None):
        # Strip leading slashes so the name appends cleanly to the prefix.
        while filename.startswith('/'):
            filename = filename[1:]
        url = self._prefix + filename
        return self._append_query(url, query)

    @asyncio.coroutine
    def handle(self, request):
        """Stream the requested file, or raise HTTPNotFound/HTTPNotModified."""
        filename = request.match_info['filename']
        filepath = os.path.abspath(os.path.join(self._directory, filename))
        # Reject path-traversal attempts that escape the served directory.
        if not filepath.startswith(self._directory):
            raise HTTPNotFound()
        if not os.path.exists(filepath) or not os.path.isfile(filepath):
            raise HTTPNotFound()
        st = os.stat(filepath)
        # Conditional GET: 304 when the file is unchanged since the
        # client's cached copy.
        modsince = request.if_modified_since
        if modsince is not None and st.st_mtime <= modsince.timestamp():
            raise HTTPNotModified()
        ct, encoding = mimetypes.guess_type(filepath)
        if not ct:
            ct = 'application/octet-stream'
        resp = self._response_factory()
        resp.content_type = ct
        if encoding:
            resp.headers[hdrs.CONTENT_ENCODING] = encoding
        resp.last_modified = st.st_mtime
        file_size = st.st_size
        single_chunk = file_size < self._chunk_size
        if single_chunk:
            # Small payload: advertise Content-Length up front.
            resp.content_length = file_size
        resp.start(request)
        with open(filepath, 'rb') as f:
            chunk = f.read(self._chunk_size)
            if single_chunk:
                resp.write(chunk)
            else:
                while chunk:
                    resp.write(chunk)
                    chunk = f.read(self._chunk_size)
        return resp

    def __repr__(self):
        name = "'" + self.name + "' " if self.name is not None else ""
        return "<StaticRoute {name}[{method}] {path} -> {directory!r}".format(
            name=name, method=self.method, path=self._prefix,
            directory=self._directory)
class SystemRoute(Route):
    """Synthetic route backing dispatcher-generated error responses."""

    def __init__(self, status, reason):
        super().__init__(hdrs.METH_ANY, None, None)
        self._status = status
        self._reason = reason

    @property
    def status(self):
        return self._status

    @property
    def reason(self):
        return self._reason

    def match(self, path):
        # A system route never matches a real request path.
        return None

    def url(self, **kwargs):
        raise RuntimeError(".url() is not allowed for SystemRoute")

    def __repr__(self):
        return "<SystemRoute {status}: {reason}>".format(
            status=self._status, reason=self._reason)
class _NotFoundMatchInfo(UrlMappingMatchInfo):
    """Match info returned when no route matches the request path."""

    route = SystemRoute(404, 'Not Found')

    def __init__(self):
        # Empty match dict, no real route behind it.
        super().__init__({}, None)

    @property
    def handler(self):
        return self._not_found

    @asyncio.coroutine
    def _not_found(self, request):
        raise HTTPNotFound()

    def __repr__(self):
        return "<MatchInfo: not found>"
class _MethodNotAllowedMatchInfo(UrlMappingMatchInfo):
    """Match info for a path that matched only with other HTTP methods."""

    route = SystemRoute(405, 'Method Not Allowed')

    def __init__(self, method, allowed_methods):
        super().__init__({}, None)
        self._method = method
        self._allowed_methods = allowed_methods

    @property
    def handler(self):
        return self._not_allowed

    @asyncio.coroutine
    def _not_allowed(self, request):
        raise HTTPMethodNotAllowed(self._method, self._allowed_methods)

    def __repr__(self):
        allowed = ', '.join(sorted(self._allowed_methods))
        return ("<MatchInfo: method {} is not allowed (allowed methods: {}>"
                .format(self._method, allowed))
class UrlDispatcher(AbstractRouter, collections.abc.Mapping):
    """Router mapping request paths onto registered routes.

    Also acts as a read-only mapping from route name to route.
    """

    # Bare dynamic path part: {var}
    DYN = re.compile(r'^\{(?P<var>[a-zA-Z][_a-zA-Z0-9]*)\}$')
    # Dynamic part with an inline regex: {var:regex}
    DYN_WITH_RE = re.compile(
        r'^\{(?P<var>[a-zA-Z][_a-zA-Z0-9]*):(?P<re>.+)\}$')
    # Default pattern for a bare dynamic part (no '/', no braces).
    GOOD = r'[^{}/]+'
    # Splits a templated path into literal and {...} pieces.
    ROUTE_RE = re.compile(r'(\{[_a-zA-Z][^{}]*(?:\{[^{}]*\}[^{}]*)*\})')
    METHODS = {hdrs.METH_ANY, hdrs.METH_POST,
               hdrs.METH_GET, hdrs.METH_PUT, hdrs.METH_DELETE,
               hdrs.METH_PATCH, hdrs.METH_HEAD, hdrs.METH_OPTIONS}

    def __init__(self):
        super().__init__()
        self._urls = []    # every route, in registration order
        self._routes = {}  # name -> route, named routes only

    @asyncio.coroutine
    def resolve(self, request):
        """Find the route for *request* and return a match-info object."""
        # raw_path keeps percent-escapes so patterns match the wire form.
        path = request.raw_path
        method = request.method
        allowed_methods = set()
        for route in self._urls:
            match_dict = route.match(path)
            if match_dict is None:
                continue
            route_method = route.method
            if route_method == method or route_method == hdrs.METH_ANY:
                # Unquote separate matching parts
                match_dict = {key: unquote(value) for key, value in
                              match_dict.items()}
                return UrlMappingMatchInfo(match_dict, route)
            # Path matched but the method did not: remember it for a 405.
            allowed_methods.add(route_method)
        else:
            if allowed_methods:
                return _MethodNotAllowedMatchInfo(method, allowed_methods)
            else:
                return _NotFoundMatchInfo()

    def __iter__(self):
        return iter(self._routes)

    def __len__(self):
        return len(self._routes)

    def __contains__(self, name):
        return name in self._routes

    def __getitem__(self, name):
        return self._routes[name]

    def register_route(self, route):
        """Add a route; named routes must have unique names."""
        assert isinstance(route, Route), 'Instance of Route class is required.'
        name = route.name
        if name is not None:
            if name in self._routes:
                raise ValueError('Duplicate {!r}, '
                                 'already handled by {!r}'
                                 .format(name, self._routes[name]))
            else:
                self._routes[name] = route
        self._urls.append(route)

    def add_route(self, method, path, handler,
                  *, name=None, expect_handler=None):
        """Register *handler*: plain paths get a PlainRoute,
        templated paths ({var} / {var:regex}) a DynamicRoute."""
        if not path.startswith('/'):
            raise ValueError("path should be started with /")
        assert callable(handler), handler
        # Wrap plain callables so every handler can be awaited uniformly.
        if (not asyncio.iscoroutinefunction(handler) and
                not inspect.isgeneratorfunction(handler)):
            handler = asyncio.coroutine(handler)
        # upstr: case-insensitive string used for header/method comparison.
        method = upstr(method)
        if method not in self.METHODS:
            raise ValueError("{} is not allowed HTTP method".format(method))
        if not ('{' in path or '}' in path or self.ROUTE_RE.search(path)):
            # No template markers at all: exact-match route.
            route = PlainRoute(
                method, handler, name, path, expect_handler=expect_handler)
            self.register_route(route)
            return route
        pattern = ''
        formatter = ''
        for part in self.ROUTE_RE.split(path):
            match = self.DYN.match(part)
            if match:
                pattern += '(?P<{}>{})'.format(match.group('var'), self.GOOD)
                formatter += '{' + match.group('var') + '}'
                continue
            match = self.DYN_WITH_RE.match(part)
            if match:
                pattern += '(?P<{var}>{re})'.format(**match.groupdict())
                formatter += '{' + match.group('var') + '}'
                continue
            if '{' in part or '}' in part:
                # Braces that survived the split are malformed markers.
                raise ValueError("Invalid path '{}'['{}']".format(path, part))
            formatter += part
            pattern += re.escape(part)
        try:
            compiled = re.compile('^' + pattern + '$')
        except re.error as exc:
            raise ValueError(
                "Bad pattern '{}': {}".format(pattern, exc)) from None
        route = DynamicRoute(
            method, handler, name, compiled,
            formatter, expect_handler=expect_handler)
        self.register_route(route)
        return route

    def add_static(self, prefix, path, *, name=None, expect_handler=None,
                   chunk_size=256*1024, response_factory=None):
        """
        Adds static files view
        :param prefix - url prefix
        :param path - folder with files
        """
        assert prefix.startswith('/')
        if not prefix.endswith('/'):
            prefix += '/'
        route = StaticRoute(name, prefix, path,
                            expect_handler=expect_handler,
                            chunk_size=chunk_size,
                            response_factory=response_factory)
        self.register_route(route)
        return route
| {
"repo_name": "morgan-del/aiohttp",
"path": "aiohttp/web_urldispatcher.py",
"copies": "3",
"size": "13345",
"license": "apache-2.0",
"hash": -1940640657349346600,
"line_mean": 30.1072261072,
"line_max": 79,
"alpha_frac": 0.5659797677,
"autogenerated": false,
"ratio": 4.224438113327002,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 429
} |
"""All URLs for the extension."""
from django.conf.urls import include, url
from rest_framework.urlpatterns import format_suffix_patterns
from geokey_airquality import views
exportpatterns = [
url(r'^admin/airquality/export/(?P<file>[\w-]+)$',
views.AQExportView.as_view(),
name='export'),
]
datapatterns = format_suffix_patterns(exportpatterns, allowed=['csv'])
urlpatterns = [
url(
r'^', include(datapatterns)),
# ###########################
# ADMIN PAGES
# ###########################
url(r'^admin/airquality/$',
views.AQIndexView.as_view(),
name='index'),
url(r'^admin/airquality/add/$',
views.AQAddView.as_view(),
name='add'),
url(r'^admin/airquality/(?P<project_id>[0-9]+)/$',
views.AQProjectView.as_view(),
name='project'),
url(r'^admin/airquality/(?P<project_id>[0-9]+)/remove/$',
views.AQRemoveView.as_view(),
name='remove'),
# ###########################
# ADMIN AJAX
# ###########################
url(r'^ajax/airquality/'
r'projects/(?P<project_id>[0-9]+)/$',
views.AQProjectsSingleAjaxView.as_view(),
name='ajax_projects_single'),
url(r'^ajax/airquality/'
r'projects/(?P<project_id>[0-9]+)/'
r'categories/(?P<category_id>[0-9]+)/$',
views.AQCategoriesSingleAjaxView.as_view(),
name='ajax_categories_single'),
# ###########################
# PUBLIC API
# ###########################
url(r'^api/airquality/'
r'sheet/$',
views.AQSheetAPIView.as_view(),
name='api_sheet'),
url(r'^api/airquality/'
r'locations/$',
views.AQLocationsAPIView.as_view(),
name='api_locations'),
url(r'^api/airquality/'
r'locations/(?P<location_id>[0-9]+)/$',
views.AQLocationsSingleAPIView.as_view(),
name='api_locations_single'),
url(r'^api/airquality/'
r'locations/(?P<location_id>[0-9]+)/'
r'measurements/$',
views.AQMeasurementsAPIView.as_view(),
name='api_measurements'),
url(r'^api/airquality/'
r'locations/(?P<location_id>[0-9]+)/'
r'measurements/(?P<measurement_id>[0-9]+)/$',
views.AQMeasurementsSingleAPIView.as_view(),
name='api_measurements_single'),
url(r'^api/airquality/'
r'projects/$',
views.AQProjectsAPIView.as_view(),
name='api_projects'),
]
| {
"repo_name": "ExCiteS/geokey-airquality",
"path": "geokey_airquality/urls.py",
"copies": "1",
"size": "2455",
"license": "mit",
"hash": -3789967503000551400,
"line_mean": 28.578313253,
"line_max": 70,
"alpha_frac": 0.5401221996,
"autogenerated": false,
"ratio": 3.6478454680534917,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46879676676534915,
"avg_score": null,
"num_lines": null
} |
"""All URLs for the extension."""
from django.conf.urls import url
from .views import (
IndexPage,
AllDataImportsPage,
AddDataImportPage,
SingleDataImportPage,
DataImportCreateCategoryPage,
DataImportAssignFieldsPage,
DataImportAllDataFeaturesPage,
RemoveDataImportPage
)
urlpatterns = [
# ###########################
# ADMIN PAGES
# ###########################
url(
r'^admin/dataimports/$',
IndexPage.as_view(),
name='index'),
url(
r'^admin/projects/(?P<project_id>[0-9]+)/'
r'dataimports/$',
AllDataImportsPage.as_view(),
name='all_dataimports'),
url(
r'^admin/projects/(?P<project_id>[0-9]+)/'
r'dataimports/add/$',
AddDataImportPage.as_view(),
name='dataimport_add'),
url(
r'^admin/projects/(?P<project_id>[0-9]+)/'
r'dataimports/(?P<dataimport_id>[0-9]+)/$',
SingleDataImportPage.as_view(),
name='single_dataimport'),
url(
r'^admin/projects/(?P<project_id>[0-9]+)/'
r'dataimports/(?P<dataimport_id>[0-9]+)/create-category/$',
DataImportCreateCategoryPage.as_view(),
name='dataimport_create_category'),
url(
r'^admin/projects/(?P<project_id>[0-9]+)/'
r'dataimports/(?P<dataimport_id>[0-9]+)/assign-fields/$',
DataImportAssignFieldsPage.as_view(),
name='dataimport_assign_fields'),
url(
r'^admin/projects/(?P<project_id>[0-9]+)/'
r'dataimports/(?P<dataimport_id>[0-9]+)/'
r'datafeatures/$',
DataImportAllDataFeaturesPage.as_view(),
name='dataimport_all_datafeatures'),
url(
r'^admin/projects/(?P<project_id>[0-9]+)/'
r'dataimports/(?P<dataimport_id>[0-9]+)/remove/$',
RemoveDataImportPage.as_view(),
name='dataimport_remove')
]
| {
"repo_name": "ExCiteS/geokey-dataimports",
"path": "geokey_dataimports/urls.py",
"copies": "1",
"size": "1868",
"license": "mit",
"hash": 843526413075476400,
"line_mean": 29.1290322581,
"line_max": 67,
"alpha_frac": 0.5695931478,
"autogenerated": false,
"ratio": 3.5854126679462572,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4655005815746257,
"avg_score": null,
"num_lines": null
} |
"""All URLs for the extension."""
from django.conf.urls import url
from .views import (
IndexPage,
AllWebResourcesPage,
AddWebResourcePage,
SingleWebResourcePage,
RemoveWebResourcePage,
ReorderWebResourcesAjax,
UpdateWebResourceAjax,
AllWebResourcesAPI,
SingleWebResourceAPI
)
urlpatterns = [
# ###########################
# ADMIN PAGES
# ###########################
url(
r'^admin/webresources/$',
IndexPage.as_view(),
name='index'),
url(
r'^admin/projects/(?P<project_id>[0-9]+)/'
r'webresources/$',
AllWebResourcesPage.as_view(),
name='all_webresources'),
url(
r'^admin/projects/(?P<project_id>[0-9]+)/'
r'webresources/add/$',
AddWebResourcePage.as_view(),
name='webresource_add'),
url(
r'^admin/projects/(?P<project_id>[0-9]+)/'
r'webresources/(?P<webresource_id>[0-9]+)/$',
SingleWebResourcePage.as_view(),
name='single_webresource'),
url(
r'^admin/projects/(?P<project_id>[0-9]+)/'
r'webresources/(?P<webresource_id>[0-9]+)/remove/$',
RemoveWebResourcePage.as_view(),
name='webresource_remove'),
# ###########################
# ADMIN AJAX
# ###########################
url(
r'^ajax/projects/(?P<project_id>[0-9]+)/'
r'webresources/reorder/$',
ReorderWebResourcesAjax.as_view(),
name='ajax_webresources_reorder'),
url(
r'^ajax/projects/(?P<project_id>[0-9]+)/'
r'webresources/(?P<webresource_id>[0-9]+)/$',
UpdateWebResourceAjax.as_view(),
name='ajax_webresource_update'),
# ###########################
# PUBLIC API
# ###########################
url(
r'^api/projects/(?P<project_id>[0-9]+)/'
r'webresources/$',
AllWebResourcesAPI.as_view(),
name='api_all_webresources'),
url(
r'^api/projects/(?P<project_id>[0-9]+)/'
r'webresources/(?P<webresource_id>[0-9]+)/$',
SingleWebResourceAPI.as_view(),
name='api_single_webresource')
]
| {
"repo_name": "ExCiteS/geokey-webresources",
"path": "geokey_webresources/urls.py",
"copies": "1",
"size": "2129",
"license": "mit",
"hash": 7454576996403766000,
"line_mean": 26.6493506494,
"line_max": 60,
"alpha_frac": 0.5232503523,
"autogenerated": false,
"ratio": 3.7026086956521738,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9725859047952173,
"avg_score": 0,
"num_lines": 77
} |
__all__ = ['UserItemRecommender']
try:
    from recsys.datamodel.data import Data
    from recsys.algorithm.factorize import SVDNeighbourhood
except ImportError:
    # recsys is optional at import time; without it the recommenders
    # below will fail with NameError when actually used.
    pass
from .backend import Backend
# Default number of singular values passed as `k` to SVD compute().
K = 100
# Passed as `min_values` to model.compute().
MIN_VALUES = 2
# Default number of recommendations returned per key.
N = 10
class Recommender(Backend):
    """Base recommender backed by python-recsys SVD models."""

    def load(self, dataset, write=None):
        """Convert ``dataset['data']`` into a recsys ``Data`` set and keep it.

        Any previously trained model is discarded.

        :param dataset: dict with a 'data' key holding a nested mapping
        :param write: unused for now (persisting on load not implemented)
        :return: True on success
        """
        self._model = None
        data = self._convert_hash(dataset['data'])
        self._data = data
        if write:
            # NOTE(review): persisting the converted data is not implemented.
            pass
        return True

    def predict(self, keys):
        """Alias for :meth:`recommend` with default arguments."""
        return self.recommend(keys)

    def recommend(self, keys, n=None, unknown=True):
        """Return up to *n* recommendations for each key in *keys*.

        :param keys: non-empty list or tuple of ids to recommend for
        :param n: recommendations per key (defaults to module-level N)
        :param unknown: if True, only recommend items unknown to the key
        :return: lazy map of per-key recommendation lists (None for
                 keys the model does not know)
        :raises ValueError: if *keys* is not a non-empty list/tuple
        """
        if n is None:
            n = N
        self._ensure_trained(read=True)
        # isinstance (rather than exact type comparison) also accepts
        # list/tuple subclasses; behaviour is otherwise unchanged.
        if not isinstance(keys, (list, tuple)):
            raise ValueError('keys must be a list or a tuple')
        elif len(keys) < 1:
            raise ValueError('keys must not be empty')
        model = self._model

        def call(key):
            try:
                return model.recommend(key, n=n, only_unknowns=unknown)
            except KeyError:
                # Unknown key: yield None instead of failing the whole batch.
                pass
        return map(call, keys)

    def validate(self):
        """Not implemented for recommenders."""
        pass

    def predict_probabilities(self, value):
        """Not implemented for recommenders."""
        pass

    def _convert_hash(self, dataset):
        """Flatten a nested ``{key: {subkey: value}}`` dict into recsys Data.

        Each (value, key, subkey) triple becomes one tuple in the data set.
        """
        data = Data()
        for key in dataset:
            record = dataset[key]
            batch = [(record[k], key, k) for k in record]
            data.set(batch, extend=True)
        return data
class UserItemRecommender(Recommender):
    """Recommender built on recsys' SVDNeighbourhood factorization."""
    @staticmethod
    def model(*args, **kwargs):
        """Return a fresh, untrained SVDNeighbourhood model (args ignored)."""
        return SVDNeighbourhood()
    def fit(self, k=K):
        """Train the SVD model on the previously loaded data.

        :param k: number of singular values passed to compute()
        :return: True on success
        """
        self._ensure_loaded()
        model = self.model()
        model.set_data(self._data)
        matrix = model.create_matrix()
        self.matrix = matrix
        model.compute(k=k, min_values=MIN_VALUES,
                      mean_center=True, post_normalize=True)
        # Detach the training data before persisting so the written model
        # does not carry the full data set along.
        model.set_data(Data())
        self._model = model
        self._write_model()
        return True
| {
"repo_name": "norbert/fickle",
"path": "fickle/recommenders.py",
"copies": "1",
"size": "1886",
"license": "mit",
"hash": -9009157113959560000,
"line_mean": 23.1794871795,
"line_max": 71,
"alpha_frac": 0.5646871686,
"autogenerated": false,
"ratio": 3.9538784067085953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 78
} |
"""
Main entry point for the allusive-editor
"""
##########################################################################
## Imports
##########################################################################
import Tkinter as tk
from .colors import *
from .widgets import *
##########################################################################
## Constants
##########################################################################
TITLE = "Allusive"  # window title shown in the title bar
##########################################################################
## App
##########################################################################
class AllusiveApp(tk.Tk, object):
    """Main window: an editor pane and a mirrored viewer pane side by side."""
    def __init__(self):
        tk.Tk.__init__(self)
        self.config(background=BG_DARK)
        # Start maximised ("zoomed" state — platform-dependent; TODO confirm
        # behaviour outside Windows).
        self.wm_state("zoomed")
        self.title(TITLE)
        # Two equally weighted columns sharing a single stretchable row.
        self.columnconfigure(0, weight=1)
        self.columnconfigure(1, weight=1)
        self.rowconfigure(0, weight=1)
        ## Initialize the widgets
        self.editor = AllusiveEditor(self)
        self.viewer = AllusiveViewer(self)
        # Mirror every keystroke from the editor into the viewer.
        self.editor.text.bind("<Key>", self.sync_text)
        self.editor.grid(row=0, column=0, sticky=tk.NW+tk.SE)
        self.viewer.grid(row=0, column=1, sticky=tk.NW+tk.SE)
    def sync_text(self, event):
        """Append the typed character to the (otherwise read-only) viewer."""
        # Temporarily enable the viewer text widget so inserts are allowed.
        self.viewer.text.config(state=tk.NORMAL)
        self.viewer.text.insert("insert", event.char)
        self.viewer.text.config(state=tk.DISABLED)
    def run(self):
        """
        Run the primary frame (and any other frames)
        """
        tk.mainloop()
class AllusiveEditor(AllusiveFrame):
    """Editor pane — a plain AllusiveFrame with its default behaviour."""
    pass
class AllusiveViewer(AllusiveFrame):
    """Read-only pane with a fixed twilight background."""
    def __init__(self, master, **options):
        # Force the viewer's appearance/behaviour regardless of what the
        # caller passed in.
        options.update(background=TWILIGHT, readonly=True)
        AllusiveFrame.__init__(self, master, **options)
        self.text_config(background=TWILIGHT)
| {
"repo_name": "bbengfort/allusive-editor",
"path": "allusive/app.py",
"copies": "1",
"size": "2127",
"license": "mit",
"hash": -8406025251716671000,
"line_mean": 27.36,
"line_max": 74,
"alpha_frac": 0.5016455101,
"autogenerated": false,
"ratio": 4.005649717514125,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9977255632868647,
"avg_score": 0.0060079189490954196,
"num_lines": 75
} |
"""
Defines colors and color functions for use in the App
"""
##########################################################################
## Colors
##########################################################################
WHITE = "#FFFFFF"
# Dark Greys
DARK_WOODS = "#190508"
TWILIGHT = "#2A2C31"
DEPTHS = "#050322"
# Light Blues
BLU = "#1479E4"
# Light Yellows
SUNRISE = "#FFFEDF"
GLOW = "#DFBA69"
HONEY = "#EDD490"
# Dark Reds
RUST = "#823507"
SHALLOW = "#5A2E2E"
##########################################################################
## Widget Constants (FG and BG)
##########################################################################
FG_LITE = WHITE
BG_DARK = DARK_WOODS
BG_CURSOR = BLU
FG_SELECT = TWILIGHT
BG_SELECT = GLOW
| {
"repo_name": "bbengfort/allusive-editor",
"path": "allusive/colors.py",
"copies": "1",
"size": "1095",
"license": "mit",
"hash": 1741915596246103300,
"line_mean": 22.2978723404,
"line_max": 74,
"alpha_frac": 0.4657534247,
"autogenerated": false,
"ratio": 3.308157099697885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9191284330370013,
"avg_score": 0.016525238805574206,
"num_lines": 47
} |
"""All utils for the WeGovNow extension."""
from django.utils.text import slugify
from django.contrib.sites.models import Site
from allauth.socialaccount.adapter import get_adapter
from allauth_uwum.views import UWUMAdapter, UWUMView
from geokey.users.models import User
def get_uwum_view(request):
    """Build a UWUM view wired up with *request* and a matching adapter."""
    uwum_view = UWUMView()
    uwum_view.request = request
    uwum_view.adapter = UWUMAdapter(request)
    return uwum_view
def sign_up_uwum_user(request, response):
    """Create a GeoKey account for the UWUM member described by *response*."""
    provider = get_uwum_view(request).adapter.get_provider()
    sociallogin = provider.sociallogin_from_response(request, response)
    return get_adapter(request).save_user(request, sociallogin, form=None)
def make_email_address(username):
    """Build an email address for *username* on the current site's domain."""
    domain = Site.objects.get_current().domain
    return '{username}@user.{domain}'.format(
        username=slugify(username), domain=domain)
def generate_display_name(username):
    """Return *username*, suffixed with a counter when the name is taken."""
    candidate = username
    attempt = 2
    # Keep incrementing the suffix until the name is free.
    while User.objects.filter(display_name=candidate).exists():
        candidate = '%s %s' % (username, attempt)
        attempt += 1
    return candidate
def generate_fake_email(username):
    """Return an unused generated email address for *username*."""
    candidate = make_email_address(username)
    attempt = 2
    # Keep incrementing the suffix until the address is free.
    while User.objects.filter(email=candidate).exists():
        candidate = make_email_address('%s %s' % (username, attempt))
        attempt += 1
    return candidate
def set_coordinates_precision(coords, precision):
    """Round coordinates to *precision* decimal places.

    *coords* may be a single number or an arbitrarily nested list of
    numbers (e.g. GeoJSON coordinate arrays); the nesting structure is
    preserved in the return value.

    :param coords: number or (nested) sequence of numbers
    :param precision: number of decimal places (coerced with int())
    :return: rounded number, or a new (nested) list of rounded numbers
    """
    try:
        # Scalar fast path: round() raises TypeError for sequences.
        return round(coords, int(precision))
    except TypeError:
        # Sequence: recurse, rebuilding the nesting as plain lists.
        # (The original allocated an accumulator list on every call —
        # including the scalar path — and used a manual append loop.)
        return [set_coordinates_precision(item, precision)
                for item in coords]
def set_geometry_precision(geometry, precision):
    """Round a GeoJSON geometry's 'coordinates' in place and return it."""
    geometry['coordinates'] = set_coordinates_precision(
        geometry['coordinates'], precision)
    return geometry
| {
"repo_name": "ExCiteS/geokey-wegovnow",
"path": "geokey_wegovnow/utils.py",
"copies": "1",
"size": "2156",
"license": "mit",
"hash": 9097837286405053000,
"line_mean": 28.5342465753,
"line_max": 79,
"alpha_frac": 0.6910946197,
"autogenerated": false,
"ratio": 3.92,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.51110946197,
"avg_score": null,
"num_lines": null
} |
__all__ = ["validate", "customtype"]
__author__ = "Carl Bordum Hansen"
__license__ = "MIT"
import collections.abc
import reprlib
# Open questions:
# how should sets be handled?
# should we handle collections.abc.MappingView?
def _make_typeerror(structure, data):
error_msg = "%s is of type %s, expected type %s" % (
reprlib.repr(data),
data.__class__.__name__,
structure.__class__.__name__,
)
return TypeError(error_msg)
def customtype(check_function):
    """Mark *check_function* as a custom checker usable inside `validate`.

    The decorated function receives the data under validation and should
    raise TypeError when the data is malformed.

    Example
    -------
    >>> @customtype
    ... def two_item_list(lst):
    ...     if len(lst) != 2:
    ...         raise TypeError('length %d!!!' % len(lst))
    ...
    >>> validate([two_item_list], [[1, 2], [3, 4]])

    Note
    ----
    Sets the ``check_function.__datatyping_validate`` attribute, which
    `validate` detects via ``hasattr``.

    Parameters
    ----------
    check_function : function
        Function that should be used to type check.
    """
    check_function.__datatyping_validate = True
    return check_function
def validate(structure, data, *, strict=True):
    """Verify that values in a dataset are of correct types.
    Example
    -------
    >>> validate([str], ['a', 'b', 'c'])
    >>> validate(
    ...     {'id': int, 'lucky_numbers': [int]},
    ...     {'id': 700, 'lucky_numbers': [1, 3, 7, 13]}
    ... )
    >>> validate([int], [1, 2, 3, 4.5])
    TypeError: 4.5 is of type float, expected type int.
    Parameters
    ----------
    structure : type or collection of types
        The data structure that `data` should follow.
    data : anything
        The data you want type checked.
    strict : bool
        Dicts in `data` must have the **exact** keys specified in
        `structure`. No more.
    Raises
    ------
    TypeError
        If an element in `data` has wrong type.
    ValueError
        If the length of `structure` doesn't make sense for validating
        `data`.
    KeyError
        If a dict in `data` misses a key or `strict` is True and a dict
        has keys not in `structure`.
    """
    # Custom checker registered with @customtype: delegate validation to it.
    if hasattr(structure, "__datatyping_validate"):
        structure(data)
    # Sequence structure (e.g. [int]).  Strings are excluded so that a str
    # value is not validated character by character.
    elif isinstance(structure, collections.abc.Sequence) and not isinstance(data, str):
        if not isinstance(data, type(structure)):
            raise _make_typeerror(structure, data)
        if len(structure) == 1:
            # A single element type: data may have any length.
            for item in data:
                validate(structure[0], item, strict=strict)
            return  # success
        # Several element types: validate positionally; lengths must match.
        if len(structure) != len(data):
            error_msg = ("%s has the wrong length. Expected %d, got %d.") % (
                reprlib.repr(data),
                len(structure),
                len(data),
            )
            raise ValueError(error_msg)
        for type_, item in zip(structure, data):
            validate(type_, item, strict=strict)
    # Mapping structure: validate each value against its declared type.
    elif isinstance(structure, collections.abc.Mapping):
        if not isinstance(data, type(structure)):
            raise _make_typeerror(structure, data)
        if strict and len(structure) != len(data):
            # Report the symmetric difference: keys present on one side only.
            raise KeyError(reprlib.repr(set(structure.keys()) ^ set(data.keys())))
        for key, type_ in structure.items():
            item = data[key]  # or KeyError :)
            validate(type_, item, strict=strict)
    # Plain type: direct isinstance check.
    elif not isinstance(data, structure):  # structure is a type here
        error_msg = "%s is of type %s, expected type %s" % (
            reprlib.repr(data),
            data.__class__.__name__,
            structure.__name__,
        )
        raise TypeError(error_msg)
| {
"repo_name": "Zaab1t/datatyping",
"path": "datatyping/datatyping.py",
"copies": "1",
"size": "3739",
"license": "mit",
"hash": -2047387864788691200,
"line_mean": 30.4201680672,
"line_max": 87,
"alpha_frac": 0.5530890612,
"autogenerated": false,
"ratio": 4.007502679528403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5060591740728403,
"avg_score": null,
"num_lines": null
} |
"""All validators for the WeGovNow extension."""
from oauth2_provider.oauth2_validators import OAuth2Validator
from allauth.socialaccount.models import SocialAccount
from geokey_wegovnow.utils import get_uwum_view, sign_up_uwum_user
class UWUMOAuth2Validator(OAuth2Validator):
    """OAuth2 validator that accepts UWUM tokens as well as GeoKey's own."""
    def validate_bearer_token(self, token, scopes, request):
        """
        Check bearer token provided with the request.

        First, check if token is an UWUM access token. If it is, get the
        GeoKey user associated with the UWUM account; if the account does
        not exist yet, create one automatically. But if token is not
        validated by UWUM, try and validate it with personal GeoKey OAuth2.
        """
        if not token:
            return False
        # Valid UWUM token?  Ask the UWUM server to validate it.
        view = get_uwum_view(request)
        response = view.adapter.validate_user(token)
        if response.status_code == 200:
            response = response.json()
            uid = response.get('member', {}).get('id')
            try:
                # Is related user already created on GeoKey?
                account = SocialAccount.objects.select_related('user').get(
                    uid=uid, provider=view.adapter.get_provider().id)
                user = account.user
            except SocialAccount.DoesNotExist:
                # If no - create one!
                user = sign_up_uwum_user(request, response)
            request.user = user
            request.user.uwum = True  # mark request as UWUM-authenticated
            request.scopes = scopes
            # That's it, we have the user! It's safe to terminate here.
            return True
        # If token is not validated by UWUM, maybe it's GeoKey token?
        return super(UWUMOAuth2Validator, self).validate_bearer_token(
            token, scopes, request)
| {
"repo_name": "ExCiteS/geokey-wegovnow",
"path": "geokey_wegovnow/validators.py",
"copies": "1",
"size": "1831",
"license": "mit",
"hash": -4116874235767886000,
"line_mean": 37.1458333333,
"line_max": 79,
"alpha_frac": 0.6198798471,
"autogenerated": false,
"ratio": 4.189931350114416,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5309811197214416,
"avg_score": null,
"num_lines": null
} |
"""All valid types for Research, Resources and Models."""
from enum import Enum
class ResearchTypes(Enum):
    """Enumerates every valid research type.

    Member values mirror the names in `research_data.yml`; they are used to
    validate request types and are stored verbatim in the database, so they
    must stay stable.
    """
    Plasma = 'Plasma'
    EnergyWeapons = 'EnergyWeapons'
    MiningEfficiency = 'MiningEfficiency'
class ResourceTypes(Enum):
    """Enumerates every valid resource type.

    Member values mirror the cost names in `research_data.yml` and
    `module_data.yml`; they are used to validate request types and are
    stored verbatim in the database, so they must stay stable.
    """
    Minerals = 'Minerals'
    Fuel = 'Fuel'
    Nanobots = 'Nanobots'
class ModuleTypes(Enum):
    """Enumerates every valid module type.

    Member values mirror the names in `module_data.yml`; they are used to
    validate request types and are stored verbatim in the database, so
    they must stay stable.
    """
    LaserTurret = 'LaserTurret'
    PlasmaGenerator = 'PlasmaGenerator'
| {
"repo_name": "Nukesor/spacesurvival",
"path": "server/data/types.py",
"copies": "1",
"size": "1056",
"license": "mit",
"hash": -4760405113571016000,
"line_mean": 27.5405405405,
"line_max": 94,
"alpha_frac": 0.7064393939,
"autogenerated": false,
"ratio": 3.8823529411764706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5088792335076471,
"avg_score": null,
"num_lines": null
} |
__all__ = ('VerificationManager', 'verify_user', 'verify_group')
from pypushover import BaseManager, base_url, send
# Pushover endpoint used for both user-id and group-id validation.
verify_url = base_url + "/users/validate.json"
class VerificationManager(BaseManager):
    """Binds an application token to the module-level verification helpers."""
    def __init__(self, app_token):
        super(VerificationManager, self).__init__(app_token)
    def verify_user(self, user_id, device=None):
        """Check whether *user_id* (optionally paired with *device*) is valid.

        :param user_id: the user id to verify
        :param device: optional device name to verify alongside the user
        """
        return verify_user(self._app_token, user_id, device=device)
    def verify_group(self, group_id):
        """Check whether *group_id* is a valid group id.

        :param group_id: the group id to verify
        """
        return verify_group(self._app_token, group_id)
def verify_user(app_token, user, device=None):
    """Check with Pushover whether *user* is a valid user id.

    When *device* is given, the user/device pair is verified instead.

    :param app_token: the application token
    :param user: the user id
    :param device: optional device name
    :return: True when the id is reported valid
    """
    payload = {
        'token': app_token,
        'user': user,
    }
    if device:
        payload['device'] = device
    # `send` raises an HTTPError for invalid ids; status 1 means valid.
    return send(verify_url, payload)['status'] == 1
def verify_group(app_token, group_id):
    """Check with Pushover whether *group_id* is a valid group id.

    Group ids share the user validation endpoint, so this delegates
    directly to :func:`verify_user`.

    :param app_token: the application token
    :param group_id: the group id to verify
    :return: True when the id is reported valid
    """
    return verify_user(app_token, group_id)
| {
"repo_name": "KronosKoderS/py_pushover",
"path": "pypushover/verification.py",
"copies": "2",
"size": "1478",
"license": "mit",
"hash": -5655506966091031000,
"line_mean": 22.8387096774,
"line_max": 102,
"alpha_frac": 0.6041948579,
"autogenerated": false,
"ratio": 3.71356783919598,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.531776269709598,
"avg_score": null,
"num_lines": null
} |
__all__ = ["__version__", "Client"]
__version__ = "0.1.0"
import collections
import json
import memcache
class Client(object):
    """Thin client for a golab server spoken to over the memcached protocol.

    NOTE(review): this module uses dict.iteritems(), so it is Python 2 only.
    """

    __slots__ = ["connection"]

    def __init__(self, host="127.0.0.1", port=11222):
        """Open a memcached connection to the golab server at host:port."""
        self.connection = memcache.Client(["%s:%s" % (host, port)])

    def get_user_variants(self, exp_ids, user_id):
        """Return {exp_id: variant} for *user_id* across *exp_ids* (one round trip)."""
        keys = ['%s:%s' % (exp_id, user_id) for exp_id in exp_ids]
        data = self.connection.get_multi(keys)
        results = {}
        for key, value in data.iteritems():
            # Keys are "<exp_id>:<user_id>"; recover the experiment id.
            exp_id, _, _ = key.partition(":")
            results[exp_id] = value
        return results

    def get_user_variant(self, exp_id, user_id):
        """Return the variant assigned to *user_id* in experiment *exp_id*."""
        key = '%s:%s' % (exp_id, user_id)
        return self.connection.get(key)

    def convert_user(self, exp_id, user_id):
        """Record a conversion for *user_id* in experiment *exp_id*."""
        key = '%s:%s' % (exp_id, user_id)
        return self.connection.incr(key)

    def add_new_experiment(self, data):
        """Create a new experiment from *data* (a JSON-serialisable dict).

        NOTE(review): the key is hard-coded to "1" — presumably the server
        assigns the real experiment id; confirm against the golab protocol.
        """
        data = json.dumps(data)
        return self.connection.add("1", data)

    def update_experiment(self, exp_id, data):
        """Replace the stored definition of experiment *exp_id*."""
        data = json.dumps(data)
        return self.connection.replace(exp_id, data)

    def get_experiment(self, exp_id):
        """Return the decoded experiment *exp_id*, or None when absent."""
        data = self.connection.get("experiment:%s" % (exp_id, ))
        if data:
            return json.loads(data)
        return None

    def get_active_experiments(self):
        """Return {id: experiment} for all active experiments."""
        data = self.connection.get("experiment:active")
        results = {}
        if data:
            for exp in json.loads(data):
                results[int(exp['id'])] = exp
        return results

    def get_all_experiments(self):
        """Return {id: experiment} for every experiment.

        NOTE(review): plain memcached does not expand the "*" wildcard;
        this relies on golab-server-specific behaviour — confirm.
        """
        data = self.connection.get("experiment:*")
        results = {}
        if data:
            for exp in json.loads(data):
                results[int(exp['id'])] = exp
        return results

    def deactivate_experiment(self, exp_id):
        """Deactivate experiment *exp_id* (implemented as a delete)."""
        return self.connection.delete(exp_id)

    def activate_experiment(self, exp_id):
        """Activate experiment *exp_id* (implemented as a touch)."""
        return self.connection.touch(exp_id)

    def get_active_experiment_stats(self):
        """Return stats as {exp_id: {bucket_id: {stat_name: value}}}.

        Stat keys arrive as "<name>.<exp_id>.<bucket_id>".
        """
        data = self.connection.get_stats()
        results = collections.defaultdict(lambda: collections.defaultdict(dict))
        for server, stats in data:
            for key, value in stats.iteritems():
                stat_name, exp_id, bucket_id = key.split(".")
                results[int(exp_id)][int(bucket_id)][stat_name] = value
        return results

    def get_experiment_stats(self, exp_id):
        """Return stats for one experiment as {bucket_id: {stat_name: value}}."""
        data = self.connection.get_stats(str(exp_id))
        results = collections.defaultdict(dict)
        for server, stats in data:
            for key, value in stats.iteritems():
                # Keys arrive as "<name>.<exp_id>.<bucket_id>", same as in
                # get_active_experiment_stats above.
                stat_name, _, bucket_id = key.split(".")
                # Bug fix: the stat name was previously passed through
                # int(), which raised ValueError for non-numeric stat names
                # and discarded the name as the dict key.
                results[int(bucket_id)][stat_name] = value
        return results
| {
"repo_name": "brettlangdon/golab-python",
"path": "golab.py",
"copies": "1",
"size": "2785",
"license": "mit",
"hash": -2377516700649848000,
"line_mean": 30.2921348315,
"line_max": 80,
"alpha_frac": 0.5666068223,
"autogenerated": false,
"ratio": 3.7282463186077646,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9794159563435161,
"avg_score": 0.00013871549452073795,
"num_lines": 89
} |
__all__ = ['__version__', 'get_version']
default_app_config = 'djangobb_forum.apps.ForumConfig'
version_info = (0, 0, 1, 'a', 0)
# version_info layout: ('major', 'minor', 'micro', 'releaselevel', 'serial');
# the resulting __version__ stays compatible with
# distutils2.version.NormalizedVersion.
def get_version():
    """Return the version as a human-readable string."""
    if version_info[3] == 'f':
        # Final release: plain major.minor.micro.
        return '%d.%d.%d' % version_info[:3]
    # Pre-release: append releaselevel/serial, plus a Mercurial revision
    # when running from a repository checkout.
    import os
    version = '%d.%d.%d%s%d' % version_info
    pkg_dir = os.path.abspath(os.path.dirname(__file__))
    hg_dir = os.path.normpath(os.path.join(pkg_dir, '../'))
    if os.path.isdir(os.path.join(hg_dir, '.hg')):
        hg_rev = 'dev0'  # fallback when mercurial is missing or fails
        try:
            from mercurial import ui, hg, error
        except ImportError:
            pass
        else:
            try:
                repo = hg.repository(ui.ui(), hg_dir)
                hg_rev = 'dev%s' % repo['tip'].rev()
            except error.RepoError:
                pass
        version = '%s.%s' % (version, hg_rev)
    return version
__version__ = get_version()
| {
"repo_name": "aldenjenkins/foobargamingwebsite",
"path": "djangobb_forum/__init__.py",
"copies": "3",
"size": "1217",
"license": "bsd-3-clause",
"hash": 6144643921433578000,
"line_mean": 32.8055555556,
"line_max": 73,
"alpha_frac": 0.5160230074,
"autogenerated": false,
"ratio": 3.600591715976331,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000992063492063492,
"num_lines": 36
} |
__all__ = ['__version__', 'get_version']
version_info = (0, 0, 1, 'a', 0)
# format: ('major', 'minor', 'micro', 'releaselevel', 'serial')
# __version__ format compatible with distutils2.version.NormalizedVersion
def get_version():
    "Returns the version as a human-format string."
    version = '%d.%d.%d' % (version_info[:3])
    # add revision info if not final version ('f' means final release)
    if version_info[3] != 'f':
        import os
        version = '%d.%d.%d%s%d' % version_info
        dir = os.path.abspath(os.path.dirname(__file__))
        # the repository root is expected one level above the package
        hg_dir = os.path.normpath(os.path.join(dir, '../'))
        if os.path.isdir(os.path.join(hg_dir, '.hg')):
            hg_rev = 'dev0'  # unknown version
            try:
                from mercurial import ui, hg, error
            except ImportError:
                # mercurial not installed: keep the 'dev0' fallback
                pass
            else:
                try:
                    repo = hg.repository(ui.ui(), hg_dir)
                    c = repo['tip']
                    hg_rev = 'dev%s' % (c.rev())
                except error.RepoError:
                    # not a readable repository: keep the 'dev0' fallback
                    pass
            version = '%s.%s' % (version, hg_rev)
    return version
__version__ = get_version()
| {
"repo_name": "tjvr/s2forums",
"path": "djangobb_forum/__init__.py",
"copies": "10",
"size": "1161",
"license": "bsd-3-clause",
"hash": -8238102689583147000,
"line_mean": 33.1470588235,
"line_max": 73,
"alpha_frac": 0.5030146425,
"autogenerated": false,
"ratio": 3.650943396226415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9153958038726415,
"avg_score": null,
"num_lines": null
} |
__all__ = ['view_as_blocks', 'view_as_windows']
import numpy as np
from numpy.lib.stride_tricks import as_strided
from warnings import warn
def view_as_blocks(arr_in, block_shape):
    """Return a non-overlapping block view of an n-dimensional array.

    The result has ``2 * arr_in.ndim`` axes: the first ``ndim`` axes index
    the blocks, the last ``ndim`` axes index positions inside a block.

    Parameters
    ----------
    arr_in : ndarray
        N-d input array.
    block_shape : tuple
        Shape of a single block; each entry must divide evenly into the
        corresponding entry of ``arr_in.shape``.

    Returns
    -------
    arr_out : ndarray
        Re-strided block view.  When `arr_in` is non-contiguous a
        contiguous copy is made first (views require one buffer).

    Examples
    --------
    >>> import numpy as np
    >>> from skimage.util.shape import view_as_blocks
    >>> A = np.arange(4*4).reshape(4,4)
    >>> B = view_as_blocks(A, block_shape=(2, 2))
    >>> B[0, 1]
    array([[2, 3],
           [6, 7]])
    """
    if not isinstance(block_shape, tuple):
        raise TypeError('block needs to be a tuple')
    block_shape = np.array(block_shape)
    if (block_shape <= 0).any():
        raise ValueError("'block_shape' elements must be strictly positive")
    if block_shape.size != arr_in.ndim:
        raise ValueError("'block_shape' must have the same length "
                         "as 'arr_in.shape'")
    arr_shape = np.array(arr_in.shape)
    if (arr_shape % block_shape).sum() != 0:
        raise ValueError("'block_shape' is not compatible with 'arr_in'")
    # Re-striding needs a contiguous buffer; fall back to a copy otherwise.
    if not arr_in.flags.contiguous:
        warn(RuntimeWarning("Cannot provide views on a non-contiguous input "
                            "array without copying."))
        arr_in = np.ascontiguousarray(arr_in)
    # Outer axes step one whole block at a time; inner axes reuse the
    # original element strides.
    blocks_per_axis = arr_shape // block_shape
    outer_strides = tuple(arr_in.strides * block_shape)
    return as_strided(arr_in,
                      shape=tuple(blocks_per_axis) + tuple(block_shape),
                      strides=outer_strides + arr_in.strides)
def view_as_windows(arr_in, window_shape, step=1):
    """Rolling window view of the input n-dimensional array.

    Every window is a view of `arr_in` of shape ``window_shape``; adjacent
    windows are offset by ``step`` elements along every axis.

    Parameters
    ----------
    arr_in : ndarray
        N-d input array.
    window_shape : tuple
        Shape of the elementary n-dimensional orthotope (hyperrectangle)
        forming each window.  Must have one entry per dimension of `arr_in`.
    step : int, optional
        Number of elements to advance between consecutive windows, applied
        to every axis.  Must be >= 1 (default: 1).

    Returns
    -------
    arr_out : ndarray
        Rolling-window view of the input array.  If `arr_in` is
        non-contiguous, a contiguous copy is viewed instead (a warning is
        emitted).

    Raises
    ------
    TypeError
        If `arr_in` is not an ndarray or `window_shape` is not a tuple.
    ValueError
        If `window_shape` has the wrong length, a window dimension exceeds
        the array, a window dimension is < 1, or ``step < 1``.

    Notes
    -----
    Although the view itself costs no extra memory, materialising it (e.g.
    by reshaping) can blow up dramatically: for a (100, 100, 100) float64
    array and a (3, 3, 3) window the materialised result would be roughly
    8*(100-3+1)**3*3**3 bytes (~203 MB) versus 8 MB for the source.
    """
    # Type checks come first so the error messages stay precise.
    if not isinstance(arr_in, np.ndarray):
        raise TypeError("`arr_in` must be a numpy ndarray")
    if not isinstance(window_shape, tuple):
        raise TypeError("`window_shape` must be a tuple")
    if not (len(window_shape) == arr_in.ndim):
        raise ValueError("`window_shape` is incompatible with `arr_in.shape`")
    if step < 1:
        raise ValueError("`step` must be >= 1")

    shape_vec = np.array(arr_in.shape)
    win_vec = np.array(window_shape, dtype=shape_vec.dtype)
    if ((shape_vec - win_vec) < 0).any():
        raise ValueError("`window_shape` is too large")
    if ((win_vec - 1) < 0).any():
        raise ValueError("`window_shape` is too small")

    # as_strided needs a contiguous buffer; copy (with a warning) if not.
    if not arr_in.flags.contiguous:
        warn(RuntimeWarning("Cannot provide views on a non-contiguous input "
                            "array without copying."))
        arr_in = np.ascontiguousarray(arr_in)

    # Leading axes index the window positions, trailing axes index within
    # a window; strides are the array strides scaled by `step`, then the
    # plain strides again.
    window_counts = (shape_vec - win_vec) // step + 1
    out_shape = tuple(window_counts) + tuple(win_vec)
    base_strides = np.array(arr_in.strides)
    out_strides = np.concatenate((base_strides * step, base_strides))
    return as_strided(arr_in, shape=out_shape, strides=out_strides)
| {
"repo_name": "SamHames/scikit-image",
"path": "skimage/util/shape.py",
"copies": "1",
"size": "7866",
"license": "bsd-3-clause",
"hash": -7651879159536467000,
"line_mean": 30.464,
"line_max": 78,
"alpha_frac": 0.5199593186,
"autogenerated": false,
"ratio": 3.342966425839354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.93623701888838,
"avg_score": 0.0001111111111111111,
"num_lines": 250
} |
__all__ = ['view_as_blocks', 'view_as_windows']
import numpy as np
from numpy.lib.stride_tricks import as_strided
def view_as_blocks(arr_in, block_shape):
    """Block view of the input n-dimensional array (using re-striding).

    Blocks are non-overlapping views of the input array.

    Parameters
    ----------
    arr_in : ndarray
        The n-dimensional input array.
    block_shape : tuple
        The shape of the block.  Each dimension must divide evenly into
        the corresponding dimension of `arr_in`.

    Returns
    -------
    arr_out : ndarray
        Block view of the input array.  If `arr_in` is non-contiguous, a
        contiguous copy is viewed instead.

    Raises
    ------
    TypeError
        If `block_shape` is not a tuple.
    ValueError
        If `block_shape` has non-positive entries, the wrong length, or
        does not evenly divide `arr_in.shape`.

    Examples
    --------
    >>> import numpy as np
    >>> A = np.arange(4*4).reshape(4, 4)
    >>> B = view_as_blocks(A, block_shape=(2, 2))
    >>> B[0, 0]
    array([[0, 1],
           [4, 5]])
    >>> B[1, 0, 1, 1]
    13
    """
    # -- basic checks on arguments
    if not isinstance(block_shape, tuple):
        raise TypeError('block needs to be a tuple')
    block_shape = np.array(block_shape)
    if (block_shape <= 0).any():
        raise ValueError("'block_shape' elements must be strictly positive")
    if block_shape.size != arr_in.ndim:
        raise ValueError("'block_shape' must have the same length "
                         "as 'arr_in.shape'")
    arr_shape = np.array(arr_in.shape)
    if (arr_shape % block_shape).sum() != 0:
        raise ValueError("'block_shape' is not compatible with 'arr_in'")
    # -- restride the array to build the block view
    arr_in = np.ascontiguousarray(arr_in)
    # Bug fix: use floor division.  True division ('/') produces a float
    # shape under Python 3, which `as_strided` rejects.  The divisibility
    # check above guarantees the result is exact.
    new_shape = tuple(arr_shape // block_shape) + tuple(block_shape)
    new_strides = tuple(arr_in.strides * block_shape) + arr_in.strides
    arr_out = as_strided(arr_in, shape=new_shape, strides=new_strides)
    return arr_out
def view_as_windows(arr_in, window_shape):
    """Rolling window view of the input n-dimensional array.

    Each window is a view of shape ``window_shape``; neighbouring windows
    are shifted by one element along the corresponding axis.

    Parameters
    ----------
    arr_in : ndarray
        The n-dimensional input array.
    window_shape : tuple
        Shape of the elementary n-dimensional orthotope (hyperrectangle)
        of the rolling-window view; one entry per dimension of `arr_in`.

    Returns
    -------
    arr_out : ndarray
        Rolling-window view of the input array (a contiguous copy is
        viewed if `arr_in` is non-contiguous).

    Raises
    ------
    TypeError
        If `arr_in` is not an ndarray or `window_shape` is not a tuple.
    ValueError
        If `window_shape` has the wrong length, is larger than the array
        along some axis, or has an entry smaller than 1.

    Notes
    -----
    The view itself costs no memory, but any computation that materialises
    it produces a (much) larger array than the input — e.g. a (3, 3, 3)
    window over a (100, 100, 100) float64 array would materialise to
    roughly 203 MB versus the original 8 MB.
    """
    # -- sanity-check the arguments
    if not isinstance(arr_in, np.ndarray):
        raise TypeError("'arr_in' must be a numpy ndarray")
    if not isinstance(window_shape, tuple):
        raise TypeError("'window_shape' must be a tuple")
    if len(window_shape) != arr_in.ndim:
        raise ValueError("'window_shape' is incompatible with 'arr_in.shape'")

    shape_vec = np.array(arr_in.shape)
    win_vec = np.array(window_shape, dtype=shape_vec.dtype)
    if ((shape_vec - win_vec) < 0).any():
        raise ValueError("'window_shape' is too large")
    if ((win_vec - 1) < 0).any():
        raise ValueError("'window_shape' is too small")

    # -- restride a contiguous buffer to expose the windows: leading axes
    # enumerate window origins, trailing axes index inside a window, and
    # both halves reuse the array's own strides.
    arr_in = np.ascontiguousarray(arr_in)
    out_shape = tuple(shape_vec - win_vec + 1) + tuple(win_vec)
    out_strides = arr_in.strides + arr_in.strides
    return as_strided(arr_in, shape=out_shape, strides=out_strides)
| {
"repo_name": "davehouse/image_diff",
"path": "skssim/shape.py",
"copies": "3",
"size": "7095",
"license": "mit",
"hash": -7262163780960584000,
"line_mean": 29.5818965517,
"line_max": 78,
"alpha_frac": 0.5133192389,
"autogenerated": false,
"ratio": 3.2650713299585825,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 232
} |
"""All views are here."""
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.shortcuts import get_object_or_404, redirect
from django.core.urlresolvers import reverse_lazy
from django.views.generic import DetailView, ListView, UpdateView, View, DeleteView, TemplateView, FormView
from .forms import CommentForm, PostForm
from .models import Post, Comment
class Protected(View):
    """Mixin that restricts a view to authenticated users.

    Wraps ``dispatch`` with Django's ``login_required`` decorator, so every
    HTTP method of a subclass requires a logged-in user.
    """

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        """Delegate to the parent dispatch; the decorator enforces login."""
        return super().dispatch(*args, **kwargs)
class PostList(ListView):
    """List all published posts on the index page."""

    context_object_name = 'posts'
    template_name = 'main/index.html'

    def get_queryset(self):
        """Return posts whose publication date is now or in the past."""
        return Post.objects.filter(published_date__lte=timezone.now())
class PostDetail(DetailView):
    """Show the details of a single post (looked up by the URL pk)."""

    model = Post
    template_name = 'main/post_detail.html'
class NewPost(FormView, Protected):
    """Create a new post.

    Renders the post form and, on a valid submission, saves the post with
    the current user as author, then redirects to the post's detail page.
    """

    form_class = PostForm
    template_name = 'main/post_edit.html'

    def form_valid(self, form):
        """Attach the request user as author before default handling."""
        # NOTE(review): this hook appears unused because ``post()`` below is
        # overridden and handles the whole flow itself — confirm before
        # removing.
        form.instance.author = self.request.user
        return super().form_valid(form)

    def post(self, request, *args, **kwargs):
        """Validate the submitted form and save it as a Post object."""
        form = PostForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            post.author = request.user
            post.save()
            return redirect('post_detail', pk=post.pk)
        # Bug fix: an invalid form previously fell through and returned
        # ``None``, which makes Django raise a ValueError.  Re-render the
        # form with its validation errors instead.
        return self.form_invalid(form)
class EditPost(UpdateView, Protected):
    """Edit an existing post's title and text."""

    model = Post
    fields = ['title', 'text']
    # Return to the post list after a successful update.
    success_url = reverse_lazy('post_list')
    template_name = 'main/post_edit.html'
class PostDraftList(ListView, Protected):
    """List draft posts (no publication date yet), oldest first."""

    queryset = Post.objects.filter(published_date__isnull=True).order_by('created_date')
    context_object_name = 'posts'
    template_name = 'main/post_draft_list.html'
class PublishPost(Protected, TemplateView):
    """Publish a post and redirect to its detail page."""

    def get(self, request, *args, **kwargs):
        """Look up the post by the ``pk`` URL kwarg and publish it."""
        post = get_object_or_404(Post, pk=kwargs['pk'])
        # Model method; presumably sets the publication date — confirm in
        # the Post model.
        post.publish()
        return redirect('post_detail', pk=post.pk)
class RemovePost(Protected, DeleteView):
    """Delete a post, then return to the post list."""

    model = Post
    success_url = reverse_lazy('post_list')
    template_name = 'main/post_edit.html'
class AddCommentToPost(FormView):
    """Create a new comment attached to the post given by the URL pk."""

    form_class = CommentForm
    template_name = 'main/post_edit.html'

    def post(self, request, *args, **kwargs):
        """Validate the submitted comment and store it in the database."""
        post = get_object_or_404(Post, pk=kwargs['pk'])
        form = CommentForm(request.POST)
        if form.is_valid():
            comment = form.save(commit=False)
            comment.post = post
            comment.save()
            return redirect('post_detail', pk=post.pk)
        # Bug fix: an invalid form previously fell through and returned
        # ``None`` (a server error).  Re-render the form with its
        # validation errors instead.
        return self.form_invalid(form)
class ApproveComment(Protected, TemplateView):
    """Moderation action: approve a comment, then show its post again."""

    def get(self, request, *args, **kwargs):
        """Approve the comment identified by the ``pk`` URL kwarg."""
        comment = get_object_or_404(Comment, pk=kwargs['pk'])
        # Model method; presumably flags the comment as approved — confirm
        # in the Comment model.
        comment.approve()
        return redirect('post_detail', pk=comment.post.pk)
class RemoveComment(Protected, DeleteView):
    """Delete a comment, then return to the comment's post."""

    model = Comment
    template_name = 'main/post_edit.html'

    def post(self, request, *args, **kwargs):
        """Delete the comment, remembering its post for the redirect."""
        comment = get_object_or_404(Comment, pk=kwargs['pk'])
        # Capture the post pk before deleting — the FK is gone afterwards.
        post_pk = comment.post.pk
        comment.delete()
        return redirect('post_detail', pk=post_pk)
| {
"repo_name": "kpi-web-guild/django-girls-blog-DrEdi",
"path": "main/views.py",
"copies": "1",
"size": "4052",
"license": "mit",
"hash": 1474302079513974500,
"line_mean": 29.2388059701,
"line_max": 107,
"alpha_frac": 0.6473346496,
"autogenerated": false,
"ratio": 3.8517110266159698,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49990456762159696,
"avg_score": null,
"num_lines": null
} |
"""All views for the extension."""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import CreateView, FormView, TemplateView
from django.shortcuts import redirect
from django.db.models import BooleanField, Q, Case, When
from django.utils.safestring import mark_safe
from django.contrib import messages
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from braces.views import LoginRequiredMixin
from geokey.core.decorators import handle_exceptions_for_ajax
from geokey.projects.models import Project
from geokey.projects.views import ProjectContext
from .helpers.context_helpers import does_not_exist_msg
from .helpers.url_helpers import check_url
from .base import STATUS
from .exceptions import URLError
from .models import WebResource
from .forms import WebResourceForm
from .serializers import WebResourceSerializer
# ###########################
# ADMIN PAGES
# ###########################
class IndexPage(LoginRequiredMixin, TemplateView):
    """Main index page."""

    template_name = 'wr_index.html'

    def get_context_data(self, *args, **kwargs):
        """
        GET method for the template.

        Return the context to render the view. Overwrite the method by adding
        all projects (where user is an administrator) and available filters to
        the context. It optionally filters projects by the filter provided on
        the URL.

        Returns
        -------
        dict
            Context.
        """
        # Annotate each administered project with a boolean that is True
        # when it has at least one web resource that is not deleted.
        projects = Project.objects.filter(admins=self.request.user).annotate(
            with_webresources=Case(
                When(
                    ~Q(webresources__status='deleted') &
                    Q(webresources__isnull=False),
                    then=True
                ),
                default=False,
                output_field=BooleanField()
            )
        ).distinct()

        # Optional ``?filter=`` query parameter narrows the project list;
        # the applied filter is recorded for the template.
        filters = {}
        filter_for_projects = self.request.GET.get('filter')
        filter_to_add = 'without-web-resources-only'
        if filter_for_projects == filter_to_add:
            projects = projects.filter(with_webresources=False)
            filters[filter_to_add] = 'Without web resources'
        filter_to_add = 'with-web-resources-only'
        if filter_for_projects == filter_to_add:
            projects = projects.filter(with_webresources=True)
            filters[filter_to_add] = 'With web resources'

        return super(IndexPage, self).get_context_data(
            projects=projects,
            filters=filters,
            *args,
            **kwargs
        )
class AllWebResourcesPage(LoginRequiredMixin, ProjectContext, TemplateView):
    """List all web resources of a project (context comes from the mixins)."""

    template_name = 'wr_all_webresources.html'
class AddWebResourcePage(LoginRequiredMixin, ProjectContext, CreateView):
    """Add new web resource page."""

    template_name = 'wr_add_webresource.html'
    form_class = WebResourceForm

    def get_context_data(self, *args, **kwargs):
        """
        GET method for the template.

        Return the context to render the view. Overwrite the method by adding
        project ID to the context.

        Returns
        -------
        dict
            Context.
        """
        project_id = self.kwargs['project_id']
        return super(AddWebResourcePage, self).get_context_data(
            project_id,
            *args,
            **kwargs
        )

    def form_valid(self, form):
        """
        Add web resource when form data is valid.

        Rejects the submission when the project is locked or the URL cannot
        be validated; otherwise stores the resource and offers a link to
        add another one.

        Parameters
        ----------
        form : geokey_webresource.forms.WebResourceForm
            Represents the user input.

        Returns
        -------
        django.http.HttpResponse
            Rendered template.
        """
        context = self.get_context_data(form=form)
        project = context.get('project')
        if project:
            if project.islocked:
                messages.error(
                    self.request,
                    'The project is locked. New web resources cannot be added.'
                )
            else:
                form.instance.project = project
                form.instance.creator = self.request.user
                try:
                    form.instance.dataformat = check_url(form.instance.url)
                    add_another_url = reverse(
                        'geokey_webresources:webresource_add',
                        kwargs={
                            'project_id': project.id
                        }
                    )
                    messages.success(
                        self.request,
                        mark_safe(
                            'The web resource has been added. <a href="%s">'
                            'Add another web resource.</a>' % add_another_url
                        )
                    )
                    return super(AddWebResourcePage, self).form_valid(form)
                # Fix: ``except URLError, error`` is Python-2-only syntax;
                # the ``as`` form is equivalent and also valid on Python 3.
                except URLError as error:
                    messages.error(self.request, error.to_html())
        return self.render_to_response(context)

    def form_invalid(self, form):
        """
        Display an error message when form data is invalid.

        Parameters
        ----------
        form : geokey_webresource.forms.WebResourceForm
            Represents the user input.

        Returns
        -------
        dict
            Context.
        """
        messages.error(self.request, 'An error occurred.')
        return self.render_to_response(self.get_context_data(form=form))

    def get_success_url(self):
        """
        Set URL redirection when web resource created successfully.

        Returns
        -------
        str
            URL for redirection.
        """
        return reverse(
            'geokey_webresources:all_webresources',
            kwargs={
                'project_id': self.kwargs['project_id']
            }
        )
class WebResourceContext(LoginRequiredMixin, ProjectContext):
    """Get web resource mixin."""

    def get_context_data(self, project_id, webresource_id, *args, **kwargs):
        """
        GET method for the template.

        Return the context to render the view. Overwrite the method by adding
        a web resource and available status types to the context.

        Parameters
        ----------
        project_id : int
            Identifies the project in the database.
        webresource_id : int
            Identifies the web resource in the database.

        Returns
        -------
        dict
            Context.
        """
        context = super(WebResourceContext, self).get_context_data(
            project_id,
            *args,
            **kwargs
        )
        context['status_types'] = STATUS
        try:
            # The resource must belong to the project from the context;
            # this prevents cross-project lookups via the URL.
            context['webresource'] = WebResource.objects.get(
                pk=webresource_id,
                project=context.get('project')
            )
            return context
        except WebResource.DoesNotExist:
            # Replace the whole context with an error payload the error
            # template understands.
            return {
                'error': 'Not found.',
                'error_description': does_not_exist_msg('Web resource')
            }
class SingleWebResourcePage(WebResourceContext, FormView):
    """Single web resource page."""

    template_name = 'wr_single_webresource.html'

    def get_object(self):
        """
        Get and return web resource object.

        Returns
        -------
        geokey_webresource.models.WebResource
            Web resource object, or None when it does not exist.
        """
        try:
            return WebResource.objects.get(
                pk=self.kwargs['webresource_id']
            )
        except WebResource.DoesNotExist:
            return None

    def get_context_data(self, *args, **kwargs):
        """
        GET method for the template.

        Return the context to render the view. Overwrite the method by adding
        project ID and web resource ID to the context.

        Returns
        -------
        dict
            Context.
        """
        project_id = self.kwargs['project_id']
        webresource_id = self.kwargs['webresource_id']
        return super(SingleWebResourcePage, self).get_context_data(
            project_id,
            webresource_id,
            *args,
            **kwargs
        )

    def get_form(self, form_class=WebResourceForm):
        """Attach instance object to form data."""
        return form_class(instance=self.get_object(), **self.get_form_kwargs())

    def form_valid(self, form):
        """
        Update web resource when form data is valid.

        Rejects the update when the project is locked or the URL cannot be
        validated; otherwise saves the form (optionally clearing the symbol).

        Parameters
        ----------
        form : geokey_webresource.forms.WebResourceForm
            Represents the user input.

        Returns
        -------
        django.http.HttpResponse
            Rendered template.
        """
        context = self.get_context_data(form=form)
        project = context.get('project')
        if project:
            if project.islocked:
                messages.error(
                    self.request,
                    'The project is locked. Web resources cannot be updated.'
                )
            else:
                try:
                    form.instance.dataformat = check_url(form.instance.url)
                    if self.request.POST.get('symbol_clear') == 'true':
                        form.instance.symbol = None
                    form.save()
                    messages.success(
                        self.request,
                        mark_safe('The web resource has been updated.')
                    )
                    return super(SingleWebResourcePage, self).form_valid(form)
                # Fix: ``except URLError, error`` is Python-2-only syntax;
                # the ``as`` form is equivalent and also valid on Python 3.
                except URLError as error:
                    messages.error(self.request, error.to_html())
        return self.render_to_response(context)

    def form_invalid(self, form):
        """
        Display an error message when form data is invalid.

        Parameters
        ----------
        form : geokey_webresource.forms.WebResourceForm
            Represents the user input.

        Returns
        -------
        dict
            Context.
        """
        messages.error(self.request, 'An error occurred.')
        return self.render_to_response(self.get_context_data(form=form))

    def get_success_url(self):
        """
        Set URL redirection when web resource updated successfully.

        Returns
        -------
        str
            URL for redirection.
        """
        return reverse(
            'geokey_webresources:all_webresources',
            kwargs={
                'project_id': self.kwargs['project_id']
            }
        )
class RemoveWebResourcePage(WebResourceContext, TemplateView):
    """Remove web resource page."""

    template_name = 'base.html'

    def get(self, request, project_id, webresource_id):
        """
        GET method for removing web resource.

        Parameters
        ----------
        request : django.http.HttpRequest
            Object representing the request.
        project_id : int
            Identifies the project in the database.
        webresource_id : int
            Identifies the web resource in the database.

        Returns
        -------
        django.http.HttpResponseRedirect
            Redirects to all web resources if web resource is removed, single
            web resource page if project is locked.
        django.http.HttpResponse
            Rendered template if project or web resource does not exist.
        """
        context = self.get_context_data(project_id, webresource_id)
        webresource = context.get('webresource')
        if webresource:
            # Locked projects are read-only; bounce back to the resource.
            if webresource.project.islocked:
                messages.error(
                    request,
                    'The project is locked. Web resource cannot be removed.'
                )
                return redirect(
                    'geokey_webresources:single_webresource',
                    project_id=project_id,
                    webresource_id=webresource_id
                )
            else:
                webresource.delete()
                messages.success(
                    request,
                    'The web resource has been removed.'
                )
                return redirect(
                    'geokey_webresources:all_webresources',
                    project_id=project_id
                )
        # No web resource in context: render the error payload produced by
        # WebResourceContext.get_context_data.
        return self.render_to_response(context)
# ###########################
# ADMIN AJAX
# ###########################
class ReorderWebResourcesAjax(APIView):
    """Reorder web resources via Ajax."""

    @handle_exceptions_for_ajax
    def post(self, request, project_id):
        """
        POST method for reordering web resources.

        Expects ``request.data['order']`` to be a list of web resource ids
        in the desired order.

        Parameters
        ----------
        request : rest_framework.request.Request
            Object representing the request.
        project_id : int
            Identifies the project in the database.

        Returns
        -------
        rest_framework.response.Response
            Response to the request.
        """
        project = Project.objects.as_admin(request.user, project_id)
        if project.islocked:
            return Response(
                {'error': 'Project is locked.'},
                status=status.HTTP_403_FORBIDDEN
            )
        elif not project.webresources.exists():
            return Response(
                {'error': 'Project has no web resources.'},
                status=status.HTTP_404_NOT_FOUND
            )
        try:
            # Two passes: resolve every id first, save afterwards, so a bad
            # id aborts the reorder before anything is written.
            webresources = []
            for order, webresource_id in enumerate(request.data.get('order')):
                webresource = project.webresources.get(pk=webresource_id)
                webresource.order = order
                webresources.append(webresource)
            for webresource in webresources:
                webresource.save()
            serializer = WebResourceSerializer(
                project.webresources,
                many=True
            )
            return Response(serializer.data)
        except WebResource.DoesNotExist:
            return Response(
                {'error': 'One or more web resources were not found.'},
                status=status.HTTP_400_BAD_REQUEST
            )
class UpdateWebResourceAjax(APIView):
    """Update web resource via Ajax."""

    @handle_exceptions_for_ajax
    def put(self, request, project_id, webresource_id):
        """
        PUT method for updating web resource.

        Only the ``id`` and ``status`` fields may be changed (partial
        update); locked projects are rejected.

        Parameters
        ----------
        request : rest_framework.request.Request
            Object representing the request.
        project_id : int
            Identifies the project in the database.
        webresource_id : int
            Identifies the web resource in the database.

        Returns
        -------
        rest_framework.response.Response
            Response to the request.
        """
        project = Project.objects.as_admin(request.user, project_id)
        if project.islocked:
            return Response(
                {'error': 'Project is locked.'},
                status=status.HTTP_403_FORBIDDEN
            )
        try:
            webresource = project.webresources.get(pk=webresource_id)
            serializer = WebResourceSerializer(
                webresource,
                data=request.data,
                partial=True,
                fields=('id', 'status')
            )
            if serializer.is_valid():
                serializer.save()
                return Response(serializer.data)
            else:
                return Response(
                    serializer.errors,
                    status=status.HTTP_400_BAD_REQUEST
                )
        # Fix: ``except X, error`` is Python-2-only syntax; the ``as`` form
        # is equivalent and also valid on Python 3.
        except WebResource.DoesNotExist as error:
            return Response(
                {'error': str(error)},
                status=status.HTTP_404_NOT_FOUND
            )
# ###########################
# PUBLIC API
# ###########################
class AllWebResourcesAPI(APIView):
    """All web resources via API."""

    @handle_exceptions_for_ajax
    def get(self, request, project_id):
        """
        GET method for all web resources of a project.

        Only resources with active status are returned.

        Parameters
        ----------
        request : rest_framework.request.Request
            Object representing the request.
        project_id : int
            Identifies the project in the database.

        Returns
        -------
        rest_framework.response.Response
            Response to the request.
        """
        project = Project.objects.get_single(request.user, project_id)
        serializer = WebResourceSerializer(
            project.webresources.filter(status=STATUS.active),
            many=True
        )
        return Response(serializer.data)
class SingleWebResourceAPI(APIView):
    """Single web resource via API."""

    @handle_exceptions_for_ajax
    def get(self, request, project_id, webresource_id):
        """
        GET method for a single web resource of a project.

        Only active web resources are returned to anyone who has access to the
        project.

        Parameters
        ----------
        request : rest_framework.request.Request
            Object representing the request.
        project_id : int
            Identifies the project in the database.
        webresource_id : int
            Identifies the web resource in the database.

        Returns
        -------
        rest_framework.response.Response
            Response to the request.
        """
        project = Project.objects.get_single(request.user, project_id)
        try:
            # Filtering by active status here means non-active resources
            # answer 404, identical to a missing one.
            webresource = project.webresources.get(
                pk=webresource_id,
                status=STATUS.active
            )
            serializer = WebResourceSerializer(webresource)
            return Response(serializer.data)
        except WebResource.DoesNotExist:
            return Response(
                {'error': 'Web resource not found.'},
                status=status.HTTP_404_NOT_FOUND
            )
| {
"repo_name": "ExCiteS/geokey-webresources",
"path": "geokey_webresources/views.py",
"copies": "1",
"size": "18103",
"license": "mit",
"hash": -3148523232075520500,
"line_mean": 28.823723229,
"line_max": 79,
"alpha_frac": 0.5449373032,
"autogenerated": false,
"ratio": 4.92732716385411,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 607
} |
"""All views for the extension."""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import collections
import operator
import csv
import StringIO
from django.conf import settings
from django.core import mail
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.views.generic import View, TemplateView
from django.template.defaultfilters import date as filter_date
from django.shortcuts import redirect
from django.utils import timezone, dateformat
from django.contrib import messages
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from braces.views import LoginRequiredMixin
from geokey.core.decorators import handle_exceptions_for_ajax
from geokey.projects.models import Project
from geokey.projects.serializers import ProjectSerializer
from geokey.categories.models import Category, Field
from geokey.categories.serializers import CategorySerializer
from geokey.contributions.serializers import ContributionSerializer
from geokey.extensions.mixins import SuperuserMixin
from geokey_airquality.models import (
AirQualityProject,
AirQualityCategory,
AirQualityField,
AirQualityLocation,
AirQualityMeasurement
)
from geokey_airquality.serializers import (
LocationSerializer,
MeasurementSerializer
)
permission_denied = 'Managing Air Quality is for superusers only.'
# ###########################
# ADMIN PAGES
# ###########################
class AQIndexView(LoginRequiredMixin, SuperuserMixin, TemplateView):
    """Main index page."""

    template_name = 'aq_index.html'
    exception_message = permission_denied

    def get_context_data(self, *args, **kwargs):
        """
        GET method for the template.

        Return the context to render the view. Overwrite the method by adding
        all Air Quality projects plus total location and measurement counts
        to the context.

        Returns
        -------
        dict
            Context.
        """
        return super(AQIndexView, self).get_context_data(
            projects=AirQualityProject.objects.all(),
            total_locations=AirQualityLocation.objects.count(),
            total_measurements=AirQualityMeasurement.objects.count(),
            *args,
            **kwargs
        )
class AQExportView(View):
    """A view to export all measurements."""

    def get(self, request, file, *args, **kwargs):
        """
        GET method for the view.

        Export all measurements to a CSV file. Superusers only.

        Parameters
        ----------
        request : django.http.HttpRequest
            Represents the request.
        file : str
            Identifies the file name.

        Returns
        -------
        django.http.HttpResponse
            CSV file, or status 403 for non-superusers.
        """
        if not request.user.is_superuser:
            return HttpResponse(status=403)

        out = HttpResponse(content_type='text/csv')
        out['Content-Disposition'] = 'attachment; filename="%s - %s.csv"' % (
            'Measurements',
            dateformat.format(timezone.now(), 'l, jS \\o\\f F, Y')
        )

        fieldnames = [
            'Barcode',
            'Location',
            'Site characteristics',
            'Height from ground (m)',
            'Distance from the road (m)',
            'Additional details',
            'Date out',
            'Date in',
            'Time out',
            'Time in',
            'Exposure time (min)',
            'Exposure time (hr)',
            'Diffusion tube made by students',
            'Added by'
        ]
        writer = csv.DictWriter(out, fieldnames=fieldnames)
        writer.writeheader()

        for measurement in AirQualityMeasurement.objects.all():
            location = measurement.location
            # A measurement without a finish time is still running: the
            # "in" columns and exposure durations stay empty.
            if measurement.finished:
                exposure = measurement.finished - measurement.started
                exposure_min = int(exposure.total_seconds() / 60)
                exposure_hr = int(exposure.total_seconds() / 3600)
                date_in = filter_date(measurement.finished, 'd/m/Y')
                time_in = filter_date(measurement.finished, 'H:i')
            else:
                exposure_min = None
                exposure_hr = None
                date_in = None
                time_in = None
            row = {
                'Barcode': measurement.barcode,
                'Location': location.name,
                'Site characteristics': location.properties.get(
                    'characteristics'),
                'Height from ground (m)': location.properties.get(
                    'height'),
                'Distance from the road (m)': location.properties.get(
                    'distance'),
                'Additional details': measurement.properties.get(
                    'additional_details'),
                'Date out': filter_date(measurement.started, 'd/m/Y'),
                'Date in': date_in,
                'Time out': filter_date(measurement.started, 'H:i'),
                'Time in': time_in,
                'Exposure time (min)': exposure_min,
                'Exposure time (hr)': exposure_hr,
                'Diffusion tube made by students': measurement.properties.get(
                    'made_by_students'),
                'Added by': measurement.creator.display_name
            }
            # NOTE(review): dict.iteritems and str(...).encode here are
            # Python 2 idioms — this module targets Python 2 only.
            writer.writerow({key: str(value).encode('utf-8') if value else None for key, value in row.iteritems()})
        return out
class AQAddView(LoginRequiredMixin, SuperuserMixin, TemplateView):
    """Add new Air Quality project page."""

    template_name = 'aq_add.html'
    exception_message = permission_denied

    def get_context_data(self, *args, **kwargs):
        """
        GET method for the template.

        Return the context to render the view. Overwrite the method by adding
        all GeoKey projects, available category and field types to the context.

        Returns
        -------
        dict
            Context.
        """
        # Category types are ordered by their key; field types are ordered by
        # their display label (second element of each (key, label) pair).
        category_types = collections.OrderedDict(
            sorted(dict(AirQualityCategory.TYPES).items())
        )
        field_types = collections.OrderedDict(
            sorted(
                dict(AirQualityField.TYPES).items(),
                key=operator.itemgetter(1)
            )
        )
        return super(AQAddView, self).get_context_data(
            projects=Project.objects.filter(status='active'),
            category_types=category_types,
            field_types=field_types,
            *args,
            **kwargs
        )

    def post(self, request):
        """
        POST method for adding a new Air Quality project.

        Parameters
        ----------
        request : django.http.HttpRequest
            Represents the request.

        Returns
        -------
        django.http.HttpResponseRedirect
            When project is added, the success message is rendered, when
            redirected to the index page.
        django.http.HttpResponse
            Rendered template with an error message.
        """
        data = request.POST
        context = self.get_context_data()
        missing = False  # set when an expected POST field cannot be read

        project = data.get('project')
        category_types = context.get('category_types')
        field_types = context.get('field_types')
        categories = {}  # category type key -> posted Category pk

        if category_types is not None:
            for key, value in category_types.items():
                try:
                    categories[key] = data.get(key)
                # NOTE(review): QueryDict.get() returns None for a missing
                # key instead of raising, so this bare except is unlikely to
                # ever fire — confirm the intended "missing field" detection.
                except:
                    missing = True
        if field_types is not None:
            for key, value in field_types.items():
                try:
                    data.getlist(key)
                except:
                    missing = True

        if project and missing is False:
            try:
                project = Project.objects.get(pk=project, status='active')
                aq_project = AirQualityProject.objects.create(
                    status='active',
                    creator=request.user,
                    project=project
                )
                try:
                    for key, value in categories.items():
                        category = Category.objects.get(
                            pk=value,
                            status='active'
                        )
                        aq_category = AirQualityCategory.objects.create(
                            type=category_types.get(key),
                            category=category,
                            project=aq_project
                        )
                        # Field selects are posted as parallel lists; the
                        # category key ("1", "2", ...) picks the list index.
                        index = int(key) - 1
                        try:
                            # NOTE(review): the inner loop reuses `key`/`value`
                            # and shadows the builtin `list`.
                            for key, value in field_types.items():
                                list = data.getlist(key)
                                field = list[index]
                                field = Field.objects.get(
                                    pk=field,
                                    status='active'
                                )
                                AirQualityField.objects.create(
                                    type=field_types.get(key),
                                    field=field,
                                    category=aq_category
                                )
                        except Field.DoesNotExist:
                            # Roll back the half-created project on failure.
                            messages.error(self.request, 'Field not found.')
                            aq_project.delete()
                            return self.render_to_response(context)
                except Category.DoesNotExist:
                    messages.error(self.request, 'Category not found.')
                    aq_project.delete()
                    return self.render_to_response(context)

                # Lock the underlying GeoKey project so its structure cannot
                # change underneath the Air Quality configuration.
                project.islocked = True
                project.save()
                messages.success(
                    self.request,
                    'The project has been added.'
                )
                return redirect('geokey_airquality:index')
            except Project.DoesNotExist:
                messages.error(self.request, 'Project not found.')

        # Fall-through: invalid input or the project lookup failed.
        messages.error(self.request, 'An error occurred.')
        return self.render_to_response(context)
class AQProjectView(LoginRequiredMixin, SuperuserMixin, TemplateView):
    """Air Quality project page."""

    template_name = 'aq_project.html'
    exception_message = permission_denied

    def get_context_data(self, project_id, *args, **kwargs):
        """
        GET method for the template.

        Return the context to render the view. Overwrite the method by adding
        all Geokey projects, current Air Quality project, available category
        and field types to the context.

        Parameters
        ----------
        project_id : int
            Identifies the project in the database.

        Returns
        -------
        dict
            Context.
        """
        try:
            project = AirQualityProject.objects.get(pk=project_id)
        except AirQualityProject.DoesNotExist:
            # Short-circuit with an error context; the template renders it.
            return {
                'error': 'Not found.',
                'error_description': 'Project not found.'
            }

        category_types = collections.OrderedDict(
            sorted(dict(AirQualityCategory.TYPES).items())
        )
        field_types = collections.OrderedDict(
            sorted(
                dict(AirQualityField.TYPES).items(),
                key=operator.itemgetter(1)
            )
        )
        return super(AQProjectView, self).get_context_data(
            projects=Project.objects.filter(status='active'),
            project=project,
            category_types=category_types,
            field_types=field_types,
            *args,
            **kwargs
        )

    def post(self, request, project_id):
        """
        POST method for updating Air Quality project.

        Parameters
        ----------
        request : django.http.HttpRequest
            Represents the request.
        project_id : int
            Identifies the project in the database.

        Returns
        -------
        django.http.HttpResponseRedirect
            When project is updated, the success message is rendered, when
            redirected to the index page.
        django.http.HttpResponse
            Rendered template with an error message.
        """
        data = request.POST
        context = self.get_context_data(project_id)
        missing = False  # set when an expected POST field cannot be read

        project = data.get('project')
        aq_project = context.get('project')
        category_types = context.get('category_types')
        field_types = context.get('field_types')
        categories = {}  # category type key -> posted Category pk

        if category_types is not None:
            for key, value in category_types.items():
                try:
                    categories[key] = data.get(key)
                # NOTE(review): QueryDict.get() does not raise for missing
                # keys, so this bare except is unlikely to ever fire.
                except:
                    missing = True
        if field_types is not None:
            for key, value in field_types.items():
                try:
                    data.getlist(key)
                except:
                    missing = True

        if aq_project is not None and missing is False:
            try:
                error = False
                project = Project.objects.get(pk=project, status='active')
                # Changing project should not be allowed, but just in case...
                if aq_project.project != project:
                    aq_project.project = project
                    aq_project.status = 'active'
                    aq_project.save()
                try:
                    for key, value in categories.items():
                        category = Category.objects.get(
                            pk=value,
                            status='active'
                        )
                        # Update the existing mapping when present, otherwise
                        # create it (get-or-create with a conditional save).
                        try:
                            aq_category = AirQualityCategory.objects.get(
                                type=category_types.get(key),
                                project=aq_project
                            )
                            if aq_category.category != category:
                                aq_category.category = category
                                aq_category.save()
                        except AirQualityCategory.DoesNotExist:
                            aq_category = AirQualityCategory.objects.create(
                                type=category_types.get(key),
                                category=category,
                                project=aq_project
                            )
                        # Field selects are posted as parallel lists indexed
                        # by the category key ("1", "2", ...).
                        index = int(key) - 1
                        try:
                            # NOTE(review): reuses `key`/`value` of the outer
                            # loop and shadows the builtin `list`.
                            for key, value in field_types.items():
                                list = data.getlist(key)
                                field = list[index]
                                field = Field.objects.get(
                                    pk=field,
                                    status='active'
                                )
                                try:
                                    aq_field = AirQualityField.objects.get(
                                        type=field_types.get(key),
                                        category=aq_category
                                    )
                                    if aq_field.field != field:
                                        aq_field.field = field
                                        aq_field.save()
                                except AirQualityField.DoesNotExist:
                                    AirQualityField.objects.create(
                                        type=field_types.get(key),
                                        field=field,
                                        category=aq_category
                                    )
                        except Field.DoesNotExist:
                            # Roll back on a bad field reference.
                            missing = True
                            aq_category.delete()
                            aq_project.delete()
                            error = True
                except Category.DoesNotExist:
                    missing = True
                    aq_project.delete()
                    error = True

                if error is False:
                    messages.success(
                        self.request,
                        'The project has been updated.'
                    )
                    return redirect('geokey_airquality:index')
            except Project.DoesNotExist:
                messages.error(self.request, 'Project not found.')

        # Fall-through: invalid input or a lookup failed above.
        messages.error(self.request, 'An error occurred.')
        return self.render_to_response(context)
class AQRemoveView(LoginRequiredMixin, SuperuserMixin, TemplateView):
    """Remove Air Quality project page."""

    template_name = 'base.html'
    exception_message = permission_denied

    def get(self, request, project_id):
        """
        GET method for removing Air Quality project.

        Parameters
        ----------
        request : django.http.HttpRequest
            Represents the request.
        project_id : int
            Identifies the project in the database.

        Returns
        -------
        django.http.HttpResponseRedirect
            When project is removed, the success message is rendered, when
            redirected to the index page.
        django.http.HttpResponse
            Renders success or error message.
        """
        try:
            AirQualityProject.objects.get(pk=project_id).delete()
        except AirQualityProject.DoesNotExist:
            messages.error(self.request, 'Project not found.')
        else:
            messages.success(self.request, 'The project has been removed.')

        # Always land back on the extension's index page.
        return redirect('geokey_airquality:index')
# ###########################
# ADMIN AJAX
# ###########################
class AQProjectsSingleAjaxView(APIView):
    """
    Ajax API endpoints for a single project.
    """

    @handle_exceptions_for_ajax
    def get(self, request, project_id):
        """
        Gets the serialized project.

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request.
        project_id : int
            Identifies the project in the database.

        Return
        ------
        rest_framework.response.Response
            Contains the serialised project or an error message.
        """
        # Only superusers may inspect projects through this endpoint.
        if not request.user.is_superuser:
            raise PermissionDenied(permission_denied)

        found = Project.objects.get(pk=project_id, status='active')
        serialized = ProjectSerializer(found, context={'user': request.user})
        return Response(serialized.data)
class AQCategoriesSingleAjaxView(APIView):
    """
    Ajax API endpoints for a single category.
    """

    @handle_exceptions_for_ajax
    def get(self, request, project_id, category_id):
        """
        Gets the serialized category.

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request.
        project_id : int
            Identifies the project in the database.
        category_id : int
            Identifies the category in the database.

        Return
        ------
        rest_framework.response.Response
            Contains the serialised category or an error message.
        """
        # Only superusers may inspect categories through this endpoint.
        if not request.user.is_superuser:
            raise PermissionDenied(permission_denied)

        # Resolve the category through its parent project so that a category
        # belonging to another project yields a lookup failure.
        parent = Project.objects.get(pk=project_id, status='active')
        found = parent.categories.get(pk=category_id, status='active')
        return Response(CategorySerializer(found).data)
# ###########################
# PUBLIC API
# ###########################
class AQSheetAPIView(APIView):
    """
    API endpoint for a sheet.
    """

    def get(self, request):
        """
        Sends a sheet of finished measurements started by the user.

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request.

        Returns
        -------
        rest_framework.response.Response
            Contains empty response indicating successful send of an email or
            an error message.
        """
        user = request.user

        # NOTE(review): `is_anonymous()` as a method and `StringIO.StringIO`
        # below are Python 2 / old-Django idioms — confirm target versions
        # before migrating this module.
        if user.is_anonymous():
            return Response(
                {'error': 'You have no rights to retrieve a sheet.'},
                status=status.HTTP_403_FORBIDDEN
            )

        # The CSV is assembled in memory and attached to an email; it is not
        # returned in the HTTP response.
        out = StringIO.StringIO()
        fieldnames = [
            'Barcode',
            'Location',
            'Site characteristics',
            'Height from ground (m)',
            'Distance from the road (m)',
            'Additional details',
            'Date out',
            'Date in',
            'Time out',
            'Time in',
            'Exposure time (min)',
            'Exposure time (hr)',
            'Diffusion tube made by students',
        ]
        writer = csv.DictWriter(out, fieldnames=fieldnames)
        writer.writeheader()

        # Only measurements that have been collected ("finished") appear.
        for measurement in AirQualityMeasurement.objects.filter(
            creator=user
        ).exclude(finished=None).distinct():
            location = measurement.location
            exposure = measurement.finished - measurement.started
            row = {
                'Barcode': measurement.barcode,
                'Location': location.name,
                'Site characteristics': location.properties.get(
                    'characteristics'),
                'Height from ground (m)': location.properties.get(
                    'height'),
                'Distance from the road (m)': location.properties.get(
                    'distance'),
                'Additional details': measurement.properties.get(
                    'additional_details'),
                'Date out': filter_date(measurement.started, 'd/m/Y'),
                'Date in': filter_date(measurement.finished, 'd/m/Y'),
                'Time out': filter_date(measurement.started, 'H:i'),
                'Time in': filter_date(measurement.finished, 'H:i'),
                'Exposure time (min)': int(exposure.total_seconds() / 60),
                'Exposure time (hr)': int(exposure.total_seconds() / 3600),
                'Diffusion tube made by students': measurement.properties.get(
                    'made_by_students')
            }
            # Encode each truthy cell as UTF-8 bytes; falsy values become
            # None (empty cell). `iteritems` is Python-2-only.
            writer.writerow({key: str(value).encode('utf-8') if value else None for key, value in row.iteritems()})

        message = mail.EmailMessage(
            'Air Quality: Sheet of finished measurements',
            'Please find the attached CSV in this email.',
            settings.DEFAULT_FROM_EMAIL,
            [user.email]
        )
        message.attach('sheet.csv', out.getvalue(), 'text/csv')

        # Open and close the connection explicitly around the single send.
        connection = mail.get_connection()
        connection.open()
        connection.send_messages([message])
        connection.close()
        return Response(status=status.HTTP_204_NO_CONTENT)
class AQProjectsAPIView(APIView):
    """
    API endpoint for all projects.
    """

    def get(self, request):
        """
        Returns a list of all projects, added to Air Quality. It includes only
        active projects, to which current user is allowed to contribute.

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request.

        Returns
        -------
        rest_framework.response.Response
            Contains the serialised projects.
        """
        user = request.user
        if user.is_anonymous():
            return Response(
                {'error': 'You have no rights to retrieve all projects.'},
                status=status.HTTP_403_FORBIDDEN
            )

        # Keep only the underlying GeoKey projects the user may contribute to.
        contributable = [
            entry.project
            for entry in AirQualityProject.objects.filter(status='active')
            if entry.project.can_contribute(user)
        ]

        serialized = ProjectSerializer(
            contributable, many=True, context={'user': user},
            fields=('id', 'name')
        )
        return Response(serialized.data)
class AQLocationsAPIView(APIView):
    """
    API endpoint for all locations.
    """

    def get(self, request):
        """
        Returns a list of all locations created by the user.

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request.

        Returns
        -------
        rest_framework.response.Response
            Contains the serialised locations.
        """
        user = request.user
        if user.is_anonymous():
            return Response(
                {'error': 'You have no rights to retrieve all locations.'},
                status=status.HTTP_403_FORBIDDEN
            )

        owned = AirQualityLocation.objects.filter(creator=user)
        serialized = LocationSerializer(
            owned,
            many=True,
            context={'user': user}
        )
        return Response(serialized.data, status=status.HTTP_200_OK)

    def post(self, request):
        """
        Adds a location. Returns created and serialised location.

        Parameter
        ---------
        request : rest_framework.request.Request
            Object representing the request.

        Returns
        -------
        rest_framework.reponse.Response
            Contains the serialised location or an error message.
        """
        user = request.user
        payload = request.data
        if user.is_anonymous():
            return Response(
                {'error': 'You have no rights to add a new location.'},
                status=status.HTTP_403_FORBIDDEN
            )

        serialized = LocationSerializer(
            data=payload, context={'user': user, 'data': payload}
        )
        # is_valid raises on bad input (raise_exception=True), so the save
        # branch is always reached for valid data.
        if serialized.is_valid(raise_exception=True):
            serialized.save()
            return Response(serialized.data, status=status.HTTP_201_CREATED)
class AQLocationsSingleAPIView(APIView):
    """
    API endpoint for a single location.
    """

    def patch(self, request, location_id):
        """
        Updates a single location created by the user.

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request.
        location_id : int
            Identifies the location in the database.

        Returns
        -------
        rest_framework.response.Response
            Contains the serialised location or an error message.
        """
        user = request.user
        payload = request.data

        try:
            location = AirQualityLocation.objects.get(pk=location_id)
        except AirQualityLocation.DoesNotExist:
            return Response(
                {'error': 'Location not found.'},
                status=status.HTTP_404_NOT_FOUND
            )

        # Only the creator may modify a location.
        if user != location.creator:
            return Response(
                {'error': 'You have no rights to edit this location.'},
                status=status.HTTP_403_FORBIDDEN
            )

        serialized = LocationSerializer(
            location, data=payload, context={'user': user, 'data': payload}
        )
        if serialized.is_valid(raise_exception=True):
            serialized.save()
            return Response(serialized.data, status=status.HTTP_200_OK)

    def delete(self, request, location_id):
        """
        Deletes a single location created by the user.

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request.
        location_id : int
            Identifies the location in the database.

        Returns
        -------
        rest_framework.response.Response
            Contains empty response indicating successful delete or an error
            message.
        """
        try:
            location = AirQualityLocation.objects.get(pk=location_id)
        except AirQualityLocation.DoesNotExist:
            return Response(
                {'error': 'Location not found.'},
                status=status.HTTP_404_NOT_FOUND
            )

        if request.user != location.creator:
            return Response(
                {'error': 'You have no rights to delete this location.'},
                status=status.HTTP_403_FORBIDDEN
            )

        # Delete measurements one by one so per-instance delete logic runs.
        for measurement in AirQualityMeasurement.objects.filter(
                location=location):
            measurement.delete()

        location.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class MeasurementAPIMixin(object):
    """Mixin that pushes a finished measurement into GeoKey as a contribution."""

    def submit_measurement(self, request, data, instance):
        # Returns True when the measurement was successfully submitted as a
        # GeoKey contribution (and the local instance deleted), False in every
        # other case (missing input, lookup failure, no contribute rights).
        user = request.user
        project = request.data.get('project', None)
        properties = data.get('properties', None)

        if project is not None and properties is not None:
            finished = data.get('finished', None)
            results = properties.get('results', None)

            # Only finished measurements that carry a result are submitted.
            if finished is not None and results is not None:
                try:
                    project = Project.objects.get(pk=project, status='active')
                    aq_project = AirQualityProject.objects.get(
                        status='active',
                        project=project
                    )
                    category_types = dict(AirQualityCategory.TYPES)
                    field_types = dict(AirQualityField.TYPES)

                    # Band the numeric result into one of five categories.
                    # NOTE(review): band thresholds (40/60/80/100) are
                    # hard-coded — presumably NO2 concentration bands; verify.
                    results = float(results)
                    if results < 40:
                        category = category_types['1']
                    elif results >= 40 and results < 60:
                        category = category_types['2']
                    elif results >= 60 and results < 80:
                        category = category_types['3']
                    elif results >= 80 and results < 100:
                        category = category_types['4']
                    else:
                        category = category_types['5']
                    aq_category = AirQualityCategory.objects.get(
                        type=category,
                        project=aq_project
                    )

                    # Map each configured field type to its value, keyed by
                    # the GeoKey field key. `iteritems` is Python-2-only.
                    properties = {}
                    for key, value in field_types.iteritems():
                        aq_field = AirQualityField.objects.get(
                            type=value,
                            category=aq_category
                        )
                        instance_properties = instance.location.properties
                        value = None
                        if key == 'results':
                            value = results
                        elif key == 'date_out':
                            value = filter_date(instance.started, 'd/m/Y')
                        elif key == 'time_out':
                            value = filter_date(instance.started, 'H:i')
                        elif key == 'date_collected':
                            value = filter_date(instance.finished, 'd/m/Y')
                        elif key == 'time_collected':
                            value = filter_date(instance.finished, 'H:i')
                        elif key == 'exposure_min':
                            value = instance.finished - instance.started
                            value = int(value.total_seconds() / 60)
                        elif key == 'distance_from_road':
                            value = '%sm' % instance_properties.get(
                                'distance'
                            )
                        elif key == 'height':
                            value = '%sm' % instance_properties.get(
                                'height'
                            )
                        elif key == 'site_characteristics':
                            value = instance_properties.get(
                                'characteristics'
                            )
                        elif key == 'additional_details':
                            value = instance.properties.get(
                                'additional_details'
                            )
                        elif key == 'made_by_students':
                            value = instance.properties.get(
                                'made_by_students'
                            )
                            # Stored as a boolean; the GeoKey field expects
                            # the ID of a 'Yes'/'No' lookup value.
                            if value:
                                value = 'Yes'
                            else:
                                value = 'No'
                            try:
                                value = aq_field.field.lookupvalues.get(
                                    name=value).id
                            except Field.DoesNotExist:
                                return False
                        if value is not None:
                            properties[aq_field.field.key] = str(value)
                # NOTE(review): bare except — any failure above (bad float,
                # missing model, etc.) silently turns into "not submitted".
                except:
                    return False

                if project.can_contribute(user):
                    # Build a GeoJSON-style contribution payload.
                    data = {
                        'type': 'Feature',
                        'meta': {
                            'status': 'active',
                            'category': aq_category.category.id
                        },
                        'location': {
                            'geometry': instance.location.geometry.geojson
                        },
                        'properties': properties
                    }
                    serializer = ContributionSerializer(
                        data=data,
                        context={'user': user, 'project': project}
                    )
                    if serializer.is_valid(raise_exception=True):
                        serializer.save()
                        # The local measurement is consumed by the submission.
                        instance.delete()
                        return True

        return False
class AQMeasurementsAPIView(MeasurementAPIMixin, APIView):
    """
    API endpoint for all measurements.
    """

    def post(self, request, location_id):
        """
        Adds a measurement. Returns created and serialised measurement.

        Parameter
        ---------
        request : rest_framework.request.Request
            Object representing the request.
        location_id : int
            Identifies the location in the database.

        Returns
        -------
        rest_framework.reponse.Response
            Contains the serialised measurement or an error message.
        """
        try:
            location = AirQualityLocation.objects.get(pk=location_id)
        except AirQualityLocation.DoesNotExist:
            return Response(
                {'error': 'Location not found.'},
                status=status.HTTP_404_NOT_FOUND
            )

        # Only the location's creator may attach measurements to it.
        if request.user != location.creator:
            return Response(
                {'error': 'You have no rights to add a new measurement.'},
                status=status.HTTP_403_FORBIDDEN
            )

        payload = request.data
        serialized = MeasurementSerializer(
            data=payload, context={
                'user': request.user,
                'location': location,
                'data': payload
            }
        )
        if serialized.is_valid(raise_exception=True):
            serialized.save()
            # A finished measurement may be submitted straight to GeoKey, in
            # which case no body is returned.
            if self.submit_measurement(
                    request, serialized.data, serialized.instance):
                return Response(status=status.HTTP_204_NO_CONTENT)
            return Response(serialized.data, status=status.HTTP_201_CREATED)
class AQMeasurementsSingleAPIView(MeasurementAPIMixin, APIView):
    """
    API endpoint for a single measurement.
    """

    def patch(self, request, location_id, measurement_id):
        """
        Updates a single measurement created by the user.

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request.
        location_id : int
            Identifies the location in the database.
        measurement_id : int
            Identifies the measurement in the database.

        Returns
        -------
        rest_framework.response.Response
            Contains the serialised measurement or an error message.
        """
        try:
            measurement = AirQualityMeasurement.objects.get(pk=measurement_id)
        except AirQualityMeasurement.DoesNotExist:
            return Response(
                {'error': 'Measurement not found.'},
                status=status.HTTP_404_NOT_FOUND
            )

        # Only the creator may modify a measurement.
        if request.user != measurement.creator:
            return Response(
                {'error': 'You have no rights to update this measurement.'},
                status=status.HTTP_403_FORBIDDEN
            )

        payload = request.data
        serialized = MeasurementSerializer(
            measurement, data=payload,
            context={'user': request.user, 'data': payload}
        )
        if serialized.is_valid(raise_exception=True):
            serialized.save()
            # A now-finished measurement may be pushed to GeoKey, in which
            # case no body is returned.
            if self.submit_measurement(
                    request, serialized.data, serialized.instance):
                return Response(status=status.HTTP_204_NO_CONTENT)
            return Response(serialized.data, status=status.HTTP_200_OK)

    def delete(self, request, location_id, measurement_id):
        """
        Deletes a single measurement created by the user.

        Parameters
        ----------
        request : rest_framework.request.Request
            Represents the request.
        location_id : int
            Identifies the location in the database.
        measurement_id : int
            Identifies the measurement in the database.

        Returns
        -------
        rest_framework.response.Response
            Contains empty response indicating successful delete or an error
            message.
        """
        try:
            measurement = AirQualityMeasurement.objects.get(pk=measurement_id)
        except AirQualityMeasurement.DoesNotExist:
            return Response(
                {'error': 'Measurement not found.'},
                status=status.HTTP_404_NOT_FOUND
            )

        if request.user != measurement.creator:
            return Response(
                {'error': 'You have no rights to delete this measurement.'},
                status=status.HTTP_403_FORBIDDEN
            )

        measurement.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
| {
"repo_name": "ExCiteS/geokey-airquality",
"path": "geokey_airquality/views.py",
"copies": "1",
"size": "38591",
"license": "mit",
"hash": 846304943661054600,
"line_mean": 31.6765453006,
"line_max": 115,
"alpha_frac": 0.5113109274,
"autogenerated": false,
"ratio": 5.251190638182066,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6262501565582066,
"avg_score": null,
"num_lines": null
} |
"""All views for the extension."""
# -*- coding: utf-8 -*-
import json
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.views.generic import CreateView, FormView, TemplateView
from django.shortcuts import redirect
from django.db.models import IntegerField, Q, Count, Case, When
from django.contrib import messages
from braces.views import LoginRequiredMixin
from geokey.projects.models import Project
from geokey.projects.views import ProjectContext
from geokey.categories.base import DEFAULT_STATUS
from geokey.categories.models import Category, LookupValue
from geokey.contributions.serializers import ContributionSerializer
from geokey.socialinteractions.models import SocialInteractionPost
from .helpers.context_helpers import does_not_exist_msg
from .base import FORMAT
from .exceptions import FileParseError
from .models import DataImport
from .forms import CategoryForm, DataImportForm
# ###########################
# ADMIN PAGES
# ###########################
class IndexPage(LoginRequiredMixin, TemplateView):
    """Main index page."""

    template_name = 'di_index.html'

    def get_context_data(self, *args, **kwargs):
        """
        GET method for the template.

        Return the context to render the view. Overwrite the method by adding
        all projects (where user is an administrator) and available filters to
        the context. It optionally filters projects by the filter provided on
        the URL.

        Returns
        -------
        dict
            Context.
        """
        # Count only non-deleted data imports per project, so the filters
        # below can distinguish projects with and without imports.
        projects = Project.objects.annotate(
            dataimports_count=Count(Case(
                When(
                    ~Q(dataimports__status='deleted') &
                    Q(dataimports__isnull=False),
                    then=1
                ),
                output_field=IntegerField(),
            ))
        ).filter(admins=self.request.user)

        filters = {}
        requested = self.request.GET.get('filter')

        if requested == 'without-data-imports-only':
            projects = projects.filter(dataimports_count=0)
            filters['without-data-imports-only'] = 'Without data imports'
        if requested == 'with-data-imports-only':
            projects = projects.filter(dataimports_count__gt=0)
            filters['with-data-imports-only'] = 'With data imports'

        return super(IndexPage, self).get_context_data(
            projects=projects.distinct(),
            filters=filters,
            *args,
            **kwargs
        )
class AllDataImportsPage(LoginRequiredMixin, ProjectContext, TemplateView):
    """All data imports page."""

    # Declaration-only view: the project lookup comes from ProjectContext and
    # the template lists the project's data imports; no extra logic needed.
    template_name = 'di_all_dataimports.html'
class AddDataImportPage(LoginRequiredMixin, ProjectContext, CreateView):
    """Add new data import page."""

    template_name = 'di_add_dataimport.html'
    form_class = DataImportForm

    def get_context_data(self, *args, **kwargs):
        """
        GET method for the template.

        Return the context to render the view. Overwrite the method by adding
        project ID to the context.

        Returns
        -------
        dict
            Context.
        """
        project_id = self.kwargs['project_id']
        return super(AddDataImportPage, self).get_context_data(
            project_id,
            *args,
            **kwargs
        )

    def form_valid(self, form):
        """
        Add data import when form data is valid.

        Parameters
        ----------
        form : geokey_dataimports.forms.DataImportForm
            Represents the user input.

        Returns
        -------
        django.http.HttpResponse
            Rendered template.
        """
        context = self.get_context_data(form=form)
        project = context.get('project')

        if project:
            if project.islocked:
                messages.error(
                    self.request,
                    'The project is locked. New data imports cannot be added.'
                )
            else:
                form.instance.project = project
                form.instance.creator = self.request.user

                # Infer the import format from the uploaded file's MIME type.
                content_type = self.request.FILES.get('file').content_type
                if content_type == 'application/json':
                    form.instance.dataformat = FORMAT.GeoJSON
                elif content_type == 'application/vnd.google-earth.kml+xml':
                    form.instance.dataformat = FORMAT.KML
                elif content_type in ['text/csv', 'application/vnd.ms-excel']:
                    form.instance.dataformat = FORMAT.CSV
                else:
                    messages.error(
                        self.request,
                        'The file type does not seem to be compatible with '
                        'this extension just yet. Only GeoJSON, KML and CSV '
                        'with WKT formatted geometries formats are supported.'
                    )

                if form.instance.dataformat:
                    try:
                        # The user may attach an existing category instead of
                        # creating a new one in a later step.
                        if self.request.POST.get('category_create') == 'false':
                            try:
                                category = project.categories.get(
                                    pk=self.request.POST.get('category')
                                )
                                form.instance.category = category
                            except Category.DoesNotExist:
                                messages.error(
                                    self.request,
                                    'The category does not exist.'
                                )
                        return super(AddDataImportPage, self).form_valid(form)
                    except FileParseError as error:
                        messages.error(self.request, error.to_html())

        # NOTE(review): this success message is on the fall-through path
        # (locked project, unsupported file type, parse error), where it
        # appears alongside the error message added above — verify intent.
        messages.success(
            self.request,
            'The data import has been added.'
        )
        return self.render_to_response(context)

    def form_invalid(self, form):
        """
        Display an error message when form data is invalid.

        Parameters
        ----------
        form : geokey_dataimports.forms.DataImportForm
            Represents the user input.

        Returns
        -------
        dict
            Context.
        """
        messages.error(self.request, 'An error occurred.')
        return self.render_to_response(self.get_context_data(form=form))

    def get_success_url(self):
        """
        Set URL redirection when data import created successfully.

        Returns
        -------
        str
            URL for redirection.
        """
        # With a category already attached the next step is assigning fields;
        # otherwise the user must create a category first.
        if self.object.category:
            return reverse(
                'geokey_dataimports:dataimport_assign_fields',
                kwargs={
                    'project_id': self.kwargs['project_id'],
                    'dataimport_id': self.object.id
                }
            )
        else:
            return reverse(
                'geokey_dataimports:dataimport_create_category',
                kwargs={
                    'project_id': self.kwargs['project_id'],
                    'dataimport_id': self.object.id
                }
            )
class DataImportContext(LoginRequiredMixin, ProjectContext):
    """Get data import mixin."""

    def get_context_data(self, project_id, dataimport_id, *args, **kwargs):
        """
        GET method for the template.

        Return the context to render the view. Overwrite the method by adding
        a data import to the context.

        Parameters
        ----------
        project_id : int
            Identifies the project in the database.
        dataimport_id : int
            Identifies the data import in the database.

        Returns
        -------
        dict
            Context.
        """
        context = super(DataImportContext, self).get_context_data(
            project_id,
            *args,
            **kwargs
        )

        # The import must belong to the project resolved by ProjectContext.
        try:
            context['dataimport'] = DataImport.objects.get(
                pk=dataimport_id,
                project=context.get('project')
            )
        except DataImport.DoesNotExist:
            return {
                'error': 'Not found.',
                'error_description': does_not_exist_msg('Data import')
            }

        return context
class SingleDataImportPage(DataImportContext, FormView):
    """Single data import page."""

    template_name = 'di_single_dataimport.html'

    def get_object(self):
        """
        Get and return data import object.

        Returns
        -------
        geokey_dataimports.models.DataImport
            Data import object.
        """
        # Returns None when the import does not exist; get_form relies on
        # this to build an unbound form.
        try:
            return DataImport.objects.get(
                pk=self.kwargs['dataimport_id']
            )
        except DataImport.DoesNotExist:
            return None

    def get_context_data(self, *args, **kwargs):
        """
        GET method for the template.

        Return the context to render the view. Overwrite the method by adding
        project ID and data import ID to the context.

        Returns
        -------
        dict
            Context.
        """
        project_id = self.kwargs['project_id']
        dataimport_id = self.kwargs['dataimport_id']
        return super(SingleDataImportPage, self).get_context_data(
            project_id,
            dataimport_id,
            *args,
            **kwargs
        )

    def get_form(self, form_class=DataImportForm):
        """Attach instance object to form data."""
        return form_class(instance=self.get_object(), **self.get_form_kwargs())

    def form_valid(self, form):
        """
        Update data import when form data is valid.

        Parameters
        ----------
        form : geokey_dataimports.forms.DataImportForm
            Represents the user input.

        Returns
        -------
        django.http.HttpResponseRedirect
            Redirects to a single data import when form is saved, assign fields
            page when category is selected, create category page when category
            does not exist.
        django.http.HttpResponse
            Rendered template if project or data import does not exist.
        """
        context = self.get_context_data(form=form)
        project = context.get('project')

        if project:
            if project.islocked:
                messages.error(
                    self.request,
                    'The project is locked. Data imports cannot be updated.'
                )
            else:
                form.save()
                # A category may be selected as part of this update; when it
                # is, the user is moved on to the field-assignment step.
                if not form.instance.category:
                    try:
                        form.instance.category = project.categories.get(
                            pk=self.request.POST.get('category')
                        )
                        form.save()
                        messages.success(
                            self.request,
                            'The category has been selected.'
                        )
                        return redirect(
                            'geokey_dataimports:dataimport_assign_fields',
                            project_id=project.id,
                            dataimport_id=form.instance.id
                        )
                    except Category.DoesNotExist:
                        messages.error(
                            self.request,
                            'The category does not exist. Please create a '
                            'new category.'
                        )
                        return redirect(
                            'geokey_dataimports:dataimport_create_category',
                            project_id=project.id,
                            dataimport_id=form.instance.id
                        )

        # NOTE(review): fall-through path — also reached when the project is
        # missing or locked, where this success message reads oddly; verify.
        messages.success(
            self.request,
            'The data import has been updated.'
        )
        return self.render_to_response(context)

    def form_invalid(self, form):
        """
        Display an error message when form data is invalid.

        Parameters
        ----------
        form : geokey_dataimports.forms.DataImportForm
            Represents the user input.

        Returns
        -------
        dict
            Context.
        """
        messages.error(self.request, 'An error occurred.')
        return self.render_to_response(self.get_context_data(form=form))
class DataImportCreateCategoryPage(DataImportContext, CreateView):
"""Create category for data import page."""
template_name = 'di_create_category.html'
form_class = CategoryForm
def get_context_data(self, *args, **kwargs):
    """
    GET method for the template.

    Return the context to render the view. Overwrite the method by adding
    project ID and data import ID to the context.

    Returns
    -------
    dict
        Context.
    """
    # Forward both URL kwargs to DataImportContext, which resolves the
    # project and the data import for the template.
    return super(DataImportCreateCategoryPage, self).get_context_data(
        self.kwargs['project_id'],
        self.kwargs['dataimport_id'],
        *args,
        **kwargs
    )
def form_valid(self, form):
"""
Create category and fields when form data is valid.
Parameters
----------
form : geokey_dataimports.forms.CategoryForm
Represents the user input.
Returns
-------
django.http.HttpResponseRedirect
Redirects to a single data import when category is created.
django.http.HttpResponse
Rendered template if project or data import does not exist, project
is locked, data import already has a category associated with it,
fields already have been assigned.
"""
data = self.request.POST
context = self.get_context_data(form=form)
dataimport = context.get('dataimport')
if dataimport:
if dataimport.project.islocked:
messages.error(
self.request,
'The project is locked. New categories cannot be created.'
)
elif dataimport.category:
messages.error(
self.request,
'The data import already has a category associated with '
'it. Unfortunately, this cannot be changed.'
)
elif dataimport.keys:
messages.error(
self.request,
'The fields have already been assigned. Unfortunately, '
'this cannot be changed.'
)
else:
dataimport.category = Category.objects.create(
name=form.instance.name,
description=form.instance.description,
project=dataimport.project,
creator=self.request.user,
default_status=DEFAULT_STATUS.active
)
dataimport.save()
ids = data.getlist('ids')
keys = []
if ids:
for datafield in dataimport.datafields.filter(id__in=ids):
field = datafield.convert_to_field(
data.get('fieldname_%s' % datafield.id),
data.get('fieldtype_%s' % datafield.id)
)
keys.append(field.key)
dataimport.keys = keys
dataimport.save()
messages.success(
self.request,
'The category has been created. You may now import the '
'data.'
)
return redirect(
'geokey_dataimports:single_dataimport',
project_id=dataimport.project.id,
dataimport_id=dataimport.id
)
return self.render_to_response(context)
def form_invalid(self, form):
"""
Display an error message when form data is invalid.
Parameters
----------
form : geokey_dataimports.forms.CategoryForm
Represents the user input.
Returns
-------
dict
Context.
"""
messages.error(self.request, 'An error occurred.')
return self.render_to_response(self.get_context_data(form=form))
class DataImportAssignFieldsPage(DataImportContext, TemplateView):
    """Assign fields for data import page."""
    template_name = 'di_assign_fields.html'
    def post(self, request, project_id, dataimport_id):
        """
        POST method for assigning fields.
        Parameters
        ----------
        request : django.http.HttpRequest
            Object representing the request.
        project_id : int
            Identifies the project in the database.
        dataimport_id : int
            Identifies the data import in the database.
        Returns
        -------
        django.http.HttpResponseRedirect
            Redirects to a single data import when fields are assigned.
        django.http.HttpResponse
            Rendered template if project or data import does not exist, project
            is locked, data import has no category associated with it, fields
            already have been assigned.
        """
        data = self.request.POST
        context = self.get_context_data(project_id, dataimport_id)
        dataimport = context.get('dataimport')
        if dataimport:
            # Guard clauses: assignment requires an existing category and
            # may happen at most once per data import.
            if dataimport.project.islocked:
                messages.error(
                    request,
                    'The project is locked. Fields cannot be assigned.'
                )
            elif not dataimport.category:
                messages.error(
                    request,
                    'The data import has no category associated with it.'
                )
            elif dataimport.keys:
                messages.error(
                    request,
                    'Fields have already been assigned.'
                )
            else:
                ids = data.getlist('ids')
                keys = []
                if ids:
                    for datafield in dataimport.datafields.filter(id__in=ids):
                        # When the user mapped this data field to an existing
                        # category field, store that field's key first.
                        # NOTE(review): convert_to_field is still called in
                        # that case - presumably it reuses the field with the
                        # assigned key; confirm against its implementation.
                        key = data.get('existingfield_%s' % datafield.id)
                        if key:
                            datafield.key = key
                            datafield.save()
                        field = datafield.convert_to_field(
                            data.get('fieldname_%s' % datafield.id),
                            data.get('fieldtype_%s' % datafield.id)
                        )
                        keys.append(field.key)
                dataimport.keys = keys
                dataimport.save()
                messages.success(
                    self.request,
                    'The fields have been assigned. You may now import the '
                    'data.'
                )
                return redirect(
                    'geokey_dataimports:single_dataimport',
                    project_id=dataimport.project.id,
                    dataimport_id=dataimport.id
                )
        return self.render_to_response(context)
class DataImportAllDataFeaturesPage(DataImportContext, TemplateView):
    """Data import all data features page."""
    template_name = 'di_all_datafeatures.html'
    def get_context_data(self, *args, **kwargs):
        """
        GET method for the template.
        Return the context to render the view. Overwrite the method by adding
        all data features (not imported yet) to the context.
        Returns
        -------
        dict
            Context.
        """
        context = super(DataImportAllDataFeaturesPage, self).get_context_data(
            *args,
            **kwargs
        )
        dataimport = context.get('dataimport')
        if dataimport:
            # Expose pending (not yet imported) features as a GeoJSON
            # FeatureCollection so the template can render them on a map.
            datafeatures = []
            for datafeature in dataimport.datafeatures.filter(imported=False):
                datafeatures.append({
                    'type': 'Feature',
                    'id': datafeature.id,
                    'geometry': json.loads(datafeature.geometry.json)
                })
            context['datafeatures'] = {
                'type': 'FeatureCollection',
                'features': datafeatures
            }
        return context
    def post(self, request, project_id, dataimport_id):
        """
        POST method for converting data features to contributions.
        Parameters
        ----------
        request : django.http.HttpRequest
            Object representing the request.
        project_id : int
            Identifies the project in the database.
        dataimport_id : int
            Identifies the data import in the database.
        Returns
        -------
        django.http.HttpResponseRedirect
            Redirects to a single data import when fields are assigned.
        django.http.HttpResponse
            Rendered template if project or data import does not exist, project
            is locked, data import has no category associated with it, or data
            import has no fields assigned.
        """
        data = self.request.POST
        context = self.get_context_data(project_id, dataimport_id)
        dataimport = context.get('dataimport')
        if dataimport:
            if dataimport.project.islocked:
                messages.error(
                    request,
                    'The project is locked. Data cannot be imported.'
                )
            elif not dataimport.category:
                messages.error(
                    request,
                    'The data import has no category associated with it.'
                )
            elif dataimport.keys is None:
                messages.error(
                    request,
                    'The data import has no fields assigned.'
                )
            else:
                # temporarily disable post interactions
                # NOTE(review): presumably this suppresses automatic social
                # media posts while contributions are bulk-created - confirm.
                post_interactions_backup = {}
                post_interactions = SocialInteractionPost.objects.filter(project_id=project_id)
                for post_interaction in post_interactions:
                    post_interactions_backup[post_interaction] = post_interaction.status
                    post_interaction.status = 'inactive'
                    post_interaction.save()
                ids = data.get('ids')
                if ids:
                    ids = json.loads(ids)
                else:
                    ids = []
                lookupfields = dataimport.get_lookup_fields()
                datafeatures = dataimport.datafeatures.filter(
                    id__in=ids,
                    imported=False
                )
                imported = 0
                for datafeature in datafeatures:
                    properties = datafeature.properties
                    # Iterate over a copy so keys can be deleted in place.
                    for key, value in dict(properties).items():
                        if key not in dataimport.keys:
                            del properties[key]
                        elif key in lookupfields:
                            # Replace raw lookup values with lookup value
                            # ids, creating new values on the fly.
                            value, created = LookupValue.objects.get_or_create(
                                name=value,
                                field=lookupfields[key]
                            )
                            properties[key] = value.id
                    feature = {
                        "location": {
                            "geometry": datafeature.geometry
                        },
                        "meta": {
                            "category": dataimport.category.id,
                        },
                        "properties": properties
                    }
                    serializer = ContributionSerializer(
                        data=feature,
                        context={
                            'user': self.request.user,
                            'project': dataimport.project
                        }
                    )
                    try:
                        serializer.is_valid(raise_exception=True)
                        serializer.save()
                        datafeature.imported = True
                        datafeature.save()
                        imported += 1
                    except ValidationError:
                        # Invalid features are skipped silently; the final
                        # count below tells the user how many got imported.
                        pass
                # restore post interactions
                for post_interaction, status_backup in post_interactions_backup.items():
                    post_interaction.status = status_backup
                    post_interaction.save()
                messages.success(
                    request,
                    '%s contribution(s) imported.' % imported
                )
                return redirect(
                    'geokey_dataimports:single_dataimport',
                    project_id=project_id,
                    dataimport_id=dataimport_id
                )
        return self.render_to_response(context)
class RemoveDataImportPage(DataImportContext, TemplateView):
    """Remove data import page."""
    template_name = 'base.html'
    def get(self, request, project_id, dataimport_id):
        """
        GET method for removing data import.
        Parameters
        ----------
        request : django.http.HttpRequest
            Object representing the request.
        project_id : int
            Identifies the project in the database.
        dataimport_id : int
            Identifies the data import in the database.
        Returns
        -------
        django.http.HttpResponseRedirect
            Redirects to all data imports if data import is removed, single
            data import page if project is locked.
        django.http.HttpResponse
            Rendered template if project or data import does not exist.
        """
        context = self.get_context_data(project_id, dataimport_id)
        dataimport = context.get('dataimport')
        if dataimport:
            # Locked projects are immutable: bounce back to the detail page.
            if dataimport.project.islocked:
                messages.error(
                    request,
                    'The project is locked. Data import cannot be removed.'
                )
                return redirect(
                    'geokey_dataimports:single_dataimport',
                    project_id=project_id,
                    dataimport_id=dataimport_id
                )
            else:
                dataimport.delete()
                messages.success(
                    request,
                    'The data import has been removed.'
                )
                return redirect(
                    'geokey_dataimports:all_dataimports',
                    project_id=project_id
                )
        return self.render_to_response(context)
| {
"repo_name": "ExCiteS/geokey-dataimports",
"path": "geokey_dataimports/views.py",
"copies": "1",
"size": "27162",
"license": "mit",
"hash": -7592450608590207000,
"line_mean": 32.1648351648,
"line_max": 95,
"alpha_frac": 0.510234887,
"autogenerated": false,
"ratio": 5.234534592407015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6244769479407015,
"avg_score": null,
"num_lines": null
} |
"""All views for the WeGovNow extension."""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from requests import get
from django.shortcuts import redirect
from django.views.generic import TemplateView
from rest_framework import status
from rest_framework.renderers import JSONRenderer
from rest_framework.views import APIView
from rest_framework.response import Response
from braces.views import LoginRequiredMixin
from allauth.socialaccount import app_settings
from geokey_wegovnow.middleware import UWUMMiddleware
from geokey_wegovnow.renderers import RawHTMLRenderer
# ###########################
# ADMIN VIEWS
# ###########################
class UWUMProfileSettingsView(LoginRequiredMixin, TemplateView):
    """Redirect logged-in users to the external UWUM profile settings."""
    template_name = 'base.html'
    # UWUM provider configuration as declared in the allauth settings.
    uwum_settings = app_settings.PROVIDERS.get('uwum', {})
    def get(self, request):
        """Send the user to the configured UWUM settings URL."""
        settings_url = self.uwum_settings.get('SETTINGS_URL', '')
        return redirect(settings_url)
# ###########################
# PUBLIC API
# ###########################
class UWUMNavigationAPIView(APIView):
    """API endpoint for the UWUM navigation.

    Proxies the remote UWUM navigation endpoint, forwarding the client ID
    and (when available) the user's UWUM access token, and returns the
    navigation as JSON or raw HTML depending on the accepted renderer.
    """
    renderer_classes = (JSONRenderer, RawHTMLRenderer)
    # UWUM provider configuration from the allauth settings.
    uwum_settings = app_settings.PROVIDERS.get('uwum', {})
    def get(self, request, format=None):
        """GET method for the view."""
        navigation_url = self.uwum_settings.get('NAVIGATION_URL')
        if not navigation_url:
            return Response(
                {'error': 'URL to UWUM navigation not set'},
                status=status.HTTP_404_NOT_FOUND)
        client_id = None
        if hasattr(request, 'client_id'):
            client_id = request.client_id
        # For authenticated users without a cached token, let the UWUM
        # middleware validate the user and attach an access token.
        # NOTE(review): is_anonymous() is called - assumes an older Django
        # where it is a method, not a property; confirm the Django version.
        if (not hasattr(request, 'uwum_access_token') and
                not request.user.is_anonymous()):
            middleware = UWUMMiddleware()
            middleware._validate_uwum_user(request)
        headers = None
        if hasattr(request, 'uwum_access_token'):
            access_token = request.uwum_access_token
            headers = {'Authorization': 'Bearer %s' % access_token}
        response = get(
            '%s?format=%s&client_id=%s' % (
                navigation_url,
                request.accepted_renderer.format,
                client_id),
            headers=headers)
        if response.status_code == 200:
            # Raw HTML passes through untouched; anything else is decoded
            # from JSON before being wrapped in a DRF Response.
            if request.accepted_renderer.format != 'raw_html':
                response = response.json()
            return Response(response, status=status.HTTP_200_OK)
        else:
            return Response(
                {'error': 'UWUM navigation not found'},
                status=status.HTTP_404_NOT_FOUND)
| {
"repo_name": "ExCiteS/geokey-wegovnow",
"path": "geokey_wegovnow/views.py",
"copies": "1",
"size": "2701",
"license": "mit",
"hash": -1332115923388867600,
"line_mean": 31.5421686747,
"line_max": 67,
"alpha_frac": 0.6131062569,
"autogenerated": false,
"ratio": 4.155384615384615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 83
} |
"""All views of the UWUM provider."""
from requests import get, post
from django.core.urlresolvers import reverse
from allauth.utils import build_absolute_uri
from allauth.socialaccount import app_settings
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2View,
OAuth2LoginView,
OAuth2CallbackView,
)
from .client import UWUMClient
from .provider import UWUMProvider
class UWUMAdapter(OAuth2Adapter):
    """The UWUM OAuth2 adapter."""
    provider_id = UWUMProvider.id
    provider_settings = UWUMProvider.settings
    # General UWUM API endpoints
    # REGULAR_URL vs CERT_URL: the token exchange goes through the CERT
    # endpoint (client certificates are used instead of a consumer secret,
    # see UWUMView.get_client); everything else uses the regular endpoint.
    regular_api_url = '%s/api/%s' % (
        provider_settings.get('REGULAR_URL').rstrip('/'),
        provider_settings.get('API_VERSION'),
    )
    cert_api_url = '%s/api/%s' % (
        provider_settings.get('CERT_URL').rstrip('/'),
        provider_settings.get('API_VERSION'),
    )
    # Base OAuth2 endpoints
    authorize_url = '%s/authorization' % regular_api_url
    access_token_url = '%s/token' % cert_api_url
    # Additional UWUM endpoints
    validate_url = '%s/validate' % regular_api_url
    notify_email_url = '%s/notify_email' % regular_api_url
    def get_notify_email(self, access_token):
        """Get the user (UWUM member) email address used for notifications."""
        headers = self._make_request_headers(access_token)
        response = get(self.notify_email_url, headers=headers).json()
        return response.get('result', {}).get('notify_email')
    def complete_login(self, request, app, access_token, **kwargs):
        """Complete the social login process."""
        response = self.validate_user(access_token).json()
        if app_settings.QUERY_EMAIL and response['member']:
            # Email address used for notifications will be a default user email
            response['member']['email'] = self.get_notify_email(access_token)
        return self.get_provider().sociallogin_from_response(request, response)
    def validate_user(self, access_token):
        """Validate the user.

        Returns the raw ``requests`` response of the UWUM validate call,
        including the member record when ``include_member`` is honoured.
        """
        headers = self._make_request_headers(access_token)
        params = {'include_member': True}
        return post(self.validate_url, headers=headers, params=params)
    def _make_request_headers(self, access_token):
        """Make the request headers by adding the bearer access token."""
        return {'Authorization': 'Bearer %s' % access_token}
class UWUMView(OAuth2View):
    """Base OAuth2 view shared by the UWUM login and callback views."""
    def get_client(self, request, app):
        """Build a UWUM OAuth2 client for this provider and request."""
        relative_callback = reverse('%s_callback' % self.adapter.provider_id)
        absolute_callback = build_absolute_uri(request, relative_callback)
        provider = self.adapter.get_provider()
        requested_scope = provider.get_scope(request)
        return UWUMClient(
            request=self.request,
            consumer_key=app.client_id,
            consumer_secret=None,  # UWUM uses certificates instead
            access_token_method=self.adapter.access_token_method,
            access_token_url=self.adapter.access_token_url,
            callback_url=absolute_callback,
            scope=requested_scope,
        )
class UWUMLoginView(UWUMView, OAuth2LoginView):
    """The UWUM OAuth2 login view."""
    # Behaviour comes entirely from UWUMView (client construction) and
    # allauth's OAuth2LoginView (redirect to the authorization endpoint).
    pass
class UWUMCallbackView(UWUMView, OAuth2CallbackView):
    """The UWUM OAuth2 callback view."""
    # Behaviour comes entirely from UWUMView (client construction) and
    # allauth's OAuth2CallbackView (token exchange and login completion).
    pass
# Module-level view callables wired into the provider's urlpatterns.
oauth2_login = UWUMLoginView.adapter_view(UWUMAdapter)
oauth2_callback = UWUMCallbackView.adapter_view(UWUMAdapter)
| {
"repo_name": "ExCiteS/django-allauth-uwum",
"path": "allauth_uwum/views.py",
"copies": "1",
"size": "3469",
"license": "mit",
"hash": -603584964514477000,
"line_mean": 32.0380952381,
"line_max": 79,
"alpha_frac": 0.6650331508,
"autogenerated": false,
"ratio": 3.6943556975505856,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9859388848350585,
"avg_score": 0,
"num_lines": 105
} |
# All views todo with votes + profiles.
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth.views import password_reset
from django.db.models.query import QuerySet
from django.db.models.sql.datastructures import EmptyResultSet
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _

from registration.signals import user_activated
from tagging.models import Tag
from tagging.utils import calculate_cloud
from voting.models import Vote
from voting.views import vote_on_object
from voting.managers import possible_votes, votes

from issue.models import Issue
from profiles.views import get_user_candidate_context
@login_required
def record_vote_on_user(request, user_id):
    """
    Wrapper around ``voting.views.vote_on_object`` for voting on a user.

    - Non-AJAX requests only accept an upvote (``direction == 1``); any
      other direction flashes an error message and redirects back.
    - AJAX votes are delegated with ``allow_xmlhttprequest=True``.

    Requires ``from django.http import HttpResponseRedirect`` (the original
    module used the name without importing it, raising NameError at runtime).
    """
    # Use the `in` operator instead of the deprecated dict.has_key().
    if 'direction' in request.REQUEST:
        direction = int(request.REQUEST['direction'])
        if (not request.is_ajax()) and not direction == 1:
            message = _("You did not pick a valid option")
            request.session["flash_msg"] = message
            # Renamed from `next` to avoid shadowing the builtin.
            next_url = request.REQUEST.get('next', '/')
            return HttpResponseRedirect(next_url)
        return vote_on_object(request, User, direction,
                              object_id=user_id,
                              allow_xmlhttprequest=True)
    return HttpResponseRedirect('/')
def get_tagcloud_intersection(agree_issues, disagree_issues):
    """
    Build a combined tag cloud from agreed and disagreed issue ids.

    Tags appearing only among ``agree_issues`` get status ``'agree'``,
    tags appearing on both sides become a single ``'conflict'`` tag with
    summed counts, and tags only among ``disagree_issues`` keep no status.

    Returns the list produced by ``tagging.utils.calculate_cloud``.
    """
    try:
        agree_tags = Tag.objects.usage_for_model(Issue,
                counts=True, filters=dict(id__in=agree_issues))
    except EmptyResultSet:
        # No agreed issues at all.
        agree_tags = []
    try:
        disagree_tags = Tag.objects.usage_for_model(Issue,
                counts=True, filters=dict(id__in=disagree_issues))
    except EmptyResultSet:
        disagree_tags = []
    # Index the disagree tags by name; entries are popped as they are
    # merged so only disagree-only tags remain afterwards.
    disagree_by_name = dict((tag.name, tag) for tag in disagree_tags)
    all_tags = []
    for a_tag in agree_tags:
        a_tag.status = 'agree'
        # `in` replaces the deprecated dict.has_key() (removed in Python 3).
        if a_tag.name in disagree_by_name:
            d_tag = disagree_by_name.pop(a_tag.name)
            d_tag.count = d_tag.count + a_tag.count
            d_tag.status = 'conflict'
            all_tags.append(d_tag)
        else:
            all_tags.append(a_tag)
    # Remaining disagree-only tags (no status attribute set).
    all_tags.extend(disagree_by_name.values())
    return calculate_cloud(all_tags)
def compare_votes_to_user(request, username):
    """ Compare ``request.user``'s voting history with ``username``.
    sidebar:
        Coloured tag cloud. Green=Agree, Yellow=Conflict, Red=Disagree.
    gamenews:
        Candidate info.
        user details.
    """
    user = get_object_or_404(User, username = username)
    user_votes = Vote.objects.get_user_votes(user, Model=Issue)
    if request.user.is_authenticated():
        players_votes = Vote.objects.get_user_votes(request.user, Model=Issue)
        vote_keys = players_votes.values_list('object_id')
        players_votedict = dict((vote.object_id, vote.direction) for vote in players_votes.all())
        #players_votedict = players_votes.values('object_id', 'vote')
    else:
        # Anonymous users keep their vote history in the session.
        votedict = request.session.get('vote_history', dict())
        players_votedict = dict((i, int(x)) for i, x in votedict.items())
        vote_keys = players_votedict.keys()
    # Restrict the comparison to issues both users voted on.
    intersection_votes = user_votes.filter(object_id__in=vote_keys)
    intersection_votes = intersection_votes.values_list('object_id','direction')
    # Now compare votes.
    # Vote encoding (inferred from the comparisons below): 1 and below are
    # real directions, values > 1 count as "blank" - confirm against the
    # voting app's constants.
    id_agree = []
    id_disagree = []
    id_blank = []
    for k, vote in intersection_votes:
        if players_votedict.has_key(k): # must always be true..
            # If both vote the same, that is agreement.
            if players_votedict[k] == vote:
                id_agree.append(k)
            # Both voting blank is considered to be in agreement.
            elif (players_votedict[k] > 1 and vote > 1):
                id_agree.append(k)
            # One blank vote is considered neither agreement nor
            # disagreement.
            elif (players_votedict[k] > 1 or vote > 1):
                id_blank.append(k)
            # Disagreement:
            else:
                id_disagree.append(k)
    n_agree, n_disagree, n_blank = len(id_agree) , len(id_disagree) , len(id_blank)
    n_total_intersection = n_agree + n_disagree + n_blank
    # get the issues + votes!
    def _cmp(issueA, issueB): #compare issues by lower-cased title.
        return cmp(issueA[0].title.lower(), issueB[0].title.lower())
    def _issue_vote(qs): #create list with tuples (issue, direction)
        issue_vote = []
        issues = dict((issue.id, issue) for issue in qs.all())
        for id, issue in issues.items():
            vote = players_votedict[id]
            #issue_vote.append((issue, possible_votes[vote]))
            issue_vote.append((issue, votes.get(vote, _('blank'))))
        issue_vote.sort(_cmp)
        return issue_vote
    agree_issues = Issue.objects.filter(id__in=id_agree)
    agree_issues = _issue_vote(agree_issues)
    disagree_issues = Issue.objects.filter(id__in=id_disagree)
    disagree_issues = _issue_vote(disagree_issues)
    blank_issues = Issue.objects.filter(id__in=id_blank)
    blank_issues = _issue_vote(blank_issues)
    ## Get tagcloud of vote intersection.
    cloud = get_tagcloud_intersection(id_agree, id_disagree)
    def tag_cmp(tagA, tagB):
        return cmp(tagA.name, tagB.name)
    cloud.sort(tag_cmp)
    context = RequestContext(request, {
        'user_to_compare' : user,
        'n_agree' : n_agree,
        'n_disagree' : n_disagree,
        'n_blank' : n_blank,
        'n_total_intersection' : n_total_intersection,
        'agree_issues' : agree_issues,
        'disagree_issues' : disagree_issues,
        'blank_issues' : blank_issues,
        'cloud' : cloud,
    })
    context.update(get_user_candidate_context(request, user))
    return render_to_response('profiles/compare_votes_to_user.html', context)
| {
"repo_name": "spreeker/democracygame",
"path": "democracy/profiles/views/votes.py",
"copies": "1",
"size": "6300",
"license": "bsd-3-clause",
"hash": 7677325183784965000,
"line_mean": 37.1818181818,
"line_max": 103,
"alpha_frac": 0.6385714286,
"autogenerated": false,
"ratio": 3.5333707234997194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.962651743803066,
"avg_score": 0.009084942813812013,
"num_lines": 165
} |
__all__ = ("VirtualHeap",
"SizedVirtualHeap")
import os
import sys
import subprocess
import random
import string
import tempfile
from six.moves import xrange
from pyoram.util._virtual_heap_helper import lib as _clib
from pyoram.util.misc import log2floor
# Characters usable as digits for base-k node labels: every printable
# character except whitespace and a handful that would be ambiguous or
# unsafe in labels/paths ('+', '-', both quotes, backslash and slash).
numerals = ''.join([c for c in string.printable \
                    if ((c not in string.whitespace) and \
                        (c != '+') and (c != '-') and \
                        (c != '"') and (c != "'") and \
                        (c != '\\') and (c != '/'))])
# Reverse lookup: digit character -> numeric value.
numeral_index = dict((c,i) for i,c in enumerate(numerals))
# The maximum heap base for which base k labels
# can be produced.
max_k_labeled = len(numerals)
def base10_integer_to_basek_string(k, x):
    """Return the base-k digit string for the non-negative integer x."""
    if not (2 <= k <= max_k_labeled):
        raise ValueError("k must be in range [2, %d]: %s"
                         % (max_k_labeled, k))
    if x == 0:
        return numerals[0]
    # Recurse on the high-order digits; stripping the zero numeral drops
    # the leading zero produced by the base case.
    prefix = base10_integer_to_basek_string(k, x // k).lstrip(numerals[0])
    return prefix + numerals[x % k]
def basek_string_to_base10_integer(k, x):
    """Return the integer value of the base-k digit string x."""
    assert 1 < k <= max_k_labeled
    # Horner's scheme: fold the digits left to right.
    total = 0
    for digit in x:
        total = total * k + numeral_index[digit]
    return total
# _clib defines a faster version of this function
def calculate_bucket_level(k, b):
    """
    Return the level (root = 0) at which the 0-based bucket b
    sits inside a k-ary heap.
    """
    assert k >= 2
    if k == 2:
        # Fast path for binary heaps.
        return log2floor(b+1)
    # Smallest h with (k**(h+1) - 1) / (k - 1) > b, i.e. the first
    # height whose heap already contains bucket b.
    threshold = (k - 1) * (b + 1) + 1
    level = 0
    while k**(level+1) < threshold:
        level += 1
    return level
# _clib defines a faster version of this function
def calculate_last_common_level(k, b1, b2):
    """
    Return the deepest level at which the root paths of buckets
    b1 and b2 still coincide in a k-ary heap.
    """
    level1 = calculate_bucket_level(k, b1)
    level2 = calculate_bucket_level(k, b2)
    # Walk the deeper bucket up until both sit on the same level.
    while level1 > level2:
        b1 = (b1 - 1) // k
        level1 -= 1
    while level2 > level1:
        b2 = (b2 - 1) // k
        level2 -= 1
    # Ascend in lockstep until the two paths merge.
    while b1 != b2:
        b1 = (b1 - 1) // k
        b2 = (b2 - 1) // k
        level1 -= 1
    return level1
def calculate_necessary_heap_height(k, n):
    """
    Return the smallest height of a k-ary heap that can
    hold n buckets.
    """
    assert n >= 1
    # The required height is exactly the level of the last (0-based) bucket.
    return calculate_bucket_level(k, n - 1)
def calculate_bucket_count_in_heap_with_height(k, h):
    """
    Return the total number of buckets in a k-ary heap
    of height h.
    """
    assert h >= 0
    # Geometric series 1 + k + k**2 + ... + k**h, summed explicitly.
    total = 0
    power = 1
    for _ in range(h + 1):
        total += power
        power *= k
    return total
def calculate_bucket_count_in_heap_at_level(k, l):
    """
    Return how many buckets live at level l of a k-ary heap.
    """
    assert l >= 0
    # Every node has k children, so level l holds exactly k**l buckets.
    return k ** l
def calculate_leaf_bucket_count_in_heap_with_height(k, h):
    """
    Return the number of leaf buckets in a k-ary heap of height h.
    """
    # The leaves are exactly the buckets on the deepest level h
    # (helper inlined: level h of a k-ary heap holds k**h buckets).
    assert h >= 0
    return k ** h
def create_node_type(k):
    """Return a VirtualHeapNode class specialized for heap base k.

    The base k is attached as a class attribute after the class body so
    every instance can reach it without per-instance storage.
    """
    class VirtualHeapNode(object):
        """A node of a k-ary heap, identified by its 0-based bucket."""
        __slots__ = ("bucket", "level")
        def __init__(self, bucket):
            assert bucket >= 0
            self.bucket = bucket
            # The C helper computes the node's level from its bucket number.
            self.level = _clib.calculate_bucket_level(self.k, self.bucket)
        # Hashing and all comparisons delegate to the bucket number, so a
        # node compares equal to its plain integer bucket as well.
        def __hash__(self):
            return self.bucket.__hash__()
        def __int__(self):
            return self.bucket
        def __lt__(self, other):
            return self.bucket < other
        def __le__(self, other):
            return self.bucket <= other
        def __eq__(self, other):
            return self.bucket == other
        def __ne__(self, other):
            return self.bucket != other
        def __gt__(self, other):
            return self.bucket > other
        def __ge__(self, other):
            return self.bucket >= other
        def last_common_level(self, n):
            # Deepest level shared by this node's and n's root paths.
            return _clib.calculate_last_common_level(self.k,
                                                     self.bucket,
                                                     n.bucket)
        def child_node(self, c):
            # c-th child (0 <= c < k) of this node.
            assert type(c) is int
            assert 0 <= c < self.k
            return VirtualHeapNode(self.k * self.bucket + 1 + c)
        def parent_node(self):
            # Returns None for the root.
            if self.bucket != 0:
                return VirtualHeapNode((self.bucket - 1)//self.k)
            return None
        def ancestor_node_at_level(self, level):
            # Returns None when `level` is below this node.
            if level > self.level:
                return None
            current = self
            while current.level != level:
                current = current.parent_node()
            return current
        def path_to_root(self):
            # Yields this node first, then each ancestor up to the root.
            bucket = self.bucket
            yield self
            while bucket != 0:
                bucket = (bucket - 1)//self.k
                yield type(self)(bucket)
        def path_from_root(self):
            return list(reversed(list(self.path_to_root())))
        def bucket_path_to_root(self):
            # Same walk as path_to_root, but yields raw bucket numbers.
            bucket = self.bucket
            yield bucket
            while bucket != 0:
                bucket = (bucket - 1)//self.k
                yield bucket
        def bucket_path_from_root(self):
            return list(reversed(list(self.bucket_path_to_root())))
        #
        # Expensive Functions
        #
        def __repr__(self):
            try:
                label = self.label()
            except ValueError:
                # presumably, k is too large
                label = "<unknown>"
            return ("VirtualHeapNode(k=%s, bucket=%s, level=%s, label=%r)"
                    % (self.k, self.bucket, self.level, label))
        def __str__(self):
            """Returns a tuple (<level>, <bucket offset within level>)."""
            if self.bucket != 0:
                return ("(%s, %s)"
                        % (self.level,
                           self.bucket -
                           calculate_bucket_count_in_heap_with_height(self.k,
                                                                      self.level-1)))
            assert self.level == 0
            return "(0, 0)"
        def label(self):
            # Base-k string of this node's offset within its level,
            # zero-padded to the level's digit count; root label is ''.
            assert 0 <= self.bucket
            if self.level == 0:
                return ''
            b_offset = self.bucket - \
                calculate_bucket_count_in_heap_with_height(self.k,
                                                           self.level-1)
            basek = base10_integer_to_basek_string(self.k, b_offset)
            return basek.zfill(self.level)
        def is_node_on_path(self, n):
            # True when n lies on the path from the root to this node.
            if n.level <= self.level:
                n_label = n.label()
                if n_label == "":
                    return True
                return self.label().startswith(n_label)
            return False
    VirtualHeapNode.k = k
    return VirtualHeapNode
class VirtualHeap(object):
    """An unbounded k-ary heap of buckets, each holding a fixed number
    of blocks. Provides bucket/node/block index arithmetic; no storage."""
    # Bound C helper library and a cryptographically seeded RNG, shared
    # by all heap instances.
    clib = _clib
    random = random.SystemRandom()
    def __init__(self, k, blocks_per_bucket=1):
        assert 1 < k
        assert blocks_per_bucket >= 1
        self._k = k
        self._blocks_per_bucket = blocks_per_bucket
        # Node class specialized for this heap's base k.
        self.Node = create_node_type(k)
    @property
    def k(self):
        return self._k
    def node_label_to_bucket(self, label):
        # Inverse of Node.label(): '' is the root, otherwise the bucket is
        # the count of all shallower buckets plus the label's base-k value.
        if len(label) > 0:
            return \
                (calculate_bucket_count_in_heap_with_height(self.k,
                                                            len(label)-1) +
                 basek_string_to_base10_integer(self.k, label))
        return 0
    #
    # Buckets (0-based integer, equivalent to block for heap
    # with blocks_per_bucket=1)
    #
    @property
    def blocks_per_bucket(self):
        return self._blocks_per_bucket
    def bucket_count_at_level(self, l):
        return calculate_bucket_count_in_heap_at_level(self.k, l)
    def first_bucket_at_level(self, l):
        if l > 0:
            return calculate_bucket_count_in_heap_with_height(self.k, l-1)
        return 0
    def last_bucket_at_level(self, l):
        return calculate_bucket_count_in_heap_with_height(self.k, l) - 1
    def random_bucket_up_to_level(self, l):
        # Uniform over all buckets from the root down to level l inclusive.
        return self.random.randint(self.first_bucket_at_level(0),
                                   self.last_bucket_at_level(l))
    def random_bucket_at_level(self, l):
        return self.random.randint(self.first_bucket_at_level(l),
                                   self.first_bucket_at_level(l+1)-1)
    #
    # Nodes (a class that helps with heap path calculations)
    #
    def root_node(self):
        return self.first_node_at_level(0)
    def node_count_at_level(self, l):
        return self.bucket_count_at_level(l)
    def first_node_at_level(self, l):
        return self.Node(self.first_bucket_at_level(l))
    def last_node_at_level(self, l):
        return self.Node(self.last_bucket_at_level(l))
    def random_node_up_to_level(self, l):
        return self.Node(self.random_bucket_up_to_level(l))
    def random_node_at_level(self, l):
        return self.Node(self.random_bucket_at_level(l))
    #
    # Block (0-based integer)
    #
    def bucket_to_block(self, b):
        # First block belonging to bucket b.
        assert b >= 0
        return b * self.blocks_per_bucket
    def block_to_bucket(self, s):
        assert s >= 0
        return s//self.blocks_per_bucket
    def first_block_in_bucket(self, b):
        return self.bucket_to_block(b)
    def last_block_in_bucket(self, b):
        return self.bucket_to_block(b) + self.blocks_per_bucket - 1
    def block_count_at_level(self, l):
        return self.bucket_count_at_level(l) * self.blocks_per_bucket
    def first_block_at_level(self, l):
        return self.bucket_to_block(self.first_bucket_at_level(l))
    def last_block_at_level(self, l):
        return self.bucket_to_block(self.first_bucket_at_level(l+1)) - 1
class SizedVirtualHeap(VirtualHeap):
    """A VirtualHeap with a fixed height, adding totals, leaf-level
    helpers and Graphviz visualization."""
    def __init__(self, k, height, blocks_per_bucket=1):
        super(SizedVirtualHeap, self).\
            __init__(k, blocks_per_bucket=blocks_per_bucket)
        self._height = height
    #
    # Size properties
    #
    @property
    def height(self):
        return self._height
    @property
    def levels(self):
        # A heap of height h has h+1 levels (levels 0..h).
        return self.height + 1
    @property
    def first_level(self):
        return 0
    @property
    def last_level(self):
        return self.height
    #
    # Buckets (0-based integer, equivalent to block for heap
    # with blocks_per_bucket=1)
    #
    def bucket_count(self):
        return calculate_bucket_count_in_heap_with_height(self.k,
                                                          self.height)
    def leaf_bucket_count(self):
        return calculate_leaf_bucket_count_in_heap_with_height(self.k,
                                                               self.height)
    def first_leaf_bucket(self):
        return self.first_bucket_at_level(self.height)
    def last_leaf_bucket(self):
        return self.last_bucket_at_level(self.height)
    def random_bucket(self):
        return self.random.randint(self.first_bucket_at_level(0),
                                   self.last_leaf_bucket())
    def random_leaf_bucket(self):
        return self.random_bucket_at_level(self.height)
    #
    # Nodes (a class that helps with heap path calculations)
    #
    def is_nil_node(self, n):
        # A node whose bucket falls outside this heap's fixed size.
        return n.bucket >= self.bucket_count()
    def node_count(self):
        return self.bucket_count()
    def leaf_node_count(self):
        return self.leaf_bucket_count()
    def first_leaf_node(self):
        return self.Node(self.first_leaf_bucket())
    def last_leaf_node(self):
        return self.Node(self.last_leaf_bucket())
    def random_leaf_node(self):
        return self.Node(self.random_leaf_bucket())
    def random_node(self):
        return self.Node(self.random_bucket())
    #
    # Block (0-based integer)
    #
    def block_count(self):
        return self.bucket_count() * self.blocks_per_bucket
    def leaf_block_count(self):
        return self.leaf_bucket_count() * self.blocks_per_bucket
    def first_leaf_block(self):
        return self.first_block_in_bucket(self.first_leaf_bucket())
    def last_leaf_block(self):
        return self.last_block_in_bucket(self.last_leaf_bucket())
    #
    # Visualization
    #
    def write_as_dot(self, f, data=None, max_levels=None):
        "Write the tree in the dot language format to f."
        assert (max_levels is None) or (max_levels >= 0)
        def visit_node(n, levels):
            # Build the record label: either the node's base-k label
            # (escaped for dot's record syntax) or the per-block data.
            lbl = "{"
            if data is None:
                if self.k <= max_k_labeled:
                    lbl = repr(n.label()).\
                          replace("{","\{").\
                          replace("}","\}").\
                          replace("|","\|").\
                          replace("<","\<").\
                          replace(">","\>")
                else:
                    lbl = str(n)
            else:
                s = self.bucket_to_block(n.bucket)
                for i in xrange(self.blocks_per_bucket):
                    lbl += "{%s}" % (data[s+i])
                    if i + 1 != self.blocks_per_bucket:
                        lbl += "|"
                lbl += "}"
            f.write(" %s [penwidth=%s,label=\"%s\"];\n"
                    % (n.bucket, 1, lbl))
            levels += 1
            # Recurse into existing children unless the level cap is hit.
            if (max_levels is None) or (levels <= max_levels):
                for i in xrange(self.k):
                    cn = n.child_node(i)
                    if not self.is_nil_node(cn):
                        visit_node(cn, levels)
                        f.write(" %s -> %s ;\n" % (n.bucket, cn.bucket))
        f.write("// Created by SizedVirtualHeap.write_as_dot(...)\n")
        f.write("digraph heaptree {\n")
        f.write("node [shape=record]\n")
        if (max_levels is None) or (max_levels > 0):
            visit_node(self.root_node(), 1)
        f.write("}\n")
    def save_image_as_pdf(self, filename, data=None, max_levels=None):
        "Write the heap as PDF file."
        # Renders via a temporary DOT file and the external `dot` tool;
        # returns False (keeping the DOT file) if `dot` is unavailable.
        assert (max_levels is None) or (max_levels >= 0)
        import os
        if not filename.endswith('.pdf'):
            filename = filename+'.pdf'
        tmpfd, tmpname = tempfile.mkstemp(suffix='dot')
        with open(tmpname, 'w') as f:
            self.write_as_dot(f, data=data, max_levels=max_levels)
        os.close(tmpfd)
        try:
            subprocess.call(['dot',
                             tmpname,
                             '-Tpdf',
                             '-o',
                             ('%s'%filename)])
        except OSError:
            sys.stderr.write(
                "DOT -> PDF conversion failed. See DOT file: %s\n"
                % (tmpname))
            return False
        os.remove(tmpname)
        return True
| {
"repo_name": "ghackebeil/PyORAM",
"path": "src/pyoram/util/virtual_heap.py",
"copies": "1",
"size": "14663",
"license": "mit",
"hash": 7233506078890795000,
"line_mean": 32.2494331066,
"line_max": 79,
"alpha_frac": 0.5202209643,
"autogenerated": false,
"ratio": 3.7367482161060144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9726232844688067,
"avg_score": 0.006147267143589414,
"num_lines": 441
} |
__all__ = ['Visdata','SersicSource','GaussSource','PointSource','SIELens','ExternalShear',
'read_visdata','concatvis','bin_visibilities']
import numpy as np
import os
import astropy.constants as co
import warnings
from .utils import cart2pol,pol2cart
# Physical constants (SI) and angle-conversion factors used throughout.
c = co.c.value # speed of light, in m/s
G = co.G.value # gravitational constant in SI units
Msun = co.M_sun.value # solar mass, in kg
Mpc = 1e6*co.pc.value # 1 Mpc, in m
arcsec2rad = (np.pi/(180.*3600.)) # arcseconds -> radians
rad2arcsec =3600.*180./np.pi # radians -> arcseconds
deg2rad = np.pi/180. # degrees -> radians
class Visdata(object):
    """
    Class to hold all necessary info relating to one set of visibilities.

    Auto-updates amp&phase or real&imag if those values are changed, but
    MUST SET WITH, eg, visobj.amp = (a numpy array of the new values);
    CANNOT USE, eg, visobj.amp[0] = newval, AS THIS DOES NOT CALL THE
    SETTER FUNCTIONS.

    Parameters:
    u numpy ndarray
        The Fourier plane u coordinates of the visibilities to follow.
    v numpy ndarray
        The Fourier plane v coordinates of the visibilities to follow.
    real numpy ndarray
        The real parts of the visibilities
    imag numpy ndarray
        The imaginary parts of the visibilities
    sigma numpy ndarray
        The per-visibility uncertainties.
    ant1 numpy ndarray
        The first antenna number or name of the visibility on each baseline
    ant2 numpy ndarray
        The second antenna number or name of the visibility on each baseline
    PBfwhm float
        The FWHM of the antenna primary beam at this wavelength (at present
        assumes a homogeneous antenna array)
    filename str
        A filename associated with these data.
    """
    def __init__(self,u,v,real,imag,sigma,ant1=None,ant2=None,PBfwhm=None,filename=None):
        self.u = u
        self.v = v
        # Setting real before imag exists makes the amp/phase update inside
        # the setter fail harmlessly (caught there); imag's setter then
        # fills in amp and phase.
        self.real = real
        self.imag = imag
        self.sigma = sigma
        self.ant1 = ant1
        self.ant2 = ant2
        self.PBfwhm = PBfwhm
        self.filename = filename

    @property
    def uvdist(self):
        """Baseline length sqrt(u**2 + v**2) for each visibility."""
        return np.sqrt(self.u**2. + self.v**2.)

    @property
    def real(self):
        return self._real

    @real.setter
    def real(self,val):
        self._real = val
        # Setting amp & phase during __init__ will fail since imag is still
        # unknown (AttributeError). Doing so during conjugate() also fails,
        # but gives a ValueError (the arrays briefly differ in length);
        # imag's setter repairs amp/phase immediately afterwards.
        try:
            self._amp = np.sqrt(self._real**2. + self.imag**2.)
            self._phase = np.arctan2(self.imag,self._real)
        except (AttributeError,ValueError):
            self._amp = None
            self._phase = None

    @property
    def imag(self):
        return self._imag

    @imag.setter
    def imag(self,val):
        self._imag = val
        try:
            self._amp = np.sqrt(self.real**2. + self._imag**2.)
            self._phase = np.arctan2(self._imag,self.real)
        except (AttributeError,ValueError):
            self._amp = None
            self._phase = None

    @property
    def amp(self):
        return self._amp

    @amp.setter
    def amp(self,val):
        # Keep real/imag consistent with the new amplitudes at fixed phase.
        self._amp = val
        self._real = val * np.cos(self.phase)
        self._imag = val * np.sin(self.phase)

    @property
    def phase(self):
        return self._phase

    @phase.setter
    def phase(self,val):
        # Keep real/imag consistent with the new phases at fixed amplitude.
        self._phase = val
        self._real = self.amp * np.cos(val)
        self._imag = self.amp * np.sin(val)

    def __add__(self,other):
        # Sigmas combine in inverse quadrature (inverse-variance weighting).
        # Note: ant1/ant2/PBfwhm/filename metadata is intentionally dropped.
        return Visdata(self.u,self.v,self.real+other.real,self.imag+other.imag,
                       (self.sigma**-2. + other.sigma**-2.)**-0.5)

    def __sub__(self,other):
        return Visdata(self.u,self.v,self.real-other.real,self.imag-other.imag,
                       (self.sigma**-2. + other.sigma**-2.)**-0.5)

    def conjugate(self):
        """Append the complex-conjugate points (u,v -> -u,-v; imag -> -imag)
        to this dataset in place, doubling its size. Requires ant1/ant2 to
        be concatenable arrays (not None)."""
        u = np.concatenate((self.u,-self.u))
        v = np.concatenate((self.v,-self.v))
        real = np.concatenate((self.real,self.real))
        imag = np.concatenate((self.imag,-self.imag))
        sigma = np.concatenate((self.sigma,self.sigma))
        ant1 = np.concatenate((self.ant1,self.ant2))
        ant2 = np.concatenate((self.ant2,self.ant1))
        self.u = u
        self.v = v
        self.real = real
        self.imag = imag
        self.sigma = sigma
        self.ant1 = ant1
        self.ant2 = ant2

    def to_binfile(self,filename,overwrite=False):
        """
        Write out the visibility data to a .bin file that can then be read by
        vl.read_visdata.

        filename: string
            File to write out to. Will have '.bin' appended to it if not already given.
        overwrite: boolean
            If filename already exists and overwrite=False, will not overwrite existing file.

        Raises IOError if the file exists and overwrite is False.
        """
        allarr = np.vstack((self.u,self.v,self.real,self.imag,self.sigma,self.ant1,self.ant2))
        if not filename[-4:] == '.bin': filename += '.bin'
        if os.path.isfile(filename) and not overwrite:
            # BUG FIX: the message previously contained an unfilled {0:s}
            # placeholder; format the filename in.
            raise IOError('filename {0:s} exists and overwrite=False; '
                          'use visdata.to_binfile(filename,overwrite=True) '
                          'to overwrite'.format(filename))
        with open(filename,'wb') as f:
            allarr.tofile(f)
            # BUG FIX: f.write(self.PBfwhm) raised TypeError (a float is not
            # bytes). Append it as a float64 so read_visdata's np.fromfile
            # picks it up as the final element.
            np.asarray(self.PBfwhm, dtype=np.float64).tofile(f)
class SIELens(object):
    """
    Class to hold parameters for an SIE lens, with each parameter (besides
    redshift) a dictionary.

    Example format of each parameter:
    x = {'value':x0,'fixed':False,'prior':[xmin,xmax]}, where x0 is the
    initial/current value of x, x should not be a fixed parameter during fitting,
    and the value of x must be between xmin and xmax.

    Note: in my infinite future free time, will probably replace e and PA with
    the x and y components of the ellipticity, which are better behaved as e->0.

    Parameters:
    z
        Lens redshift. If unknown, any value can be chosen as long as it is
        less than the source redshift you know/assume.
    x, y
        Position of the lens, in arcseconds relative to the phase center of
        the data (or any other reference point of your choosing). +x is west
        (sorry not sorry), +y is north.
    M
        Lens mass, in Msun. With the lens and source redshifts, sets the
        overall "strength" of the lens. Can be converted to an Einstein radius
        using theta_Ein = (4*G*M * D_LS / (c**2 * D_L * D_S))**0.5, in radians,
        with G and c the gravitational constant and speed of light, and D_L, D_S
        and D_LS the distances to the lens, source, and between the lens and source,
        respectively.
    e
        Lens ellipticity, ranging from 0 (a circularly symmetric lens) to 1 (a very
        elongated lens).
    PA
        Lens major axis position angle, in degrees east of north.
    """
    def __init__(self,z,x,y,M,e,PA):
        # Do some input handling: bare numbers are promoted to parameter
        # dicts with default 'fixed' flags and prior bounds.
        if not isinstance(x,dict):
            x = {'value':x,'fixed':False,'prior':[-30.,30.]}
        if not isinstance(y,dict):
            y = {'value':y,'fixed':False,'prior':[-30.,30.]}
        if not isinstance(M,dict):
            M = {'value':M,'fixed':False,'prior':[1e7,1e15]}
        if not isinstance(e,dict):
            e = {'value':e,'fixed':False,'prior':[0.,1.]}
        if not isinstance(PA,dict):
            PA = {'value':PA,'fixed':False,'prior':[0.,180.]}
        if not all(['value' in d for d in [x,y,M,e,PA]]):
            raise KeyError("All parameter dicts must contain the key 'value'.")
        # Fill in any missing 'fixed'/'prior' keys on user-supplied dicts
        # (mutates the caller's dicts in place).
        if not 'fixed' in x: x['fixed'] = False
        if not 'fixed' in y: y['fixed'] = False
        if not 'fixed' in M: M['fixed'] = False
        if not 'fixed' in e: e['fixed'] = False
        if not 'fixed' in PA: PA['fixed'] = False
        if not 'prior' in x: x['prior'] = [-30.,30.]
        if not 'prior' in y: y['prior'] = [-30.,30.]
        if not 'prior' in M: M['prior'] = [1e7,1e15]
        if not 'prior' in e: e['prior'] = [0.,1.]
        if not 'prior' in PA: PA['prior'] = [0.,180.]
        self.z = z
        self.x = x
        self.y = y
        self.M = M
        self.e = e
        self.PA = PA
        # Here we keep a Boolean flag which tells us whether one of the lens
        # properties has changed since the last time we did the lensing
        # deflections. If everything is the same, we don't need to lens twice.
        self._altered = True
    def deflect(self,xim,yim,Dd,Ds,Dds):
        """
        Follow Kormann+1994 for the lensing deflections.

        Stores the result in self.deflected_x / self.deflected_y (arcsec)
        rather than returning it.

        Parameters:
        xim, yim
            2D Arrays of image coordinates we're going to lens,
            probably generated by np.meshgrid.
        Dd, Ds, Dds
            Distances to the lens, source and between the source
            and lens (units don't matter as long as they're the
            same). Can't be calculated only from lens due to source
            distances.
        """
        if self._altered: # Only redo if something is new.
            ximage, yimage = xim.copy(), yim.copy() # for safety.
            f = 1. - self.e['value']  # axis ratio b/a in K+94's notation
            fprime = np.sqrt(1. - f**2.)
            # K+94 parameterizes in terms of LOS velocity dispersion and then
            # basically the Einstein radius.
            sigma = ((self.M['value']*Ds*G*Msun*c**2.)/(4*np.pi**2. * Dd*Dds*Mpc))**(1/4.)
            Xi0 = 4*np.pi * (sigma/c)**2. * (Dd*Dds/Ds)
            # Flip units, the recenter and rotate grid to lens center and major axis
            ximage *= arcsec2rad; yimage *= arcsec2rad
            ximage -= (self.x['value']*arcsec2rad)
            yimage -= (self.y['value']*arcsec2rad)
            if not np.isclose(self.PA['value'], 0.):
                r,theta = cart2pol(ximage,yimage)
                ximage,yimage = pol2cart(r,theta-(self.PA['value']*deg2rad))
            phi = np.arctan2(yimage,ximage)
            # Calculate the deflections, account for e=0 (the SIS), which has
            # cancelling infinities. K+94 eq 27a.
            if np.isclose(f, 1.):
                dxs = -(Xi0/Dd)*np.cos(phi)
                dys = -(Xi0/Dd)*np.sin(phi)
            else:
                dxs = -(Xi0/Dd)*(np.sqrt(f)/fprime)*np.arcsinh(np.cos(phi)*fprime/f)
                dys = -(Xi0/Dd)*(np.sqrt(f)/fprime)*np.arcsin(np.sin(phi)*fprime)
            # Rotate and shift back to sky frame
            if not np.isclose(self.PA['value'], 0.):
                r,theta = cart2pol(dxs,dys)
                dxs,dys = pol2cart(r,theta+(self.PA['value']*deg2rad))
            dxs *= rad2arcsec; dys *= rad2arcsec
            self.deflected_x = dxs
            self.deflected_y = dys
            self._altered = False
class ExternalShear(object):
    """
    Class to hold the two parameters relating to an external tidal shear,
    where each parameter is a dictionary.

    Example format of each parameter:
    x = {'value':x0,'fixed':False,'prior':[xmin,xmax]}, where x0 is the
    initial/current value of x, x should not be a fixed parameter during fitting,
    and the value of x must be between xmin and xmax.

    Parameters:
    shear:
        The strength of the external shear. Should be 0 to 1 (although treating
        other objects in the lensing environment like this is really only valid
        for shear <~ 0.3).
    shearangle
        The position angle of the tidal shear, in degrees east of north.
    """
    def __init__(self,shear,shearangle):
        # Do some input handling: bare numbers are promoted to parameter
        # dicts with default 'fixed' flags and prior bounds.
        if not isinstance(shear,dict):
            shear = {'value':shear,'fixed':False,'prior':[0.,1.]}
        if not isinstance(shearangle,dict):
            shearangle = {'value':shearangle,'fixed':False,'prior':[0.,180.]}
        if not all(['value' in d for d in [shear,shearangle]]):
            raise KeyError("All parameter dicts must contain the key 'value'.")
        # Fill in missing keys on user-supplied dicts (mutates them in place).
        if not 'fixed' in shear: shear['fixed'] = False
        if not 'fixed' in shearangle: shearangle['fixed'] = False
        if not 'prior' in shear: shear['prior'] = [0.,1.]
        if not 'prior' in shearangle: shearangle['prior'] = [0.,180.]
        self.shear = shear
        self.shearangle = shearangle
    def deflect(self,xim,yim,lens):
        """
        Calculate deflection following Keeton,Mao,Witt 2000.

        Stores the result in self.deflected_x / self.deflected_y rather
        than returning it.

        Parameters:
        xim, yim
            2D Arrays of image coordinates we're going to lens,
            probably generated by np.meshgrid.
        lens
            A lens object; we use this to shift the coordinate system
            to be centered on the lens.
        """
        ximage,yimage = xim.copy(), yim.copy()
        # Recenter on the lens and rotate into its major-axis frame.
        ximage -= lens.x['value']; yimage -= lens.y['value']
        if not np.isclose(lens.PA['value'], 0.):
            r,theta = cart2pol(ximage,yimage)
            ximage,yimage = pol2cart(r,theta-(lens.PA['value']*deg2rad))
        # KMW2000, altered for our coordinate convention.
        g,thg = self.shear['value'], (self.shearangle['value']-lens.PA['value'])*deg2rad
        dxs = -g*np.cos(2*thg)*ximage - g*np.sin(2*thg)*yimage
        dys = -g*np.sin(2*thg)*ximage + g*np.cos(2*thg)*yimage
        # Rotate the deflections back to the sky frame.
        if not np.isclose(lens.PA['value'], 0.):
            r,theta = cart2pol(dxs,dys)
            dxs,dys = pol2cart(r,theta+(lens.PA['value']*deg2rad))
        self.deflected_x = dxs; self.deflected_y = dys
class SersicSource(object):
    """
    Class to hold parameters of an elliptical Sersic light profile, ie
    I(x,y) = A * exp(-bn*((r/reff)^(1/n)-1)),
    where bn makes reff enclose half the light (varies with Sersic index),
    and all the variable parameters are dictionaries. This profile is
    parameterized by the major axis and axis ratio; you can get the half-light
    radius with r_eff = majax * sqrt(axisratio).

    Example format of each parameter:
    x = {'value':x0,'fixed':False,'prior':[xmin,xmax]}, where x0 is the
    initial/current value of x, x should not be a fixed parameter during fitting,
    and the value of x must be between xmin and xmax.

    Parameters:
    z
        Source redshift. Can be made up, as long as it's higher than
        the lens redshift.
    lensed
        True/False flag determining whether this object is actually lensed
        (in which case it gets run through the lensing equations) or not (in
        which case it's simply added to the model of the field without lensing).
        This also determines the convention for the source position coordinates,
        see below.
    x, y
        Position of the source in arcseconds. If lensed is True, this position
        is relative to the position of the lens (or the first lens in a list of
        lenses). If lensed is False, this position is relative to the field
        center (or (0,0) coordinates). +x is west (sorry not sorry), +y is north.
    flux
        Total integrated flux density of the source (ie, NOT peak pixel value), in
        units of Jy.
    majax
        The source major axis in arcseconds.
    index
        The Sersic profile index n (0.5 is ~Gaussian, 1 is ~an exponential disk, 4
        is a de Vaucoleurs profile).
    axisratio
        The source minor/major axis ratio, varying from 1 (circularly symmetric) to
        0 (highly elongated).
    PA
        Source position angle. If lensed is True, this is in degrees CCW from the
        lens major axis (or first lens in a list of them). If lensed is False, this
        is in degrees east of north.
    """
    def __init__(self,z,lensed=True,xoff=None,yoff=None,flux=None,majax=None,\
                 index=None,axisratio=None,PA=None):
        # Do some input handling: bare numbers are promoted to parameter
        # dicts with default 'fixed' flags and prior bounds.
        if not isinstance(xoff,dict):
            xoff = {'value':xoff,'fixed':False,'prior':[-10.,10.]}
        if not isinstance(yoff,dict):
            yoff = {'value':yoff,'fixed':False,'prior':[-10.,10.]}
        if not isinstance(flux,dict):
            flux = {'value':flux,'fixed':False,'prior':[1e-5,1.]} # 0.01 to 1Jy source
        if not isinstance(majax,dict):
            majax = {'value':majax,'fixed':False,'prior':[0.,2.]} # arcsec
        if not isinstance(index,dict):
            index = {'value':index,'fixed':False,'prior':[0.3,4.]}
        if not isinstance(axisratio,dict):
            axisratio = {'value':axisratio,'fixed':False,'prior':[0.01,1.]}
        if not isinstance(PA,dict):
            PA = {'value':PA,'fixed':False,'prior':[0.,180.]}
        if not all(['value' in d for d in [xoff,yoff,flux,majax,index,axisratio,PA]]):
            raise KeyError("All parameter dicts must contain the key 'value'.")
        # Fill in missing keys on user-supplied dicts (mutates them in place).
        if not 'fixed' in xoff: xoff['fixed'] = False
        if not 'fixed' in yoff: yoff['fixed'] = False
        if not 'fixed' in flux: flux['fixed'] = False
        if not 'fixed' in majax: majax['fixed'] = False
        if not 'fixed' in index: index['fixed'] = False
        if not 'fixed' in axisratio: axisratio['fixed'] = False
        if not 'fixed' in PA: PA['fixed'] = False
        if not 'prior' in xoff: xoff['prior'] = [-10.,10.]
        if not 'prior' in yoff: yoff['prior'] = [-10.,10.]
        if not 'prior' in flux: flux['prior'] = [1e-5,1.]
        if not 'prior' in majax: majax['prior'] = [0.,2.]
        # NOTE(review): this default prior ([1/3., 10]) disagrees with the
        # [0.3, 4.] used above when a bare value is passed -- confirm which
        # range is intended and unify.
        if not 'prior' in index: index['prior'] = [1/3.,10]
        if not 'prior' in axisratio: axisratio['prior'] = [0.01,1.]
        if not 'prior' in PA: PA['prior'] = [0.,180.]
        self.z = z
        self.lensed = lensed
        self.xoff = xoff
        self.yoff = yoff
        self.flux = flux
        self.majax = majax
        self.index = index
        self.axisratio = axisratio
        self.PA = PA
class GaussSource(object):
    """
    Circularly symmetric Gaussian light profile, with every model parameter
    stored as a dictionary.

    Each parameter dict has the form
    {'value': v0, 'fixed': False, 'prior': [vmin, vmax]}, where v0 is the
    initial/current value, 'fixed' says whether the parameter is held fixed
    during fitting, and 'prior' bounds the allowed values. A bare number may
    be passed instead; it is wrapped in a dict with default flags/priors.

    Parameters:
    z
        Source redshift. Can be made up, as long as it's higher than
        the lens redshift.
    lensed
        True/False flag determining whether this object is actually lensed
        (run through the lensing equations) or simply added to the field
        model unlensed. Also sets the position convention, see below.
    xoff, yoff
        Position of the source in arcseconds. If lensed is True, relative to
        the (first) lens position; if False, relative to the field center.
        +x is west (sorry not sorry), +y is north.
    flux
        Total integrated flux density of the source (NOT peak pixel value),
        in Jy.
    width
        The Gaussian width (sigma) of the light profile, in arcseconds.
    """

    # Default prior bounds applied when the caller omits them.
    _DEFAULT_PRIORS = {'xoff': [-10., 10.], 'yoff': [-10., 10.],
                       'flux': [1e-5, 1.],   # 0.01 to 1Jy source
                       'width': [0., 2.]}    # arcsec

    def __init__(self, z, lensed=True, xoff=None, yoff=None, flux=None, width=None):
        supplied = {'xoff': xoff, 'yoff': yoff, 'flux': flux, 'width': width}
        for pname in ('xoff', 'yoff', 'flux', 'width'):
            param = supplied[pname]
            bounds = [b for b in self._DEFAULT_PRIORS[pname]]
            # Promote bare numbers to full parameter dicts.
            if not isinstance(param, dict):
                param = {'value': param, 'fixed': False, 'prior': bounds}
            if 'value' not in param:
                raise KeyError("All parameter dicts must contain the key 'value'.")
            # Fill any missing keys in place (caller's dict is updated too,
            # matching the historical behavior).
            param.setdefault('fixed', False)
            param.setdefault('prior', bounds)
            supplied[pname] = param
        self.z = z
        self.lensed = lensed
        self.xoff = supplied['xoff']
        self.yoff = supplied['yoff']
        self.flux = supplied['flux']
        self.width = supplied['width']
class PointSource(object):
    """
    An object unresolved by the data, with every model parameter stored as
    a dictionary.

    Each parameter dict has the form
    {'value': v0, 'fixed': False, 'prior': [vmin, vmax]}, where v0 is the
    initial/current value, 'fixed' says whether the parameter is held fixed
    during fitting, and 'prior' bounds the allowed values. A bare number may
    be passed instead; it is wrapped in a dict with default flags/priors.

    NOTE: Having a lensed point source is not currently implemented.

    Parameters:
    z
        Source redshift. Can be made up, as long as it's higher than
        the lens redshift.
    lensed
        True/False flag determining whether this object is actually lensed
        (run through the lensing equations) or simply added to the field
        model unlensed. Also sets the position convention, see below.
    xoff, yoff
        Position of the source in arcseconds. If lensed is False (it must
        be), relative to the field center (or (0,0) coordinates).
        +x is west (sorry not sorry), +y is north.
    flux
        Total flux density of the source, in units of Jy.
    """

    # Default prior bounds applied when the caller omits them.
    _DEFAULT_PRIORS = {'xoff': [-10., 10.], 'yoff': [-10., 10.],
                       'flux': [1e-5, 1.]}   # 0.01 to 1Jy source

    def __init__(self, z, lensed=True, xoff=None, yoff=None, flux=None):
        supplied = {'xoff': xoff, 'yoff': yoff, 'flux': flux}
        for pname in ('xoff', 'yoff', 'flux'):
            param = supplied[pname]
            bounds = [b for b in self._DEFAULT_PRIORS[pname]]
            # Promote bare numbers to full parameter dicts.
            if not isinstance(param, dict):
                param = {'value': param, 'fixed': False, 'prior': bounds}
            if 'value' not in param:
                raise KeyError("All parameter dicts must contain the key 'value'.")
            # Fill any missing keys in place (caller's dict is updated too,
            # matching the historical behavior).
            param.setdefault('fixed', False)
            param.setdefault('prior', bounds)
            supplied[pname] = param
        self.z = z
        self.lensed = lensed
        self.xoff = supplied['xoff']
        self.yoff = supplied['yoff']
        self.flux = supplied['flux']
def read_visdata(filename):
    """
    Function to read in visibility data from file and create a visdata object
    to hold it afterwards. So far only .bin files from get_visibilities.py are
    supported; idea is eventually to be able to not mess with that and get straight
    from a CASA ms, but don't currently know how to do that without bundling the
    casacore utilities directly...

    Params:
    filename
        Name of file to read from. Should contain all the visibility data needed,
        including u (Lambda), v (Lambda), real, imag, sigma, antenna1, and antenna 2.

    Returns:
    visdata
        A visdata object containing the data from filename.

    Raises ValueError if filename does not end in .bin.
    """
    if not filename.split('.')[-1].lower() in ['bin']:
        raise ValueError('Only .bin files are supported for now...')
    data = np.fromfile(filename)
    # The final float64 is the primary-beam FWHM appended by Visdata.to_binfile;
    # everything before it is the 7 stacked data rows.
    PBfwhm = data[-1]
    data = data[:-1]
    data = data.reshape(7,data.size//7) # bin files lose array shape, so reshape to match
    # Rows unpack as u, v, real, imag, sigma, ant1, ant2 (to_binfile's order).
    data = Visdata(*data,PBfwhm=PBfwhm,filename=filename)
    # Check for auto-correlations:
    if (data.u == 0).sum() > 0:
        warnings.warn("Found autocorrelations when reading the data (u == v == 0); removing them...")
        bad = data.u == 0
        data = Visdata(data.u[~bad],data.v[~bad],data.real[~bad],data.imag[~bad],data.sigma[~bad],
                       data.ant1[~bad],data.ant2[~bad],data.PBfwhm,data.filename)
    # Check for flagged / otherwise bad data
    if (data.amp == 0).sum() > 0:
        warnings.warn("Found flagged/bad data when reading the data (amplitude == 0); removing them...")
        bad = data.amp == 0
        data = Visdata(data.u[~bad],data.v[~bad],data.real[~bad],data.imag[~bad],data.sigma[~bad],
                       data.ant1[~bad],data.ant2[~bad],data.PBfwhm,data.filename)
    return data
def concatvis(visdatas):
    """
    Concatenate multiple visibility sets into one larger set.
    Does no consistency checking of any kind, so beware.

    :param visdatas:
        List of visdata objects

    This method returns:
    * ``concatvis'' - The concatenated visibility set. The PBfwhm is taken
      from the first input set; the filename is set to 'Combined Data'.
    """
    # Accumulate each field across all input sets, in order.
    newu, newv, newr, newi = np.array([]),np.array([]),np.array([]),np.array([])
    news, newa1,newa2 = np.array([]),np.array([]),np.array([])
    for vis in visdatas:
        newu = np.concatenate((newu,vis.u))
        newv = np.concatenate((newv,vis.v))
        newr = np.concatenate((newr,vis.real))
        newi = np.concatenate((newi,vis.imag))
        news = np.concatenate((news,vis.sigma))
        newa1= np.concatenate((newa1,vis.ant1))
        newa2= np.concatenate((newa2,vis.ant2))
    return Visdata(newu,newv,newr,newi,news,newa1,newa2,visdatas[0].PBfwhm,'Combined Data')
def bin_visibilities(visdata,maxnewsize=None):
    """
    WARNING: DOESN'T WORK CURRENTLY(?)
    Bins up (ie, averages down) visibilities to reduce the total
    number of them. Note that since we fit directly to the visibilities,
    this is slightly different (and easier) than gridding in preparation for
    imaging, as we won't need to FFT and so don't need a convolution function.

    :param visdata
        A Visdata object.
    :param maxnewsize = None
        If desired, the maximum number of visibilities post-binning can
        be specified. As long as this number meets other criteria (ie,
        we don't have bin sizes smaller than an integration time or
        bandwidth in wavelengths), the total number in the returned
        Visdata will have fewer than maxnewsize visibilities.

    This method returns:
    * ``BinnedVisibilities'' - A Visdata object containing binned visibilities.
    """
    # NOTE(review): 'stats' (presumably scipy.stats) is not among this
    # module's visible imports -- confirm it is imported elsewhere,
    # otherwise this raises NameError (consistent with the WARNING above).
    if maxnewsize is None: maxnewsize = visdata.u.size/2
    # Bins should be larger than an integration; strictly only valid for an EW array,
    # and assumes a 20s integration time. Thus, this is a conservative estimate.
    minbinsize = 20. * visdata.uvdist.max() / (24*3600.)
    # Bins should be smaller than the effective field size
    maxbinsize = (visdata.PBfwhm * arcsec2rad)**-1
    print(minbinsize,maxbinsize)
    # We're going to find a binning solution iteratively; this gets us set up
    Nbins, binsizeunmet, Nvis, it, maxiter = [3000,3000], True, visdata.u.size, 0, 250
    while (binsizeunmet or Nvis >= maxnewsize):
        print(Nbins)
        # Figure out how to bin up the data
        counts,uedges,vedges,bins = stats.binned_statistic_2d(
            visdata.u,visdata.v,values=visdata.real,statistic='count',
            bins=Nbins)
        du, dv = uedges[1]-uedges[0], vedges[1]-vedges[0]
        # Check that our bins in u and v meet our conditions
        if (du > minbinsize and du < maxbinsize and
            dv > minbinsize and dv < maxbinsize): binsizeunmet = False
        # Otherwise we have to adjust the number of bins to adjust their size...
        #elif (du <= minbinsize or dv <= minbinsize): Nbins = int(Nbins/1.2)
        #elif (du >= maxbinsize or dv >= maxbinsize): Nbins = int(Nbins*1.2)
        elif du <= minbinsize: Nbins[0] = int(Nbins[0]/1.1); binsizeunmet=True
        elif dv <= minbinsize: Nbins[1] = int(Nbins[1]/1.1); binsizeunmet=True
        elif du >= maxbinsize: Nbins[0] = int(Nbins[0]*1.1); binsizeunmet=True
        elif dv >= maxbinsize: Nbins[1] = int(Nbins[1]*1.1); binsizeunmet=True
        # If we still have more than the desired number of visibilities, make
        # fewer bins (we'll loop after this).
        if np.unique(bins).size > maxnewsize: Nbins[0],Nbins[1] = int(Nbins[0]/1.1),int(Nbins[1]/1.1)
        Nvis = np.unique(bins).size
        it += 1
        if it > maxiter: raise ValueError("It's impossible to split your data into that few bins! "
                                          "Try setting maxnewsize to a larger value!")
    print(Nvis,du,dv)
    # Get us some placeholder arrays for the binned data
    u,v,real,imag,sigma,ant1,ant2 = np.zeros((7,Nvis))
    for i,filledbin in enumerate(np.unique(bins)):
        # This tells us which visibilities belong to the current bin
        points = np.where(bins==filledbin)[0]
        # This unravels the indices to uedges,vedges from the binned_statistic binnumber
        # NOTE(review): the +1/-1 offsets assume binned_statistic_2d's
        # flattened binnumber convention including outlier bins -- verify.
        uloc = int(np.floor(filledbin/(vedges.size+1)) - 1)
        vloc = int(filledbin - (vedges.size+1)*(uloc+1) - 1)
        # Get our new data, place at center of uv bins
        u[i],v[i] = uedges[uloc]+0.5*du, vedges[vloc]+0.5*dv
        # Inverse-variance-weighted averages; sumwt is the total weight.
        real[i],sumwt = np.average(visdata.real[points],weights=visdata.sigma[points]**-2.,returned=True)
        imag[i] = np.average(visdata.imag[points],weights=visdata.sigma[points]**-2.)
        sigma[i] = sumwt**-0.5
        # We can keep the antenna numbers if we've only selected points from the same baseline,
        # otherwise get rid of them (CHECK IF MODELCAL FAILS WITH None ANTENNAS)
        ant1[i] = visdata.ant1[points][0] if (visdata.ant1[points]==visdata.ant1[points][0]).all() else None
        ant2[i] = visdata.ant2[points][0] if (visdata.ant2[points]==visdata.ant2[points][0]).all() else None
    return Visdata(u,v,real,imag,sigma,ant1,ant2,visdata.PBfwhm,'BIN{0}'.format(Nvis)+visdata.filename)
| {
"repo_name": "jspilker/visilens",
"path": "visilens/class_utils.py",
"copies": "1",
"size": "32136",
"license": "mit",
"hash": -1973722716337958700,
"line_mean": 43.4481327801,
"line_max": 112,
"alpha_frac": 0.5588125467,
"autogenerated": false,
"ratio": 3.8075829383886255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9714990131223668,
"avg_score": 0.030281070772991536,
"num_lines": 723
} |
__all__ = ['visible', 'cacheable', 'callable_find_by', 'property_find_by']
def cacheable_decorator(lookup):
    """Memoize a one-argument element lookup on the instance it runs on.

    The first call stores lookup's result in ``self._elements_cache`` keyed
    by the identity of the wrapped callable; subsequent calls return the
    cached value without re-querying.
    """
    def wrapper(self):
        try:
            cache = self._elements_cache
        except AttributeError:
            cache = self._elements_cache = {}  # {callable_id: element(s)}
        key = id(lookup)
        try:
            return cache[key]
        except KeyError:
            result = cache[key] = lookup(self)
            return result
    return wrapper

cacheable = cacheable_decorator
# Keyword-argument names accepted by the *_find_by helpers; each maps to a
# Selenium find_element(s)_by_* locator strategy. Trailing underscores
# (e.g. 'id_') avoid shadowing builtins and are stripped before dispatch.
_strategy_kwargs = ['id_', 'xpath', 'link_text', 'partial_link_text',
                    'name', 'tag_name', 'class_name', 'css_selector']
def _callable_find_by(how, using, multiple, cacheable, context, driver_attr, **kwargs):
    """Build a callable that locates element(s) when invoked with a page object.

    how/using: explicit Selenium locator pair; takes precedence over kwargs.
    multiple: find_elements (list) vs find_element (single).
    cacheable: wrap the lookup with cacheable_decorator (memoized per instance).
    context: search root -- a callable, or a descriptor bound to self;
        falls back to getattr(self, driver_attr).
    kwargs: exactly one strategy keyword from _strategy_kwargs (e.g. xpath=...).

    Raises ValueError if neither how/using nor exactly one valid strategy
    keyword is supplied.
    """
    def func(self):
        # context - driver or a certain element
        if context:
            ctx = context() if callable(context) else context.__get__(self)  # or property
        else:
            ctx = getattr(self, driver_attr)
        # 'how' AND 'using' take precedence over keyword arguments
        if how and using:
            lookup = ctx.find_elements if multiple else ctx.find_element
            return lookup(how, using)
        # BUG FIX: kwargs.keys()[0] raises TypeError on Python 3 (dict views
        # are not subscriptable); use next(iter(...)) to get the single key.
        if len(kwargs) != 1 or next(iter(kwargs)) not in _strategy_kwargs:
            raise ValueError(
                "If 'how' AND 'using' are not specified, one and only one of the following "
                "valid keyword arguments should be provided: %s." % _strategy_kwargs)
        key = next(iter(kwargs)); value = kwargs[key]
        suffix = key[:-1] if key.endswith('_') else key  # find_element(s)_by_xxx
        prefix = 'find_elements_by' if multiple else 'find_element_by'
        lookup = getattr(ctx, '%s_%s' % (prefix, suffix))
        return lookup(value)
    return cacheable_decorator(func) if cacheable else func
def callable_find_by(how=None, using=None, multiple=False, cacheable=False, context=None, driver_attr='_driver', **kwargs):
    # Public wrapper: returns a plain callable lookup (see _callable_find_by
    # for the parameter semantics).
    return _callable_find_by(how, using, multiple, cacheable, context, driver_attr, **kwargs)
def property_find_by(how=None, using=None, multiple=False, cacheable=False, context=None, driver_attr='_driver', **kwargs):
    # Same as callable_find_by, but exposed as a read-only property on the
    # page-object class.
    return property(_callable_find_by(how, using, multiple, cacheable, context, driver_attr, **kwargs))
def visible(element):
    """Expected-condition factory for Selenium waits.

    *element* may be a WebElement or a zero-argument callable returning one.
    The returned condition resolves the element on each poll and yields it
    once it is truthy and displayed, otherwise None.
    """
    def expected_condition(ignored):
        target = element() if callable(element) else element
        if target and target.is_displayed():
            return target
        return None
    return expected_condition
| {
"repo_name": "imsardine/learning",
"path": "selenium/pageobject_support.py",
"copies": "1",
"size": "2447",
"license": "mit",
"hash": -6286568557057962000,
"line_mean": 41.1896551724,
"line_max": 123,
"alpha_frac": 0.6317940335,
"autogenerated": false,
"ratio": 3.853543307086614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4985337340586614,
"avg_score": null,
"num_lines": null
} |
__all__ = ['VisualizationFrame']
import numpy as np
from sympy import Dummy, lambdify
from sympy.matrices.expressions import Identity
from sympy.physics.mechanics import Point, ReferenceFrame
from .shapes import Shape
class VisualizationFrame(object):
"""
A VisualizationFrame represents an object that you want to visualize.
It allows you to easily associate a reference frame and a point
with a shape.
A VisualizationFrame can be attached to only one Shape Object.
It can be nested, i.e we can add/remove multiple visualization frames to
one visualization frame. On adding the parent frame to the
Scene object, all the children of the parent visualization frame
are also added, and hence can be visualized and animated.
A VisualizationFrame needs to have a ReferenceFrame, and a Point
for it to form transformation matrices for visualization and
animations.
The ReferenceFrame and Point are required to be provided during
initialization. They can be supplied in the form of any one of these:
1)reference_frame, point argument.
2)a RigidBody argument
3)reference_frame, particle argument.
In addition to these arguments, A shape argument is also required.
"""
def __init__(self, *args):
    """
    To initialize a visualization frame a ReferenceFrame,
    Point, and Shape are required. These ReferenceFrame
    and Point can be passed provided in three ways:

    1) RigidBody: the RigidBody's frame and mass center are used.
    2) ReferenceFrame and a Particle: The Particle's Point is used.
    3) ReferenceFrame and a Point

    Parameters
    ==========
    name : str, optional
        Name assigned to VisualizationFrame, default is unnamed
    reference_frame : ReferenceFrame
        A reference_frame with respect to which all orientations of the
        shape takes place, during visualizations/animations.
    origin : Point
        A point with respect to which all the translations of the shape
        takes place, during visualizations/animations.
    rigidbody : RigidBody
        A rigidbody whose reference frame and mass center are to be
        assigned as reference_frame and origin of the
        VisualizationFrame.
    particle : Particle
        A particle whose point is assigned as origin of the
        VisualizationFrame.
    shape : Shape
        A shape to be attached to the VisualizationFrame

    Examples
    ========
    >>> from pydy.viz import VisualizationFrame, Sphere
    >>> from sympy.physics.mechanics import \
            ReferenceFrame, Point, RigidBody, \
            Particle, inertia
    >>> from sympy import symbols
    >>> I = ReferenceFrame('I')
    >>> O = Point('O')
    >>> shape = Sphere(5)
    >>> #initializing with reference frame, point
    >>> frame1 = VisualizationFrame('frame1', I, O, shape)
    >>> Ixx, Iyy, Izz, mass = symbols('Ixx Iyy Izz mass')
    >>> i = inertia(I, Ixx, Iyy, Izz)
    >>> rbody = RigidBody('rbody', O, I, mass, (i, O))
    >>> # Initializing with a rigidbody ..
    >>> frame2 = VisualizationFrame('frame2', rbody, shape)
    >>> Pa = Particle('Pa', O, mass)
    >>> #initializing with Particle, reference_frame ...
    >>> frame3 = VisualizationFrame('frame3', I, Pa, shape)
    """
    # Last arg should be a Shape ..
    if isinstance(args[-1], Shape):
        self._shape = args[-1]
    else:
        raise TypeError('''Please provide a valid shape object''')
    i = 0
    # If first arg is not str, name the visualization frame 'unnamed'
    if isinstance(args[i], str):
        self._name = args[i]
        i += 1
    else:
        self._name = 'unnamed'
    # First try treating the next arg as a RigidBody (duck-typed via its
    # get_frame/get_masscenter accessors).
    try:
        self._reference_frame = args[i].get_frame()
        self._origin = args[i].get_masscenter()
    except AttributeError:
        # It is not a rigidbody, hence this arg should be a
        # reference frame (probed via its internal _dcm_dict).
        try:
            dcm = args[i]._dcm_dict
            self._reference_frame = args[i]
            i += 1
        except AttributeError:
            raise TypeError(''' A ReferenceFrame is to be supplied
                               before a Particle/Point. ''')
        # Now next arg can either be a Particle or point
        try:
            self._origin = args[i].get_point()
        except AttributeError:
            self._origin = args[i]
    # setting attributes ..
#setting attributes ..
    def __str__(self):
        """Return a human readable tag that includes the frame's name."""
        return 'VisualizationFrame ' + self._name
    def __repr__(self):
        """Return the generic class tag used in debug output."""
        return 'VisualizationFrame'
@property
def name(self):
"""
Name of the VisualizationFrame.
"""
return self._name
@name.setter
def name(self, new_name):
"""
Sets the name of the VisualizationFrame.
"""
if not isinstance(new_name, str):
raise TypeError('''Name should be a str object''')
else:
self._name = new_name
@property
def origin(self):
"""
Origin of the VisualizationFrame,
with respect to which all translational transformations
take place.
"""
return self._origin
@origin.setter
def origin(self, new_origin):
"""
Sets the origin of the VisualizationFrame.
"""
if not isinstance(new_origin, Point):
raise TypeError('''origin should be a valid Point Object''')
else:
self._origin = new_origin
@property
def reference_frame(self):
"""
reference_frame of the VisualizationFrame,
with respect to which all rotational/orientational
transformations take place.
"""
return self._reference_frame
@reference_frame.setter
def reference_frame(self, new_reference_frame):
if not isinstance(new_reference_frame, ReferenceFrame):
raise TypeError('''reference_frame should be a valid
ReferenceFrame object.''')
else:
self._reference_frame = new_reference_frame
@property
def shape(self):
"""
shape in the VisualizationFrame.
A shape attached to the visualization frame.
NOTE: Only one shape can be attached to a visualization frame.
"""
return self._shape
@shape.setter
def shape(self, new_shape):
"""
Sets the shape for VisualizationFrame.
"""
if not isinstance(new_shape, Shape):
raise TypeError('''shape should be a valid Shape object.''')
else:
self._shape = new_shape
def generate_transformation_matrix(self, reference_frame, point):
"""Generates a symbolic transformation matrix, with respect to the
provided reference frame and point.
Parameters
==========
reference_frame : ReferenceFrame
A reference_frame with respect to which transformation matrix is
generated.
point : Point
A point with respect to which transformation matrix is
generated.
Returns
=======
A 4 x 4 SymPy matrix, containing symbolic expressions describing the
transformation as a function of time.
"""
rotation_matrix = self.reference_frame.dcm(reference_frame)
self._transform = Identity(4).as_mutable()
self._transform[0:3, 0:3] = rotation_matrix[0:3, 0:3]
_point_vector = self.origin.pos_from(point).express(reference_frame)
self._transform[3, 0] = _point_vector.dot(reference_frame.x)
self._transform[3, 1] = _point_vector.dot(reference_frame.y)
self._transform[3, 2] = _point_vector.dot(reference_frame.z)
return self._transform
def generate_numeric_transform_function(self, dynamic_variables,
constant_variables):
"""Returns a function which can compute the numerical values of the
transformation matrix given the numerical dynamic variables (i.e.
functions of time or states) and the numerical system constants.
Parameters
==========
dynamic_variables : list of sympy.Functions(time)
All of the dynamic symbols used in defining the orientation and
position of this visualization frame.
constant_variables : list of sympy.Symbols
All of the constants used in defining the orientation and
position of this visualization frame.
Returns
=======
numeric_transform : function
A function which returns the numerical transformation matrix.
"""
dummy_symbols = [Dummy() for i in dynamic_variables]
dummy_dict = dict(zip(dynamic_variables, dummy_symbols))
transform = self._transform.subs(dummy_dict)
self._numeric_transform = lambdify(dummy_symbols +
constant_variables, transform,
modules="numpy")
return self._numeric_transform
def evaluate_transformation_matrix(self, dynamic_values, constant_values):
"""Returns the numerical transformation matrices for each time step.
Parameters
----------
dynamic_values : array_like, shape(m,) or shape(n, m)
The m state values for each n time step.
constant_values : array_like, shape(p,)
The p constant parameter values of the system.
Returns
-------
transform_matrix : numpy.array, shape(n, 4, 4)
A 4 x 4 transformation matrix for each time step.
"""
#If states is instance of numpy array, well and good.
#else convert it to one:
states = np.array(dynamic_values)
if len(states.shape) > 1:
n = states.shape[0]
new = np.zeros((n, 4, 4))
for i, time_instance in enumerate(states):
args = np.hstack((time_instance, constant_values))
new[i, :, :] = self._numeric_transform(*args)
else:
n = 1
args = np.hstack((states, constant_values))
new = self._numeric_transform(*args)
self._visualization_matrix = new.reshape(n, 16).tolist()
return self._visualization_matrix
def generate_scene_dict(self, constant_map={}):
"""
This method generates information for a static
visualization in the initial conditions, in the form
of dictionary. This contains shape information
from `Shape.generate_dict()` followed by an
init_orientation Key.
Before calling this method, all the transformation matrix
generation methods should be called, or it will give an error.
Parameters
==========
constant_map : dictionary
Constant map is required when Shape contains sympy expressions.This
dictionary maps sympy expressions/symbols to numerical values(floats)
Returns
=======
A dictionary built with a call to `Shape.generate_dict`.
Additional keys included in the dict are following:
1. init_orientation: Specifies the initial orientation
of the `VisualizationFrame`.
2. reference_frame_name: Name(str) of the reference_frame
attached to this VisualizationFrame.
3. simulation_id: an arbitrary integer to map scene description
with the simulation data.
"""
scene_dict = { id(self): {} }
scene_dict[id(self)] = self.shape.generate_dict(constant_map=constant_map)
scene_dict[id(self)]["init_orientation"] = self._visualization_matrix[0]
scene_dict[id(self)]["reference_frame_name"] = str(self._reference_frame)
scene_dict[id(self)]["simulation_id"] = id(self)
return scene_dict
def generate_simulation_dict(self):
"""
Generates the simulation information for this visualization
frame. It maps the simulation data information to the
scene information via a unique id.
Before calling this method, all the transformation matrix
generation methods should be called, or it will give an error.
Returns
=======
A dictionary containing list of 4x4 matrices mapped to
the unique id as the key.
"""
simulation_dict = {}
try:
simulation_dict[id(self)] = self._visualization_matrix
except:
raise RuntimeError("Cannot generate visualization data " + \
"because numerical transformation " + \
"has not been performed, " + \
"Please call the numerical " + \
"transformation methods, " + \
"before generating visualization dict")
return simulation_dict
| {
"repo_name": "jcrist/pydy",
"path": "pydy/viz/visualization_frame.py",
"copies": "1",
"size": "13231",
"license": "bsd-3-clause",
"hash": 6582109941948204000,
"line_mean": 34.9538043478,
"line_max": 82,
"alpha_frac": 0.5954198473,
"autogenerated": false,
"ratio": 4.779985549132948,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5875405396432949,
"avg_score": null,
"num_lines": null
} |
__all__ = ['VisualizationFrame']
import sys
# collections.abc.Iterator moved in Python 3; keep the Python 2 fallback.
if sys.version_info < (3, 0):
    from collections import Iterator
else:
    from collections.abc import Iterator
import numpy as np
from sympy import Dummy, lambdify
from sympy.matrices.expressions import Identity
from sympy.physics.mechanics import Point, ReferenceFrame
from .shapes import Shape
from ..utils import sympy_equal_to_or_newer_than
class VisualizationFrame(object):
"""
A VisualizationFrame represents an object that you want to visualize.
It allows you to easily associate a reference frame and a point
with a shape.
A VisualizationFrame can be attached to only one Shape Object.
It can be nested, i.e we can add/remove multiple visualization frames to
one visualization frame. On adding the parent frame to the
Scene object, all the children of the parent visualization frame
are also added, and hence can be visualized and animated.
A VisualizationFrame needs to have a ReferenceFrame, and a Point
for it to form transformation matrices for visualization and
animations.
The ReferenceFrame and Point are required to be provided during
initialization. They can be supplied in the form of any one of these:
1)reference_frame, point argument.
2)a RigidBody argument
3)reference_frame, particle argument.
In addition to these arguments, A shape argument is also required.
"""
    def __init__(self, *args):
        """
        To initialize a visualization frame a ReferenceFrame,
        Point, and Shape are required. These ReferenceFrame
        and Point can be passed provided in three ways:
        1) RigidBody: the RigidBody's frame and mass center are used.
        2) ReferenceFrame and a Particle: The Particle's Point is used.
        3) ReferenceFrame and a Point
        Parameters
        ==========
        name : str, optional
            Name assigned to VisualizationFrame, default is unnamed
        reference_frame : ReferenceFrame
            A reference_frame with respect to which all orientations of the
            shape takes place, during visualizations/animations.
        origin : Point
            A point with respect to which all the translations of the shape
            takes place, during visualizations/animations.
        rigidbody : RigidBody
            A rigidbody whose reference frame and mass center are to be
            assigned as reference_frame and origin of the
            VisualizationFrame.
        particle : Particle
            A particle whose point is assigned as origin of the
            VisualizationFrame.
        shape : Shape
            A shape to be attached to the VisualizationFrame
        Examples
        ========
        >>> from pydy.viz import VisualizationFrame, Sphere
        >>> from sympy.physics.mechanics import \
                ReferenceFrame, Point, RigidBody, \
                Particle, inertia
        >>> from sympy import symbols
        >>> I = ReferenceFrame('I')
        >>> O = Point('O')
        >>> shape = Sphere(5)
        >>> #initializing with reference frame, point
        >>> frame1 = VisualizationFrame('frame1', I, O, shape)
        >>> Ixx, Iyy, Izz, mass = symbols('Ixx Iyy Izz mass')
        >>> i = inertia(I, Ixx, Iyy, Izz)
        >>> rbody = RigidBody('rbody', O, I, mass, (inertia, O))
        >>> # Initializing with a rigidbody ..
        >>> frame2 = VisualizationFrame('frame2', rbody, shape)
        >>> Pa = Particle('Pa', O, mass)
        >>> #initializing with Particle, reference_frame ...
        >>> frame3 = VisualizationFrame('frame3', I, Pa, shape)
        """
        #Last arg should be a Shape ..
        if isinstance(args[-1], Shape):
            self._shape = args[-1]
        else:
            raise TypeError("Please provide a valid shape object as the last "
                            " positional argument.")
        i = 0
        # If first arg is not str, name the visualization frame 'unnamed'
        if isinstance(args[i], str):
            self.name = args[i]
            i += 1
        else:
            self.name = 'unnamed'
        # Duck-typed dispatch: first try to treat args[i] as a RigidBody;
        # on AttributeError fall back to ReferenceFrame + Particle/Point.
        try:
            # SymPy >= 1.0 exposes the frame as an attribute instead of
            # the old get_frame() accessor.
            if sympy_equal_to_or_newer_than('1.0'):
                self.reference_frame = args[i].frame
            else:
                self.reference_frame = args[i].get_frame()
            self.origin = args[i].masscenter
        except AttributeError:
            #It is not a rigidbody, hence this arg should be a
            #reference frame
            try:
                # Probe a ReferenceFrame-only attribute to validate the arg.
                dcm = args[i]._dcm_dict
                self.reference_frame = args[i]
                i += 1
            except AttributeError:
                raise TypeError(''' A ReferenceFrame is to be supplied
                                   before a Particle/Point. ''')
            #Now next arg can either be a Particle or point
            try:
                self.origin = args[i].point
            except AttributeError:
                self.origin = args[i]
        #setting attributes ..
    def __str__(self):
        """Return a human readable tag that includes the frame's name."""
        return 'VisualizationFrame ' + self.name
    def __repr__(self):
        """Return the generic class tag used in debug output."""
        return 'VisualizationFrame'
@property
def name(self):
"""
Name of the VisualizationFrame.
"""
return self._name
@name.setter
def name(self, new_name):
"""
Sets the name of the VisualizationFrame.
"""
if not isinstance(new_name, str):
raise TypeError('''Name should be a str object''')
else:
self._name = new_name
@property
def origin(self):
"""
Origin of the VisualizationFrame,
with respect to which all translational transformations
take place.
"""
return self._origin
@origin.setter
def origin(self, new_origin):
"""
Sets the origin of the VisualizationFrame.
"""
if not isinstance(new_origin, Point):
raise TypeError('''origin should be a valid Point Object''')
else:
self._origin = new_origin
@property
def reference_frame(self):
"""
reference_frame of the VisualizationFrame,
with respect to which all rotational/orientational
transformations take place.
"""
return self._reference_frame
@reference_frame.setter
def reference_frame(self, new_reference_frame):
if not isinstance(new_reference_frame, ReferenceFrame):
raise TypeError('''reference_frame should be a valid
ReferenceFrame object.''')
else:
self._reference_frame = new_reference_frame
@property
def shape(self):
"""
shape in the VisualizationFrame.
A shape attached to the visualization frame.
NOTE: Only one shape can be attached to a visualization frame.
"""
return self._shape
@shape.setter
def shape(self, new_shape):
"""
Sets the shape for VisualizationFrame.
"""
if not isinstance(new_shape, Shape):
raise TypeError('''shape should be a valid Shape object.''')
else:
self._shape = new_shape
    def generate_transformation_matrix(self, reference_frame, point):
        """Generates a symbolic transformation matrix, with respect to the
        provided reference frame and point.
        Parameters
        ==========
        reference_frame : ReferenceFrame
            A reference_frame with respect to which transformation matrix is
            generated.
        point : Point
            A point with respect to which transformation matrix is
            generated.
        Returns
        =======
        A 4 x 4 SymPy matrix, containing symbolic expressions describing the
        transformation as a function of time.
        """
        rotation_matrix = self.reference_frame.dcm(reference_frame)
        self._transform = Identity(4).as_mutable()
        self._transform[:3, :3] = rotation_matrix
        point_vector = self.origin.pos_from(point)
        # The bottom row holds the translation components expressed in
        # reference_frame.
        try:
            self._transform[3, :3] = point_vector.to_matrix(reference_frame).T
        except AttributeError:
            # In earlier versions of sympy, 'Vector' object has no attribute
            # 'to_matrix'.
            self._transform[3, 0] = point_vector.dot(reference_frame.x)
            self._transform[3, 1] = point_vector.dot(reference_frame.y)
            self._transform[3, 2] = point_vector.dot(reference_frame.z)
        return self._transform
    def generate_numeric_transform_function(self, dynamic_variables,
                                            constant_variables):
        """Returns a function which can compute the numerical values of the
        transformation matrix given the numerical dynamic variables (i.e.
        functions of time or states) and the numerical system constants.
        Parameters
        ==========
        dynamic_variables : list of sympy.Functions(time)
            All of the dynamic symbols used in defining the orientation and
            position of this visualization frame.
        constant_variables : list of sympy.Symbols
            All of the constants used in defining the orientation and
            position of this visualization frame.
        Returns
        =======
        numeric_transform : list of functions
            A list of functions which return the numerical transformation
            for each element in the transformation matrix.
        """
        dummy_symbols = [Dummy() for i in dynamic_variables]
        dummy_dict = dict(zip(dynamic_variables, dummy_symbols))
        # Flatten the 4x4 matrix so each element can be lambdified alone.
        transform = self._transform.subs(dummy_dict).reshape(16, 1)
        dummy_symbols.extend(constant_variables)
        # Create a numeric transformation for each element in the transformation
        # matrix. We cannot lambdify the transformation matrix as calling
        # lambdify of a constant expression returns a scalar, even if the
        # lambdify function arguments are sequences:
        # https://github.com/sympy/sympy/issues/5642
        self._numeric_transform = []
        for i in range(16):
            t = transform[i]
            if t.has(Dummy):
                # Element depends on at least one state: takes states + constants.
                f = lambdify(dummy_symbols, t, modules='numpy')
            else:
                # Constant element: takes only the constants.
                f = lambdify(constant_variables, t, modules='numpy')
            self._numeric_transform.append(f)
        return self._numeric_transform
    def evaluate_transformation_matrix(self, dynamic_values, constant_values):
        """Returns the numerical transformation matrices for each time step.
        Parameters
        ----------
        dynamic_values : array_like, shape(m,) or shape(n, m)
            The m state values for each n time step.
        constant_values : array_like, shape(p,)
            The p constant parameter values of the system.
        Returns
        -------
        transform_matrix : numpy.array, shape(n, 16)
            A 4 x 4 transformation matrix for each time step.
        """
        #If states is instance of numpy array, well and good.
        #else convert it to one:
        states = np.squeeze(np.array(dynamic_values))
        # NOTE(review): true iterators pass through untouched and are consumed
        # by the loop below; everything else is copied to a list -- confirm
        # generator inputs are actually intended to be supported here.
        if not isinstance(constant_values, Iterator):
            constant_values = list(constant_values)
        if len(states.shape) > 1:
            n = states.shape[0]
            # Build one column per state, then one repeated column per
            # constant, so each element function evaluates all time steps
            # in a single vectorized call.
            args = []
            for a in np.split(states, states.shape[1], 1):
                args.append(np.squeeze(a))
            for a in constant_values:
                args.append(np.repeat(a, n))
        else:
            n = 1
            args = np.hstack((states, constant_values))
        new = np.zeros((n, 16))
        for i, t in enumerate(self._numeric_transform):
            if callable(t):
                try:
                    new[:, i] = t(*args)
                except TypeError:
                    # dynamic values are not necessary so pass only constant
                    # values into transform function
                    new[:, i] = np.repeat(t(*constant_values), n)
            else:
                new[:, i] = np.repeat(t, n)
        self._visualization_matrix = new.tolist()
        return self._visualization_matrix
def generate_scene_dict(self, constant_map={}):
"""
This method generates information for a static
visualization in the initial conditions, in the form
of dictionary. This contains shape information
from `Shape.generate_dict()` followed by an
init_orientation Key.
Before calling this method, all the transformation matrix
generation methods should be called, or it will give an error.
Parameters
==========
constant_map : dictionary
Constant map is required when Shape contains sympy expressions.This
dictionary maps sympy expressions/symbols to numerical values(floats)
Returns
=======
A dictionary built with a call to `Shape.generate_dict`.
Additional keys included in the dict are following:
1. init_orientation: Specifies the initial orientation
of the `VisualizationFrame`.
2. reference_frame_name: Name(str) of the reference_frame
attached to this VisualizationFrame.
3. simulation_id: an arbitrary integer to map scene description
with the simulation data.
"""
scene_dict = { id(self): {} }
scene_dict[id(self)] = self.shape.generate_dict(constant_map=constant_map)
scene_dict[id(self)]['name'] = self.name
scene_dict[id(self)]["reference_frame_name"] = str(self.reference_frame)
scene_dict[id(self)]["simulation_id"] = id(self)
try:
scene_dict[id(self)]["init_orientation"] = self._visualization_matrix[0]
except:
raise RuntimeError("Cannot generate visualization data " + \
"because numerical transformation " + \
"has not been performed, " + \
"Please call the numerical " + \
"transformation methods, " + \
"before generating visualization dict")
return scene_dict
def generate_simulation_dict(self):
"""
Generates the simulation information for this visualization
frame. It maps the simulation data information to the
scene information via a unique id.
Before calling this method, all the transformation matrix
generation methods should be called, or it will give an error.
Returns
=======
A dictionary containing list of 4x4 matrices mapped to
the unique id as the key.
"""
simulation_dict = {}
try:
simulation_dict[id(self)] = self._visualization_matrix
except:
raise RuntimeError("Cannot generate visualization data " + \
"because numerical transformation " + \
"has not been performed, " + \
"Please call the numerical " + \
"transformation methods, " + \
"before generating visualization dict")
return simulation_dict
| {
"repo_name": "oliverlee/pydy",
"path": "pydy/viz/visualization_frame.py",
"copies": "1",
"size": "15358",
"license": "bsd-3-clause",
"hash": 5979355711289667000,
"line_mean": 35.9182692308,
"line_max": 84,
"alpha_frac": 0.5886834223,
"autogenerated": false,
"ratio": 4.787406483790524,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015485349359475038,
"num_lines": 416
} |
__all__ = [ "voxellise",
"shrinkDisk" ]
import image
import _irtk
import numpy as np
def voxellise( points, triangles, header=None, pixelSize=(1, 1, 1, 1) ):
    """Rasterise a triangulated surface into an image via _irtk.voxellise.

    Parameters
    ----------
    points : array_like, shape (n, 3)
        Vertex coordinates of the mesh (unpacked per-axis below).
    triangles : array_like
        Triangle connectivity, converted to int32 for the C extension.
    header : dict, optional
        Target image header; when None, a header tightly bounding the
        mesh (with a one-voxel margin) is created.
    pixelSize : sequence of 4 floats, optional
        Voxel spacing used only when ``header`` is None. The default is
        now an immutable tuple instead of a shared mutable list (the old
        ``[1,1,1,1]`` default was a single list object shared by every
        call); it is only read, so behaviour is unchanged.

    Returns
    -------
    image.Image
        Image wrapping the voxellised volume.
    """
    points = np.array( points, dtype='float64' )
    triangles = np.array( triangles, dtype='int32' )
    if header is None:
        pixelSize = np.array( pixelSize, dtype='float64')
        # Pad the bounding box by one voxel spacing on every side.
        x_min, y_min, z_min = points.min(axis=0) - pixelSize[:3]
        x_max, y_max, z_max = points.max(axis=0) + pixelSize[:3]
        # The header origin is the centre of the padded bounding box.
        origin = [x_min + (x_max-x_min)/2,
                  y_min + (y_max-y_min)/2,
                  z_min + (z_max-z_min)/2,
                  0]
        dim = [ (x_max - x_min) / pixelSize[0] + 1,
                (y_max - y_min) / pixelSize[1] + 1,
                (z_max - z_min) / pixelSize[2] + 1,
                1]
        header = image.new_header( origin=origin,
                                   dim=dim,
                                   pixelSize=pixelSize )
    img = _irtk.voxellise( points, triangles, header )
    return image.Image( img, header )
def shrinkDisk( img,
                center=None,
                radius=None,
                steps=50 ):
    """Run the _irtk shrinking-disk routine on a 2D image.

    ``center`` defaults to the image centre and ``radius`` to half of the
    first image dimension when not supplied.
    """
    if center is None:
        center = np.array([float(img.shape[0])/2,
                           float(img.shape[1])/2],
                          dtype='float64')
    if radius is None:
        radius = float(img.shape[0])/2
    # Private copies are handed to the extension -- presumably because it
    # writes into its arguments in place; TODO confirm against _irtk.
    img = img.astype('uint8').copy()
    center = np.array( center, dtype='float64' ).copy()
    return _irtk.shrinkDisk(img, center, radius, steps )
| {
"repo_name": "BioMedIA/IRTK",
"path": "wrapping/cython/irtk/vtk2irtk.py",
"copies": "5",
"size": "1551",
"license": "apache-2.0",
"hash": -1237808093245144000,
"line_mean": 32.7173913043,
"line_max": 69,
"alpha_frac": 0.4932301741,
"autogenerated": false,
"ratio": 3.321199143468951,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02045448262035918,
"num_lines": 46
} |
__all__ = ['vseval', 'Env', 'UnboundVar', 'GLOBAL_ENV']
from collections import namedtuple
class LispException(Exception):
    """Base class for all errors raised by the interpreter."""
    pass
class UnboundVar(LispException):
    """Raised when a variable is looked up or assigned but nowhere bound."""
    pass
# (params, body, env) triple produced by evaluating a lambda expression.
compound_procedure = namedtuple('compound_procedure', 'params, body, env')
def vseval(exp, env):
    """Evaluate a Scheme expression *exp* in environment *env*.

    Special forms handled inline: quote, set!, define, if, lambda, begin;
    any other list is treated as a procedure application.
    """
    # properly tail recursive
    # The while-loop re-binds (exp, env) for `if`, `begin` and compound
    # procedure calls instead of recursing, so tail calls do not grow the
    # Python stack.
    while True:
        if is_self_evaluating(exp):
            return exp
        if isinstance(exp, str):
            # exp must be a variable at this point
            return env.lookup(exp)
        cmd, *args = exp
        if cmd == 'quote':
            return text_of_quotation(exp, env)
        if cmd == 'set!':
            return env.assign(exp)
        if cmd == 'define':
            return env.define(to_lambda(exp))
        if cmd == 'if':
            test, yes, no = args
            # Everything except the symbol 'false' counts as true.
            exp = yes if vseval(test, env) != 'false' else no
            continue
        if cmd == 'lambda':
            params, *body = args
            # attach 'begin' if body contains multiple actions
            body = body[0] if len(body) == 1 else ['begin'] + body
            return compound_procedure(params, body, env)
        if cmd == 'begin':
            # Evaluate all but the last action, then loop on the last one
            # (it is in tail position).
            *actions, exp = args
            for act in actions:
                vseval(act, env)
            continue
        # And it's a procedure application
        proc = vseval(cmd, env)
        args = [vseval(arg, env) for arg in args]
        if isinstance(proc, compound_procedure):
            # Tail call: continue the loop with the extended environment
            # rather than recursing into the body.
            env = proc.env.extend(proc.params, args)
            exp = proc.body
            continue
        return proc(*args)
def is_self_evaluating(exp):
    """Return True for atoms that evaluate to themselves:
    numbers, double-quoted string literals, and the booleans
    'true'/'false'.
    """
    if isinstance(exp, (int, float)):
        return True
    if isinstance(exp, str):
        if len(exp) >= 2 and exp[0] == '"' and exp[-1] == '"':
            return True
        return exp == 'true' or exp == 'false'
    return False
def text_of_quotation(exp, env):
    """Return the quoted datum: a quoted atom is returned as-is, while a
    quoted list '(1 a) is rebuilt as (list '1 'a) and evaluated."""
    _, text = exp
    if not isinstance(text, list):
        return text
    requoted = [['quote', element] for element in text]
    return vseval(['list'] + requoted, env)
def to_lambda(exp):
    """Desugar (define (foo x) ...) into (define foo (lambda (x) ...));
    plain (define var val) passes through unchanged."""
    _, var, *body = exp
    if not isinstance(var, list):
        return exp
    name, *params = var
    return ['define', name, ['lambda', params] + body]
class Env:
    """A lexical environment: a frame mapping names to values plus a link
    (``upper``) to the enclosing environment, or None at top level."""
    def __init__(self, frame=None):
        # A fresh dict per instance. The old ``frame={}`` default was a
        # single shared dict, so every ``Env()`` aliased the same frame
        # and definitions leaked between unrelated environments.
        self.frame = {} if frame is None else frame
        # Upper Env, not upper frame
        self.upper = None
    def lookup(self, var):
        """Return the value bound to *var*, searching enclosing scopes.

        Raises UnboundVar when no scope binds it.
        """
        try:
            return self.frame[var]
        except KeyError:
            if self.upper is None:
                raise UnboundVar(var)
            return self.upper.lookup(var)
    def assign(self, exp):
        """Evaluate ``(set! var val)``: rebind *var* in the nearest
        enclosing scope that already defines it."""
        _, var, valexp = exp
        # evaluate the value expression first before the assignment
        val = vseval(valexp, self)
        def env_loop(env):
            try:
                env.frame[var]
            except KeyError:
                if env.upper is None:
                    raise UnboundVar(var)
                env_loop(env.upper)
            # var exists at this point
            else:
                env.frame[var] = val
        env_loop(self)
    def define(self, exp):
        """Evaluate ``(define var val)`` in this environment's own frame."""
        _, var, val = exp
        self.frame[var] = vseval(val, self)
    def extend(self, params, args):
        """Return a child Env whose frame binds *params* to *args*."""
        child = Env(dict(zip(params, args)))
        child.upper = self
        return child
def setup_global_env():
    """Populate GLOBAL_ENV's frame with the primitive procedures.

    Must run after the module-level ``GLOBAL_ENV = Env()`` assignment,
    since it mutates that environment's frame in place.
    """
    import operator
    from functools import reduce
    class cons(namedtuple('cons', 'car, cdr')):
        __slots__ = ()
        def __str__(self):
            # Render proper lists as (a b c); an improper tail prints with
            # dot notation, e.g. (a . b).
            elts = [str(self.car)]
            cdr = self.cdr
            while isinstance(cdr, cons):
                elts.append(str(cdr.car))
                cdr = cdr.cdr
            if cdr != []:
                elts.append('.')
                elts.append(str(cdr))
            return '(' + ' '.join(elts) + ')'
    def lisp_list(*args):
        # Build the cons chain right-to-left; [] terminates the list.
        args = list(args)
        result = []
        while args:
            result = cons(args.pop(), result)
        return result
    def lisp_compare(xs, pred):
        # Chained comparison over consecutive pairs, returning the
        # interpreter's boolean symbols rather than Python bools.
        for x1, x2 in zip(xs, xs[1:]):
            if not pred(x1, x2):
                return 'false'
        return 'true'
    frame = GLOBAL_ENV.frame
    frame['+'] = lambda *xs: sum(xs)
    frame['-'] = lambda *xs: reduce(lambda x, y: x - y, xs)
    frame['*'] = lambda *xs: reduce(lambda x, y: x * y, xs)
    frame['/'] = lambda *xs: reduce(lambda x, y: x / y, xs)
    frame['rem'] = lambda a, b: a % b
    frame['null?'] = lambda x: 'true' if x == [] else 'false'
    frame['cons'] = cons
    frame['car'] = lambda x: x.car
    frame['cdr'] = lambda x: x.cdr
    frame['list'] = lisp_list
    frame['not'] = lambda x: 'true' if x == 'false' else 'false'
    frame['='] = lambda *xs: lisp_compare(xs, operator.eq)
    frame['equal?'] = lambda *xs: lisp_compare(xs, operator.eq)
    frame['<'] = lambda *xs: lisp_compare(xs, operator.lt)
    frame['>'] = lambda *xs: lisp_compare(xs, operator.gt)
    frame['<='] = lambda *xs: lisp_compare(xs, operator.le)
    frame['>='] = lambda *xs: lisp_compare(xs, operator.ge)
    frame['display'] = print
# The single top-level environment, populated with primitives at import time.
GLOBAL_ENV = Env()
setup_global_env()
| {
"repo_name": "nalssee/SICP",
"path": "vanilla_scheme/vseval.py",
"copies": "1",
"size": "5325",
"license": "mit",
"hash": 5173525863171278000,
"line_mean": 27.1746031746,
"line_max": 90,
"alpha_frac": 0.5162441315,
"autogenerated": false,
"ratio": 3.619986403806934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9634112813124344,
"avg_score": 0.000423544436518047,
"num_lines": 189
} |
"""All walls, roads, etc goes here."""
# coding=utf-8
from PyQt5.QtWidgets import QGraphicsRectItem
from PyQt5.QtCore import QRectF, Qt
from PyQt5.QtGui import QImage, QPen
class Object(QGraphicsRectItem):
    """Base class for every item placed on the map scene."""
    def __init__(self, x=0, y=0, width=100, height=100):
        super().__init__(x, y, width, height)
        # Capability flags; presumably consumed by the editor tooling --
        # confirm with the callers.
        self.expandable = True
        self.resizable = False
    def setBoundingRect(self, x, y, width, height):
        """Reposition/resize the item by resetting its rect."""
        self.setRect(x, y, width, height)
class BackgroundImage(Object):
    """Background picture stretched over the whole scene, drawn with a
    configurable opacity."""
    def __init__(self, image, opacity=1):
        super().__init__()
        self.opacity = opacity
        self.image = image
    def paint(self, painter, style=None, widget=None):
        super().paint(painter, style, widget)
        painter.setOpacity(self.opacity)
        # Map the full image rect onto the full scene rect.
        source = QRectF(self.image.rect())
        target = self.scene().sceneRect()
        painter.drawImage(target, self.image, source)
class Wall(Object):
    """Wall tile textured with sprites/wall.png."""
    def paint(self, painter, style=None, widget=None):
        super().paint(painter, style, widget)
        # Clear the default fill before blitting the texture.
        painter.eraseRect(self.boundingRect())
        # The 64x64 sprite is stretched over this item's whole rect.
        source = QRectF(0, 0, 64, 64)
        target = self.boundingRect()
        image = QImage('sprites/wall.png')
        painter.drawImage(target, image, source)
class Air(Object):
    """Invisible placeholder cell: transparent pen and zero paint opacity."""
    def __init__(self):
        super().__init__()
        self.setPen(QPen(Qt.transparent))
    def paint(self, painter, style=None, widget=None):
        super().paint(painter, style, widget)
        painter.setOpacity(0)
class Actor(Object):
    """Keyboard-movable item: arrow keys shift it 10 px per press."""
    def __init__(self):
        super().__init__()
        # Maps Qt key codes to zero-argument movement actions.
        self.keys = {Qt.Key_Left: lambda: self.moveBy(-10, 0),
                     Qt.Key_Right: lambda: self.moveBy(10, 0),
                     Qt.Key_Down: lambda: self.moveBy(0, 10),
                     Qt.Key_Up: lambda: self.moveBy(0, -10)}
    def paint(self, painter, style=None, widget=None):
        super().paint(painter, style, widget)
        painter.drawRect(self.boundingRect())
    def keyPressEvent(self, event):
        super().keyPressEvent(event)
        # Look the action up explicitly rather than with try/except
        # KeyError: the old form also silently swallowed KeyErrors raised
        # *inside* a handler. Unbound keys are still ignored.
        action = self.keys.get(event.key())
        if action is not None:
            action()
| {
"repo_name": "Shurup228/start2k17",
"path": "objects/map_elements.py",
"copies": "1",
"size": "2139",
"license": "mit",
"hash": -2865377750924771000,
"line_mean": 26.4230769231,
"line_max": 62,
"alpha_frac": 0.5960729313,
"autogenerated": false,
"ratio": 3.5590682196339434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9655141150933944,
"avg_score": 0,
"num_lines": 78
} |
__all__ = ['watchers',
<!--(for g in groups)-->
'$!g['groupname']!$',
<!--(end)-->
]
# Import every generated shared-memory group module listed in __all__.
for module in __all__:
    __import__("shm.{0}".format(module))
from shm.base import auv_var_lib
# Initialise the underlying shared-memory C library once, at import time.
_init = auv_var_lib.shm_init
_init.argtypes = []
_init()
class ShmEvalError(Exception):
    """Raised when a shared-memory group/variable path cannot be resolved."""
    pass
def _eval(x):
    """
    Get a shared group or variable.
    :param x: Name of the group or variable. Must be of type str, e.g. desires.depth.
    :return: Group or variable named x.
    :raise ShmEvalError: Cannot evaluate input.
    """
    assert isinstance(x, str), "_eval parameter must be a string"
    sval = x.split(".")
    if len(sval) > 2:
        raise ShmEvalError(str(x) + " - malformed shared variable")
    grp = sval[0]
    # The group name is whitelisted against the generated __all__ before it
    # reaches eval(), and the variable name against the module's __dict__,
    # so arbitrary expressions cannot be injected through x.
    # NOTE(review): getattr(mod, vr) would achieve the same lookup without
    # eval() at all -- worth switching if the template is revisited.
    if grp not in __all__:
        raise ShmEvalError(str(x) + " - group not found")
    mod = eval(grp)
    if len(sval) == 2:
        vr = sval[1]
        if vr not in mod.__dict__:
            raise ShmEvalError(str(x) + " - variable not found")
        return eval(grp + "." + vr)
    else:
        return mod
| {
"repo_name": "cuauv/software",
"path": "libshm/templates/init.py",
"copies": "1",
"size": "1074",
"license": "bsd-3-clause",
"hash": -1130039313483525100,
"line_mean": 21.375,
"line_max": 85,
"alpha_frac": 0.5512104283,
"autogenerated": false,
"ratio": 3.2059701492537314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42571805775537314,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Waterspout', 'App']
import os
import inspect
import tornado.web
import tornado.options
from jinja2 import Environment, FileSystemLoader
from .config import Config
from .utils import get_root_path
from tornado.options import define, options
define('config', default='', help='path to the config file', type=str)
class Waterspout(object):
"""
"""
    def __init__(self, import_name=None, handlers=None, **config):
        """Create a Waterspout application.

        :param import_name: module name used to locate the project root;
            when omitted, the caller's file location is used instead.
        :param handlers: optional initial list of URL handler specs.
        :param config: application settings; defaults are filled in for
            static files, XSRF cookies and the template path.
        """
        if handlers is None:
            handlers = []
        if import_name is not None:
            self.root_path = get_root_path(import_name)
        else:
            # No import name: infer the root from the calling module's file.
            caller = inspect.stack()[1]
            caller_module = inspect.getmodule(caller[0])
            caller_path = os.path.abspath(caller_module.__file__)
            self.root_path = os.path.dirname(caller_path)
        self.handlers = handlers
        if "static_path" not in config:
            config["static_path"] = os.path.join(self.root_path, "static")
        if "static_handler_class" not in config:
            from .web import StaticFileHandler
            config["static_handler_class"] = StaticFileHandler
        if "xsrf_cookies" not in config:
            config["xsrf_cookies"] = True
        template_path = config.get("template_path", None)
        if not (template_path and isinstance(template_path, str)):
            self.template_paths = [os.path.join(self.root_path, "templates")]
        else:
            # Relative template paths are resolved against the root path.
            if not os.path.isabs(template_path):
                template_path = os.path.join(self.root_path, template_path)
            self.template_paths = [os.path.abspath(template_path)]
        self.config = Config(self.root_path, config)
        self._user_loader = None
        self.filters = {}
def filter(self, f):
"""
Decorator to add a filter to Waterspout.
Add your filter like ::
waterspout = Waterspout()
@waterspout.filter
def sort(l):
return l.sort()
And use it in your template ::
{{ [1, 4, 5, 3, 2] | sort }}
:param f: function to add as a filter.
"""
self.filters[f.__name__] = f
return f
def add_handler(self, pattern, handler_class, kwargs=None, name=None):
"""
Add a handler_class to App.
:param pattern:
Regular expression to be matched. Any groups in the regex
will be passed in to the handler's get/post/etc methods as
arguments.
:param handler_class: RequestHandler subclass to be invoked.
:param kwargs:
(optional) A dictionary of additional arguments to be passed
to the handler's constructor.
:param name:
(optional) A name for this handler. Used by
waterspout.reverse_url.
"""
urlspec = [pattern, handler_class]
if kwargs:
urlspec.append(kwargs)
if name:
urlspec.append(name)
self.handlers.append(urlspec)
def register_app(self, app, prefix='', domain=''):
    """Register an app to waterspout.

    :param app: A Waterspout app.
    :param prefix:
        URL prefix for this app.
        Will be ``/<app_name>`` by default; pass ``'/'`` to mount the
        app's handlers without any prefix.
    :param domain:
        Domain for this app.
    """
    if app.parent is not None:
        # Registering twice would duplicate handlers; refuse quietly.
        print("%s has been registered before." % app)
        return
    if not prefix:
        prefix = '/%s' % app.name
    self.template_paths.append(app.template_path)
    if hasattr(app, "_user_loader"):
        if self._user_loader:
            # Fixed message: the two string literals were previously
            # concatenated without any separating whitespace.
            raise RuntimeError("A user loader is already registered, "
                               "but %s app provided another." % app.name)
        self._user_loader = app._user_loader
    if prefix == '/':
        handlers = app.handlers
    else:
        # Re-prefix every spec's URL pattern with the mount point.
        handlers = []
        for handler_class in app.handlers:
            url = '%s%s' % (prefix, handler_class[0])
            new_handler_class = [url] + list(handler_class[1:])
            handlers.append(tuple(new_handler_class))
    if domain:
        domain = "^{}$".format(domain.strip("^$"))
        # NOTE(review): this appends the host pattern and the spec list as
        # two separate elements of self.handlers -- verify this is the
        # layout tornado.web.Application expects for virtual hosts.
        self.handlers += [domain, handlers]
    else:
        self.handlers += handlers
    self.filters.update(app.filters)
    app.parent = self
@property
def application(self):
    """Build and return the configured ``tornado.web.Application``.

    A fresh Application and Jinja environment are created on every
    access; cache the result if a single shared instance is needed.
    Attaches ``sentry_client`` (when a ``sentry_dsn`` is configured and
    raven is installed), the template environment as ``env``, and the
    registered user loader.
    """
    application = tornado.web.Application(
        handlers=self.handlers,
        **self.config
    )
    auto_escape = self.config.get('autoescape', False)
    env = Environment(
        autoescape=auto_escape,
        loader=FileSystemLoader(self.template_paths)
    )
    sentry_dsn = self.config.get('sentry_dsn', None)
    if sentry_dsn:
        try:
            from raven.contrib.tornado import AsyncSentryClient
            assert AsyncSentryClient
        except ImportError:
            import logging
            logging.warning("Sentry support requires raven."
                            "Run: pip install raven")
            application.sentry_client = None
        else:
            application.sentry_client = AsyncSentryClient(sentry_dsn)
    # Merge user filters instead of replacing the mapping wholesale:
    # assigning `env.filters = self.filters` discarded all of Jinja's
    # built-in filters (join, default, escape, ...).
    env.filters.update(self.filters)
    application.env = env
    application._user_loader = self._user_loader
    return application
def TestClient(self):
    """Build a :class:`waterspout.testing.TestClient` for this application.

    Use it like ::

        client = waterspout.TestClient()
        assert client.get('/').body == 'Hello World'
    """
    from waterspout.testing import TestClient as Client
    return Client(self.application)
def user_loader(self, f):
    """Decorator that installs *f* as the session -> user loader.

    Example ::

        @waterspout.user_loader
        def load_user(session):
            return User.get(int(session["id"]))

    :param f: the user loader function
    :returns: *f*, unchanged, so it can be used as a decorator.
    """
    self._user_loader = f
    return f
def run(self):
    """Run your Waterspout Application.

    Parses the command line (and an optional config file), binds a
    Tornado HTTPServer to the configured address/port (defaults
    127.0.0.1:8888) and blocks in the IOLoop until it is stopped.
    """
    from tornado.httpserver import HTTPServer
    import tornado.ioloop
    application = self.application
    tornado.options.parse_command_line()
    if options.config:
        # NOTE(review): `options` is presumably tornado.options.options
        # imported at module level -- confirm a `config` option is defined.
        tornado.options.parse_config_file(options.config)
    http_server = HTTPServer(application)
    address = self.config.get('address', '127.0.0.1')
    port = int(self.config.get('port', 8888))
    http_server.listen(port, address)
    import logging
    logging.info("Start serving at %s:%s" % (address, port))
    tornado.ioloop.IOLoop.instance().start()
class App(object):
    """
    The App in Waterspout is just like the App in Django.
    A Waterspout Application consists of plenty of Apps.
    The minimal App ::

        from waterspout.app import App
        from waterspout.web import RequestHandler

        class Foo(RequestHandler):
            def get(self):
                self.write('This is foo app.')

        handlers = [
            ('/', Foo)
        ]

        app = App('app name', __name__, handlers)
    """
    def __init__(self, name, import_name=None, handlers=None):
        """Create an App.

        :param name: app name; also the default URL prefix on register.
        :param import_name: dotted name used to locate the app root via
            ``get_root_path``; when omitted the direct caller's file is used.
        :param handlers: initial list of URL specs.
        """
        self.name = name
        if import_name is not None:
            self.root_path = get_root_path(import_name)
        else:
            # Fall back to the file of the direct caller (stack frame 1).
            caller = inspect.stack()[1]
            caller_module = inspect.getmodule(caller[0])
            caller_path = os.path.abspath(caller_module.__file__)
            self.root_path = os.path.dirname(caller_path)
        self.template_path = os.path.join(self.root_path, "templates")
        if handlers is None:
            handlers = []
        self.handlers = handlers
        self.parent = None  # set by Waterspout.register_app
        self.filters = {}

    def filter(self, f):
        """
        Decorator to add a filter to Waterspout App.
        Add your filter like ::

            app = App('test')

            @app.filter
            def sort(l):
                return l.sort()

        And use it in your template ::

            {{ [1, 4, 5, 3, 2] | sort }}

        :param f: function to add as a filter.
        """
        self.filters[f.__name__] = f
        if self.parent is not None:
            # Already registered: keep the parent's filter map in sync.
            self.parent.filters.update(self.filters)
        return f

    def add_handler(self, pattern, handler_class, kwargs=None, name=None):
        """
        Add a handler_class to App.

        :param pattern:
            Regular expression to be matched. Any groups in the regex
            will be passed in to the handler's get/post/etc methods as
            arguments.
        :param handler_class: RequestHandler subclass to be invoked.
        :param kwargs:
            (optional) A dictionary of additional arguments to be passed
            to the handler's constructor.
        :param name:
            (optional) A name for this handler. Used by
            Waterspout.reverse_url.
        """
        urlspec = [pattern, handler_class]
        if kwargs:
            urlspec.append(kwargs)
        if name:
            urlspec.append(name)
        self.handlers.append(urlspec)

    def __repr__(self):
        return '<App %s>' % self.name

    def TestClient(self):
        """
        Return the TestClient for the current Waterspout.
        Use it like ::

            client = app.TestClient()
            assert client.get('/').body == 'Hello World'
        """
        Waterspout = self.parent
        assert Waterspout is not None, \
            "You need to register app before testing"
        from waterspout.testing import TestClient
        # Fixed: the parent has no `Waterspout` attribute; the client must
        # wrap the parent's tornado Application, exactly like
        # Waterspout.TestClient() does.
        return TestClient(Waterspout.application)

    def user_loader(self, f):
        """
        Decoration to change the user loader function.
        Example ::

            @app.user_loader
            def load_user(session):
                return User.get(int(session["id"]))

        :param f: the user loader function
        """
        self._user_loader = f
        return f
| {
"repo_name": "whtsky/Waterspout",
"path": "waterspout/app.py",
"copies": "1",
"size": "10052",
"license": "mit",
"hash": -2126149274333388500,
"line_mean": 29.1861861862,
"line_max": 77,
"alpha_frac": 0.5575009948,
"autogenerated": false,
"ratio": 4.299401197604791,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5356902192404791,
"avg_score": null,
"num_lines": null
} |
__all__ = ["w_autocomplete",
]
# @todo Their are performance issues need to profile and find out in which functions are the bottlenecks
# Selenium WebDriver
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
#from selenium.webdriver.common.keys import Keys
from gluon import current
from s3 import s3_debug
import time
# -----------------------------------------------------------------------------
def w_autocomplete(search,
autocomplete,
needle = None,
quiet = True,
):
""" helper function to find a search string in an autocomplete """
import time
config = current.test_config
browser = config.browser
autocomplete_id = "dummy_%s_id" % autocomplete
throbber_id = "dummy_%s_id_throbber" % autocomplete
if needle == None:
needle = search
elem = browser.find_element_by_id(autocomplete_id)
elem.clear()
elem.send_keys(search)
# We will wait for up-to a minute for the data to arrive
# But try every second
for i in range(10):
# For each autocomplete on the form the menu will have an id starting from 0
automenu = 0
try:
menu = browser.find_element_by_id("ui-menu-%s" % automenu)
except:
menu = None
while menu:
# for each item in the menu it will have an id starting from 0
autoitem = 0
if not quiet:
print "Looking for element ui-menu-%s-%s" %(automenu,automenu)
try:
menuitem = browser.find_element_by_id("ui-menu-%s-%s" % (automenu,automenu))
except:
menuitem = None
while menuitem:
linkText = menuitem.text
if not quiet:
print "Looking for %s found %s" %(needle, linkText)
if needle in linkText:
# found the text need to click on it to get the db id
menuitem.click()
# wait for throbber to close
time.sleep(1)
giveup = 0
while browser.find_element_by_id(throbber_id).is_displayed:
time.sleep(1)
giveup += 1
if giveup == 20:
return
# throbber has closed and data was found, return
return
autoitem += 1
try:
menuitem = browser.find_element_by_id("%s-%s" % (menu,automenu))
except:
menuitem = None
# end of looping through each menu item
automenu += 1
try:
menu = browser.find_element_by_id("ui-menu-%s" % automenu)
except:
menu = None
# end of looping through each autocomplete menu
time.sleep(1)
| {
"repo_name": "flavour/iscram",
"path": "modules/tests/core/widgets.py",
"copies": "1",
"size": "2989",
"license": "mit",
"hash": -765098672739011500,
"line_mean": 35.0120481928,
"line_max": 104,
"alpha_frac": 0.5152224824,
"autogenerated": false,
"ratio": 4.515105740181269,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5530328222581269,
"avg_score": null,
"num_lines": null
} |
__all__ = ["WebhookNotifier"]
from freight import http
from freight.models import App, Task, User
from .base import Notifier, generate_event_title
def stringify_date(date):
    """Return *date* as an ISO-8601 string with a ``Z`` suffix.

    Falsy input (e.g. ``None``) yields ``None``.
    """
    if not date:
        return None
    return "%sZ" % date.isoformat()
class WebhookNotifier(Notifier):
    """Notifier that POSTs deploy events as a JSON payload to a URL."""

    def get_options(self):
        """Describe the accepted config keys: required ``url``,
        optional ``headers``."""
        return {"url": {"required": True}, "headers": {"required": False}}

    def send_deploy(self, deploy, task, config, event):
        """POST a JSON description of *deploy*/*event* to the webhook.

        Note: *task* is re-fetched from ``deploy.task_id``, deliberately
        shadowing the argument, as in the original implementation.
        """
        target_url = config["url"]
        app = App.query.get(deploy.app_id)
        task = Task.query.get(deploy.task_id)
        user = User.query.get(task.user_id)
        title = generate_event_title(app, deploy, task, user, event)
        deploy_link = http.absolute_uri(
            f"/deploys/{app.name}/{deploy.environment}/{deploy.number}/"
        )
        payload = {
            "app_name": app.name,
            "date_created": stringify_date(task.date_created),
            "date_started": stringify_date(task.date_started),
            "date_finished": stringify_date(task.date_finished),
            "deploy_number": deploy.number,
            "duration": task.duration,
            "environment": deploy.environment,
            "link": deploy_link,
            "params": dict(task.params or {}),
            "previous_sha": app.get_previous_sha(
                deploy.environment, current_sha=task.sha
            ),
            "ref": task.ref,
            "sha": task.sha,
            "status": str(event),
            "title": title,
            "user": user.name,
            "user_id": user.id,
        }
        http.post(target_url, headers=config.get("headers", {}), json=payload)
| {
"repo_name": "getsentry/freight",
"path": "freight/notifiers/webhook.py",
"copies": "1",
"size": "1596",
"license": "apache-2.0",
"hash": -6516808958275180000,
"line_mean": 32.25,
"line_max": 76,
"alpha_frac": 0.563283208,
"autogenerated": false,
"ratio": 3.790973871733967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4854257079733967,
"avg_score": null,
"num_lines": null
} |
__all__ = ["WebServer"]
from json import dumps as json_encode
from os.path import dirname, join as path_join
from typing import Any, Dict, Mapping, Optional, Sequence, Set, Tuple, Type, Union
from mako.template import Template # type: ignore
from werkzeug.datastructures import Headers
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.routing import Map, Rule
from werkzeug.utils import redirect
from werkzeug.wrappers import Request, Response
from nichtparasoup.core.imagecrawler import BaseImageCrawler
from nichtparasoup.core.server import Server, ServerStatus, type_module_name_str
class JsonRespone(Response):
    """A werkzeug Response that JSON-encodes its body.

    (The typo in the class name is preserved: it is part of the module's
    public interface.)
    """

    def __init__(
        self,
        response: Optional[Any] = None,
        status: Optional[Union[str, int]] = None,
        headers: Optional[Union[Headers, Mapping[str, str], Sequence[Tuple[str, str]]]] = None,
        mimetype: Optional[str] = 'application/json',
        content_type: Optional[str] = 'application/json',
        direct_passthrough: bool = False,
    ) -> None:
        # Serialize first, then delegate everything else to Response.
        encoded = json_encode(response)
        super().__init__(
            response=encoded,
            status=status,
            headers=headers,
            mimetype=mimetype,
            content_type=content_type,
            direct_passthrough=direct_passthrough
        )
class WebServer(object):
    """WSGI application that fronts a nichtparasoup image ``Server``.

    Serves JSON endpoints (/get, /status, /reset), a generated CSS file
    and static assets from the ``htdocs/`` directory next to this module.
    """

    _TEMPLATE_FILES = path_join(dirname(__file__), 'htdocs', 'template')
    _STATIC_FILES = path_join(dirname(__file__), 'htdocs', 'static')
    _STATIC_INDEX = 'index.html'  # relative to cls._STATIC_FILES

    def __init__(self, imageserver: Server, hostname: str, port: int) -> None:  # pragma: no cover
        self.imageserver = imageserver
        self.hostname = hostname
        self.port = port
        # Endpoint names map to on_<endpoint> methods (see dispatch_request).
        self.url_map = Map([
            Rule('/', endpoint='root'),
            Rule('/get', endpoint='get'),
            Rule('/status', endpoint='status'),
            Rule('/status/<what>', endpoint='status_what'),
            Rule('/reset', endpoint='reset'),
            Rule('/css/sourceIcons.css', endpoint='sourceicons')
        ])

    def __call__(self, environ: Dict[str, Any], start_response: Any) -> Any:
        """WSGI entry point."""
        return self.wsgi_app(environ, start_response)

    def dispatch_request(self, request: Request) -> Union[Response, HTTPException]:
        """Route *request* to the matching ``on_<endpoint>`` handler."""
        adapter = self.url_map.bind_to_environ(request.environ)
        try:
            endpoint, values = adapter.match()
            response = getattr(self, 'on_{}'.format(endpoint))(request, **values)  # type: Response
            return response
        except HTTPException as e:
            # HTTP errors (404 etc.) are returned as responses, not raised.
            return e

    def wsgi_app(self, environ: Dict[str, Any], start_response: Any) -> Any:
        """Handle one request; every response is marked uncacheable."""
        request = Request(environ)
        response = self.dispatch_request(request)
        if isinstance(response, Response):
            response.cache_control.no_cache = True
            response.cache_control.no_store = True
        return response(environ, start_response)

    def on_root(self, _: Request) -> Response:
        """Redirect / to the static index page."""
        # relative-path is valid via https://tools.ietf.org/html/rfc3986#section-4.2
        forward = redirect(self._STATIC_INDEX, code=302, Response=Response)
        # to prevent extensive (reverse proxy) header parsing, it is kept as a relative-path
        forward.autocorrect_location_header = False
        return forward

    def on_get(self, _: Request) -> Response:
        """Return one image description as JSON."""
        image = self.imageserver.get_image()
        return JsonRespone(image)

    # Status sub-reports served under /status/<what>.
    _STATUS_WHATS = dict(
        server=ServerStatus.server,
        blacklist=ServerStatus.blacklist,
        crawlers=ServerStatus.crawlers,
    )

    def on_status(self, _: Request) -> Response:
        """Return all status sub-reports as a single JSON object."""
        status = {what: getter(self.imageserver) for what, getter in self._STATUS_WHATS.items()}
        return JsonRespone(status)

    def on_status_what(self, _: Request, what: str) -> Response:
        """Return one status sub-report, or 404 for unknown names."""
        status_what = self._STATUS_WHATS.get(what)
        if not status_what:
            raise NotFound()
        status = status_what(self.imageserver)
        return JsonRespone(status)

    def on_reset(self, _: Request) -> Response:
        """Request an image-server reset and return the result as JSON."""
        reset = self.imageserver.request_reset()
        return JsonRespone(reset)

    def on_sourceicons(self, _: Request) -> Response:
        """Render a CSS file mapping each crawler type to its icon URL."""
        imagecrawlers = {
            type(crawler.imagecrawler)
            for crawler
            in self.imageserver.core.crawlers}  # type: Set[Type[BaseImageCrawler]]
        names_icons_list = [
            (name, icon)
            for name, icon
            in [
                (type_module_name_str(imagecrawler), imagecrawler.info().icon_url)
                for imagecrawler
                in imagecrawlers]
            if icon]
        # cannot use dict for `names_icons_list` in template. will break the template occasionally :-/
        template = Template(filename=path_join(self._TEMPLATE_FILES, 'css', 'sourceIcons.css'))
        css = template.render(names_icons_list=names_icons_list)
        return Response(css, mimetype='text/css')

    def run(self) -> None:
        """Start the image server, then serve via werkzeug until stopped.

        The image server is always stopped again on exit, even on error.
        """
        from werkzeug.serving import run_simple
        from nichtparasoup._internals import _log
        self.imageserver.start()
        try:
            _log('info', ' * starting {0} bound to {1.hostname} on port {1.port}'.format(type(self).__name__, self))
            run_simple(
                self.hostname, self.port,
                application=self,
                static_files={'/': self._STATIC_FILES},
                processes=1, threaded=True,
                use_reloader=False,
                use_debugger=False)
            _log('info', ' * stopped {0} bound to {1.hostname} on port {1.port}'.format(type(self).__name__, self))
        except Exception as e:
            _log('exception', ' * Error occurred. stopping everything')
            raise e
        finally:
            self.imageserver.stop()
| {
"repo_name": "k4cg/nichtparasoup",
"path": "nichtparasoup/webserver/__init__.py",
"copies": "1",
"size": "5829",
"license": "mit",
"hash": 8435001186827339000,
"line_mean": 39.2,
"line_max": 116,
"alpha_frac": 0.6150283067,
"autogenerated": false,
"ratio": 4.053546592489569,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5168574899189569,
"avg_score": null,
"num_lines": null
} |
__all__ = ('WebSocketResponse', 'MsgType')
import asyncio
import warnings
from . import hdrs
from .errors import HttpProcessingError, ClientDisconnectedError
from .websocket import do_handshake, Message, WebSocketError
from .websocket_client import MsgType, closedMessage
from .web_exceptions import (
HTTPBadRequest, HTTPMethodNotAllowed, HTTPInternalServerError)
from aio2py.required.aiohttp.web_reqrep import StreamResponse
# How many receive() calls are tolerated on a closed connection before
# a RuntimeError is raised (see WebSocketResponse.receive).
THRESHOLD_CONNLOST_ACCESS = 5
class WebSocketResponse(StreamResponse):
    """Server-side WebSocket response (HTTP 101 upgrade).

    Performs the handshake, then exchanges frames through a reader/writer
    pair while tracking close/ping/pong state on top of StreamResponse.
    """

    def __init__(self, *,
                 timeout=10.0, autoclose=True, autoping=True, protocols=()):
        # timeout: seconds close() waits for the peer's close frame.
        # autoclose: close automatically when a close frame is received.
        # autoping: answer pings (and swallow pongs) inside receive().
        # protocols: sub-protocols offered during the handshake.
        super().__init__(status=101)
        self._protocols = protocols
        self._protocol = None
        self._writer = None
        self._reader = None
        self._closed = False
        self._closing = False
        self._conn_lost = 0
        self._close_code = None
        self._loop = None
        self._waiting = False
        self._exception = None
        self._timeout = timeout
        self._autoclose = autoclose
        self._autoping = autoping

    def start(self, request):
        """Perform the WebSocket handshake on *request* and start the
        response; returns the underlying response implementation."""
        # make pre-check to don't hide it by do_handshake() exceptions
        resp_impl = self._start_pre_check(request)
        if resp_impl is not None:
            return resp_impl
        try:
            status, headers, parser, writer, protocol = do_handshake(
                request.method, request.headers, request.transport,
                self._protocols)
        except HttpProcessingError as err:
            # Map handshake failures onto the matching web exceptions.
            if err.code == 405:
                raise HTTPMethodNotAllowed(
                    request.method, [hdrs.METH_GET], body=b'')
            elif err.code == 400:
                raise HTTPBadRequest(text=err.message, headers=err.headers)
            else:  # pragma: no cover
                raise HTTPInternalServerError() from err
        if self.status != status:
            self.set_status(status)
        for k, v in headers:
            self.headers[k] = v
        self.force_close()
        resp_impl = super().start(request)
        self._reader = request._reader.set_parser(parser)
        self._writer = writer
        self._protocol = protocol
        self._loop = request.app.loop
        return resp_impl

    def can_start(self, request):
        """Return ``(ok, protocol)``: whether a handshake would succeed,
        without actually starting the response."""
        if self._writer is not None:
            raise RuntimeError('Already started')
        try:
            _, _, _, _, protocol = do_handshake(
                request.method, request.headers, request.transport,
                self._protocols)
        except HttpProcessingError:
            return False, None
        else:
            return True, protocol

    @property
    def closed(self):
        # True once close() started or the peer disconnected.
        return self._closed

    @property
    def close_code(self):
        # Peer's close code (or 1006 on abnormal closure); None while open.
        return self._close_code

    @property
    def protocol(self):
        # Sub-protocol negotiated during the handshake; None before start().
        return self._protocol

    def exception(self):
        """Return the last internal exception seen, if any."""
        return self._exception

    def ping(self, message='b'):
        """Send a ping frame.

        NOTE(review): the default ``'b'`` (a one-character str) looks like
        a typo for ``b''`` -- confirm against the writer's expectations.
        """
        if self._writer is None:
            raise RuntimeError('Call .start() first')
        if self._closed:
            raise RuntimeError('websocket connection is closing')
        self._writer.ping(message)

    def pong(self, message='b'):
        """Send an unsolicited pong frame (see the note on ping())."""
        # unsolicited pong
        if self._writer is None:
            raise RuntimeError('Call .start() first')
        if self._closed:
            raise RuntimeError('websocket connection is closing')
        self._writer.pong(message)

    def send_str(self, data):
        """Send *data* as a text frame; *data* must be str."""
        if self._writer is None:
            raise RuntimeError('Call .start() first')
        if self._closed:
            raise RuntimeError('websocket connection is closing')
        if not isinstance(data, str):
            raise TypeError('data argument must be str (%r)' % type(data))
        self._writer.send(data, binary=False)

    def send_bytes(self, data):
        """Send *data* as a binary frame; *data* must be bytes-like."""
        if self._writer is None:
            raise RuntimeError('Call .start() first')
        if self._closed:
            raise RuntimeError('websocket connection is closing')
        if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError('data argument must be byte-ish (%r)' %
                            type(data))
        self._writer.send(data, binary=True)

    @asyncio.coroutine
    def wait_closed(self):  # pragma: no cover
        """Deprecated alias for close()."""
        warnings.warn(
            'wait_closed() coroutine is deprecated. use close() instead',
            DeprecationWarning)
        return (yield from self.close())

    @asyncio.coroutine
    def write_eof(self):
        """Close the websocket, then mark EOF on the response."""
        if self._eof_sent:
            return
        if self._resp_impl is None:
            raise RuntimeError("Response has not been started")
        yield from self.close()
        self._eof_sent = True

    @asyncio.coroutine
    def close(self, *, code=1000, message=b''):
        """Send a close frame and wait up to ``self._timeout`` for the
        peer's close frame.  Returns True if this call closed the socket,
        False if it was already closed."""
        if self._writer is None:
            raise RuntimeError('Call .start() first')
        if not self._closed:
            self._closed = True
            try:
                self._writer.close(code, message)
            except (asyncio.CancelledError, asyncio.TimeoutError):
                self._close_code = 1006  # abnormal closure
                raise
            except Exception as exc:
                self._close_code = 1006
                self._exception = exc
                return True
            if self._closing:
                # receive() already saw the peer's close frame.
                return True
            # Drain frames until the peer's close frame arrives.
            while True:
                try:
                    msg = yield from asyncio.wait_for(
                        self._reader.read(),
                        timeout=self._timeout, loop=self._loop)
                except asyncio.CancelledError:
                    self._close_code = 1006
                    raise
                except Exception as exc:
                    self._close_code = 1006
                    self._exception = exc
                    return True
                if msg.tp == MsgType.close:
                    self._close_code = msg.data
                    return True
        else:
            return False

    @asyncio.coroutine
    def receive(self):
        """Read the next message, transparently handling ping/pong and
        close frames.  Not reentrant: one receive() at a time."""
        if self._reader is None:
            raise RuntimeError('Call .start() first')
        if self._waiting:
            raise RuntimeError('Concurrent call to receive() is not allowed')
        self._waiting = True
        try:
            while True:
                if self._closed:
                    # Tolerate a few reads after close, then raise.
                    self._conn_lost += 1
                    if self._conn_lost >= THRESHOLD_CONNLOST_ACCESS:
                        raise RuntimeError('WebSocket connection is closed.')
                    return closedMessage
                try:
                    msg = yield from self._reader.read()
                except (asyncio.CancelledError, asyncio.TimeoutError):
                    raise
                except WebSocketError as exc:
                    self._close_code = exc.code
                    yield from self.close(code=exc.code)
                    return Message(MsgType.error, exc, None)
                except ClientDisconnectedError:
                    self._closed = True
                    self._close_code = 1006
                    return Message(MsgType.close, None, None)
                except Exception as exc:
                    self._exception = exc
                    self._closing = True
                    self._close_code = 1006
                    yield from self.close()
                    return Message(MsgType.error, exc, None)
                if msg.tp == MsgType.close:
                    self._closing = True
                    self._close_code = msg.data
                    if not self._closed and self._autoclose:
                        yield from self.close()
                    return msg
                elif not self._closed:
                    if msg.tp == MsgType.ping and self._autoping:
                        self._writer.pong(msg.data)
                    elif msg.tp == MsgType.pong and self._autoping:
                        continue
                    else:
                        return msg
        finally:
            self._waiting = False

    @asyncio.coroutine
    def receive_msg(self):  # pragma: no cover
        """Deprecated alias for receive()."""
        warnings.warn(
            'receive_msg() coroutine is deprecated. use receive() instead',
            DeprecationWarning)
        return (yield from self.receive())

    @asyncio.coroutine
    def receive_str(self):
        """Receive a message and require it to be a text frame."""
        msg = yield from self.receive()
        if msg.tp != MsgType.text:
            raise TypeError(
                "Received message {}:{!r} is not str".format(msg.tp, msg.data))
        return msg.data

    @asyncio.coroutine
    def receive_bytes(self):
        """Receive a message and require it to be a binary frame."""
        msg = yield from self.receive()
        if msg.tp != MsgType.binary:
            raise TypeError(
                "Received message {}:{!r} is not bytes".format(msg.tp,
                                                               msg.data))
        return msg.data

    def write(self, data):
        """Plain writes are unsupported; use send_str()/send_bytes()."""
        raise RuntimeError("Cannot call .write() for websocket")
| {
"repo_name": "lfblogs/aio2py",
"path": "aio2py/required/aiohttp/web_ws.py",
"copies": "1",
"size": "8999",
"license": "apache-2.0",
"hash": 5304132861120272000,
"line_mean": 32.8308270677,
"line_max": 79,
"alpha_frac": 0.5369485498,
"autogenerated": false,
"ratio": 4.814874264312467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5851822814112467,
"avg_score": null,
"num_lines": null
} |
__all__ = ["WebViewImpl"]
from base.log import VLOG
from base.bind import Bind
from web_view import WebView
from status import *
from ui_events import *
from js import *
from geolocation_override_manager import GeolocationOverrideManager
from debugger_tracker import DebuggerTracker
from javascript_dialog_manager import JavaScriptDialogManager
from navigation_tracker import NavigationTracker
from frame_tracker import FrameTracker
from devtools_client_impl import DevToolsClientImpl
from heap_snapshot_taker import HeapSnapshotTaker
from dom_tracker import DomTracker
import json
import copy
# EvaluateScriptReturnType: how Runtime.evaluate should hand results back
# (see _EvaluateScript's "returnByValue" parameter).
ReturnByValue = 0
ReturnByObject = 1
# return status and context_id<int>
def _GetContextIdForFrame(tracker, frame):
    """Return ``(Status, context_id)`` for *frame*.

    A falsy frame means the default context, reported as id 0.
    """
    if not frame:
        return Status(kOk), 0
    status, context_id = tracker.GetContextIdForFrame(frame)
    if status.IsError():
        return status, 0
    return Status(kOk), context_id
def _GetMouseEventAsString(typer):
    """Translate a mouse-event type constant to its DevTools name
    ('' for unknown values)."""
    if typer == kPressedMouseEventType:
        return "mousePressed"
    if typer == kReleasedMouseEventType:
        return "mouseReleased"
    if typer == kMovedMouseEventType:
        return "mouseMoved"
    return ""
def _GetTouchEventAsString(typer):
    """Translate a touch-event type constant to its DevTools name
    ('' for unknown values)."""
    if typer == kTouchStart:
        return "touchStart"
    if typer == kTouchEnd:
        return "touchEnd"
    if typer == kTouchMove:
        return "touchMove"
    return ""
def _GetMouseButtonAsString(typer):
    """Translate a mouse-button constant to its DevTools name
    ('' for unknown values)."""
    if typer == kLeftMouseButton:
        return "left"
    if typer == kMiddleMouseButton:
        return "middle"
    if typer == kRightMouseButton:
        return "right"
    if typer == kNoneMouseButton:
        return "none"
    return ""
def _GetKeyEventAsString(typer):
    """Translate a key-event type constant to its DevTools name
    ('' for unknown values)."""
    if typer == kKeyDownEventType:
        return "keyDown"
    if typer == kKeyUpEventType:
        return "keyUp"
    if typer == kRawKeyDownEventType:
        return "rawKeyDown"
    if typer == kCharEventType:
        return "char"
    return ""
def _GetPointStateString(typer):
    """Translate a touch-event type constant to the touch-point state name
    ('' for unknown values)."""
    if typer == kTouchStart:
        return "touchPressed"
    if typer == kTouchEnd:
        return "touchReleased"
    if typer == kTouchMove:
        return "touchMoved"
    return ""
# result = response.result.result
def _EvaluateScript(client, context_id, expression, return_type, result):
    """Run *expression* via Runtime.evaluate and copy the raw result dict
    into *result* (mutated in place).

    :param client: DevTools client exposing SendCommandAndGetResult.
    :param context_id: JS context id; falsy means the default context.
    :param return_type: ReturnByValue or ReturnByObject.
    :param result: dict filled with the protocol's "result" object.
    :returns: Status.
    """
    params = {}
    params["expression"] = expression
    if context_id:
        params["contextId"] = context_id
    params["returnByValue"] = (return_type == ReturnByValue)
    cmd_result = {}
    status = client.SendCommandAndGetResult("Runtime.evaluate", params, cmd_result)
    if status.IsError():
        return status
    was_thrown = cmd_result.get("wasThrown", None)
    if type(was_thrown) != bool:
        return Status(kUnknownError, "Runtime.evaluate missing 'wasThrown'")
    if was_thrown:
        # Fixed: plain dicts have no dotted-path lookup, so the previous
        # get("result.description") could never find the nested key and
        # always reported "unknown".
        exc_result = cmd_result.get("result")
        if type(exc_result) == dict:
            description = exc_result.get("description", "unknown")
        else:
            description = "unknown"
        return Status(kUnknownError, "Runtime.evaluate threw exception: " + description)
    unscoped_result = cmd_result.get("result")
    if type(unscoped_result) != dict:
        return Status(kUnknownError, "evaluate missing dictionary 'result'")
    result.clear()
    result.update(unscoped_result)
    return Status(kOk)
# resturn status, got_object<bool> and object_id<string>
def _EvaluateScriptAndGetObject(client, context_id, expression):
    """Evaluate *expression* and return ``(Status, got_object, object_id)``.

    ``got_object`` is False when the script produced no remote object.
    """
    object_id = ""
    result = {}
    status = _EvaluateScript(client, context_id, expression, ReturnByObject, result)
    if status.IsError():
        return (status, False, object_id)
    if "objectId" not in result:
        return (Status(kOk), False, object_id)
    object_id = result.get("objectId")
    if type(object_id) != str:
        # NOTE(review): json-decoded strings are unicode on Python 2, so
        # this check may reject valid ids -- confirm the client decodes
        # object ids to str.
        return (Status(kUnknownError, "evaluate has invalid 'objectId'"), False, object_id)
    return (Status(kOk), True, object_id)
# result = response.result.result.value
def _EvaluateScriptAndGetValue(client, context_id, expression, result):
    """Evaluate *expression* by value and unwrap the value into *result*.

    ``result`` stays untouched on error; for ``undefined`` it is emptied;
    otherwise non-dict values are wrapped as ``{"value": v}`` while dict
    values are merged in directly.
    """
    temp_result = {}
    status = _EvaluateScript(client, context_id, expression, ReturnByValue, temp_result)
    if status.IsError():
        return status
    typer = temp_result.get("type")
    if type(typer) != str:
        return Status(kUnknownError, "Runtime.evaluate missing string 'type'")
    if typer == "undefined":
        result.clear()
        return Status(kOk)
    if "value" not in temp_result:
        return Status(kUnknownError, "Runtime.evaluate missing 'value'")
    result.clear()
    value = temp_result["value"]
    # packed in a dict to make pass like point
    if type(value) != dict:
        result.update({"value": value})
    else:
        result.update(value)
    return Status(kOk)
# return status, found_node<bool> and node_id<int>
def _GetNodeIdFromFunction(client, context_id, function, args):
    """Run *function* with *args* in the page, then resolve the returned
    element to a DOM node id.

    :returns: ``(Status, found_node, node_id)`` -- ``found_node`` is False
        when the script produced no remote object.
    """
    node_id = -1
    try:
        js = json.dumps(args)
    except:
        return (Status(kUnknownError, "json dumps error"), False, node_id)
    # TODO(wyh): Second null should be array of shadow host ids.
    expression = "(%s).apply(null, [null, %s, %s, true])" % (kCallFunctionScript, function, js)
    (status, got_object, element_id) = _EvaluateScriptAndGetObject(client, context_id, expression)
    if status.IsError():
        return (status, False, node_id)
    if not got_object:
        return (Status(kOk), False, node_id)
    cmd_result = {}
    params = {}
    params["objectId"] = element_id
    status = client.SendCommandAndGetResult("DOM.requestNode", params, cmd_result)
    # Release the remote object before doing anything else.
    params = {}
    params["objectId"] = element_id
    release_status = client.SendCommand("Runtime.releaseObject", params)
    if release_status.IsError():
        # Best-effort cleanup: a failed release is logged, not fatal.
        VLOG(3, "Failed to release remote object: " + release_status.Message())
    if status.IsError():
        return (status, False, node_id)
    node_id = cmd_result.get("nodeId")
    if type(node_id) != int:
        return (Status(kUnknownError, "DOM.requestNode missing int 'nodeId'"), False, node_id)
    return (Status(kOk), True, node_id)
def _ParseCallFunctionResult(dic, result):
    """Unpack a kCallFunctionScript result dict into *result*.

    Expects ``{"status": int, "value": ...}``; a non-kOk status becomes an
    error Status carrying ``value`` as its message.  On success *result*
    is replaced with ``{"value": ...}``.
    """
    if type(dic) != dict:
        return Status(kUnknownError, "call function result must be a dictionary")
    status_code = dic.get("status", None)
    if type(status_code) != int:
        return Status(kUnknownError, "call function result missing int 'status'")
    if status_code != kOk:
        return Status(status_code, dic.get("value", ""))
    if "value" not in dic:
        return Status(kUnknownError, "call function result missing 'value'")
    result.clear()
    result.update({"value": dic["value"]})
    return Status(kOk)
class WebViewImpl(WebView):
def __init__(self, sid, build_no, client):
    """Wrap a DevTools *client* for a single WebView/page.

    :param sid: view/session id, exposed via GetId().
    :param build_no: browser build number; gates workarounds such as the
        context-menu fallback in DispatchMouseEvents.
    :param client: DevTools client; trackers are only attached when it is
        a real DevToolsClientImpl.
    """
    WebView.__init__(self, sid)
    self.build_no = build_no
    self.client = client
    # in case of casually init DevToolsClientImpl, may cause wrong init of DevToolsEventListener
    if isinstance(client, DevToolsClientImpl):
        self.dom_tracker = DomTracker(client)
        self.frame_tracker = FrameTracker(client)
        self.navigation_tracker = NavigationTracker(client)
        self.dialog_manager = JavaScriptDialogManager(client)
        self.geolocation_override_manager = GeolocationOverrideManager(client)
        self.heap_snapshot_taker = HeapSnapshotTaker(client)
        #self.debugger = DebuggerTracker(client)
    else:
        self.dom_tracker = None
        self.frame_tracker = None
        self.navigation_tracker = None
        self.dialog_manager = None
        self.geolocation_override_manager = None
        self.heap_snapshot_taker = None
        #self.debugger = None
def Update(self, other):
    """Adopt *other*'s build number, client and all per-client trackers."""
    for attr in ("build_no", "client", "dom_tracker", "frame_tracker",
                 "navigation_tracker", "dialog_manager",
                 "geolocation_override_manager", "heap_snapshot_taker"):
        setattr(self, attr, getattr(other, attr))
    #self.debugger = other.debugger
# Overridden from WebView:
def GetId(self):
    """Return the view id (``sid``) this object was constructed with."""
    return self.sid
def WasCrashed(self):
    """Proxy to ``self.client.WasCrashed()``."""
    return self.client.WasCrashed()
def ConnectIfNecessary(self):
    """Proxy to ``self.client.ConnectIfNecessary()``."""
    return self.client.ConnectIfNecessary()
def HandleReceivedEvents(self):
    """Proxy to ``self.client.HandleReceivedEvents()``."""
    return self.client.HandleReceivedEvents()
def Load(self, url):
    """Navigate the page to *url* via Page.navigate.

    Javascript URLs will cause a hang while waiting for the page to stop
    loading, so just disallow.
    """
    if url.lower().startswith("javascript"):
        return Status(kUnknownError, "unsupported protocol")
    return self.client.SendCommand("Page.navigate", {"url": url})
def Reload(self):
    """Reload the current page, honouring the cache (ignoreCache=False)."""
    return self.client.SendCommand("Page.reload", {"ignoreCache": False})
def DispatchTouchEvents(self, events=()):
    """Forward each touch event in *events* via Input.dispatchTouchEvent.

    :param events: iterable of touch events carrying typer, x, y.
    :returns: the first error Status, otherwise Status(kOk).
    """
    for it in events:
        params = {}
        params["type"] = _GetTouchEventAsString(it.typer)
        point = {}
        point["state"] = _GetPointStateString(it.typer)
        point["x"] = it.x
        point["y"] = it.y
        # Fixed: the original did `point_list[0] = point` on an empty
        # list, which raises IndexError for the very first event.
        params["touchPoints"] = [point]
        status = self.client.SendCommand("Input.dispatchTouchEvent", params)
        if status.IsError():
            return status
    return Status(kOk)
def DispatchKeyEvents(self, events=[]):
    """Forward each key event via Input.dispatchKeyEvent; stop on error."""
    for it in events:
        params = {
            "type": _GetKeyEventAsString(it.typer),
            "text": it.modified_text,
            "unmodifiedText": it.unmodified_text,
            "nativeVirtualKeyCode": it.key_code,
            "windowsVirtualKeyCode": it.key_code,
        }
        if it.modifiers & kNumLockKeyModifierMask:
            # NumLock is conveyed via isKeypad, with its bit masked out
            # of the modifiers value.
            params["isKeypad"] = True
            params["modifiers"] = it.modifiers & (kNumLockKeyModifierMask - 1)
        else:
            params["modifiers"] = it.modifiers
        status = self.client.SendCommand("Input.dispatchKeyEvent", params)
        if status.IsError():
            return status
    return Status(kOk)
def DispatchMouseEvents(self, events, frame):
    """Forward mouse *events* via Input.dispatchMouseEvent.

    :param frame: only used for the context-menu workaround below.
    :returns: the first error Status, otherwise Status(kOk).
    """
    for it in events:
        params = {}
        params["type"] = _GetMouseEventAsString(it.typer)
        params["x"] = it.x
        params["y"] = it.y
        params["modifiers"] = it.modifiers
        params["button"] = _GetMouseButtonAsString(it.button)
        params["clickCount"] = it.click_count
        status = self.client.SendCommand("Input.dispatchMouseEvent", params)
        if status.IsError():
            return status
        if self.build_no < 1569 and it.button == kRightMouseButton and it.typer == kReleasedMouseEventType:
            # Old builds (< 1569) need the contextmenu DOM event fired
            # from JS after a right-button release.
            args = []
            args.append(it.x)
            args.append(it.y)
            args.append(it.modifiers)
            result = {}
            status = self.CallFunction(frame, kDispatchContextMenuEventScript, args, result)
            if status.IsError():
                return status
    return Status(kOk)
def GetCookies(self, cookies=None):
    """Fetch the browser's cookies via Page.getCookies.

    :param cookies: optional list used as an out-parameter; replaced
        in-place with the retrieved cookie dicts.  The former
        ``cookies=[]`` default was a shared mutable default argument, so
        results leaked between calls that omitted the argument.
    :returns: Status.
    """
    if cookies is None:
        cookies = []
    params = {}
    result = {}
    status = self.client.SendCommandAndGetResult("Page.getCookies", params, result)
    if status.IsError():
        return status
    cookies_tmp = result.get("cookies")
    if type(cookies_tmp) != list:
        return Status(kUnknownError, "DevTools didn't return cookies")
    cookies[:] = cookies_tmp
    return Status(kOk)
def DeleteCookie(self, name, url):
    """Delete cookie *name* scoped to *url* via Page.deleteCookie."""
    return self.client.SendCommand(
        "Page.deleteCookie", {"cookieName": name, "url": url})
  def GetJavaScriptDialogManager(self):
    '''Returns the JavaScriptDialogManager owned by this web view.'''
    return self.dialog_manager
  def OverrideGeolocation(self, geoposition):
    '''Overrides the reported geolocation; delegates to the override manager.'''
    return self.geolocation_override_manager.OverrideGeolocation(geoposition)
def EvaluateScript(self, frame, expression, result):
(status, context_id) = _GetContextIdForFrame(self.frame_tracker, frame)
if status.IsError():
return status
return _EvaluateScriptAndGetValue(self.client, context_id, expression, result)
def CallFunction(self, frame, function, args, result):
try:
js = json.dumps(args)
except:
return Status(kUnknownError)
# TODO(wyh): Second null should be array of shadow host ids.
expression = "(%s).apply(null, [null, %s, %s])" % (kCallFunctionScript, function, js)
temp_result = {}
status = self.EvaluateScript(frame, expression, temp_result)
if status.IsError():
return status
return _ParseCallFunctionResult(temp_result, result)
def CallAsyncFunctionInternal(self, frame, function, args, is_user_supplied, timeout, result):
async_args = []
async_args.append("return (" + function + ").apply(null, arguments);")
async_args.extend(args)
async_args.append(is_user_supplied)
# timeout should be in milliseconds
async_args.append(timeout)
tmp = {}
status = self.CallFunction(frame, kExecuteAsyncScriptScript, async_args, tmp)
if status.IsError():
return status
kDocUnloadError = "document unloaded while waiting for result"
kQueryResult = "function() {\
var info = document.$xwalk_asyncScriptInfo;\
if (!info)\
return {status: %d, value: '%s'};\
var result = info.result;\
if (!result)\
return {status: 0};\
delete info.result;\
return result;\
}" % (kJavaScriptError, kDocUnloadError)
while True:
no_args = []
query_value = {}
status = self.CallFunction(frame, kQueryResult, no_args, query_value)
if status.IsError():
if status.Code() == kNoSuchFrame:
return Status(kJavaScriptError, kDocUnloadError)
return status
if type(query_value) != dict:
return Status(kUnknownError, "async result info is not a dictionary")
status_code = query_value.get("status", None)
if type(status_code) != int:
return Status(kUnknownError, "async result info has no int 'status'")
if status_code != kOk:
return Status(status_code, str(query_value.get("value")))
if query_value.has_key("value"):
result.clear()
result.update(query_value["value"])
return Status(kOk)
time.sleep(0.1)
  def CallAsyncFunction(self, frame, function, args, timeout, result):
    '''Runs an async JS function (non-user script); see CallAsyncFunctionInternal.'''
    return self.CallAsyncFunctionInternal(frame, function, args, False, timeout, result)
  def CallUserAsyncFunction(self, frame, function, args, timeout, result):
    '''Runs a user-supplied async JS function; see CallAsyncFunctionInternal.'''
    return self.CallAsyncFunctionInternal(frame, function, args, True, timeout, result)
# return status and is_not_pending<bool>
def IsNotPendingNavigation(self, frame_id):
(status, is_pending) = self.navigation_tracker.IsPendingNavigation(frame_id)
if status.IsError():
return (status, True)
if is_pending and self.dialog_manager.IsDialogOpen():
return (Status(kUnexpectedAlertOpen), False)
is_not_pending = not is_pending
return (Status(kOk), is_not_pending)
  # return status and is_pending<bool>
  def IsPendingNavigation(self, frame_id):
    '''Returns (Status, is_pending) straight from the navigation tracker.'''
    return self.navigation_tracker.IsPendingNavigation(frame_id)
  def WaitForPendingNavigations(self, frame_id, timeout, stop_load_on_timeout):
    '''Blocks until `frame_id` has no pending navigation or `timeout` expires.

    On timeout, optionally stops the in-flight load with "window.stop();"
    and waits up to 10 more seconds for the navigation to settle; if that
    second wait fails, its status replaces the timeout status.
    '''
    VLOG(0, "Waiting for pending navigations...")
    status = self.client.HandleEventsUntil(Bind(self.IsNotPendingNavigation, [frame_id]), timeout)
    if status.Code() == kTimeout and stop_load_on_timeout:
      VLOG(0, "Timed out. Stopping navigation...")
      unused_value = {}
      # "" presumably addresses the top-level frame -- TODO confirm.
      self.EvaluateScript("", "window.stop();", unused_value)
      new_status = self.client.HandleEventsUntil(Bind(self.IsNotPendingNavigation, [frame_id]), 10)
      if new_status.IsError():
        status = new_status
    VLOG(0, "Done waiting for pending navigations")
    return status
  def TakeHeapSnapshot(self):
    '''Delegates heap-snapshot capture to the heap snapshot taker.'''
    return self.heap_snapshot_taker.TakeHeapSnapshot()
# return status and out_frame<string>
def GetFrameByFunction(self, frame, function, args):
(status, context_id) = _GetContextIdForFrame(self.frame_tracker, frame)
if status.IsError():
return status
found_node = False
node_id = -1
(status, found_node, node_id) = _GetNodeIdFromFunction(self.client, context_id, function, args)
if status.IsError():
return status
if not found_node:
return Status(kNoSuchFrame)
return self.dom_tracker.GetFrameIdForNode(node_id)
  def SetFileInputFiles(self, frame, element, files):
    '''Sets the selected paths on a file <input> element via DOM.setFileInputFiles.

    frame -- frame id containing the element
    element -- element reference resolved to a DOM node id through
               _GetNodeIdFromFunction
    files -- iterable of absolute file system paths
    Returns a Status; kUnknownError when a path fails validation or the
    element cannot be resolved.
    '''
    file_list = []
    for i in files:
      if not i.startswith("/"):
        return Status(kUnknownError, "path is not absolute: " + i)
      # NOTE(review): this rejects ANY path containing a '.', including
      # ordinary file names like "photo.jpg"; presumably the intent was to
      # reject "." / ".." path components only -- confirm before relying on it.
      if i.find(".") != -1:
        return Status(kUnknownError, "path is not canonical: " + i)
      file_list.append(i)
    (status, context_id) = _GetContextIdForFrame(self.frame_tracker, frame)
    if status.IsError():
      return status
    args = []
    # deep-copy so the caller's element reference is not mutated downstream
    args.append(copy.deepcopy(element))
    (status, found_node, node_id) = _GetNodeIdFromFunction(self.client, context_id, "function(element) { return element; }", args)
    if status.IsError():
      return status
    if not found_node:
      return Status(kUnknownError, "no node ID for file input")
    params = {}
    params["nodeId"] = node_id
    params["files"] = file_list
    return self.client.SendCommand("DOM.setFileInputFiles", params)
  # return status and screenshot<string>
  def CaptureScreenshot(self):
    '''Captures a screenshot of the current page via Page.captureScreenshot.

    Returns a (Status, screenshot) tuple where screenshot is the image data
    string from DevTools on success and "" on failure.
    '''
    result = {}
    status = self.client.SendCommandAndGetResult("Page.captureScreenshot", {}, result)
    if status.IsError():
      return (status, "")
    screenshot = result.get("data")
    # NOTE(review): under Python 2, JSON decoding typically yields unicode,
    # which would fail this exact-type str check -- confirm the intended type.
    if type(screenshot) != str:
      return (Status(kUnknownError, "expected string 'data' in response"), "")
    return (Status(kOk), screenshot)
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "browser/web_view_impl.py",
"copies": "1",
"size": "17244",
"license": "bsd-3-clause",
"hash": -239428404352996160,
"line_mean": 34.701863354,
"line_max": 130,
"alpha_frac": 0.6850498724,
"autogenerated": false,
"ratio": 3.6580398812049215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4843089753604921,
"avg_score": null,
"num_lines": null
} |
__all__ = ["WebView"]
from status import *
class WebView(object):
  '''Do-nothing base implementation of a single web view (page/tab).

  Every operation reports success without side effects; concrete web views
  (e.g. a DevTools-backed implementation) override these methods. Methods
  documented as returning a pair yield a (Status, value) tuple.
  '''
  def __init__(self, sid=""):
    self.sid = sid
  # Overridden from WebView:
  def GetId(self):
    '''Returns the view id given at construction.'''
    return self.sid
  def WasCrashed(self):
    '''Reports whether the view crashed; the stub never does.'''
    return False
  def ConnectIfNecessary(self):
    return Status(kOk)
  def HandleReceivedEvents(self):
    return Status(kOk)
  def Load(self, url):
    return Status(kOk)
  def Reload(self):
    return Status(kOk)
  def EvaluateScript(self, frame, function, result):
    return Status(kOk)
  def CallFunction(self, frame, function, args, result):
    return Status(kOk)
  def CallAsyncFunction(self, frame, function, args, timeout, result):
    return Status(kOk)
  def CallUserAsyncFunction(self, frame, function, args, timeout, result):
    return Status(kOk)
  # return status and out_frame<string> #
  def GetFrameByFunction(self, frame="", function="", args=[]):
    return Status(kOk), None
  def DispatchMouseEvents(self, events=[], frame=""):
    return Status(kOk)
  def DispatchTouchEvents(self, events=[]):
    return Status(kOk)
  def DispatchKeyEvents(self, events=[]):
    return Status(kOk)
  def GetCookies(self, cookies=[]):
    return Status(kOk)
  def DeleteCookie(self, name="", url=""):
    return Status(kOk)
  def WaitForPendingNavigations(self, frame_id="", timeout=0, stop_load_on_timeout=0):
    return Status(kOk)
  # return status and is_pending<bool> #
  def IsPendingNavigation(self, frame_id=""):
    return Status(kOk), False
  def GetJavaScriptDialogManager(self):
    return None
  def OverrideGeolocation(self, geoposition):
    return Status(kOk)
  # return status and screenshot<string>
  def CaptureScreenshot(self):
    return Status(kOk), ""
  def SetFileInputFiles(self, frame="", element={}, files=[]):
    return Status(kOk)
  # return status and snapshot<value>
  def TakeHeapSnapshot(self):
    return Status(kOk), None
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "browser/web_view.py",
"copies": "1",
"size": "1902",
"license": "bsd-3-clause",
"hash": -1669664053449043000,
"line_mean": 21.9156626506,
"line_max": 86,
"alpha_frac": 0.683491062,
"autogenerated": false,
"ratio": 3.6647398843930636,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48482309463930634,
"avg_score": null,
"num_lines": null
} |
# all weights in g/mol
# Molar masses of the gas species used by this package, grouped by family.
# major well-mixed greenhouse gases
CO2 = 44.01
CH4 = 16.04
N2O = 44.013
# perfluorocarbons (PFCs)
CF4 = 88.0043
C2F6 = 138.01
C3F8 = 188.02
C4F10 = 238.028
C5F12 = 288.036
C6F14 = 338.041845
C7F16 = 388.051
C8F18 = 438.06
C_C4F8 = 200.03
# hydrofluorocarbons (HFCs)
HFC23 = 70.01
HFC32 = 52.02
HFC43_10 = 252.055
HFC43_10MEE = HFC43_10  # alias for the same compound
HFC125 = 120.02
HFC134A = 102.03
HFC143A = 84.04
HFC152A = 66.05
HFC227EA = 170.03
HFC236FA = 152.039
HFC245FA = 134.05
HFC365MFC = 148.074
# other fully fluorinated species
SF6 = 146.06
NF3 = 71.00
SO2F2 = 102.06
# chlorofluorocarbons (CFCs)
CFC11 = 137.37
CFC12 = 120.91
CFC113 = 187.376
CFC114 = 170.92
CFC115 = 154.466
# chlorinated solvents (with aliases)
CARB_TET = 153.81
CCL4 = CARB_TET  # alias: carbon tetrachloride
MCF = 133.4
CH3CCL3 = MCF  # alias: methyl chloroform
# hydrochlorofluorocarbons (HCFCs)
HCFC22 = 86.47
HCFC141B = 116.94
HCFC142B = 100.49
# halons
HALON1211 = 165.36
HALON1202 = 209.82
HALON1301 = 148.91
HALON2402 = 259.823
# methyl halides and chlorocarbons
CH3BR = 94.94
CH3CL = 50.49
CH2CL2 = 84.93
CHCL3 = 119.37
# reference / non-GHG species
N2 = 28.013
N = 14.0067
C = 12.01
S = 32.065
SO2 = 64.066
NO = 30.01
NO2 = 46.0055
O3 = 48.0
O2 = 31.9988
AIR = 28.97  # mean molecular weight of dry air
# This is the list of gases included in the RCPs/AR5/CMIP5.
aslist = [CO2, CH4, N2O, CF4, C2F6, C6F14, HFC23, HFC32, HFC43_10, HFC125,
          HFC134A, HFC143A, HFC227EA, HFC245FA, SF6, CFC11, CFC12, CFC113,
          CFC114, CFC115, CARB_TET, MCF, HCFC22, HCFC141B, HCFC142B,
          HALON1211, HALON1202, HALON1301, HALON2402, CH3BR, CH3CL]
| {
"repo_name": "OMS-NetZero/FAIR",
"path": "fair/constants/molwt.py",
"copies": "1",
"size": "1592",
"license": "apache-2.0",
"hash": -6132232311129671000,
"line_mean": 23.1212121212,
"line_max": 79,
"alpha_frac": 0.523241206,
"autogenerated": false,
"ratio": 2.180821917808219,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.32040631238082196,
"avg_score": null,
"num_lines": null
} |
__all__ = ('WidgetManager')
import string
from copy import deepcopy
from kivy.core.window import Window
from pocketthrone.entities.event import *
from pocketthrone.entities.enum import WidgetState, WidgetAction, GameState, CoordinateAxis
from pocketthrone.entities.tilemap import TileMap, GridTranslation
from pocketthrone.managers.eventmanager import EventManager
from pocketthrone.widgets.sidebar import SideBar, SideBarType
from pocketthrone.managers.pipe import L
class WidgetManager:
	'''Central registry and event hub for GUI widgets.

	Tracks screen/grid dimensions, translates between pixel and tile-grid
	coordinates, registers widgets under string links, and reacts to game
	events. NOTE(review): the mutable class-level attributes (linked,
	dimens, grid, ...) are shared across instances; this appears to be used
	as a singleton, so that is presumably intentional -- confirm.
	'''
	WIDTH = 800
	HEIGHT = 600
	TILESIZE = 40
	root = None
	# engine properties
	_tag = "[WidgetManager] "
	_initialized = False
	has_sidebar = False
	# game-wide WidgetManager
	gamestate = GameState.STATE_INITIALIZING
	linked = {}
	dimens = {"x": 0, "y": 0}
	grid = {"width": 0, "height": 0}
	# scrolling related
	prev_scrolling = {"x": 0, "y": 0}
	scrolling = {"x": 0, "y": 0}
	scrolling_has_changed = False
	def __init__(self):
		'''registers in the EventManager, measures the screen and enters LOADING'''
		# register in EventManager
		EventManager.register(self)
		# initialize screen & grid dimensions
		self.update_screen()
		# set GameState to LOADING
		self._initialized = True
		self.set_gamestate(GameState.STATE_LOADING)
	def update_screen(self):
		'''updates grid and screen dimension'''
		# add width & height to self.dimens
		self.dimens["width"] = self.WIDTH
		self.dimens["height"] = self.HEIGHT
		L.Screen = self.get_dimens()
		# add width & height to self.grid
		self.grid["width"] = int(self.WIDTH / self.TILESIZE)
		self.grid["height"] = int(self.HEIGHT / self.TILESIZE)
		L.Grid = self.get_grid()
		# print new size
		print(self._tag + "screen size is " + repr(self.dimens))
		print(self._tag + "grid size is " + repr(self.grid))
	def get_dimens(self):
		'''returns a copy of screen size as tuple'''
		return deepcopy(self.dimens)
	def set_gamestate(self, value):
		'''sets gamestate'''
		self.gamestate = value
		# fire GameStateChangedEvent
		ev_state_changed = GameStateChangedEvent(value)
		EventManager.fire(ev_state_changed)
	def get_gamestate(self):
		'''returns gamestate'''
		return self.gamestate
	def get_grid(self):
		'''returns this screens grid size'''
		return deepcopy(self.grid)
	def get_scrolling(self):
		'''returns scrolling offset'''
		# delegates to the MapManager's scrolling state, not self.scrolling
		return deepcopy(L.MapManager.scrolling)
	# TODO: remove
	# NOTE(review): Python-2-only tuple-parameter syntax; body is disabled.
	def scroll(self, (plus_x, plus_y)):
		# self.scrolling["x"] = int(self.scrolling["x"]) + int(plus_x)
		# self.scrolling["y"] = int(self.scrolling["x"]) + int(plus_y)
		pass
	def to_grid_pos(self, (pos_x, pos_y)):
		'''translate a pixel position into the tile grid'''
		map_x = int(pos_x / 40)
		map_y = int(pos_y / 40)
		# invert y axis
		# NOTE(review): inv_y is computed but never used; the un-inverted
		# map_y is returned -- confirm whether inversion was intended.
		inv_y = L.Grid["height"] - map_y
		return (map_x, map_y)
	def to_relative_pos(self, translation):
		'''returns a relative position vector from given GridTranslation'''
		# NOTE(review): broken as written -- reads undefined names
		# `grid_translation` (the parameter is `translation`) and `pos`
		# (only `value` is computed); calling this raises NameError.
		# Also `max` is keyed by "x"/"y" while `axis` holds a
		# CoordinateAxis value -- confirm they match.
		axis = grid_translation.axis
		value = grid_translation.value
		# initialize maximal values for dimensions
		max = {"x": L.Grid["width"], "y": L.Grid["height"]}
		# initialize pos
		if value < 0:
			value = max[axis] - value
		elif value == 0:
			value = 0
		elif value > 0:
			value = 0 + value
		# when translation is vertical
		if axis == CoordinateAxis.AXIS_Y:
			return (0, pos)
		# when translation is horizontal
		elif axis == CoordinateAxis.AXIS_X:
			return (pos, 0)
	def _to_grid_translation(self, axis=CoordinateAxis.AXIS_X, forwards=True, value=0):
		'''wraps axis/value into a GridTranslation; negative values count backwards'''
		# NOTE(review): several of these locals (pos, rel_pos, max, and the
		# self-assignments) are unused; only axis and value reach the result.
		forwards = True
		axis = axis
		pos = {"x": 0, "y": 0}
		rel_pos = None
		max = {
			"x": self.get_grid()["width"],
			"y": self.get_grid()["height"]
		}
		value = value
		# check if counting starts backwards
		if value < 0:
			forwards = False
		# create & return relative coordinates
		translation = GridTranslation(axis=axis, value=value)
		return translation
	def to_gui_pos(self, (map_x, map_y), y_inv=True):
		'''returns a screen position for a grid position'''
		#gui_x = self.x + (map_x *40)
		gui_x = 0 + (map_x *40)
		gui_y = 0 + ((map_y +1) *40)
		# TODo remove fixed height
		inv_y = 600 - gui_y
		if y_inv:
			return (gui_x, inv_y)
		else:
			return (gui_x, gui_y)
	# returns a grid position considering the actual scrolling offset
	def to_scrolled_pos(self, (grid_x, grid_y)):
		'''returns a scrolled grid position'''
		scrolled_x = grid_x + int(L.MapManager.scrolling["x"])
		scrolled_y = grid_y + int(L.MapManager.scrolling["y"])
		return (scrolled_x, scrolled_y)
	def next_panel_id(self):
		'''returns a new unique panel id'''
		# NOTE(review): self._last_panel_id is never initialized in this
		# class and the incremented value is never stored back, so repeated
		# calls cannot actually yield unique ids -- confirm.
		_id = self._last_panel_id + 1
		return _id
	def get_screen_size(self):
		'''returns the size of the game window in px'''
		return (Window.width, Window.height)
	def register(self, link, widget):
		'''registers widget under link'''
		# check if widget registration is valid
		if link == None or link == "untagged":
			print(self._tag + "ERROR widget registration aborted for " + link)
			return None
		# add widget link to self.linked
		self.linked[link] = widget
		# print & return widget
		print(self._tag + "registered link=" + link)
		return widget
	def get_widget(self, link):
		'''returns widget under link or None'''
		widget = None
		success = False
		# untagged widget -> loading unsuccessful
		if link == None or link == "untagged":
			success = False
		# widget is tagged -> load from self.linked dict
		else:
			widget = self.linked.get(link)
			# set success true when widget isn't None
			if widget != None:
				success = True
		# return widget under name link
		return widget
	def remove_widget(self, link):
		'''removes widget under link'''
		widget = self.get_widget(link)
		self.root.remove_widget(widget)
	def _remove_widget(self, widget):
		'''removes widget by class'''
		root = self.get_widget("root_layout")
		root.remove_widget(widget)
	def button_clicked(self, button):
		'''triggered when a button was clicked'''
		ev_button_clicked = ButtonTouchedEvent(button.link, button.action, widget=button, extra=button.extra)
		EventManager.fire(ev_button_clicked)
	def on_event(self, event):
		'''central event hook; updates widgets for game/selection/button/key events'''
		if isinstance(event, GameStartedEvent):
			self.gamestate = GameState.STATE_INGAME
		# clear BottomBar Labels on TileUnselectedEvent
		if isinstance(event, TileUnselectedEvent):
			# change labels
			self.get_widget("heading").set_text("")
			self.get_widget("details").set_text("")
			# get actionbutton; set action & state
			actionbutton = self.get_widget("actionbutton")
			actionbutton.set_action(WidgetAction.ACTION_NONE)
			actionbutton.set_state(WidgetState.STATE_DEFAULT)
		# show unit data in BottomBar on UnitSelectedEvent
		if isinstance(event, UnitSelectedEvent):
			heading = self.get_widget("heading")
			details = self.get_widget("details")
			heading.set_plaintext("Unit: " + event.unit.name)
			details.set_plaintext("HP: " + str(event.unit.hp) + " | MP: " + str(event.unit.mp))
		# show city data in BottomBar on CitySelectedEvent
		if isinstance(event, CitySelectedEvent):
			city = event.city
			# get production info text
			txt_prod_info = "nothing"
			if city.is_recruiting():
				txt_prod_info = str(city.name_production()) + " (" + str(city._recruition().get_duration()) + ")"
			# make text for heading and detail label
			txt_city_heading = city.name_size() + ": " + city.get_name()
			txt_city_details = "HP: " + str(city.get_hp()) + " | In Production: " + txt_prod_info
			# get labels & actionbutton
			heading = self.get_widget("heading")
			details = self.get_widget("details")
			actionbutton = self.get_widget("actionbutton")
			# change labels
			heading.set_text(txt_city_heading)
			details.set_text(txt_city_details)
			# set actionbutton state BUILD
			actionbutton.set_action(WidgetAction.ACTION_BUILD)
			# show sidebar
			sidebar = SideBar(SideBarType.RECRUIT)
			root = self.get_widget("root_layout")
			root.add_widget(sidebar)
		# handle Button clicks
		if isinstance(event, ButtonTouchedEvent):
			link = event.link
			action = event.action
			print(self._tag + "ButtonTouchedEvent link=" + link)
			if not action:
				print(self._tag + "NO ACTION; ABORTING")
			# BUILD
			if action == WidgetAction.ACTION_BUILD:
				# check if a City is selected
				selected_city = L.CityManager.get_selected_city()
				if not selected_city:
					return None
				# show recruitable units on it
				recruitable_units = L.CityManager.get_recruitable_units(selected_city)
				# NOTE(review): `sidebar` is not defined in this branch (it is
				# local to the CitySelectedEvent handler above), so this line
				# raises NameError when reached -- needs a widget lookup.
				sidebar.show_recruitable_units(recruitable_units)
			# NEXT TURN
			if link == "nextturnbutton":
				L.PlayerManager.forward()
		# gamestate changes
		if isinstance(event, GameStateChangedEvent):
			# menu initialized
			if event.state == GameState.STATE_MENU:
				pass
			# loading...
			elif event.state == GameState.STATE_LOADING:
				pass
			# ingame
			elif event.state == GameState.STATE_MAP:
				# update dimens & grid
				self.update_screen()
		if isinstance(event, KeyPressedEvent):
			if event.key == "spacebar":
				L.PlayerManager.forward()
| {
"repo_name": "herrschr/prey-game",
"path": "pocketthrone/managers/widgetmanager.py",
"copies": "2",
"size": "8768",
"license": "bsd-2-clause",
"hash": -5655741766288327000,
"line_mean": 29.5505226481,
"line_max": 103,
"alpha_frac": 0.6856751825,
"autogenerated": false,
"ratio": 3.0808151791988756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9603585563218857,
"avg_score": 0.032580959696003574,
"num_lines": 287
} |
"""All widgets in the kivy-grid-cells package"""
from contextlib import contextmanager
import logging
from kivy.properties import (
NumericProperty,
ListProperty,
BooleanProperty,
)
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.widget import Widget
import numpy as np
from .constants import Colours, States
log = logging.getLogger(__name__)
__all__ = ["GridCell", "DrawableGrid"]
class GridCell(Widget):
    """A cell within the grid. This can be activated by clicking it.

    The cell keeps two independent states: ``state`` (fill) and
    ``border_state`` (outline). ``colour``/``border_colour`` are the Kivy
    properties the canvas draws from; update_canvas() refreshes them from
    the states. The parent is expected to be a DrawableGrid, which supplies
    ``selected_state``, ``drag_state`` and ``update_cells()``.
    """
    state = NumericProperty(States.DEACTIVATED)
    border_state = NumericProperty(States.DEACTIVATED)
    colour = ListProperty(Colours[States.DEACTIVATED])
    border_colour = ListProperty((0, 0, 0, 0))
    def __init__(self, cell_size, coordinates):
        # coordinates is a (column, row) pair within the parent grid
        self.coordinates = coordinates
        column_number, row_number = coordinates
        kwargs = {
            "size_hint": [None, None],
            "size": [cell_size, cell_size],
        }
        super(GridCell, self).__init__(**kwargs)
        self.update_canvas()
    def update_canvas(self):
        """ Update the canvas with the current state of the cell
        >>> cell = GridCell(1, (0, 0))
        >>> cell.state = States.FIRST
        >>> cell.update_canvas()
        >>> cell.colour == list(Colours[States.FIRST])
        True
        >>> cell.border_colour
        [0, 0, 0, 0]
        >>> cell.border_state = States.SECOND
        >>> cell.update_canvas()
        >>> cell.border_colour == list(Colours[States.SECOND])
        True
        """
        self.colour = Colours[self.state]
        if self.border_state == States.DEACTIVATED:
            self.border_colour = (0, 0, 0, 0)  # Transparent
        else:
            self.border_colour = Colours[self.border_state]
    def update_parent_cell(self):
        # Propagate this cell's state into the parent's numpy grid.
        self.parent.update_cells(self.coordinates, self.state)
    def set_state(self, state):
        """ Set the fill state of the cell
        >>> import mock
        >>> cell = GridCell(1, (0, 0))
        >>> cell.parent = mock.Mock()
        >>> cell.set_state(States.FIRST)
        >>> cell.state == States.FIRST
        True
        >>> cell.colour == list(Colours[States.FIRST])
        True
        """
        # Accept numpy integer scalars by converting them to a plain int.
        if hasattr(state, "dtype"):
            assert state.dtype == int, state.dtype
            state = int(state)
        self.state = state
        self.update_canvas()
        self.update_parent_cell()
        log.debug("Set state of {} to {}".format(self, state))
    def set_border_state(self, state):
        """ Set the border state of the cell
        >>> cell = GridCell(1, (0, 0))
        >>> cell.set_border_state(States.FIRST)
        >>> cell.state == States.DEACTIVATED
        True
        >>> cell.colour == list(Colours[States.DEACTIVATED])
        True
        >>> cell.border_colour == list(Colours[States.FIRST])
        True
        """
        if hasattr(state, "dtype"):
            assert state.dtype == int, state.dtype
            state = int(state)
        self.border_state = state
        self.update_canvas()
    def handle_touch(self):
        """ Flip the cell's state between on and off
        >>> import mock
        >>> cell = GridCell(1, (0, 0))
        >>> cell.parent = mock.Mock(selected_state=States.FIRST)
        >>> new_state = cell.handle_touch()
        >>> new_state == cell.state == States.FIRST
        True
        >>> new_state = cell.handle_touch()
        >>> new_state == cell.state == States.DEACTIVATED
        True
        """
        if self.state == self.parent.selected_state:
            new_state = States.DEACTIVATED
        else:
            new_state = self.parent.selected_state
        self.set_state(new_state)
        return new_state
    def on_touch_down(self, evt):
        if not self.collide_point(*evt.pos):
            # Not on this square
            return
        self.handle_touch()
    def on_touch_move(self, evt):
        # Only toggle when the pointer has newly entered this cell.
        if not self.collide_point(*evt.pos):
            # Not on this square
            return super(GridCell, self).on_touch_move(evt)
        if self.collide_point(*evt.ppos):
            # Not moved to this square
            return super(GridCell, self).on_touch_move(evt)
        # The first cell of a drag latches the state that the whole drag
        # paints; cells already in that state are skipped.
        if self.parent.drag_state is None:
            self.parent.drag_state = (
                self.parent.selected_state
                if self.state == States.DEACTIVATED else States.DEACTIVATED
            )
        elif self.parent.drag_state == self.state:
            return
        self.handle_touch()
    def on_touch_up(self, evt):
        # End of a drag: forget the latched drag state.
        if self.parent.drag_state is not None:
            self.parent.drag_state = None
    def __repr__(self):
        return "{}<{}>".format(self.__class__.__name__,
                               ", ".join(str(c) for c in self.coordinates))
class DrawableGrid(RelativeLayout):
    """A grid of cells that can be in a number of states.

    The authoritative cell states live in read-only numpy arrays
    (``self.grids``); grid 0 (``CELLS_GRID``) is mirrored by one GridCell
    widget per position. Mutations go through ``update_cells`` /
    ``writable_cells`` so the arrays stay read-only otherwise.
    """
    rows = NumericProperty()
    cols = NumericProperty()
    cell_size = NumericProperty(25)
    selected_state = NumericProperty(States.FIRST)
    grids = ListProperty()
    num_grids = NumericProperty(1)
    CELLS_GRID = 0
    GRID_CELL_CLASS = GridCell
    def __init__(self, *args, **kwargs):
        super(DrawableGrid, self).__init__(*args, **kwargs)
        self._cells = None
    def cell_coordinates(self, pos, is_absolute=True):
        """ Determine which cell corresponds to absolute or relative position
        :param pos: Position in pixels
        :type pos: 2-tuple
        :param is_absolute: Is pos an absolute or relative position?
        :type pos: bool
        >>> import mock
        >>> grid = DrawableGrid(cell_size=5)
        >>> grid.to_widget = mock.Mock()
        >>> grid.to_widget.return_value = (111, 111)
        >>> # Returns calculated value
        >>> grid.cell_coordinates((26, 35), is_absolute=False)
        (5, 7)
        >>> grid.to_widget.called
        False
        >>> # Returns mocked value
        >>> grid.cell_coordinates((26, 35))
        (22, 22)
        >>> grid.to_widget.called
        True
        """
        if is_absolute:
            pos = self.to_widget(*pos)
        return (pos[0] // self.cell_size,
                pos[1] // self.cell_size)
    def init_cells(self):
        """ Sets up the grid arrays and the cell widgets
        Simple example:
        >>> grid = DrawableGrid()
        >>> grid.init_cells()
        >>> grid.grids
        [array([], shape=(0, 0), dtype=int64)]
        >>> [g.flags.writeable for g in grid.grids]
        [False]
        >>> grid.cell_widgets
        []
        Example with some cells and multiple grids:
        >>> grid = DrawableGrid(rows=2, cols=1, num_grids=3)
        >>> grid.init_cells()
        >>> grid.grids
        [array([[0, 0]]), array([[0, 0]]), array([[0, 0]])]
        >>> [g.flags.writeable for g in grid.grids]
        [False, False, False]
        >>> grid.cell_widgets
        [[GridCell<0, 0>], [GridCell<0, 1>]]
        Check that overwriting is forbidden
        >>> grid.init_cells()
        Traceback (most recent call last):
        RuntimeError: Cells already initialised!
        """
        if self._cells is not None:
            raise RuntimeError("Cells already initialised!")
        self._setup_cell_widgets()
        self._cells = np.zeros(dtype=int, shape=(self.cols, self.rows))
        self.grids = [self._cells]
        # Remaining grids are independent copies of the (all-zero) cells grid.
        for _ in range(1, self.num_grids):
            self.grids.append(self._cells.copy())
        for grid in self.grids:
            grid.setflags(write=False)
        self.drag_state = None
    def _setup_cell_widgets(self):
        # Build one GridCell widget per position, positioned bottom-up.
        self.cell_widgets = []
        # BUG FIX: xrange() raises NameError on Python 3; range() iterates
        # identically on both Python 2 and 3.
        for row_number in range(self.rows):
            row = []
            for column_number in range(self.cols):
                cell = self.GRID_CELL_CLASS(
                    self.cell_size, (column_number, row_number))
                cell.y = (row_number) * self.cell_size
                cell.x = (column_number) * self.cell_size
                row.append(cell)
            self.cell_widgets.append(row)
        with self.canvas:
            for row in self.cell_widgets:
                for cell in row:
                    self.add_widget(cell)
    @contextmanager
    def _writable_grid(self, index):
        """Set self.grids[index] to be writable, then unset it"""
        grid = self.grids[index]
        try:
            grid.setflags(write=True)
            yield
        finally:
            grid.setflags(write=False)
    def on_cells_updated(self):
        """This is a hook to update things when the cells have been updated"""
        pass
    @property
    def writable_cells(self):
        """
        Usage:
        >>> grid = DrawableGrid()
        >>> grid.init_cells()
        >>> grid.cells.flags.writeable
        False
        >>> with grid.writable_cells:
        ...     grid.cells.flags.writeable
        True
        >>> grid.cells.flags.writeable
        False
        """
        return self._writable_grid(index=self.CELLS_GRID)
    def update_cells(self, coordinates, state):
        """ Set cell state at coordinates.
        :param coordinates: Cell coordinates to update
        :type coordinates: 2-tuple
        :param state: New state for the cell
        :type state: int
        >>> grid = DrawableGrid(rows=2, cols=1)
        >>> grid.init_cells()
        >>> grid.cells
        array([[0, 0]])
        >>> grid.update_cells((0, 0), 1)
        >>> grid.cells
        array([[1, 0]])
        """
        with self.writable_cells:
            self._cells[coordinates] = state
        self.on_cells_updated()
    def set_cell_state(self, cell, y, x):
        # Push the numpy state at (y, x) into the widget.
        cell.set_state(self.cells[y, x])
    def update_cell_widgets(self):
        """ Set each cell widget's state according to the state of the np grid
        >>> grid = DrawableGrid(rows=2, cols=1)
        >>> grid.init_cells()
        >>> grid.cells = np.array([[1, 2]])
        >>> grid.update_cell_widgets()
        >>> grid.cell_widgets[0][0].state
        1
        >>> grid.cell_widgets[1][0].state
        2
        """
        for x, row in enumerate(self.cell_widgets):
            for y, cell in enumerate(row):
                self.set_cell_state(cell, y, x)
    def clear_grid(self, index):
        """ Replace the chosen grid with a zero grid of the same shape
        :param index: Index of the grid to update
        :type index: bool
        >>> grid = DrawableGrid(rows=2, cols=1)
        >>> grid.init_cells()
        >>> grid.cells = np.array([[1, 2]])
        >>> grid.clear_grid(0)
        >>> grid.cells
        array([[0, 0]])
        """
        new_grid = np.zeros_like(self.grids[index])
        if index == self.CELLS_GRID:
            # cells property does everything we need
            self.cells = new_grid
        else:
            new_grid.setflags(write=False)
            self.grids[index] = new_grid
    def clear_grid_for_event(self, grid_index, evt):
        """ This is designed to be subclassed, so that only part of the grid
        can be cleared, if so desired. """
        return self.clear_grid(grid_index)
    @property
    def cells(self):
        return self._cells
    @cells.setter
    def cells(self, cells):
        """
        Cell values can be set here. This will update the related widgets.
        """
        if hasattr(cells, "copy"):
            # Assume cells is a numpy array
            cells = cells.copy()
        else:
            cells = np.array(cells)
        cells.setflags(write=False)
        assert cells.ndim == 2, cells.ndim
        assert cells.shape == self._cells.shape, "{} != {}".format(
            cells.shape, self._cells.shape)
        assert cells.dtype == self._cells.dtype, "{} != {}".format(
            cells.dtype, self._cells.dtype)
        self._cells = cells
        self.grids[self.CELLS_GRID] = cells
        self.on_cells_updated()
        self.update_cell_widgets()
| {
"repo_name": "ukch/kivy-grid-cells",
"path": "kivy_grid_cells/widgets.py",
"copies": "1",
"size": "11933",
"license": "mit",
"hash": -866096381541386500,
"line_mean": 30.5687830688,
"line_max": 78,
"alpha_frac": 0.5513282494,
"autogenerated": false,
"ratio": 3.9137422105608395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4965070459960839,
"avg_score": null,
"num_lines": null
} |
"""All widgets in the kivy-grid-cells package"""
from __future__ import division
import functools
from hashlib import md5
from itertools import product
import logging
import os
import numpy as np
from kivy.base import EventLoop
from kivy.graphics import Color, Rectangle
from kivy.properties import (
AliasProperty,
BooleanProperty,
DictProperty,
ListProperty,
NumericProperty,
ObjectProperty,
)
from kivy.uix.behaviors import ButtonBehavior, DragBehavior
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.widget import Widget
from kivy_grid_cells.constants import States, Colours
from kivy_p2life.constants import Types, FIDUCIALS
from kivy_grid_cells.widgets import GridCell, DrawableGrid
from . import events
from .exceptions import UnknownFiducialError, NoPiecesObjectForPlayer
def _get_root_widget():
    """Return the root widget of the running Kivy application window."""
    EventLoop.ensure_window()
    return EventLoop.window.children[0]
class LimitedGridCell(GridCell):
    """A GridCell whose activation is limited by the player's piece count."""
    def should_ignore_touch(self):
        """Return True when the player has no pieces left to place.

        Deactivating an already-active cell is always allowed, and a player
        without a pieces object is never limited.
        """
        # TODO ignore touch when in TUIO-mode
        if self.state == self.parent.selected_state:
            return False
        try:
            return self.parent.get_player_pieces().pieces < 1
        except NoPiecesObjectForPlayer:
            return False
    def handle_touch(self):
        """ Flip the cell's state between on and off, then update player_pieces
        >>> import mock
        >>> cell = LimitedGridCell(1, (0, 0))
        >>> cell.parent = mock.Mock()
        >>> cell.parent = mock.Mock(selected_state=States.FIRST)
        >>> cell.handle_touch()
        >>> cell.parent.get_player_pieces.return_value.update_pieces.call_args
        call(-1)
        """
        if self.should_ignore_touch():
            return
        new_state = super(LimitedGridCell, self).handle_touch()
        # Deactivating returns a piece to the pool; activating spends one.
        delta = 1 if new_state == States.DEACTIVATED else -1
        try:
            self.parent.get_player_pieces().update_pieces(delta)
        except NoPiecesObjectForPlayer:
            pass
class TUIODragDropMixin(object):
    """Create drag_shape/drop_shape events from TUIO events"""

    def _require_fiducial(method):
        # Decorator for touch handlers: TUIO touches carry a fiducial id
        # ("fid"); plain touches are forwarded to the superclass handler.
        @functools.wraps(method)
        def wrapper(self, touch):
            if not hasattr(touch, "fid"):
                # touch is not a TUIO event
                return getattr(super(TUIODragDropMixin, self), method.__name__)(touch)
            return method(self, touch)
        return wrapper

    # Maps touch id -> (x, y, width, height) of the pattern last seen there.
    pattern_locations = DictProperty()

    def __init__(self, *args, **kwargs):
        self.register_event_type("on_confirm")
        self.register_event_type("on_reset")
        self.register_event_type("on_admin_reset")
        super(TUIODragDropMixin, self).__init__(*args, **kwargs)
        half_pi = np.pi / 2
        self.rotation_array = np.array(
            # [full circle, 3/4, half, 1/4, nothing]
            [np.pi * 2, np.pi + half_pi, np.pi, half_pi, 0])

    def touch_to_pattern(self, touch):
        """ Find the related pattern from the touch and return it
        :param touch: Kivy touch event with fiducial
        Touch with pattern fiducial
        >>> import mock
        >>> thing = type("Thing", (TUIODragDropMixin, Widget), {})()
        >>> thing.touch_to_pattern(mock.Mock(fid=2, angle=0))
        array([[ True, False],
               [ True, True]], dtype=bool)
        >>> thing.touch_to_pattern(mock.Mock(fid=2, angle=3))
        array([[ True, True],
               [False, True]], dtype=bool)
        Touch with non-pattern fiducial
        >>> event = mock.Mock(fid=101)
        >>> event.__repr__ = lambda *a: "Mock event"
        >>> thing.touch_to_pattern(event)
        Traceback (most recent call last):
        UnknownFiducialError: Mock event
        """
        fid_type, pattern = FIDUCIALS.get(touch.fid, (None, None))
        if fid_type != Types.PATTERN:
            raise UnknownFiducialError(touch)
        pattern = np.array(pattern)
        # Use the (radian) angle to find out optimum rotation. This uses the
        # index of self.rotation_array, eg. array[2] == np.pi (180deg); to
        # rotate 180deg, rot90 needs to be called with argument 2.
        rotations = np.abs(self.rotation_array - touch.angle).argmin()
        return np.rot90(pattern, rotations)

    @_require_fiducial
    def on_touch_down(self, touch):
        """ TUIO touch down event
        No fiducial
        >>> import mock
        >>> logging.root._log = mock.Mock()
        >>> thing = type("Thing", (TUIODragDropMixin, Widget), {"cell_coordinates": lambda s, a: a})()
        >>> thing.on_touch_down(object())
        Event fiducial
        >>> events.CustomEvent.dispatch = mock.Mock()
        >>> thing.on_touch_down(mock.Mock(fid=101, pos=(0, 0), angle=0))
        False
        >>> events.ConfirmEventWhite.dispatch.call_count
        1
        Pattern fiducial
        >>> thing.on_touch_down(mock.Mock(id=100, fid=2, pos=(0, 0), angle=0))
        >>> thing.pattern_locations
        {100: (0, 0, 2, 2)}
        Unknown fiducial
        >>> thing.on_touch_down(mock.Mock(fid=234, pos=(0, 0), angle=0))
        False
        >>> logging.root._log.call_count
        1
        >>> logging.root._log.call_args
        call(30, 'Unrecognised fiducial 234 on down', ())
        """
        # Fire custom events
        fid_type, data = FIDUCIALS.get(touch.fid, (None, None))
        if fid_type == Types.EVENT_DISPATCHER:
            Event = getattr(events, data)
            Event(touch).dispatch(self)
            return False
        # Set pattern location data
        try:
            pattern = self.touch_to_pattern(touch)
        except UnknownFiducialError:
            logging.warning("Unrecognised fiducial {} on down".format(touch.fid))
            return False
        if self.collide_point(*touch.pos):
            self.pattern_locations[touch.id] = \
                self.cell_coordinates(touch.pos) + pattern.shape
        else:
            self.pattern_locations[touch.id] = (None, None, None, None)

    @_require_fiducial
    def on_touch_up(self, touch):
        if not self.collide_point(*touch.pos):
            # Remove associated shape
            self.clear_grid_for_event(self.PREVIEW_GRID, touch)
        # Deregister this touch
        if touch.id in self.pattern_locations:
            del self.pattern_locations[touch.id]

    @_require_fiducial
    def on_touch_move(self, touch):
        """ TUIO touch move event
        No fiducial
        >>> import mock
        >>> logging.root._log = mock.Mock()
        >>> thing = type("Thing", (TUIODragDropMixin, Widget), {"cell_coordinates": lambda s, a: a})()
        >>> thing.on_touch_move(object())
        Pattern fiducial
        >>> events.CustomEvent.dispatch = mock.Mock()
        >>> thing.on_touch_move(mock.Mock(id=100, fid=2, pos=(0, 0), angle=0))
        False
        >>> events.DragShapeEvent.dispatch.call_count
        1
        >>> events.DragShapeEvent.dispatch.call_args == [(thing, ), {}]
        True
        >>> thing.pattern_locations
        {100: (0, 0, 2, 2)}
        Unknown fiducial
        >>> thing.on_touch_move(mock.Mock(fid=234, pos=(0, 0), angle=0))
        False
        >>> logging.root._log.call_count
        1
        >>> logging.root._log.call_args
        call(30, 'Unrecognised fiducial 234 on move', ())
        """
        if not self.collide_point(*touch.pos):
            return False
        try:
            pattern = self.touch_to_pattern(touch)
        except UnknownFiducialError:
            logging.warning("Unrecognised fiducial {} on move".format(touch.fid))
            return False
        events.DragShapeEvent(pattern, touch).dispatch(self)
        self.pattern_locations[touch.id] = \
            self.cell_coordinates(touch.pos) + pattern.shape
        return False

    def clear_grid_for_event(self, grid_index, evt):
        """Zero out the grid region last occupied by this event's pattern."""
        if evt.id not in self.pattern_locations:
            return super(TUIODragDropMixin, self).clear_grid_for_event(grid_index, evt)
        adj_x, adj_y, x, y = self.pattern_locations[evt.id]
        if None in (adj_x, adj_y, x, y):
            return False
        empty = np.zeros(shape=(x, y), dtype=int)
        adj_x_end = adj_x + x
        adj_y_end = adj_y + y
        with self._writable_grid(grid_index):
            self.grids[grid_index][adj_x:adj_x_end, adj_y:adj_y_end] = empty

    def combine_with_cells(self, grid):
        """Add the given grid to the live grid
        >>> thing = type("Thing", (TUIODragDropMixin, Widget), {})()
        >>> thing.cells = np.array([1, 0, 1])
        >>> thing.combine_with_cells(np.array([0, 1, 0]))
        >>> thing.cells
        array([1, 1, 1])
        """
        assert (States.ILLEGAL not in grid)
        self.cells = grid + self.cells

    def get_player_pieces(self):
        """Return the PiecesContainer for the current player.

        Raises NoPiecesObjectForPlayer when no container is registered for
        the player's (0-based) index.
        """
        player = _get_root_widget().player - 1
        # BUGFIX: the original tested "player < self.player_pieces" (int vs
        # list), which on Python 2 is always True and raised IndexError
        # instead of NoPiecesObjectForPlayer for out-of-range players.
        if 0 <= player < len(self.player_pieces):
            return self.player_pieces[player]
        raise NoPiecesObjectForPlayer(player)

    def on_confirm(self, evt):
        """ Confirm event handler
        :param evt: ConfirmEventBlack or ConfirmEventWhite
        >>> import mock
        >>> logging.root._log = mock.Mock()
        >>> thing = type("Thing", (TUIODragDropMixin, Widget), {})()
        >>> EventLoop.window = mock.Mock(children=[mock.Mock(player=1)])
        With bad player:
        >>> thing.on_confirm(mock.Mock(player=2))
        False
        >>> logging.root._log.call_count
        1
        >>> logging.root._log.call_args
        call(30, 'Caught unauthorised confirm for player 2', ())
        """
        root = _get_root_widget()
        if evt.player != root.player:
            logging.warning("Caught unauthorised confirm for player {}".format(evt.player))
            return False
        grid = self.grids[self.PREVIEW_GRID].copy()
        # Illegal preview cells are dropped (deactivated) rather than placed.
        grid[grid == States.ILLEGAL] = States.DEACTIVATED
        try:
            self.get_player_pieces().update_pieces(-np.count_nonzero(grid))
        except NoPiecesObjectForPlayer:
            pass
        self.combine_with_cells(grid)
        self.clear_grid(self.PREVIEW_GRID)
        self.update_cell_widgets()
        root.end_turn()

    def on_reset(self, evt):
        """ Reset event handler
        :param evt: ConfirmEventBlack or ConfirmEventWhite
        >>> import mock
        >>> logging.root._log = mock.Mock()
        >>> thing = type("Thing", (TUIODragDropMixin, Widget), {})()
        >>> EventLoop.window = mock.Mock(children=[mock.Mock(player=1)])
        With bad player:
        >>> thing.on_reset(mock.Mock(player=2))
        False
        >>> logging.root._log.call_count
        1
        >>> logging.root._log.call_args
        call(30, 'Caught unauthorised reset for player 2', ())
        """
        if evt.player != _get_root_widget().player:
            logging.warning("Caught unauthorised reset for player {}".format(evt.player))
            return False
        self.clear_grid(self.PREVIEW_GRID)
        self.update_cell_widgets()

    def on_admin_reset(self, evt):
        # Admin fiducial: reset the whole UI regardless of whose turn it is.
        _get_root_widget().app.reset_ui()
class PiecesContainer(Widget):
    """A widget showing the user's pieces"""
    # Player number (a States value) — selects the colour for live pieces.
    number = NumericProperty(States.DEACTIVATED)
    # How many pieces the player currently has available.
    pieces = NumericProperty(0)
    def __init__(self, *args, **kwargs):
        super(PiecesContainer, self).__init__(*args, **kwargs)
        # Maps (row, col) -> (Color, Rectangle) canvas instructions, so each
        # box is created once and only recoloured on subsequent redraws.
        self._colour_cache = {}
    def redraw(self, old_amount=None):
        """ Ensure the pieces in the container are the correct colour
        :param old_amount: For efficiency's sake, only redraw this many boxes
        Five pieces in a ten-piece cache
        >>> pieces = PiecesContainer(number=States.FIRST, pieces=5)
        >>> pieces._colour_cache
        {}
        >>> pieces.redraw(10)
        >>> for key, (colour, shape) in sorted(pieces._colour_cache.items()):
        ...     key, colour.rgb
        ((0, 0), [1.0, 1.0, 1.0])
        ((0, 1), [1.0, 1.0, 1.0])
        ((0, 2), [1.0, 1.0, 1.0])
        ((0, 3), [0.4, 0.4, 0.4])
        ((0, 4), [0.4, 0.4, 0.4])
        ((1, 0), [1.0, 1.0, 1.0])
        ((1, 1), [1.0, 1.0, 1.0])
        ((1, 2), [0.4, 0.4, 0.4])
        ((1, 3), [0.4, 0.4, 0.4])
        ((1, 4), [0.4, 0.4, 0.4])
        Remove all pieces but only redraw the first two
        >>> pieces.pieces = 0
        >>> pieces.redraw(2)
        >>> for key, (colour, shape) in sorted(pieces._colour_cache.items()):
        ...     key, colour.rgb
        ((0, 0), [0.4, 0.4, 0.4])
        ((0, 1), [1.0, 1.0, 1.0])
        ((0, 2), [1.0, 1.0, 1.0])
        ((0, 3), [0.4, 0.4, 0.4])
        ((0, 4), [0.4, 0.4, 0.4])
        ((1, 0), [0.4, 0.4, 0.4])
        ((1, 1), [1.0, 1.0, 1.0])
        ((1, 2), [0.4, 0.4, 0.4])
        ((1, 3), [0.4, 0.4, 0.4])
        ((1, 4), [0.4, 0.4, 0.4])
        Redraw the whole 10-piece cache
        >>> pieces.redraw(10)
        >>> for key, (colour, shape) in sorted(pieces._colour_cache.items()):
        ...     key, colour.rgb
        ((0, 0), [0.4, 0.4, 0.4])
        ((0, 1), [0.4, 0.4, 0.4])
        ((0, 2), [0.4, 0.4, 0.4])
        ((0, 3), [0.4, 0.4, 0.4])
        ((0, 4), [0.4, 0.4, 0.4])
        ((1, 0), [0.4, 0.4, 0.4])
        ((1, 1), [0.4, 0.4, 0.4])
        ((1, 2), [0.4, 0.4, 0.4])
        ((1, 3), [0.4, 0.4, 0.4])
        ((1, 4), [0.4, 0.4, 0.4])
        """
        # constants
        CELL_SIZE = 25
        # TODO allow this to be configured in gameoflife.ini
        MAX_COLUMNS = 14
        GREY = [0.4, 0.4, 0.4, 1]
        if (old_amount and old_amount >= MAX_COLUMNS * 2
                and self.pieces >= MAX_COLUMNS * 2):
            # Container is full so there's no sense in redrawing it
            return
        if old_amount is None:
            # No hint given: redraw every box the container can hold.
            old_amount = MAX_COLUMNS * 2
        # Redraw up to the larger of the old and the new piece count, so both
        # newly-added and newly-removed pieces are recoloured.
        max_iteration = max(old_amount, self.pieces)
        cache = self._colour_cache
        iterator = product(xrange(MAX_COLUMNS), xrange(2)) # left, right
        for piece_number, (col, row) in enumerate(iterator):
            if piece_number >= max_iteration:
                break
            if (row, col) not in cache:
                # new rectangle
                with self.canvas:
                    cache[(row, col)] = (
                        Color(*GREY),
                        Rectangle(size=(CELL_SIZE - 1, CELL_SIZE - 1)),
                    )
            colour, rect = cache[(row, col)]
            # NOTE(review): x offset uses ``row`` and y uses ``col`` — the
            # reverse of the cache-key order; presumably intentional for the
            # two-column layout, confirm against the rendered widget.
            rect.pos = (self.x + row * CELL_SIZE, self.y + col * CELL_SIZE + 1)
            if piece_number < self.pieces:
                # Piece still available: paint it in the player's colour.
                colour.rgb = Colours[self.number]
            else:
                colour.rgb = GREY
    def update_pieces(self, by_amount):
        """Adjust the piece count by ``by_amount`` and redraw what changed."""
        old_amount = self.pieces
        self.pieces += by_amount
        self.redraw(old_amount)
class GOLGrid(TUIODragDropMixin, DrawableGrid):
    """Subclassed DrawableGrid to allow drag-drop behaviour"""
    GRID_CELL_CLASS = LimitedGridCell
    # Index of the overlay grid used to preview a shape before dropping it.
    PREVIEW_GRID = 1
    player_uis = ListProperty()
    player_pieces = ListProperty() # TODO a better way to get player_pieces
    def __init__(self, *args, **kwargs):
        # Register the custom drag/drop events before the widget initialises.
        self.register_event_type("on_drag_shape")
        self.register_event_type("on_drop_shape")
        super(GOLGrid, self).__init__(*args, **kwargs)
    def set_cell_state(self, cell, y, x):
        # In addition to the live state, reflect the preview grid's value as
        # the cell's border.
        super(GOLGrid, self).set_cell_state(cell, y, x)
        grid = self.grids[self.PREVIEW_GRID]
        cell.set_border_state(grid[y, x])
    def get_player_ui(self, number):
        """Return the PlayerUI with the given number; KeyError if absent."""
        for ui in self.player_uis:
            if ui.number == number:
                return ui
        else:
            raise KeyError("No Player found with number {}".format(number))
    def drag_or_drop_shape(self, evt, grid_index, tolerate_illegal=False):
        """ Draw a shape on the grid
        :param evt: Touch event
        :param grid_index: Index of the grid to update
        :type grid_index: int
        :param tolerate_illegal: If specified, illegal moves will draw a red
            box on the grid. Otherwise nothing will happen.
        :type tolerate_illegal: bool
        >>> import mock
        >>> EventLoop.window = mock.Mock(children=[mock.Mock(player=1)])
        >>> grid = GOLGrid(rows=3, cols=1, num_grids=2)
        >>> grid.player_pieces.append(mock.Mock(pieces=1))
        >>> grid.init_cells()
        >>> event = mock.Mock(pattern=np.array([[True]]), pos=(0, 0))
        Put shape on live grid
        >>> grid.drag_or_drop_shape(event, 0, tolerate_illegal=False)
        >>> grid.grids
        [array([[1, 0, 0]]), array([[0, 0, 0]])]
        Illegal shape on preview grid
        >>> grid.drag_or_drop_shape(event, 1, tolerate_illegal=False)
        >>> grid.grids
        [array([[1, 0, 0]]), array([[0, 0, 0]])]
        Illegal shape on preview grid; tolerate_illegal=True
        >>> grid.drag_or_drop_shape(event, 1, tolerate_illegal=True)
        >>> grid.grids
        [array([[1, 0, 0]]), array([[-1, 0, 0]])]
        """
        root = _get_root_widget()
        # Encode the pattern with the current player's number.
        pattern = evt.pattern.astype(int) * root.player
        x, y = pattern.shape
        adj_x, adj_y = self.cell_coordinates(evt.pos)
        adj_x_end = adj_x + x
        adj_y_end = adj_y + y
        try:
            player_pieces = self.get_player_pieces()
        except NoPiecesObjectForPlayer:
            player_pieces = None
        grid = self.grids[grid_index]
        # Pieces already on this grid (illegal markers excluded) plus the
        # pieces the new pattern would add.
        counters = np.count_nonzero(grid[grid != States.ILLEGAL])
        counters += np.count_nonzero(pattern)
        # Does the target region overlap any live cell?
        overlaps = (self._cells[adj_x:adj_x_end, adj_y:adj_y_end]
                    != States.DEACTIVATED)
        if (player_pieces and counters > player_pieces.pieces) or overlaps.any():
            if tolerate_illegal:
                # Change the whole pattern into a red grid
                np.core.multiarray.copyto(pattern, States.ILLEGAL,
                                          casting="unsafe")
            else:
                self.update_cell_widgets()  # Clear any existing pattern
                return
        with self._writable_grid(grid_index):
            grid[adj_x:adj_x_end, adj_y:adj_y_end] = pattern
        if grid_index == self.CELLS_GRID and player_pieces:
            # Dropping on the live grid consumes the pieces used.
            player_pieces.update_pieces(-counters)
        self.update_cell_widgets()
    def on_drag_shape(self, evt):
        # Preview the dragged shape; illegal placements are shown in red.
        if not self.collide_point(*evt.pos):
            return False
        self.clear_grid_for_event(self.PREVIEW_GRID, evt)
        return self.drag_or_drop_shape(evt, self.PREVIEW_GRID,
                                       tolerate_illegal=True)
    def on_drop_shape(self, evt):
        # Commit the shape to the live grid (only for legal placements).
        self.clear_grid_for_event(self.PREVIEW_GRID, evt)
        if not self.collide_point(*evt.pos):
            self.update_cell_widgets()  # Clear any existing pattern
            return False
        return self.drag_or_drop_shape(evt, self.CELLS_GRID)
    def on_cells_updated(self):
        """ Update player scores with new values
        >>> import mock
        >>> grid = GOLGrid(rows=3, cols=1, num_grids=2)
        >>> grid.player_uis.append(mock.Mock(number=1, score=0))
        >>> grid.player_uis.append(mock.Mock(number=2, score=0))
        >>> grid.init_cells()
        >>> grid.cells = [[1, 2, 1]]
        >>> grid.on_cells_updated()
        >>> grid.player_uis[0].score
        2
        >>> grid.player_uis[1].score
        1
        """
        cells = self.cells
        for ui in self.player_uis:
            ui.score = np.count_nonzero(cells == ui.number)
    def get_new_pieces_for_player(self, player):
        # One new piece for every three live cells the player owns.
        return np.count_nonzero(self.cells == player) // 3
class PlayerUI(Label):
    """Holds details about the player (score, colour, progress)."""
    app = ObjectProperty()
    colour = ListProperty(Colours[States.DEACTIVATED])
    # Fraction of the winning score achieved, in [0, 1].
    completeness = NumericProperty(0)

    def get_score(self):
        return getattr(self, "_score", 0)

    def set_score(self, score):
        if self.app is None:
            top_score = 0
        else:
            top_score = self.app.top_score
        score = min(score, top_score)
        self._score = score
        # BUGFIX: the original unconditionally divided by top_score, which is
        # guaranteed to be 0 while ``app`` is unset (ZeroDivisionError); also
        # use true division so completeness is a fraction rather than the
        # 0-or-1 that Python 2 integer division produced.
        if top_score:
            self.completeness = float(score) / top_score
        else:
            self.completeness = 0
    score = AliasProperty(get_score, set_score)

    def get_number(self):
        return getattr(self, "_number", States.DEACTIVATED)

    def set_number(self, number):
        assert number in [States.FIRST, States.SECOND], "Unknown state {}!".format(number)
        self._number = number
        self.colour = Colours[number]
    number = AliasProperty(get_number, set_number)

    @property
    def has_maximum_score(self):
        return (self.score == self.app.top_score)

    # Not to be confused with has_maximum_score. This is set when a player had
    # the maximum score at the last count, but they may not still have the
    # maximum score.
    had_maximum_score = BooleanProperty(False)
class RotatedImage(Image):
    """Image with an ``angle`` property.

    Only the property is defined here; the rotation is presumably applied by
    a kv rule elsewhere — TODO confirm.
    """
    angle = NumericProperty()
class PatternVisualisation(DragBehavior, ButtonBehavior, RotatedImage):
    """Draggable, clickable image showing the parent CellShape's pattern."""
    # Position the widget snaps back to when a drag ends.
    original_position = ListProperty()
    def show_pattern(self, pattern):
        """Point ``self.source`` at the pre-rendered asset for this pattern."""
        # TODO autogenerate assets
        # TODO different assets for different players
        # TODO asset caching (if possible)
        # Asset filename is the md5 of the pattern's string representation.
        digest = md5(np.array_str(pattern)).hexdigest()
        self.source = os.path.join("assets", "{}.png".format(digest))
    def setup(self):
        """Initialise from the parent; call after this widget is parented."""
        assert self.parent.pattern is not None, "parent.pattern is not set!"
        self.show_pattern(self.parent.pattern)
        self.drag_rect_x = self.parent.x
        self.drag_rect_y = self.parent.y
        self.original_position = self.parent.pos
    def on_touch_move(self, touch):
        # While being dragged, notify the root widget so the grid can
        # preview the shape under the touch.
        if super(PatternVisualisation, self).on_touch_move(touch):
            evt = events.DragShapeEvent(self.parent.pattern, touch)
            evt.dispatch(_get_root_widget())
            return True
        return False
    def on_touch_up(self, touch):
        # Snap back to the original spot and let the root widget handle the
        # actual drop.
        if super(PatternVisualisation, self).on_touch_up(touch):
            self.pos = self.original_position
            evt = events.DropShapeEvent(self.parent.pattern, touch)
            evt.dispatch(_get_root_widget())
            return True
        return False
    def on_release(self):
        """Rotate the displayed pattern 90 degrees clockwise on click."""
        with self.canvas:
            self.angle -= 90
        # For some reason np.rot90 rotates anti-clockwise, so we need to call
        # it with argument 3 (to rotate 270 degrees instead of 90)
        self.parent.pattern = np.rot90(self.parent.pattern, 3)
class CellShape(BoxLayout):
    """Layout that owns a cell pattern and its draggable visualisation."""

    visualisation = ObjectProperty(None)

    def __init__(self, *args, **kwargs):
        super(CellShape, self).__init__(*args, **kwargs)
        self._pattern = None

    @property
    def pattern(self):
        """The pattern shown by this widget (numpy array, or None)."""
        return self._pattern

    @pattern.setter
    def pattern(self, pattern):
        # Keep a private copy so later caller-side mutations have no effect.
        # Anything with a ``copy`` method is assumed to be a numpy array;
        # other sequences are converted.
        if hasattr(pattern, "copy"):
            self._pattern = pattern.copy()
        else:
            self._pattern = np.array(pattern)

    def setup(self):
        """Create, attach and initialise the pattern visualisation."""
        vis = PatternVisualisation(size=self.size)
        self.visualisation = vis
        self.add_widget(vis)
        vis.setup()
| {
"repo_name": "ukch/kivy-p2life",
"path": "kivy_p2life/widgets.py",
"copies": "1",
"size": "23185",
"license": "mit",
"hash": 3071015695181904000,
"line_mean": 32.6502177068,
"line_max": 102,
"alpha_frac": 0.5707569549,
"autogenerated": false,
"ratio": 3.582354758961681,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4653111713861681,
"avg_score": null,
"num_lines": null
} |
"""All widgets related to editing channels are here."""
from PyQt4 import QtGui, QtCore
from ..ramps import Channel
from CommonWidgets import QMultipleSpinBoxEdit, QNamedPushButton
import rampage.format as fmt
class QEditChannelInfoDialog(QtGui.QDialog):
    """Dialog to edit channel info.

    This dialog is called when the user right clicks on the channel name and
    selects edit.
    """
    def __init__(self, ch_name, dct, parent):
        super(QEditChannelInfoDialog, self).__init__(parent)
        self.setWindowTitle('Edit channel info')
        self.text_name = QtGui.QLineEdit(ch_name, self)
        self.text_comment = QtGui.QLineEdit(dct['comment'], self)
        self.text_id = QtGui.QLineEdit(dct['id'], self)
        self.button_ok = QtGui.QPushButton('Ok', self)
        # BUGFIX: the Ok button was connected to accept() twice in the
        # original, making the slot fire twice per click; connect once.
        self.button_ok.clicked.connect(self.accept)
        self.button_cancel = QtGui.QPushButton('Cancel', self)
        self.button_cancel.clicked.connect(self.reject)
        self.grid = QtGui.QGridLayout(self)
        self.grid.addWidget(QtGui.QLabel('Name'), 0, 0)
        self.grid.addWidget(self.text_name, 0, 1)
        self.grid.addWidget(QtGui.QLabel('Comment'), 1, 0)
        self.grid.addWidget(self.text_comment, 1, 1)
        self.grid.addWidget(QtGui.QLabel('id'), 2, 0)
        self.grid.addWidget(self.text_id, 2, 1)
        if dct['type'] == 'analog':
            # Analog channels additionally expose their conversion formula.
            # (Removed a leftover debug print here.)
            self.conversion = QtGui.QLineEdit(dct['conversion'], self)
            self.grid.addWidget(QtGui.QLabel('conversion'), 3, 0)
            self.grid.addWidget(self.conversion, 3, 1)
        self.grid.addWidget(self.button_ok, 4, 0)
        self.grid.addWidget(self.button_cancel, 4, 1)
        self.setLayout(self.grid)
        self.dct = dct
        self.ch_name = ch_name

    def exec_(self):
        """Run the dialog modally.

        Returns a tuple ``(execReturn, name, comment, id_string,
        conversion_string)``; ``conversion_string`` is None for digital
        channels.
        """
        execReturn = super(QEditChannelInfoDialog, self).exec_()
        name = str(self.text_name.text())
        comment = str(self.text_comment.text())
        id_string = str(self.text_id.text())
        if self.dct['type'] == 'analog':
            conversion_string = str(self.conversion.text())
        else:
            conversion_string = None
        return execReturn, name, comment, id_string, conversion_string
class QChannelInfoBox(QtGui.QWidget):
    """Displays channel name, comment and other info.

    This widget sits on the left-most column of every channel row.
    Signals:
    edit_signal(ch_name) - Emits this with its channel name whenever the user
    clicks the edit menu item on the right-click menu. It is the job of the
    parent widget to do something afterwards.
    view_signal(ch_name) - Same as edit, but emitted when the user clicks view
    """
    edit_signal = QtCore.pyqtSignal(object)
    view_signal = QtCore.pyqtSignal(object)

    def __init__(self, ch_name, dct, parent):
        super(QChannelInfoBox, self).__init__(parent)
        self.ch_name = ch_name
        self.dct = dct
        self.vbox = QtGui.QVBoxLayout(self)
        self.setLayout(self.vbox)
        self.ch_name_label = QtGui.QLabel(self)
        self.vbox.addWidget(self.ch_name_label)
        # Analog channels are shown green, digital ones blue.
        if dct['type'] == 'analog':
            fmter = fmt.green
        else:
            fmter = fmt.blue
        self.ch_name_label.setText(fmt.b(fmter(ch_name)))
        self.generateToolTip()
        # create actions to edit the keyframe
        self.edit_action = QtGui.QAction('&Edit', self)
        self.view_action = QtGui.QAction('&View Ramp', self)
        # connect actions to slots
        self.edit_action.triggered.connect(self.edit)
        self.view_action.triggered.connect(self.view)
        # create context menu
        self.pop_menu = QtGui.QMenu(self)
        self.pop_menu.addAction(self.edit_action)
        self.pop_menu.addAction(self.view_action)
        # right clicking on this will bring up the context menu
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        # New-style signal connection, consistent with the QAction
        # connections above (the original used the legacy SIGNAL() string).
        self.customContextMenuRequested.connect(self.onContextMenu)

    def generateToolTip(self):
        """Rebuild the tooltip from the current channel name and dct."""
        tt = fmt.b(fmt.red(self.ch_name)) + '<br>'
        tt += fmt.i(self.dct['comment']) + '<br>\n'
        tt += fmt.b(self.dct['id']) + '<br>\n'
        if self.dct['type'] == 'analog':
            tt += 'Conversion: ' + fmt.b(self.dct['conversion']) + '<br>\n'
        tt += '<br><i>right-click label to edit...</i>'
        self.setToolTip(tt)

    def edit(self):
        self.edit_signal.emit(self.ch_name)

    def view(self):
        self.view_signal.emit(self.ch_name)

    def onContextMenu(self, point):
        # show context menu
        self.pop_menu.exec_(self.mapToGlobal(point))

    def edit_channel_info(self, new_ch_name, ch_dct):
        """Parent widget calls this whenever the user edits channel info.
        """
        self.ch_name = new_ch_name
        self.dct = ch_dct
        if ch_dct['type'] == 'analog':
            fmter = fmt.green
        else:
            fmter = fmt.blue
        self.ch_name_label.setText(fmt.b(fmter(self.ch_name)))
        self.generateToolTip()
class QChannelSegment(QtGui.QWidget):
    """Editor widget for one channel segment: ramp type plus its parameters."""
    # Emitted with this segment's keyname when the user selects 'delete'.
    delete_segment = QtCore.pyqtSignal(object)
    # Emitted after any change to the ramp type or a parameter value.
    edit_segment = QtCore.pyqtSignal()
    def __init__(self, keyname, dct, parent, ramp_types):
        super(QChannelSegment, self).__init__(parent)
        self.dct = dct
        self.vbox = QtGui.QVBoxLayout(self)
        self.vbox.setSpacing(0)
        self.setLayout(self.vbox)
        self.keyname = keyname
        self.ramp_types = ramp_types
        # Combo entries: the sorted ramp types, a separator, then 'delete'.
        self.ramp_type_list = sorted(self.ramp_types.keys())
        self.curr_ramp_index = self.ramp_type_list.index(self.dct['ramp_type'])
        self.ramp_type_combo = QtGui.QComboBox(self)
        self.ramp_type_combo.addItems(sorted(self.ramp_types.keys()))
        self.ramp_type_combo.insertSeparator(len(self.ramp_types))
        self.ramp_type_combo.addItem('delete')
        self.ramp_type_combo.setCurrentIndex(self.curr_ramp_index)
        self.ramp_type_combo.currentIndexChanged.connect(self.handleRampTypeChanged)
        # One spin box per parameter of the current ramp type, pre-filled
        # with the values stored in the segment's ramp_data.
        ramp_parm_names = self.ramp_types[self.dct['ramp_type']]
        ramp_parm_values = [self.dct['ramp_data'][k] for k in ramp_parm_names]
        self.spin_boxes = QMultipleSpinBoxEdit(ramp_parm_names, self,
                                               ramp_parm_values)
        self.spin_boxes.valueChanged.connect(self.handleValueChanged)
        self.vbox.addWidget(self.ramp_type_combo)
        self.vbox.addWidget(self.spin_boxes)
    def handleRampTypeChanged(self, new_ramp_type_index):
        """Switch ramp type, or emit delete_segment if 'delete' was chosen."""
        item_text = str(self.ramp_type_combo.itemText(new_ramp_type_index))
        if item_text == 'delete':
            self.delete_segment.emit(self.keyname)
        else:
            # Rebuild the parameter editors and reset all values to 0.0.
            ramp_parm_names = self.ramp_types[item_text]
            self.spin_boxes.editAttributes(ramp_parm_names)
            self.dct['ramp_type'] = item_text
            ramp_data_dct = {}
            for rpn in ramp_parm_names:
                ramp_data_dct[rpn] = 0.0
            self.dct['ramp_data'] = ramp_data_dct
            self.edit_segment.emit()
    def handleValueChanged(self, new_values):
        """Write edited spin-box values back into the segment's ramp_data."""
        ramp_parm_names = self.ramp_types[self.dct['ramp_type']]
        for rpn, val in zip(ramp_parm_names, new_values):
            self.dct['ramp_data'][rpn] = val
        self.edit_segment.emit()
class QDigitalChannelSegment(QChannelSegment):
    """Channel segment for digital channels.

    Adds a checkable ON/OFF button below the standard ramp controls and
    keeps ``dct['state']`` in sync with it.
    """

    def __init__(self, keyname, dct, parent, ramp_types):
        super(QDigitalChannelSegment, self).__init__(keyname, dct,
                                                     parent, ramp_types)
        self.state = self.dct['state']
        self.boolButton = QtGui.QPushButton(self)
        self.boolButton.setCheckable(True)
        self.boolButton.setChecked(self.state)
        self.boolButton.setText('ON' if self.state else 'OFF')
        self.boolButton.clicked.connect(self.handleBoolButtonClicked)
        # Green when checked (ON), red otherwise.
        stylesheet = ('QPushButton:checked { background-color:'
                      'rgb(100,255,125); }'
                      'QPushButton { background-color:'
                      'rgb(255,125,100); }')
        self.boolButton.setStyleSheet(stylesheet)
        self.vbox.addWidget(self.boolButton)

    def handleBoolButtonClicked(self, checked):
        """Mirror the button state into ``dct['state']`` and announce it."""
        self.state = bool(checked)
        self.boolButton.setText('ON' if self.state else 'OFF')
        self.dct['state'] = self.state
        self.edit_segment.emit()
class QChannel(Channel):
    """Edits channels.

    parent widget should have the following slots:
    handleEditChannelInfo(self, ch_name)
    """
    def __init__(self, ch_name, dct, key_frame_list, settings, grid, parent,
                 ramp_types, start_pos=(0, 0)):
        super(QChannel, self).__init__(ch_name, dct, key_frame_list)
        self.start_pos = start_pos
        self.parent = parent
        self.grid = grid
        self.ramp_types = ramp_types
        self.channel_type = dct['type']
        self.setupUi()

    def _make_segment(self, keyname):
        """Create and wire the segment widget appropriate for this channel."""
        if self.channel_type == 'analog':
            ch_seg = QChannelSegment(keyname, self.dct['keys'][keyname],
                                     self.parent, self.ramp_types)
        elif self.channel_type == 'digital':
            ch_seg = QDigitalChannelSegment(keyname,
                                            self.dct['keys'][keyname],
                                            self.parent, self.ramp_types)
        ch_seg.delete_segment.connect(self.handleDeleteSegment)
        # evil hack: forward edits straight to the parent's ramp_changed
        ch_seg.edit_segment.connect(self.parent.ramp_changed)
        return ch_seg

    def _key_index(self, keyname):
        """Return the column index of ``keyname`` in the sorted key list."""
        keyindex = -1
        for i, key in enumerate(self.key_frame_list.sorted_key_list()):
            if keyname == key:
                keyindex = i
        return keyindex

    def setupUi(self):
        """Build the info box plus one segment or '+' button per key frame."""
        self.ch_info = QChannelInfoBox(self.ch_name, self.dct, self.parent)
        self.ch_info.edit_signal.connect(self.parent.handleEditChannelInfo)
        self.ch_info.view_signal.connect(self.parent.handleViewChannel)
        self.grid.addWidget(self.ch_info, self.start_pos[0], self.start_pos[1])
        # cycle through all keys in the key list and find out which ones
        # we have in our channel
        self.ch_segments = []
        self.add_buttons = []
        for i, key in enumerate(self.key_frame_list.sorted_key_list()):
            if key in self.dct['keys']:
                ch_seg = self._make_segment(key)
                self.grid.addWidget(ch_seg, self.start_pos[0],
                                    self.start_pos[1] + i + 1)
                self.ch_segments.append(ch_seg)
            else:
                add_button = QNamedPushButton('+', key, self.parent)
                add_button.clicked_name.connect(self.handleAddSegment)
                self.grid.addWidget(add_button, self.start_pos[0],
                                    self.start_pos[1] + i + 1)
                self.add_buttons.append(add_button)

    def edit_channel_info(self, new_ch_name, ch_dct):
        self.set_name(new_ch_name)
        self.dct = ch_dct
        self.ch_info.edit_channel_info(new_ch_name, ch_dct)

    def handleDeleteSegment(self, keyname):
        """Remove the segment for ``keyname``, replacing it with a '+' button."""
        index = -1
        for i, ch_seg in enumerate(self.ch_segments):
            if ch_seg.keyname == keyname:
                index = i
        # BUGFIX: the original tested "index is not -1" — an identity check
        # that only works thanks to CPython's small-int caching.
        if index != -1:
            ch_del = self.ch_segments.pop(index)
            self.grid.removeWidget(ch_del)
            ch_del.deleteLater()
            self.dct['keys'].pop(keyname)
            # evil hack follows
            add_button = QNamedPushButton('+', keyname, self.parent)
            add_button.clicked_name.connect(self.handleAddSegment)
            # find where to place our new add button
            keyindex = self._key_index(keyname)
            self.grid.addWidget(add_button, self.start_pos[0],
                                self.start_pos[1] + keyindex + 1)
            self.add_buttons.append(add_button)
            self.parent.ramp_changed.emit()

    def handleAddSegment(self, keyname):
        """Replace the '+' button for ``keyname`` with a default segment."""
        index = -1
        for i, add_button in enumerate(self.add_buttons):
            if add_button.name == keyname:
                index = i
                break
        # BUGFIX: was "index is not -1" (see handleDeleteSegment).
        if index != -1:
            add_button = self.add_buttons.pop(index)
            self.grid.removeWidget(add_button)
            add_button.deleteLater()
            # Default segment: first ramp type, all parameters 0.0.
            segment_dct = {}
            ramp_type = sorted(self.ramp_types.keys())[0]
            segment_dct['ramp_type'] = ramp_type
            segment_dct['ramp_data'] = {}
            if self.channel_type == 'digital':
                segment_dct['state'] = False
            for rpn in self.ramp_types[ramp_type]:
                segment_dct['ramp_data'][rpn] = 0.0
            self.dct['keys'][keyname] = segment_dct
            ch_seg = self._make_segment(keyname)
            # find where to place our new channel segment
            keyindex = self._key_index(keyname)
            self.grid.addWidget(ch_seg, self.start_pos[0],
                                self.start_pos[1] + keyindex + 1)
            self.ch_segments.append(ch_seg)
            self.parent.ramp_changed.emit()
| {
"repo_name": "shreyaspotnis/rampage",
"path": "rampage/widgets/ChannelWidgets.py",
"copies": "1",
"size": "14000",
"license": "mit",
"hash": 1379721710928499700,
"line_mean": 38.2156862745,
"line_max": 84,
"alpha_frac": 0.5808571429,
"autogenerated": false,
"ratio": 3.7273695420660276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9808061913174958,
"avg_score": 0.00003295435821387378,
"num_lines": 357
} |
__all__ = ["wikify"]
def wikify(model_ref):
    """Wrap a Django view so wiki actions are handled before it.

    ``model_ref`` is either a model class or a dotted ``"module.Model"``
    string, resolved lazily on each request. When the request carries an
    ``action`` parameter of edit/diff/version/versions, the corresponding
    wikify view handles the request; otherwise the wrapped view runs.

    Raises ValueError when a string ``model_ref`` cannot be resolved.
    """
    import functools
    # basestring only exists on Python 2; fall back to str on Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str

    def decorator(func):
        @functools.wraps(func)  # preserve the wrapped view's metadata
        def inner(request, *args, **kwargs):
            # Import lazily, so we don't import views directly, saves us some
            # trouble, e.g. https://bitbucket.org/kumar303/fudge/issue/17/module-import-order-influences-whether
            from wikify.views import edit, diff, version, versions
            if isinstance(model_ref, string_types):
                try:
                    module_str, model_str = model_ref.rsplit('.', 1)
                    module = __import__(module_str, fromlist=[model_str])
                    model = getattr(module, model_str)
                except ImportError as e:
                    raise ValueError("Module %s not found: %s"
                                     % (module_str, e))
                except AttributeError:
                    raise ValueError("Module %s has no attribute %s"
                                     % (module_str, model_str))
            else:
                model = model_ref
            # The primary key must be either given by the model field's name,
            # or simply by Django's standard 'object_id'
            primary_key = model._meta.pk.name
            object_id = kwargs.get(primary_key) or kwargs.get('object_id')
            # Get action
            if request.method == 'POST':
                action = request.POST.get('action')
            else:
                action = request.GET.get('action')
            # Dispatch table instead of an if/elif chain.
            wiki_views = {'edit': edit, 'diff': diff,
                          'version': version, 'versions': versions}
            view = wiki_views.get(action)
            if view is not None:
                return view(request, model, object_id)
            # No valid action given, call decorated view
            return func(request, *args, **kwargs)
        return inner
    return decorator
| {
"repo_name": "cburgmer/django-wikify",
"path": "src/wikify/__init__.py",
"copies": "1",
"size": "2022",
"license": "bsd-3-clause",
"hash": 1263410663518853000,
"line_mean": 40.2653061224,
"line_max": 112,
"alpha_frac": 0.512363996,
"autogenerated": false,
"ratio": 4.605922551252847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5618286547252848,
"avg_score": null,
"num_lines": null
} |
# All windows write only unicode to the terminal -
# that's what blessings does, so we match it.
from __future__ import unicode_literals
import locale
import logging
import re
import sys
import blessings
from .formatstring import fmtstr
from .formatstringarray import FSArray
from .termhelpers import Cbreak
logger = logging.getLogger(__name__)
SCROLL_DOWN = u"\x1bD"
FIRST_COLUMN = u"\x1b[1G"
class BaseWindow(object):
    """Common base for terminal rendering windows.

    Wraps a blessings.Terminal around ``out_stream`` and caches the last
    rendered lines per row so subclasses can skip rewriting unchanged rows.
    """

    def __init__(self, out_stream=None, hide_cursor=True):
        logger.debug('-------initializing Window object %r------' % self)
        if out_stream is None:
            out_stream = sys.__stdout__
        self.t = blessings.Terminal(stream=out_stream, force_styling=True)
        self.out_stream = out_stream
        self.hide_cursor = hide_cursor
        self._last_lines_by_row = {}
        self._last_rendered_width = None
        self._last_rendered_height = None

    def scroll_down(self):
        logger.debug('sending scroll down message w/ cursor on bottom line')
        # since scroll-down only moves the screen if cursor is at bottom
        with self.t.location(x=0, y=1000000):
            self.write(SCROLL_DOWN)  # TODO will blessings do this?

    def write(self, msg):
        """Write msg to the output stream and flush immediately."""
        self.out_stream.write(msg)
        self.out_stream.flush()

    def __enter__(self):
        logger.debug("running BaseWindow.__enter__")
        if self.hide_cursor:
            self.write(self.t.hide_cursor)
        return self

    def __exit__(self, type, value, traceback):
        logger.debug("running BaseWindow.__exit__")
        if self.hide_cursor:
            self.write(self.t.normal_cursor)

    def on_terminal_size_change(self, height, width):
        # Changing the terminal size breaks the cache, because it
        # is unknown how the window size change affected scrolling / the cursor
        self._last_lines_by_row = {}
        self._last_rendered_width = width
        self._last_rendered_height = height

    def render_to_terminal(self, array, cursor_pos=(0, 0)):
        """Subclass responsibility: draw ``array`` and position the cursor."""
        # BUGFIX: the original raised the NotImplemented *sentinel*, which
        # is not an exception and produced a confusing TypeError when hit.
        raise NotImplementedError

    def get_term_hw(self):
        """Returns current terminal height and width"""
        return self.t.height, self.t.width

    width = property(lambda self: self.t.width, None, None,
                     "The current width of the terminal window")
    height = property(lambda self: self.t.height, None, None,
                      "The current height of the terminal window")

    def array_from_text(self, msg):
        """Returns a FSArray of the size of the window containing msg"""
        rows, columns = self.t.height, self.t.width
        return self.array_from_text_rc(msg, rows, columns)

    @classmethod
    def array_from_text_rc(cls, msg, rows, columns):
        # Lay out msg into a rows x columns grid, wrapping at the right edge
        # and starting a new row at '\r' or '\n'.
        arr = FSArray(0, columns)
        i = 0
        for c in msg:
            if i >= rows * columns:
                return arr
            elif c in '\r\n':
                i = ((i // columns) + 1) * columns - 1
            else:
                arr[i // arr.width, i % arr.width] = [fmtstr(c)]
            i += 1
        return arr

    def fmtstr_to_stdout_xform(self):
        """Return a callable converting a fmtstr to what out_stream accepts.

        On Python 2 this encodes unicode to the stream's encoding (replacing
        unencodable characters); on Python 3 it is just str().
        """
        if sys.version_info[0] == 2:
            if hasattr(self.out_stream, 'encoding'):
                encoding = self.out_stream.encoding
            else:
                encoding = locale.getpreferredencoding()

            def for_stdout(s):
                return unicode(s).encode(encoding, 'replace')
        else:
            def for_stdout(s):
                return str(s)
        return for_stdout
class FullscreenWindow(BaseWindow):
    """2D-text rendering window that disappears when its context is left
    FullscreenWindow will only render arrays the size of the terminal
    or smaller, and leaves no trace on exit (like top or vim). It never
    scrolls the terminal. Changing the terminal size doesn't do anything,
    but rendered arrays need to fit on the screen.
    Note:
        The context of the FullscreenWindow
        object must be entered before calling any of its methods.
        Within the context of CursorAwareWindow, refrain from writing to
        its out_stream; cached writes will be inaccurate.
    """
    def __init__(self, out_stream=None, hide_cursor=True):
        """Constructs a FullscreenWindow
        Args:
            out_stream (file): Defaults to sys.__stdout__
            hide_cursor (bool): Hides cursor while in context
        """
        BaseWindow.__init__(self, out_stream=out_stream,
                            hide_cursor=hide_cursor)
        # alternate-screen context manager; entering it is what makes the
        # window leave no trace on exit
        self.fullscreen_ctx = self.t.fullscreen()
    def __enter__(self):
        self.fullscreen_ctx.__enter__()
        return BaseWindow.__enter__(self)
    def __exit__(self, type, value, traceback):
        self.fullscreen_ctx.__exit__(type, value, traceback)
        BaseWindow.__exit__(self, type, value, traceback)
    def render_to_terminal(self, array, cursor_pos=(0, 0)):
        """Renders array to terminal and places (0-indexed) cursor
        Args:
            array (FSArray): Grid of styled characters to be rendered.
            * If array received is of width too small, render it anyway
            * If array received is of width too large,
            * render the renderable portion
            * If array received is of height too small, render it anyway
            * If array received is of height too large,
            * render the renderable portion (no scroll)
        """
        # TODO there's a race condition here - these height and widths are
        # super fresh - they might change between the array being constructed
        # and rendered
        # Maybe the right behavior is to throw away the render
        # in the signal handler?
        height, width = self.height, self.width
        for_stdout = self.fmtstr_to_stdout_xform()
        if not self.hide_cursor:
            # temporarily hide the cursor during the redraw; restored below
            self.write(self.t.hide_cursor)
        # a size change invalidates the per-row line cache
        if (height != self._last_rendered_height or
                width != self._last_rendered_width):
            self.on_terminal_size_change(height, width)
        current_lines_by_row = {}
        rows = list(range(height))
        rows_for_use = rows[:len(array)]
        rest_of_rows = rows[len(array):]
        # rows which we have content for and don't require scrolling
        for row, line in zip(rows_for_use, array):
            current_lines_by_row[row] = line
            # skip rows whose content is unchanged since the last render
            if line == self._last_lines_by_row.get(row, None):
                continue
            self.write(self.t.move(row, 0))
            self.write(for_stdout(line))
            if len(line) < width:
                self.write(self.t.clear_eol)
        # rows onscreen that we don't have content for
        for row in rest_of_rows:
            # only rows that previously had content need clearing
            if self._last_lines_by_row and row not in self._last_lines_by_row:
                continue
            self.write(self.t.move(row, 0))
            self.write(self.t.clear_eol)
            self.write(self.t.clear_bol)
            current_lines_by_row[row] = None
        logger.debug(
            'lines in last lines by row: %r' % self._last_lines_by_row.keys()
        )
        logger.debug(
            'lines in current lines by row: %r' % current_lines_by_row.keys()
        )
        self.write(self.t.move(*cursor_pos))
        self._last_lines_by_row = current_lines_by_row
        if not self.hide_cursor:
            self.write(self.t.normal_cursor)
class CursorAwareWindow(BaseWindow):
    """
    Renders to the normal terminal screen and
    can find the location of the cursor.
    Note:
        The context of the CursorAwareWindow
        object must be entered before calling any of its methods.
        Within the context of CursorAwareWindow, refrain from writing to
        its out_stream; cached writes will be inaccurate and calculating
        cursor depends on cursor not having moved since the last render.
        Only use the render_to_terminal interface for moving the cursor.
    """
    def __init__(self, out_stream=None, in_stream=None,
                 keep_last_line=False, hide_cursor=True,
                 extra_bytes_callback=None):
        """Constructs a CursorAwareWindow
        Args:
            out_stream (file): Defaults to sys.__stdout__
            in_stream (file): Defaults to sys.__stdin__
            keep_last_line (bool): Causes the cursor to be moved down one line
                on leaving context
            hide_cursor (bool): Hides cursor while in context
            extra_bytes_callback (f(bytes) -> None): Will be called with extra
                bytes inadvertantly read in get_cursor_position(). If not
                provided, a ValueError will be raised when this occurs.
        """
        BaseWindow.__init__(self, out_stream=out_stream,
                            hide_cursor=hide_cursor)
        if in_stream is None:
            in_stream = sys.__stdin__
        self.in_stream = in_stream
        self._last_cursor_column = None
        self._last_cursor_row = None
        self.keep_last_line = keep_last_line
        # cbreak mode so the cursor-position report can be read byte-by-byte
        self.cbreak = Cbreak(self.in_stream)
        self.extra_bytes_callback = extra_bytes_callback
        # whether another SIGWINCH is queued up
        self.another_sigwinch = False
        # in the cursor query code of cursor diff
        self.in_get_cursor_diff = False
    def __enter__(self):
        self.cbreak.__enter__()
        # the row the cursor starts on is the first row we may draw on
        self.top_usable_row, _ = self.get_cursor_position()
        self._orig_top_usable_row = self.top_usable_row
        logger.debug('initial top_usable_row: %d' % self.top_usable_row)
        return BaseWindow.__enter__(self)
    def __exit__(self, type, value, traceback):
        if self.keep_last_line:
            # just moves cursor down if not on last line
            self.write(SCROLL_DOWN)
        self.write(FIRST_COLUMN)
        self.write(self.t.clear_eos)
        self.write(self.t.clear_eol)
        self.cbreak.__exit__(type, value, traceback)
        BaseWindow.__exit__(self, type, value, traceback)
    def get_cursor_position(self):
        """Returns the terminal (row, column) of the cursor
        0-indexed, like blessings cursor positions"""
        # TODO would this be cleaner as a parameter?
        in_stream = self.in_stream
        # CSI 6n (Device Status Report): terminal replies e.g. "\x1b[12;34R"
        query_cursor_position = u"\x1b[6n"
        self.write(query_cursor_position)
        def retrying_read():
            while True:
                try:
                    c = in_stream.read(1)
                    if c == '':
                        # NOTE(review): this message is never %-formatted --
                        # the format string and the args tuple are passed to
                        # ValueError as two separate arguments
                        raise ValueError("Stream should be blocking - should't"
                                         " return ''. Returned %r so far",
                                         (resp,))
                    return c
                except IOError:
                    raise ValueError(
                        'cursor get pos response read interrupted'
                    )
                    # find out if this ever really happens - if so, continue
        resp = ''
        while True:
            c = retrying_read()
            resp += c
            m = re.search('(?P<extra>.*)'
                          '(?P<CSI>\x1b\[|\x9b)'
                          '(?P<row>\\d+);(?P<column>\\d+)R', resp, re.DOTALL)
            if m:
                row = int(m.groupdict()['row'])
                col = int(m.groupdict()['column'])
                extra = m.groupdict()['extra']
                if extra:
                    # bytes that arrived before the report (e.g. typed-ahead
                    # keys) are handed to the callback rather than lost
                    if self.extra_bytes_callback:
                        self.extra_bytes_callback(
                            extra.encode(in_stream.encoding)
                        )
                    else:
                        raise ValueError(("Bytes preceding cursor position "
                                          "query response thrown out:\n%r\n"
                                          "Pass an extra_bytes_callback to "
                                          "CursorAwareWindow to prevent this")
                                         % (extra,))
                # terminal reports are 1-indexed; convert to 0-indexed
                return (row - 1, col - 1)
    def get_cursor_vertical_diff(self):
        """Returns the how far down the cursor moved since last render.
        Note:
            If another get_cursor_vertical_diff call is already in progress,
            immediately returns zero. (This situation is likely if
            get_cursor_vertical_diff is called from a SIGWINCH signal
            handler, since sigwinches can happen in rapid succession and
            terminal emulators seem not to respond to cursor position
            queries before the next sigwinch occurs.)
        """
        # Probably called by a SIGWINCH handler, and therefore
        # will do cursor querying until a SIGWINCH doesn't happen during
        # the query. Calls to the function from a signal handler COULD STILL
        # HAPPEN out of order -
        # they just can't interrupt the actual cursor query.
        if self.in_get_cursor_diff:
            self.another_sigwinch = True
            return 0
        cursor_dy = 0
        while True:
            self.in_get_cursor_diff = True
            self.another_sigwinch = False
            cursor_dy += self._get_cursor_vertical_diff_once()
            self.in_get_cursor_diff = False
            if not self.another_sigwinch:
                return cursor_dy
    def _get_cursor_vertical_diff_once(self):
        """Returns the how far down the cursor moved."""
        old_top_usable_row = self.top_usable_row
        row, col = self.get_cursor_position()
        if self._last_cursor_row is None:
            cursor_dy = 0
        else:
            cursor_dy = row - self._last_cursor_row
        logger.info('cursor moved %d lines down' % cursor_dy)
        # absorb as much of the movement as possible into top_usable_row
        # NOTE(review): the two bounds differ (> -1 downward, > 1 upward);
        # looks asymmetric -- confirm against upstream curtsies
        while self.top_usable_row > -1 and cursor_dy > 0:
            self.top_usable_row += 1
            cursor_dy -= 1
        while self.top_usable_row > 1 and cursor_dy < 0:
            self.top_usable_row -= 1
            cursor_dy += 1
        logger.info('top usable row changed from %d to %d', old_top_usable_row,
                    self.top_usable_row)
        logger.info('returning cursor dy of %d from curtsies' % cursor_dy)
        self._last_cursor_row = row
        return cursor_dy
    def render_to_terminal(self, array, cursor_pos=(0, 0)):
        """Renders array to terminal, returns the number of lines scrolled offscreen
        Returns:
            Number of times scrolled
        Args:
            array (FSArray): Grid of styled characters to be rendered.
            If array received is of width too small, render it anyway
            if array received is of width too large, render it anyway
            if array received is of height too small, render it anyway
            if array received is of height too large, render it, scroll down,
            and render the rest of it, then return how much we scrolled down
        """
        for_stdout = self.fmtstr_to_stdout_xform()
        # caching of write and tc (avoiding the self. lookups etc) made
        # no significant performance difference here
        if not self.hide_cursor:
            # temporarily hide the cursor during the redraw; restored below
            self.write(self.t.hide_cursor)
        # TODO race condition here?
        height, width = self.t.height, self.t.width
        if (height != self._last_rendered_height or
                width != self._last_rendered_width):
            self.on_terminal_size_change(height, width)
        current_lines_by_row = {}
        rows_for_use = list(range(self.top_usable_row, height))
        # rows which we have content for and don't require scrolling
        # TODO rename shared
        shared = min(len(array), len(rows_for_use))
        for row, line in zip(rows_for_use[:shared], array[:shared]):
            current_lines_by_row[row] = line
            # skip rows whose content is unchanged since the last render
            if line == self._last_lines_by_row.get(row, None):
                continue
            self.write(self.t.move(row, 0))
            self.write(for_stdout(line))
            if len(line) < width:
                self.write(self.t.clear_eol)
        # rows already on screen that we don't have content for
        rest_of_lines = array[shared:]
        rest_of_rows = rows_for_use[shared:]
        for row in rest_of_rows:  # if array too small
            if self._last_lines_by_row and row not in self._last_lines_by_row:
                continue
            self.write(self.t.move(row, 0))
            self.write(self.t.clear_eol)
            # TODO probably not necessary - is first char cleared?
            self.write(self.t.clear_bol)
            current_lines_by_row[row] = None
        # lines for which we need to scroll down to render
        offscreen_scrolls = 0
        for line in rest_of_lines:  # if array too big
            self.scroll_down()
            if self.top_usable_row > 0:
                # gain a row by shrinking the reserved area above the window
                self.top_usable_row -= 1
            else:
                # already at the top: the whole screen shifted up one row
                offscreen_scrolls += 1
                current_lines_by_row = dict(
                    (k - 1, v) for k, v in current_lines_by_row.items()
                )
            logger.debug('new top_usable_row: %d' % self.top_usable_row)
            # since scrolling moves the cursor
            self.write(self.t.move(height - 1, 0))
            self.write(for_stdout(line))
            current_lines_by_row[height - 1] = line
        logger.debug(
            'lines in last lines by row: %r' % self._last_lines_by_row.keys()
        )
        logger.debug(
            'lines in current lines by row: %r' % current_lines_by_row.keys()
        )
        self._last_cursor_row = max(
            0, cursor_pos[0] - offscreen_scrolls + self.top_usable_row
        )
        self._last_cursor_column = cursor_pos[1]
        self.write(
            self.t.move(self._last_cursor_row, self._last_cursor_column)
        )
        self._last_lines_by_row = current_lines_by_row
        if not self.hide_cursor:
            self.write(self.t.normal_cursor)
        return offscreen_scrolls
def demo():
    """Interactive demo: each keypress picks an array shape to render."""
    handler = logging.FileHandler(filename='display.log')
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    log.addHandler(handler)
    from . import input
    with FullscreenWindow(sys.stdout) as w:
        with input.Input(sys.stdin) as input_generator:
            rows, columns = w.t.height, w.t.width
            # key -> number of full-width rows to render for that key
            row_counts = {'a': rows, 's': rows - 1, 'd': rows + 1,
                          'f': rows - 2, 'q': 1, 'w': 1, 'e': 1}
            while True:
                c = input_generator.next()
                if c == "":
                    sys.exit()  # same as raise SystemExit()
                elif c == "h":
                    a = w.array_from_text("a for small array")
                elif c in row_counts:
                    a = [fmtstr(c * columns) for _ in range(row_counts[c])]
                elif c == '\x0c':  # ctrl-L
                    for _ in range(rows):
                        w.write('\n')
                    continue
                else:
                    a = w.array_from_text("unknown command")
                w.render_to_terminal(a)
def main():
    """Fill the whole screen with labeled rows and render them once."""
    # Bug fix: logging.FileHandler does not accept a ``level`` keyword
    # argument (the original call raised TypeError); set the level on the
    # handler after constructing it instead.
    handler = logging.FileHandler(filename='display.log')
    handler.setLevel(logging.DEBUG)
    logging.getLogger(__name__).setLevel(logging.DEBUG)
    logging.getLogger(__name__).addHandler(handler)
    print('this should be just off-screen')
    w = FullscreenWindow(sys.stdout)
    rows, columns = w.t.height, w.t.width
    with w:
        a = [fmtstr(
            (('.row%r.' % (row,)) * rows)[:columns]
        ) for row in range(rows)]
        w.render_to_terminal(a)
if __name__ == '__main__':
    # Run the interactive key-driven demo when executed directly.
    demo()
| {
"repo_name": "MarkWh1te/xueqiu_predict",
"path": "python3_env/lib/python3.4/site-packages/curtsies/window.py",
"copies": "3",
"size": "19889",
"license": "mit",
"hash": 2192117401964297200,
"line_mean": 37.6945525292,
"line_max": 84,
"alpha_frac": 0.5605611142,
"autogenerated": false,
"ratio": 4.068944353518821,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001346496229274243,
"num_lines": 514
} |
# All windows write only unicode to the terminal -
# that's what blessings does, so we match it.
from typing import (
Optional,
Text,
IO,
Dict,
Generic,
TypeVar,
Type,
Tuple,
Callable,
Any,
ByteString,
cast,
TextIO,
Union,
List,
)
from types import TracebackType
import locale
import logging
import re
import sys
import blessings
from .formatstring import fmtstr, FmtStr
from .formatstringarray import FSArray
from .termhelpers import Cbreak
logger = logging.getLogger(__name__)
# Escape sequence written by scroll_down(); per the comment there, it only
# moves the screen when the cursor is on the bottom line.
SCROLL_DOWN = "\x1bD"
# CSI cursor-horizontal-absolute: move the cursor to column 1.
FIRST_COLUMN = "\x1b[1G"
# Bound TypeVar so BaseWindow.__enter__ can be typed to return the subclass.
T = TypeVar("T", bound="BaseWindow")
class BaseWindow:
    """Base for terminal windows.

    Owns the blessings Terminal, the output stream, and a per-row cache of
    the last lines rendered so unchanged rows can be skipped on redraw.
    """
    def __init__(self, out_stream=None, hide_cursor=True):
        # type: (IO, bool) -> None
        logger.debug("-------initializing Window object %r------" % self)
        if out_stream is None:
            out_stream = sys.__stdout__
        self.t = blessings.Terminal(stream=out_stream, force_styling=True)
        self.out_stream = out_stream
        self.hide_cursor = hide_cursor
        # row index -> last FmtStr written there (None means the row was
        # cleared); lets render_to_terminal skip unchanged rows
        self._last_lines_by_row = {}  # type: Dict[int, Optional[FmtStr]]
        self._last_rendered_width = None  # type: Optional[int]
        self._last_rendered_height = None  # type: Optional[int]
    def scroll_down(self):
        # type: () -> None
        logger.debug("sending scroll down message w/ cursor on bottom line")
        # since scroll-down only moves the screen if cursor is at bottom
        with self.t.location(x=0, y=1000000):
            self.write(SCROLL_DOWN)  # TODO will blessings do this?
    def write(self, msg):
        # type: (Text) -> None
        # flush after every write so escape sequences take effect in order
        self.out_stream.write(msg)
        self.out_stream.flush()
    def __enter__(self):
        # type: (T) -> T
        logger.debug("running BaseWindow.__enter__")
        if self.hide_cursor:
            self.write(self.t.hide_cursor)
        return self
    def __exit__(self, type, value, traceback):
        # type: (Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]) -> None
        logger.debug("running BaseWindow.__exit__")
        if self.hide_cursor:
            self.write(self.t.normal_cursor)
    def on_terminal_size_change(self, height, width):
        # type: (int, int) -> None
        # Changing the terminal size breaks the cache, because it
        # is unknown how the window size change affected scrolling / the cursor
        self._last_lines_by_row = {}
        self._last_rendered_width = width
        self._last_rendered_height = height
    def render_to_terminal(self, array, cursor_pos=(0, 0)):
        # type: (Union[FSArray, List[FmtStr]], Tuple[int, int]) -> Optional[int]
        """Render *array* with the cursor at *cursor_pos*; subclass hook."""
        raise NotImplementedError
    def get_term_hw(self):
        # type: () -> Tuple[int, int]
        """Returns current terminal height and width"""
        return self.t.height, self.t.width
    @property
    def width(self):
        # type: () -> int
        "The current width of the terminal window"
        return self.t.width
    @property
    def height(self):
        # type: () -> int
        "The current height of the terminal window"
        return self.t.height
    def array_from_text(self, msg):
        # type: (Text) -> FSArray
        """Returns a FSArray of the size of the window containing msg"""
        rows, columns = self.t.height, self.t.width
        return self.array_from_text_rc(msg, rows, columns)
    @classmethod
    def array_from_text_rc(cls, msg, rows, columns):
        # type: (Text, int, int) -> FSArray
        """Lay *msg* out into an FSArray of at most rows * columns cells."""
        arr = FSArray(0, columns)
        i = 0
        for c in msg:
            if i >= rows * columns:
                return arr
            elif c in "\r\n":
                # jump to the end of the current row; the i += 1 below then
                # lands on the first cell of the next row
                i = ((i // columns) + 1) * columns - 1
            else:
                arr[i // arr.width, i % arr.width] = [fmtstr(c)]
            i += 1
        return arr
    def fmtstr_to_stdout_xform(self):
        # type: () -> Callable[[FmtStr], Text]
        """Return a converter from a FmtStr to what out_stream accepts."""
        if sys.version_info[0] == 2:
            # Python 2: bytes must be written, encoded per the stream
            if hasattr(self.out_stream, "encoding"):
                encoding = self.out_stream.encoding
            else:
                encoding = locale.getpreferredencoding()
            def for_stdout(s):
                # type: (FmtStr) -> Text
                return unicode(s).encode(encoding, "replace")
        else:
            # Python 3: terminals take unicode text directly
            def for_stdout(s):
                # type: (FmtStr) -> Text
                return str(s)
        return for_stdout
class FullscreenWindow(BaseWindow):
    """2D-text rendering window that disappears when its context is left
    FullscreenWindow will only render arrays the size of the terminal
    or smaller, and leaves no trace on exit (like top or vim). It never
    scrolls the terminal. Changing the terminal size doesn't do anything,
    but rendered arrays need to fit on the screen.
    Note:
        The context of the FullscreenWindow
        object must be entered before calling any of its methods.
        Within the context of CursorAwareWindow, refrain from writing to
        its out_stream; cached writes will be inaccurate.
    """
    def __init__(self, out_stream=None, hide_cursor=True):
        # type: (IO, bool) -> None
        """Constructs a FullscreenWindow
        Args:
            out_stream (file): Defaults to sys.__stdout__
            hide_cursor (bool): Hides cursor while in context
        """
        BaseWindow.__init__(self, out_stream=out_stream, hide_cursor=hide_cursor)
        # alternate-screen context manager; entering it is what makes the
        # window leave no trace on exit
        self.fullscreen_ctx = self.t.fullscreen()
    def __enter__(self):
        # type: () -> FullscreenWindow
        self.fullscreen_ctx.__enter__()
        return BaseWindow.__enter__(self)
    def __exit__(self, type, value, traceback):
        # type: (Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]) -> None
        self.fullscreen_ctx.__exit__(type, value, traceback)
        BaseWindow.__exit__(self, type, value, traceback)
    def render_to_terminal(self, array, cursor_pos=(0, 0)):
        # type: (Union[FSArray, List[FmtStr]], Tuple[int, int]) -> None
        """Renders array to terminal and places (0-indexed) cursor
        Args:
            array (FSArray): Grid of styled characters to be rendered.
            * If array received is of width too small, render it anyway
            * If array received is of width too large,
            * render the renderable portion
            * If array received is of height too small, render it anyway
            * If array received is of height too large,
            * render the renderable portion (no scroll)
        """
        # TODO there's a race condition here - these height and widths are
        # super fresh - they might change between the array being constructed
        # and rendered
        # Maybe the right behavior is to throw away the render
        # in the signal handler?
        height, width = self.height, self.width
        for_stdout = self.fmtstr_to_stdout_xform()
        if not self.hide_cursor:
            # temporarily hide the cursor during the redraw; restored below
            self.write(self.t.hide_cursor)
        # a size change invalidates the per-row line cache
        if height != self._last_rendered_height or width != self._last_rendered_width:
            self.on_terminal_size_change(height, width)
        current_lines_by_row = {}  # type: Dict[int, Optional[FmtStr]]
        rows = list(range(height))
        rows_for_use = rows[: len(array)]
        rest_of_rows = rows[len(array) :]
        # rows which we have content for and don't require scrolling
        for row, line in zip(rows_for_use, array):
            current_lines_by_row[row] = line
            # skip rows whose content is unchanged since the last render
            if line == self._last_lines_by_row.get(row, None):
                continue
            self.write(self.t.move(row, 0))
            self.write(for_stdout(line))
            if len(line) < width:
                self.write(self.t.clear_eol)
        # rows onscreen that we don't have content for
        for row in rest_of_rows:
            # only rows that previously had content need clearing
            if self._last_lines_by_row and row not in self._last_lines_by_row:
                continue
            self.write(self.t.move(row, 0))
            self.write(self.t.clear_eol)
            self.write(self.t.clear_bol)
            current_lines_by_row[row] = None
        logger.debug("lines in last lines by row: %r" % self._last_lines_by_row.keys())
        logger.debug("lines in current lines by row: %r" % current_lines_by_row.keys())
        self.write(self.t.move(*cursor_pos))
        self._last_lines_by_row = current_lines_by_row
        if not self.hide_cursor:
            self.write(self.t.normal_cursor)
class CursorAwareWindow(BaseWindow):
    """
    Renders to the normal terminal screen and
    can find the location of the cursor.
    Note:
        The context of the CursorAwareWindow
        object must be entered before calling any of its methods.
        Within the context of CursorAwareWindow, refrain from writing to
        its out_stream; cached writes will be inaccurate and calculating
        cursor depends on cursor not having moved since the last render.
        Only use the render_to_terminal interface for moving the cursor.
    """
    def __init__(
        self,
        out_stream=None,
        in_stream=None,
        keep_last_line=False,
        hide_cursor=True,
        extra_bytes_callback=None,
    ):
        # type: (IO, IO, bool, bool, Callable[[ByteString], None]) -> None
        """Constructs a CursorAwareWindow
        Args:
            out_stream (file): Defaults to sys.__stdout__
            in_stream (file): Defaults to sys.__stdin__
            keep_last_line (bool): Causes the cursor to be moved down one line
                on leaving context
            hide_cursor (bool): Hides cursor while in context
            extra_bytes_callback (f(bytes) -> None): Will be called with extra
                bytes inadvertantly read in get_cursor_position(). If not
                provided, a ValueError will be raised when this occurs.
        """
        BaseWindow.__init__(self, out_stream=out_stream, hide_cursor=hide_cursor)
        if in_stream is None:
            in_stream = sys.__stdin__
        self.in_stream = in_stream
        self._last_cursor_column = None  # type: Optional[int]
        self._last_cursor_row = None  # type: Optional[int]
        self.keep_last_line = keep_last_line
        # cbreak mode so the cursor-position report can be read byte-by-byte
        self.cbreak = Cbreak(self.in_stream)
        self.extra_bytes_callback = extra_bytes_callback
        # whether another SIGWINCH is queued up
        self.another_sigwinch = False
        # in the cursor query code of cursor diff
        self.in_get_cursor_diff = False
    def __enter__(self):
        # type: () -> CursorAwareWindow
        self.cbreak.__enter__()
        # the row the cursor starts on is the first row we may draw on
        self.top_usable_row, _ = self.get_cursor_position()
        self._orig_top_usable_row = self.top_usable_row
        logger.debug("initial top_usable_row: %d" % self.top_usable_row)
        return BaseWindow.__enter__(self)
    def __exit__(self, type, value, traceback):
        # type: (Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]) -> None
        if self.keep_last_line:
            # just moves cursor down if not on last line
            self.write(SCROLL_DOWN)
        self.write(FIRST_COLUMN)
        self.write(self.t.clear_eos)
        self.write(self.t.clear_eol)
        self.cbreak.__exit__(type, value, traceback)
        BaseWindow.__exit__(self, type, value, traceback)
    def get_cursor_position(self):
        # type: () -> Tuple[int, int]
        """Returns the terminal (row, column) of the cursor
        0-indexed, like blessings cursor positions"""
        # TODO would this be cleaner as a parameter?
        in_stream = self.in_stream
        # CSI 6n (Device Status Report): terminal replies e.g. "\x1b[12;34R"
        query_cursor_position = "\x1b[6n"
        self.write(query_cursor_position)
        def retrying_read():
            # type: () -> str
            while True:
                try:
                    c = in_stream.read(1)
                    if c == "":
                        # NOTE(review): this message is never %-formatted --
                        # the format string and the args tuple are passed to
                        # ValueError as two separate arguments
                        raise ValueError(
                            "Stream should be blocking - should't"
                            " return ''. Returned %r so far",
                            (resp,),
                        )
                    return c
                except OSError:
                    # apparently sometimes this happens: the only documented
                    # case is Terminal on a Ubuntu 17.10 VM on osx 10.13.
                    # see issue #732
                    logger.info("stdin.read(1) that should never error just errored.")
                    continue
        resp = ""
        while True:
            c = retrying_read()
            resp += c
            m = re.search(
                r"(?P<extra>.*)"
                r"(?P<CSI>\x1b\[|\x9b)"
                r"(?P<row>\d+);(?P<column>\d+)R",
                resp,
                re.DOTALL,
            )
            if m:
                row = int(m.groupdict()["row"])
                col = int(m.groupdict()["column"])
                extra = m.groupdict()["extra"]
                if extra:
                    # bytes that arrived before the report (e.g. typed-ahead
                    # keys) are handed to the callback rather than lost
                    if self.extra_bytes_callback:
                        self.extra_bytes_callback(
                            # TODO how do we know that this works?
                            extra.encode(cast(TextIO, in_stream).encoding)
                        )
                    else:
                        raise ValueError(
                            (
                                "Bytes preceding cursor position "
                                "query response thrown out:\n%r\n"
                                "Pass an extra_bytes_callback to "
                                "CursorAwareWindow to prevent this"
                            )
                            % (extra,)
                        )
                # terminal reports are 1-indexed; convert to 0-indexed
                return (row - 1, col - 1)
    def get_cursor_vertical_diff(self):
        # type: () -> int
        """Returns the how far down the cursor moved since last render.
        Note:
            If another get_cursor_vertical_diff call is already in progress,
            immediately returns zero. (This situation is likely if
            get_cursor_vertical_diff is called from a SIGWINCH signal
            handler, since sigwinches can happen in rapid succession and
            terminal emulators seem not to respond to cursor position
            queries before the next sigwinch occurs.)
        """
        # Probably called by a SIGWINCH handler, and therefore
        # will do cursor querying until a SIGWINCH doesn't happen during
        # the query. Calls to the function from a signal handler COULD STILL
        # HAPPEN out of order -
        # they just can't interrupt the actual cursor query.
        if self.in_get_cursor_diff:
            self.another_sigwinch = True
            return 0
        cursor_dy = 0
        while True:
            self.in_get_cursor_diff = True
            self.another_sigwinch = False
            cursor_dy += self._get_cursor_vertical_diff_once()
            self.in_get_cursor_diff = False
            if not self.another_sigwinch:
                return cursor_dy
    def _get_cursor_vertical_diff_once(self):
        # type: () -> int
        """Returns the how far down the cursor moved."""
        old_top_usable_row = self.top_usable_row
        row, col = self.get_cursor_position()
        if self._last_cursor_row is None:
            cursor_dy = 0
        else:
            cursor_dy = row - self._last_cursor_row
        logger.info("cursor moved %d lines down" % cursor_dy)
        # absorb as much of the movement as possible into top_usable_row
        # NOTE(review): the two bounds differ (> -1 downward, > 1 upward);
        # looks asymmetric -- confirm against upstream curtsies
        while self.top_usable_row > -1 and cursor_dy > 0:
            self.top_usable_row += 1
            cursor_dy -= 1
        while self.top_usable_row > 1 and cursor_dy < 0:
            self.top_usable_row -= 1
            cursor_dy += 1
        logger.info(
            "top usable row changed from %d to %d",
            old_top_usable_row,
            self.top_usable_row,
        )
        logger.info("returning cursor dy of %d from curtsies" % cursor_dy)
        self._last_cursor_row = row
        return cursor_dy
    def render_to_terminal(self, array, cursor_pos=(0, 0)):
        # type: (Union[FSArray, List[FmtStr]], Tuple[int, int]) -> int
        """Renders array to terminal, returns the number of lines scrolled offscreen
        Returns:
            Number of times scrolled
        Args:
            array (FSArray): Grid of styled characters to be rendered.
            If array received is of width too small, render it anyway
            if array received is of width too large, render it anyway
            if array received is of height too small, render it anyway
            if array received is of height too large, render it, scroll down,
            and render the rest of it, then return how much we scrolled down
        """
        for_stdout = self.fmtstr_to_stdout_xform()
        # caching of write and tc (avoiding the self. lookups etc) made
        # no significant performance difference here
        if not self.hide_cursor:
            # temporarily hide the cursor during the redraw; restored below
            self.write(self.t.hide_cursor)
        # TODO race condition here?
        height, width = self.t.height, self.t.width
        if height != self._last_rendered_height or width != self._last_rendered_width:
            self.on_terminal_size_change(height, width)
        current_lines_by_row = {}  # type: Dict[int, Optional[FmtStr]]
        rows_for_use = list(range(self.top_usable_row, height))
        # rows which we have content for and don't require scrolling
        # TODO rename shared
        shared = min(len(array), len(rows_for_use))
        for row, line in zip(rows_for_use[:shared], array[:shared]):
            current_lines_by_row[row] = line
            # skip rows whose content is unchanged since the last render
            if line == self._last_lines_by_row.get(row, None):
                continue
            self.write(self.t.move(row, 0))
            self.write(for_stdout(line))
            if len(line) < width:
                self.write(self.t.clear_eol)
        # rows already on screen that we don't have content for
        rest_of_lines = array[shared:]
        rest_of_rows = rows_for_use[shared:]
        for row in rest_of_rows:  # if array too small
            if self._last_lines_by_row and row not in self._last_lines_by_row:
                continue
            self.write(self.t.move(row, 0))
            self.write(self.t.clear_eol)
            # TODO probably not necessary - is first char cleared?
            self.write(self.t.clear_bol)
            current_lines_by_row[row] = None
        # lines for which we need to scroll down to render
        offscreen_scrolls = 0
        for line in rest_of_lines:  # if array too big
            self.scroll_down()
            if self.top_usable_row > 0:
                # gain a row by shrinking the reserved area above the window
                self.top_usable_row -= 1
            else:
                # already at the top: the whole screen shifted up one row
                offscreen_scrolls += 1
                current_lines_by_row = {k - 1: v for k, v in current_lines_by_row.items()}
            logger.debug("new top_usable_row: %d" % self.top_usable_row)
            # since scrolling moves the cursor
            self.write(self.t.move(height - 1, 0))
            self.write(for_stdout(line))
            current_lines_by_row[height - 1] = line
        logger.debug("lines in last lines by row: %r" % self._last_lines_by_row.keys())
        logger.debug("lines in current lines by row: %r" % current_lines_by_row.keys())
        self._last_cursor_row = max(
            0, cursor_pos[0] - offscreen_scrolls + self.top_usable_row
        )
        self._last_cursor_column = cursor_pos[1]
        self.write(self.t.move(self._last_cursor_row, self._last_cursor_column))
        self._last_lines_by_row = current_lines_by_row
        if not self.hide_cursor:
            self.write(self.t.normal_cursor)
        return offscreen_scrolls
def demo():
    # type: () -> None
    """Interactive demo: each keypress picks an array shape to render."""
    handler = logging.FileHandler(filename="display.log")
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    log.addHandler(handler)
    from . import input

    with FullscreenWindow(sys.stdout) as w:
        with input.Input(sys.stdin) as input_generator:
            rows, columns = w.t.height, w.t.width
            # key -> number of full-width rows to render for that key
            row_counts = {"a": rows, "s": rows - 1, "d": rows + 1,
                          "f": rows - 2, "q": 1, "w": 1, "e": 1}
            while True:
                c = input_generator.next()
                assert isinstance(c, Text)
                if c == "":
                    sys.exit()  # same as raise SystemExit()
                elif c == "h":
                    a = w.array_from_text(
                        "a for small array"
                    )  # type: Union[List[FmtStr], FSArray]
                elif c in row_counts:
                    a = [fmtstr(c * columns) for _ in range(row_counts[c])]
                elif c == "\x0c":  # ctrl-L
                    for _ in range(rows):
                        w.write("\n")
                    continue
                else:
                    a = w.array_from_text("unknown command")
                w.render_to_terminal(a)
def main():
    # type: () -> None
    """Fill the whole screen with labeled rows and render them once."""
    handler = logging.FileHandler(filename="display.log")
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    log.addHandler(handler)
    print("this should be just off-screen")
    w = FullscreenWindow(sys.stdout)
    rows, columns = w.t.height, w.t.width
    with w:
        lines = [
            fmtstr((f".row{row!r}." * rows)[:columns]) for row in range(rows)
        ]
        w.render_to_terminal(lines)
if __name__ == "__main__":
    # Run the interactive key-driven demo when executed directly.
    demo()
| {
"repo_name": "sebastinas/curtsies",
"path": "curtsies/window.py",
"copies": "1",
"size": "21894",
"license": "mit",
"hash": 8332311390666606000,
"line_mean": 36.618556701,
"line_max": 105,
"alpha_frac": 0.5570475929,
"autogenerated": false,
"ratio": 4.011359472334188,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00037213813971044297,
"num_lines": 582
} |
__all__ = [ "word_to_number" ]
# Number words with an additive value: 0-20 and the tens.
# "hundred" is handled specially in word_to_number and is absent here.
units = {
    'zero': 0,
    'one': 1,
    'two': 2,
    'three': 3,
    'four': 4,
    'five': 5,
    'six': 6,
    'seven': 7,
    'eight': 8,
    'nine': 9,
    'ten': 10,
    'eleven': 11,
    'twelve': 12,
    'thirteen': 13,
    'fourteen': 14,
    'fifteen': 15,
    'sixteen': 16,
    'seventeen': 17,
    'eighteen': 18,
    'nineteen': 19,
    'twenty': 20,
    'thirty': 30,
    'forty': 40,
    'fifty': 50,
    'sixty': 60,
    'seventy': 70,
    'eighty': 80,
    'ninety': 90
}
# Multiplier words; each closes out the running thousands group.
mults = {
    'thousand': 1000,
    'million': 1000000,
    'billion': 1000000000,
    'trillion': 1000000000000,
    'quadrillion': 1000000000000000,
    'quintillion': 1000000000000000000,
    'sextillion': 1000000000000000000000,
    'septillion': 1000000000000000000000000,
    'octillion': 1000000000000000000000000000,
    'nonillion': 1000000000000000000000000000000,
    'decillion': 1000000000000000000000000000000000,
}
def word_to_number(text):
    """Convert an English number phrase to an int.

    All easy, except that "hundred" is special:
    four thousand three hundred is not 4 * 1000 * 3 * 100.

    Args:
        text: space-separated number words, e.g. "four thousand three hundred"

    Returns:
        The integer value of the phrase.

    Raises:
        ValueError: if a word is not a recognized number word.
    """
    group_accum = 0  # reset to zero on close of every thousands group
    whole_accum = 0
    for word in text.split():
        if word == "hundred":
            group_accum *= 100
            continue
        new_value = units.get(word)
        # Bug fix: compare against None instead of truthiness -- "zero" maps
        # to 0, which is falsy, so it previously fell through and raised
        # ValueError("zero") instead of contributing 0.
        if new_value is not None:
            group_accum += new_value
            continue
        multiplier = mults.get(word)
        if multiplier is not None:
            whole_accum += group_accum * multiplier
            group_accum = 0  # reset for the next thousands group
            continue
        raise ValueError(word)
    return whole_accum + group_accum
| {
"repo_name": "mlaco/mathcessor",
"path": "lib/mathcessor/numbers.py",
"copies": "1",
"size": "1683",
"license": "mit",
"hash": -4758599633314502000,
"line_mean": 21.1447368421,
"line_max": 64,
"alpha_frac": 0.5478312537,
"autogenerated": false,
"ratio": 3.3392857142857144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43871169679857147,
"avg_score": null,
"num_lines": null
} |
# All work taken and adapted from https://github.com/lukaszbanasiak/yahoo-finance
from datetime import datetime, timedelta
import pytz
try:
from http.client import HTTPConnection
except ImportError:
from httplib import HTTPConnection
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
import simplejson
# Yahoo! YQL API
# Public endpoint (used by YQLQuery.execute below; no OAuth required).
PUBLIC_API_URL = 'http://query.yahooapis.com/v1/public/yql'
# OAuth endpoint; defined but not used by YQLQuery.execute in this module.
OAUTH_API_URL = 'http://query.yahooapis.com/v1/yql'
# Value passed as the YQL "env" parameter with every query.
DATATABLES_URL = 'store://datatables.org/alltableswithkeys'
class YQLQuery(object):
    """Thin wrapper around the public Yahoo! YQL HTTP endpoint."""
    def __init__(self):
        # one persistent HTTP connection reused for every execute() call
        self.connection = HTTPConnection('query.yahooapis.com')
    def execute(self, yql, token = None):
        """Run a YQL query string and return the decoded JSON response.

        NOTE(review): ``token`` is accepted but never used -- presumably
        intended for the OAuth endpoint (OAUTH_API_URL); confirm before
        removing it.
        """
        self.connection.request('GET', PUBLIC_API_URL + '?' + urlencode({ 'q': yql, 'format': 'json', 'env': DATATABLES_URL }))
        return simplejson.loads(self.connection.getresponse().read())
def edt_to_utc(date, mask='%m/%d/%Y %I:%M%p'):
    """
    Convert EDT (Eastern Daylight Time) to UTC

    :param date: EDT date string e.g. '5/26/2014 4:00pm'
    :param mask: format of input date e.g '%m/%d/%Y %I:%M%p'
    :return: UTC date string e.g '2014-03-05 12:23:00 UTC+0000'
    """
    utc = pytz.utc
    eastern = pytz.timezone('US/Eastern')
    # date string from yahoo can contain 0 rather than 12.
    # This means that it cannot be parsed with %I see GH issue #15.
    date_ = datetime.strptime(date.replace(" 0:", " 12:"), mask)
    # is_dst=None makes pytz raise on ambiguous/non-existent local times
    # instead of silently guessing.
    date_eastern = eastern.localize(date_, is_dst=None)
    date_utc = date_eastern.astimezone(utc)
    return date_utc.strftime('%Y-%m-%d %H:%M:%S %Z%z')
def get_date_range(start_day, end_day, step_days=365, mask='%Y-%m-%d'):
    """Split a date range into chunks of at most *step_days* days.

    Yields (start, end) string tuples, walking backwards from *end_day*,
    e.g. for '2012-04-25'..'2014-04-29':

        ('2013-04-29', '2014-04-29')
        ('2012-04-28', '2013-04-28')
        ('2012-04-25', '2012-04-27')

    :param start_day: start date string
    :param end_day: end date string
    :param step_days: maximum chunk length in days
    :param mask: format of input date e.g '%Y-%m-%d'
    :raises ValueError: if the start date lies after the end date
    """
    lower = datetime.strptime(start_day, mask)
    upper = datetime.strptime(end_day, mask)
    if lower > upper:
        raise ValueError('Start date "%s" is greater than "%s"' % (start_day, end_day))
    window = timedelta(days=step_days)
    one_day = timedelta(days=1)
    # Carve full-size windows off the end until less than one window remains.
    while upper - window > lower:
        chunk_start = upper - window
        yield chunk_start.strftime(mask), upper.strftime(mask)
        upper = chunk_start - one_day
    # Remainder (possibly the whole range if it was short).
    yield lower.strftime(mask), upper.strftime(mask)
class YQLQueryError(Exception):
    """Raised when the YQL service reports an error for a query."""

    def __init__(self, value):
        # Keep the raw error payload for callers that want the details.
        self.value = value

    def __str__(self):
        return 'Query failed with error: "{0}".'.format(repr(self.value))
class YQLResponseMalformedError(Exception):
    """Raised when the YQL response lacks the expected structure."""

    def __str__(self):
        return 'Response malformed.'
class Portfolio(object):
    '''
    Collects individual shares and fetches data

    Quote data is fetched lazily from Yahoo's YQL service and cached in
    ``_data_set`` until :meth:`refresh` is called again.
    '''

    SHORT_LIST_LEN = 8  # entries returned by get_winners / get_losers

    def __init__(self):
        self.symbols = {}  # str (ticker symbol) -> list of Share objects
        self._data_set = None  # symbol -> yahoo financial dictionary

    def get_current_data(self, symbol):
        """Return the quote dictionary for *symbol*, fetching on first use."""
        if self._data_set is None:
            self.refresh()
        return self._data_set[symbol]

    def get_price(self, symbol):
        return self._data_set[symbol]['LastTradePriceOnly']

    def get_price_change(self, symbol):
        return self._data_set[symbol]['Change']

    def get_perc_change(self, symbol):
        return self._data_set[symbol]['ChangeinPercent']

    def _get_stock_tuples(self):
        """Build (symbol, price, price_change, perc_change) tuples for sorting."""
        stocks = []
        for sym in self._data_set:
            # Skip a leading '+' so float() can parse; a '-' parses as-is.
            i = 1 if self.get_price_change(sym)[0] == '+' else 0
            price_change = float(self.get_price_change(sym)[i:])
            perc_change = float(self.get_perc_change(sym)[i:-1])  # strip trailing '%'
            stocks.append((sym, float(self.get_price(sym)), price_change, perc_change))
        return stocks

    def get_sorted_by_symbol(self):
        return sorted(self._get_stock_tuples())

    def get_winners(self):
        """Top gainers by percent change, best first."""
        return sorted(self._get_stock_tuples(),
                      key=lambda t: t[-1], reverse=True)[:Portfolio.SHORT_LIST_LEN]

    def get_losers(self):
        """Top losers by percent change, worst first."""
        return sorted(self._get_stock_tuples(),
                      key=lambda t: t[-1])[:Portfolio.SHORT_LIST_LEN]

    def _prepare_query(self, **kwargs):
        """
        Simple YQL query builder

        :raises ValueError: if no symbols have been added yet.
        """
        # Fix: the original used `self.symbols.keys() == []` (never true on
        # Python 3, where keys() is a view) and the Python-2-only statement
        # form `raise ValueError, "..."` (a SyntaxError on Python 3).
        if not self.symbols:
            raise ValueError("Not enough symbols in table")
        tup = str(tuple(self.symbols.keys()))
        if len(self.symbols) == 1:
            # Drop the 1-tuple's trailing comma: ('SPY',) -> ('SPY')
            tup = tup.replace(',', '')
        query = 'select * from yahoo.finance.quotes where symbol in ' + tup
        if kwargs:
            query += ''.join(' and {0}="{1}"'.format(k, v)
                             for k, v in kwargs.items())
        return query

    def add_share(self, share):
        """Register a Share in the portfolio, keyed by its ticker symbol."""
        sym = share.symbol
        if sym not in self.symbols:
            self.symbols[sym] = []
        self.symbols[sym].append(share)

    @staticmethod
    def _is_error_in_results(results):
        """
        Check if key name does not start from `Error*`

        For example when Symbol is not found we can find key:
        `"ErrorIndicationreturnedforsymbolchangedinvalid": "No such ticker symbol. (...)",`
        """
        # check if response is dictionary, skip if it is different e.g. list from `get_historical()`
        if isinstance(results, dict):
            return next((results[i] for i in results.keys() if 'Error' in i), False)

    @staticmethod
    def _change_incorrect_none(results):
        """
        Change N/A values to None
        """
        # check if response is dictionary, skip if it is different e.g. list from `get_historical()`
        if isinstance(results, dict):
            for k, v in results.items():
                if v:
                    if 'N/A' in v:
                        results[k] = None

    def _request(self, query):
        """Execute *query* via YQL and return a list of result dictionaries.

        :raises YQLQueryError: when the service reports an error
        :raises YQLResponseMalformedError: when the response has no
            recognizable results or error payload
        """
        response = YQLQuery().execute(query)
        try:
            _, results = response['query']['results'].popitem()
        except (KeyError, StopIteration):
            try:
                raise YQLQueryError(response['error']['description'])
            except KeyError:
                raise YQLResponseMalformedError()
        else:
            if self._is_error_in_results(results):
                raise YQLQueryError(self._is_error_in_results(results))
            self._change_incorrect_none(results)
            # Fix: isinstance() instead of `type(results) == dict`.
            if isinstance(results, dict):
                results = [results]  # normalize single-quote responses
            return results

    def _fetch(self):
        """Fetch fresh quotes for every symbol; returns symbol -> quote dict."""
        query = self._prepare_query()
        data = self._request(query)
        data_dict = {}
        for d in data:
            data_dict[d['symbol']] = d
        return data_dict

    def refresh(self):
        """
        Refresh stock data
        """
        self._data_set = self._fetch()
class Share(object):
    """A single ticker symbol tracked by a Portfolio."""

    def __init__(self, symbol):
        self.symbol = symbol
# def get_price(self):
# return self.data_set['LastTradePriceOnly']
#
# def get_change(self):
# return self.data_set['Change']
#
# def get_volume(self):
# return self.data_set['Volume']
#
# def get_prev_close(self):
# return self.data_set['PreviousClose']
#
# def get_open(self):
# return self.data_set['Open']
#
# def get_avg_daily_volume(self):
# return self.data_set['AverageDailyVolume']
#
# def get_stock_exchange(self):
# return self.data_set['StockExchange']
#
# def get_market_cap(self):
# return self.data_set['MarketCapitalization']
#
# def get_book_value(self):
# return self.data_set['BookValue']
#
# def get_ebitda(self):
# return self.data_set['EBITDA']
#
# def get_dividend_share(self):
# return self.data_set['DividendShare']
#
# def get_dividend_yield(self):
# return self.data_set['DividendYield']
#
# def get_earnings_share(self):
# return self.data_set['EarningsShare']
#
# def get_days_high(self):
# return self.data_set['DaysHigh']
#
# def get_days_low(self):
# return self.data_set['DaysLow']
#
# def get_year_high(self):
# return self.data_set['YearHigh']
#
# def get_year_low(self):
# return self.data_set['YearLow']
#
# def get_50day_moving_avg(self):
# return self.data_set['FiftydayMovingAverage']
#
# def get_200day_moving_avg(self):
# return self.data_set['TwoHundreddayMovingAverage']
#
# def get_price_earnings_ratio(self):
# return self.data_set['PERatio']
#
# def get_price_earnings_growth_ratio(self):
# return self.data_set['PEGRatio']
#
# def get_price_sales(self):
# return self.data_set['PriceSales']
#
# def get_price_book(self):
# return self.data_set['PriceBook']
#
# def get_short_ratio(self):
# return self.data_set['ShortRatio']
#
# def get_trade_datetime(self):
# return self.data_set['LastTradeDateTimeUTC']
#
# def get_historical(self, start_date, end_date):
# """
# Get Yahoo Finance Stock historical prices
#
# :param start_date: string date in format '2009-09-11'
# :param end_date: string date in format '2009-09-11'
# :return: list
# """
# hist = []
# for s, e in get_date_range(start_date, end_date):
# try:
# query = self._prepare_query(table='historicaldata', startDate=s, endDate=e)
# result = self._request(query)
# if isinstance(result, dict):
# result = [result]
# hist.extend(result)
# except AttributeError:
# pass
# return hist
# def get_info(self):
# """
# Get Yahoo Finance Stock Summary Information
#
# :return: dict
# """
# query = self._prepare_query(table='stocks')
# return self._request(query)
| {
"repo_name": "eric-chahin/finance_terminal_dashboard",
"path": "finance.py",
"copies": "1",
"size": "10406",
"license": "apache-2.0",
"hash": -4345730616649269000,
"line_mean": 30.1556886228,
"line_max": 123,
"alpha_frac": 0.5827407265,
"autogenerated": false,
"ratio": 3.5418652144315863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46246059409315865,
"avg_score": null,
"num_lines": null
} |
__all__=['writemarkdown','putlogo','print_abscissavalue','figsize']
from IPython.display import display,Markdown
from IPython.core.getipython import get_ipython
import matplotlib.pyplot as plt
import sastool
import numpy as np
import pkg_resources
# Fix: scipy.misc.imread was deprecated in SciPy 1.0 and removed in 1.2;
# matplotlib.pyplot.imread (plt is already imported above) reads the PNG
# logo instead.
credo_logo = plt.imread(pkg_resources.resource_filename('credolib', 'resource/credo_logo.png'))
def writemarkdown(*args):
    """Render the space-joined string form of *args* as IPython Markdown."""
    text = ' '.join(str(a) for a in args)
    display(Markdown(text))
def putlogo(figure=None):
    """Puts the CREDO logo at the bottom right of the current figure (or
    the figure given by the ``figure`` argument if supplied).

    :param figure: matplotlib figure to decorate; defaults to ``plt.gcf()``.
    """
    # Fix: removed the unused local `ip = get_ipython()`.
    if figure is None:
        figure = plt.gcf()
    curraxis = figure.gca()  # remember the active axes so we can restore it
    logoaxis = figure.add_axes([0.89, 0.01, 0.1, 0.1], anchor='NW')
    logoaxis.set_axis_off()
    logoaxis.xaxis.set_visible(False)
    logoaxis.yaxis.set_visible(False)
    logoaxis.imshow(credo_logo)
    figure.subplots_adjust(right=0.98)
    figure.sca(curraxis)  # restore the previously active axes
def print_abscissavalue(q, wavelength=None, distance=None, digits=10):
    """Return a string describing scattering variable *q* and its
    equivalent representations (d-spacing, Rg, and optionally the
    scattering angle and detector radius).

    :param q: scattering variable
    :param wavelength: if given, also report the scattering angle 2theta
    :param distance: if given together with wavelength, also report the
        radius on the detector in mm
    :param digits: number of decimal digits in the derived values
    """
    qunit = sastool.libconfig.qunit()
    dunit = sastool.libconfig.dunit()
    fmt = '%%.%df' % digits
    parts = [str(q) + ' ' + qunit, '(']
    parts.append(' <=> ' + fmt % (2 * np.pi / q) + ' ' + dunit + '(d)')
    parts.append(' <=> ' + fmt % (1 / q) + ' ' + dunit + '(Rg)')
    if wavelength is not None:
        tth_rad = 2 * np.arcsin((q * wavelength) / 4 / np.pi)
        tth_deg = tth_rad * 180.0 / np.pi
        parts.append(' <=> ' + fmt % (tth_deg) + '\xb0')
        if distance is not None:
            radius = np.tan(tth_rad) * distance
            parts.append(' <=> ' + fmt % (radius) + ' mm(r)')
    parts.append(')')
    return ''.join(parts)
class figsize(object):
    """Context manager that temporarily overrides matplotlib's default
    figure size (``rcParams['figure.figsize']``)."""

    def __init__(self, sizex, sizey):
        # Fix: defer touching rcParams until __enter__, so merely
        # constructing an instance (without entering it) no longer leaks
        # a permanent rcParams change.
        self._size = (sizex, sizey)
        self._originalsize = None

    def __enter__(self):
        self._originalsize = plt.rcParams['figure.figsize']
        plt.rcParams['figure.figsize'] = self._size
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        plt.rcParams['figure.figsize'] = self._originalsize
        return False  # we don't want to suppress the exception, if any
| {
"repo_name": "awacha/credolib",
"path": "credolib/utils.py",
"copies": "1",
"size": "2186",
"license": "bsd-3-clause",
"hash": -2977584823737724000,
"line_mean": 36.6896551724,
"line_max": 90,
"alpha_frac": 0.6299176578,
"autogenerated": false,
"ratio": 3.2052785923753664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9267159157288394,
"avg_score": 0.013607418577394434,
"num_lines": 58
} |
__all__ = ['write_mol2']
def write_mol2(filename, traj):
"""Output a Trajectory as a TRIPOS mol2 file.
Parameters
----------
filename : str
Path of the output file.
"""
atoms = list(traj.top.atoms)
xyz = traj.xyz[0]
bonds = list(traj.top.bonds)
with open(filename, 'w') as mol2_file:
mol2_file.write("@<TRIPOS>MOLECULE\n")
mol2_file.write("Generated by mBuild\n")
mol2_file.write("{0} {1} 0 0 0\n".format(len(atoms), len(bonds)))
mol2_file.write("SMALL\n")
mol2_file.write("NO_CHARGES\n")
mol2_file.write("\n")
mol2_file.write("@<TRIPOS>ATOM\n")
atom_mapping = dict()
for coord, atom in zip(xyz, atoms):
x, y, z = coord * 10.0 # Nanometers to angstroms.
mol2_file.write("{0:d} {1:s} {2:8.4f} {3:8.4f} {4:8.4f} {5:s} {6:d} {7:s} {8:8.4f}\n".format(
atom.index + 1, atom.name, x, y, z, atom.name, 1, atom.residue.name, 0.0))
atom_mapping[atom] = atom.index + 1
if len(bonds) > 0:
mol2_file.write("@<TRIPOS>BOND\n")
for bond_n, bond in enumerate(bonds):
atom1 = atom_mapping[bond[0]]
atom2 = atom_mapping[bond[1]]
mol2_file.write("{0} {1} {2} 1\n".format(bond_n + 1, atom1, atom2))
mol2_file.write("<@TRIPOS>\n")
| {
"repo_name": "Jonestj1/mbuild",
"path": "mbuild/formats/mol2.py",
"copies": "1",
"size": "1375",
"license": "mit",
"hash": -6047402242403095000,
"line_mean": 33.375,
"line_max": 105,
"alpha_frac": 0.5272727273,
"autogenerated": false,
"ratio": 2.789046653144016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3816319380444016,
"avg_score": null,
"num_lines": null
} |
__all__ = ['writer']
import docutils.core as dc
import docutils.writers
from docutils import nodes
from docutils.writers.latex2e import (Writer, LaTeXTranslator,
PreambleCmds)
from .rstmath import mathEnv
from . import code_block
from options import options
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
# Extend the docutils LaTeX preamble: caption styling plus the float
# package (pulled into the output via self.requirements in visit_figure).
PreambleCmds.float_settings = '''
\\usepackage[font={small,it},labelfont=bf]{caption}
\\usepackage{float}
'''
class Translator(LaTeXTranslator):
    """docutils LaTeX translator customized for proceedings papers.

    Collects paper metadata (authors, institutions, emails, abstract,
    keywords, bibliography, video URL) from docinfo fields while the
    document tree is walked, then assembles the title block with author
    footnote marks in ``depart_document``. Also customizes figures,
    tables, footnotes, highlighted code listings, block quotes and the
    math nodes contributed by ``rstmath``.
    """

    def __init__(self, *args, **kwargs):
        LaTeXTranslator.__init__(self, *args, **kwargs)

        # Handle author declarations
        self.current_field = ''  # docinfo field name currently being parsed

        self.copyright_holder = None
        self.author_names = []
        self.author_institutions = []
        self.author_institution_map = dict()  # author name -> [institution, ...]
        self.author_emails = []
        self.corresponding = []       # author names flagged :corresponding:
        self.equal_contributors = []  # author names flagged :equal-contributor:
        self.paper_title = ''
        self.abstract_text = []
        self.keywords = ''
        self.table_caption = []
        self.video_url = ''
        self.bibliography = ''

        # This gets read by the underlying docutils implementation.
        # If present, it is a list with the first entry the style name
        # and the second entry the BiBTeX file (see `visit_field_body`)
        self.bibtex = None

        self.abstract_in_progress = False
        self.non_breaking_paragraph = False

        self.figure_type = 'figure'    # 'figure' or 'figure*' (two-column wide)
        self.figure_alignment = 'left'
        self.table_type = 'table'      # 'table' or 'table*' (two-column wide)

        self.active_table.set_table_style('booktabs')

    def visit_docinfo(self, node):
        # Docinfo fields are handled manually in visit_field_* below.
        pass

    def depart_docinfo(self, node):
        pass

    def visit_author(self, node):
        # Record the author's name and open an (empty) institution list for it.
        self.author_names.append(self.encode(node.astext()))
        self.author_institution_map[self.author_names[-1]] = []
        raise nodes.SkipNode

    def depart_author(self, node):
        pass

    def visit_classifier(self, node):
        pass

    def depart_classifier(self, node):
        pass

    def visit_field_name(self, node):
        # Remember which docinfo field we are in; its value is consumed by
        # visit_field_body.
        self.current_field = node.astext()
        raise nodes.SkipNode

    def visit_field_body(self, node):
        """Route the current docinfo field value into its metadata slot."""
        try:
            text = self.encode(node.astext())
        except TypeError:
            text = ''

        if self.current_field == 'email':
            self.author_emails.append(text)
        elif self.current_field == 'corresponding':
            self.corresponding.append(self.author_names[-1])
        elif self.current_field == 'equal-contributor':
            self.equal_contributors.append(self.author_names[-1])
        elif self.current_field == 'institution':
            self.author_institutions.append(text)
            self.author_institution_map[self.author_names[-1]].append(text)
        elif self.current_field == 'copyright_holder':
            self.copyright_holder = text
        elif self.current_field == 'video':
            self.video_url = text
        elif self.current_field == 'bibliography':
            # Switch docutils over to BibTeX-based citations.
            self.bibtex = ['alphaurl', text]
            self._use_latex_citations = True
            self._bibitems = ['', '']
            self.bibliography = text

        self.current_field = ''
        raise nodes.SkipNode

    def depart_field_body(self, node):
        raise nodes.SkipNode

    def depart_document(self, node):
        """Assemble the title block, author footnote marks, copyright
        thanks, running heads, and the per-paper stats dictionary."""
        LaTeXTranslator.depart_document(self, node)

        ## Generate footmarks

        # build map: institution -> (author1, author2)
        institution_authors = OrderedDict()
        for auth in self.author_institution_map:
            for inst in self.author_institution_map[auth]:
                institution_authors.setdefault(inst, []).append(auth)

        def footmark(n):
            """Insert footmark #n. Footmark 1 is reserved for
            the corresponding author. Footmark 2 is reserved for
            the equal contributors.\
            """
            return ('\\setcounter{footnotecounter}{%d}' % n,
                    '\\fnsymbol{footnotecounter}')

        # Build a footmark for the corresponding author
        corresponding_footmark = footmark(1)

        # Build a footmark for equal contributors
        equal_footmark = footmark(2)

        # Build one footmark for each institution
        institute_footmark = {}
        for i, inst in enumerate(institution_authors):
            institute_footmark[inst] = footmark(i + 3)

        # NOTE(review): this template appears unused below; '%(instutions)'
        # is misspelled and lacks an 's' conversion character, so
        # interpolating it would raise ValueError -- confirm before using.
        footmark_template = r'\thanks{%(footmark)s %(instutions)}'
        corresponding_auth_template = r'''%%
          %(footmark_counter)s\thanks{%(footmark)s %%
          Corresponding author: \protect\href{mailto:%(email)s}{%(email)s}}'''

        equal_contrib_template = r'''%%
          %(footmark_counter)s\thanks{%(footmark)s %%
          These authors contributed equally.}'''

        title = self.paper_title
        authors = []
        institutions_mentioned = set()
        equal_authors_mentioned = False
        corr_emails = []

        # Default the first author to corresponding if none was flagged.
        if len(self.corresponding) == 0:
            self.corresponding = [self.author_names[0]]
        for n, auth in enumerate(self.author_names):
            if auth in self.corresponding:
                corr_emails.append(self.author_emails[n])

        for n, auth in enumerate(self.author_names):
            # get footmarks
            footmarks = ''.join([''.join(institute_footmark[inst]) for inst in self.author_institution_map[auth]])
            if auth in self.equal_contributors:
                footmarks += ''.join(equal_footmark)
            if auth in self.corresponding:
                footmarks += ''.join(corresponding_footmark)
            authors += [r'%(author)s$^{%(footmark)s}$' %
                        {'author': auth,
                         'footmark': footmarks}]

            # Attach the "contributed equally" thanks only once, to the
            # first equal contributor encountered.
            if auth in self.equal_contributors and equal_authors_mentioned==False:
                fm_counter, fm = equal_footmark
                authors[-1] += equal_contrib_template % \
                    {'footmark_counter': fm_counter,
                     'footmark': fm}
                equal_authors_mentioned = True

            if auth in self.corresponding:
                fm_counter, fm = corresponding_footmark
                authors[-1] += corresponding_auth_template % \
                    {'footmark_counter': fm_counter,
                     'footmark': fm,
                     'email': ', '.join(corr_emails)}

            # Emit each institution's \thanks once, on its first author.
            for inst in self.author_institution_map[auth]:
                if not inst in institutions_mentioned:
                    fm_counter, fm = institute_footmark[inst]
                    authors[-1] += r'%(footmark_counter)s\thanks{%(footmark)s %(institution)s}' % \
                        {'footmark_counter': fm_counter,
                         'footmark': fm,
                         'institution': inst}
                    institutions_mentioned.add(inst)

        ## Add copyright

        # If things went spectacularly wrong, we could not even parse author
        # info. Just fill in some dummy info so that we can see the error
        # messages in the resulting PDF.
        if len(self.author_names) == 0:
            self.author_names = ['John Doe']
            self.author_emails = ['john@doe.com']
            authors = ['']

        copyright_holder = self.copyright_holder or (self.author_names[0] + ('.' if len(self.author_names) == 1 else ' et al.'))
        author_notes = r'''%%
          \noindent%%
          Copyright\,\copyright\,%(year)s %(copyright_holder)s %(copyright)s%%
        ''' % \
            {'email': self.author_emails[0],
             'year': options['proceedings']['year'],
             'copyright_holder': copyright_holder,
             'copyright': options['proceedings']['copyright']['article']}

        authors[-1] += r'\thanks{%s}' % author_notes

        ## Set up title and page headers

        if not self.video_url:
            video_template = ''
        else:
            video_template = r'\\\vspace{5mm}\tt\url{%s}\vspace{-5mm}' % self.video_url

        title_template = r'\newcounter{footnotecounter}' \
                r'\title{%s}\author{%s' \
                r'%s}\maketitle'
        title_template = title_template % (title, ', '.join(authors),
                                           video_template)

        marks = r'''
          \renewcommand{\leftmark}{%s}
          \renewcommand{\rightmark}{%s}
        ''' % (options['proceedings']['title']['short'], title.upper())
        title_template += marks

        self.body_pre_docinfo = [title_template]

        # Save paper stats
        self.document.stats = {'title': title,
                               'authors': ', '.join(self.author_names),
                               'author': self.author_names,
                               'author_email': self.author_emails,
                               'author_institution': self.author_institutions,
                               'author_institution_map' : self.author_institution_map,
                               'abstract': self.abstract_text,
                               'keywords': self.keywords,
                               'copyright_holder': copyright_holder,
                               'video': self.video_url,
                               'bibliography':self.bibliography}

        if hasattr(self, 'bibtex') and self.bibtex:
            self.document.stats.update({'bibliography': self.bibtex[1]})

    def end_open_abstract(self, node):
        # Close the abstract environment when leaving it; while inside,
        # keep accumulating the plain-text abstract for the paper stats.
        if 'abstract' not in node['classes'] and self.abstract_in_progress:
            self.out.append('\\end{abstract}')
            self.abstract_in_progress = False
        elif self.abstract_in_progress:
            self.abstract_text.append(self.encode(node.astext()))

    def visit_title(self, node):
        """Capture the paper title; suppress the 'References' heading."""
        self.end_open_abstract(node)

        if self.section_level == 1:
            if self.paper_title:
                import warnings
                warnings.warn(RuntimeWarning("Title set twice--ignored. "
                                             "Could be due to ReST"
                                             "error.)"))
            else:
                self.paper_title = self.encode(node.astext())
            raise nodes.SkipNode

        elif node.astext() == 'References':
            raise nodes.SkipNode

        LaTeXTranslator.visit_title(self, node)

    def visit_paragraph(self, node):
        """Handle abstract/keywords paragraphs and paragraph spacing."""
        self.end_open_abstract(node)

        if 'abstract' in node['classes'] and not self.abstract_in_progress:
            self.out.append('\\begin{abstract}')
            self.abstract_text.append(self.encode(node.astext()))
            self.abstract_in_progress = True

        elif 'keywords' in node['classes']:
            self.out.append('\\begin{IEEEkeywords}')
            self.keywords = self.encode(node.astext())

        elif self.non_breaking_paragraph:
            # One-shot flag set by figures/code blocks: skip the blank line
            # so the following text does not start a new paragraph.
            self.non_breaking_paragraph = False

        else:
            if self.active_table.is_open():
                self.out.append('\n')
            else:
                self.out.append('\n\n')

    def depart_paragraph(self, node):
        if 'keywords' in node['classes']:
            self.out.append('\\end{IEEEkeywords}')

    def visit_figure(self, node):
        self.requirements['float_settings'] = PreambleCmds.float_settings

        self.figure_type = 'figure'
        if 'classes' in node.attributes:
            placements = '[%s]' % ''.join(node.attributes['classes'])
            # 'w' is this project's marker for a wide (two-column) figure.
            if 'w' in placements:
                placements = placements.replace('w', '')
                self.figure_type = 'figure*'

        self.out.append('\\begin{%s}%s' % (self.figure_type, placements))

        if node.get('ids'):
            self.out += ['\n'] + self.ids_to_labels(node)

        self.figure_alignment = node.attributes.get('align', 'center')

    def depart_figure(self, node):
        self.out.append('\\end{%s}' % self.figure_type)

    def visit_image(self, node):
        align = self.figure_alignment or 'center'
        scale = node.attributes.get('scale', None)
        filename = node.attributes['uri']

        # Wide figures span \textwidth; normal ones span \columnwidth.
        if self.figure_type == 'figure*':
            width = r'\textwidth'
        else:
            width = r'\columnwidth'

        figure_opts = []

        if scale is not None:
            figure_opts.append('scale=%.2f' % (scale / 100.))

        # Only add \columnwidth if scale or width have not been specified.
        if 'scale' not in node.attributes and 'width' not in node.attributes:
            figure_opts.append(r'width=\columnwidth')

        self.out.append(r'\noindent\makebox[%s][%s]' % (width, align[0]))
        self.out.append(r'{\includegraphics[%s]{%s}}' % (','.join(figure_opts),
                                                         filename))

    def visit_footnote(self, node):
        # Handle case where footnote consists only of math
        if len(node.astext().split()) < 2:
            node.append(nodes.label(text='_abcdefghijklmno_'))

        # Work-around for a bug in docutils where
        # "%" is prepended to footnote text
        LaTeXTranslator.visit_footnote(self, node)
        # NOTE(review): assigning out[-1] from out[1] looks asymmetric;
        # possibly intended as self.out[-1].strip('%') -- confirm.
        self.out[-1] = self.out[1].strip('%')

        self.non_breaking_paragraph = True

    def visit_table(self, node):
        classes = node.attributes.get('classes', [])
        # 'w' marks a wide (two-column) table.
        if 'w' in classes:
            self.table_type = 'table*'
        else:
            self.table_type = 'table'

        self.out.append(r'\begin{%s}' % self.table_type)
        LaTeXTranslator.visit_table(self, node)

    def depart_table(self, node):
        LaTeXTranslator.depart_table(self, node)

        # Emit the caption that visit_thead stashed away.
        self.out.append(r'\caption{%s}' % ''.join(self.table_caption))
        self.table_caption = []

        self.out.append(r'\end{%s}' % self.table_type)
        self.active_table.set('preamble written', 1)
        self.active_table.set_table_style('booktabs')

    def visit_thead(self, node):
        # Store table caption locally and then remove it
        # from the table so that docutils doesn't render it
        # (in the wrong place)
        if self.active_table.caption:
            self.table_caption = self.active_table.caption
            self.active_table.caption = []

        opening = self.active_table.get_opening()
        opening = opening.replace('linewidth', 'tablewidth')
        self.active_table.get_opening = lambda: opening

        # For some reason, docutils want to process longtable headers twice. I
        # don't trust this fix entirely, but it does the trick for now.
        self.active_table.need_recurse = lambda: False

        LaTeXTranslator.visit_thead(self, node)

    def depart_thead(self, node):
        LaTeXTranslator.depart_thead(self, node)

    def visit_literal_block(self, node):
        """Render code blocks through Pygments when a language is given."""
        self.non_breaking_paragraph = True

        if 'language' in node.attributes:
            # do highlighting
            from pygments import highlight
            from pygments.lexers import PythonLexer, get_lexer_by_name
            from pygments.formatters import LatexFormatter

            extra_opts = 'fontsize=\\footnotesize'

            linenos = node.attributes.get('linenos', False)
            linenostart = node.attributes.get('linenostart', 1)
            if linenos:
                extra_opts += ',xleftmargin=2.25mm,numbersep=3pt'
            lexer = get_lexer_by_name(node.attributes['language'])
            tex = highlight(node.astext(), lexer,
                            LatexFormatter(linenos=linenos,
                                           linenostart=linenostart,
                                           verboptions=extra_opts))
            self.out.append("\\vspace{1mm}\n" + tex +
                            "\\vspace{1mm}\n")
            raise nodes.SkipNode
        else:
            LaTeXTranslator.visit_literal_block(self, node)

    def depart_literal_block(self, node):
        LaTeXTranslator.depart_literal_block(self, node)

    def visit_block_quote(self, node):
        self.out.append('\\begin{quotation}')
        LaTeXTranslator.visit_block_quote(self, node)

    def depart_block_quote(self, node):
        LaTeXTranslator.depart_block_quote(self, node)
        self.out.append('\\end{quotation}')

    # Math directives from rstex

    def visit_InlineMath(self, node):
        self.requirements['amsmath'] = r'\usepackage{amsmath}'
        self.out.append('$' + node['latex'] + '$')
        raise nodes.SkipNode

    def visit_PartMath(self, node):
        self.requirements['amsmath'] = r'\usepackage{amsmath}'
        self.out.append(mathEnv(node['latex'], node['label'], node['type']))
        self.non_breaking_paragraph = True
        raise nodes.SkipNode

    def visit_PartLaTeX(self, node):
        # Raw LaTeX pass-through; pull in any packages the node requests.
        if node["usepackage"]:
            for package in node["usepackage"]:
                self.requirements[package] = r'\usepackage{%s}' % package
        self.out.append("\n" + node['latex'] + "\n")
        raise nodes.SkipNode
# Module-level docutils LaTeX writer wired to use the customized
# ``Translator`` above (this is the object exported via __all__).
writer = Writer()
writer.translator_class = Translator
| {
"repo_name": "ScienceStacks/JViz",
"path": "scipy_proceedings/publisher/writer/__init__.py",
"copies": "3",
"size": "17040",
"license": "apache-2.0",
"hash": -1550142796647500500,
"line_mean": 35.0253699789,
"line_max": 128,
"alpha_frac": 0.5641431925,
"autogenerated": false,
"ratio": 4.126907241462824,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6191050433962824,
"avg_score": null,
"num_lines": null
} |
__all__ = ["write_story", "write_chapter", "write_zip"]
from zipfile import ZipFile
import codecs
import html
import json
import os
import tarfile
import tempfile
# Page skeleton. The two %s slots are filled first (title, chapter body);
# the escaped %%s slots survive that interpolation and are filled in a
# second pass with the chapter header and choice links (see write_chapter).
HTML_TEMPLATE = """\
<!DOCTYPE html>
<html>
<head>
<title>%s</title>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8" />
<meta charset="UTF-8" />
</head>
<body>
%%s
%s
%%s
</body>
</html>
"""

# Header block: chapter title, description, and author link (the author
# value fills both the href and the link text).
CHAPTER_HEADER_TEMPLATE = """\
<center>
<h1>%s</h1>
<h3>%s</h3>
by <a href="%s">%s</a>
</center>
"""

# Choice list: the chapter's question followed by <li> link items.
CHAPTER_LINKS_TEMPLATE = """\
<h3>%s</h3>
<ul>
%s
</ul>
"""
def format_html(pattern, *args):
    """Interpolate *args* into *pattern* verbatim.

    NOTE(review): despite the name, no HTML-escaping is applied here, and
    callers rely on that: ``write_chapter`` and ``generate_chapter_links``
    pass pre-built (and already selectively escaped) HTML fragments which
    would be double-escaped otherwise. The original body computed an
    ``escaped`` list and then discarded it; that dead work is removed.
    """
    return pattern % tuple(args)
def generate_chapter_header(chapter):
    """Build the HTML header block (title, description, author) for a
    chapter. Stories expose ``title``/``description``; plain chapters
    expose ``name`` and may lack a description."""
    name = chapter.title if hasattr(chapter, "title") else chapter.name
    description = chapter.description if hasattr(chapter, "description") else ""
    return format_html(CHAPTER_HEADER_TEMPLATE,
                       name, description, chapter.author, chapter.author)
def generate_chapter_links(chapter, chapter_pool):
    """Render the chapter's choices as an HTML list of links.

    Choice labels come from the pool when the target chapter is known,
    otherwise the raw URL is shown.
    """
    items = []
    for choice_id, choice_url in chapter.choices:
        if choice_id in chapter_pool:
            label = chapter_pool[choice_id].name
        else:
            label = choice_url
        items.append('<li><a href="%s">%s</a>'
                     % (html.escape(choice_url), html.escape(label)))
    return format_html(CHAPTER_LINKS_TEMPLATE, chapter.question, "\n".join(items))
def write_story(story, dest_dir):
    """Serialize a story's metadata plus every chapter into *dest_dir*.

    Writes ``meta.json`` for the story itself, then one JSON/HTML pair
    per chapter via :func:`write_chapter`.
    """
    meta = {
        "title": story.title,
        "description": story.description.replace("\r\n", "\n"),
        "author": story.author,
        "root": story.id,
        "url": story.url,
    }
    story_path = os.path.join(dest_dir, "meta.json")
    if not os.path.exists(dest_dir):
        os.mkdir(dest_dir)
    with codecs.open(story_path, "w", "utf-8") as fh:
        json.dump(meta, fh)
    # The story itself doubles as the root chapter; pass the chapter pool
    # so its choice links can resolve chapter names.
    write_chapter(story, dest_dir, story.chapters)
    for chap in story.chapters.values():
        write_chapter(chap, dest_dir)
def write_chapter(chapter, dest_dir, chapter_pool=None):
    """Write one chapter as a JSON metadata file plus an HTML page.

    :param chapter: chapter (or story acting as root chapter) to write
    :param dest_dir: output directory, created if missing
    :param chapter_pool: optional id -> chapter map used to resolve the
        display names of choice links; defaults to empty
    """
    # Fix: replaced the mutable default argument ``chapter_pool={}`` with
    # the None sentinel (the pool is only read here, but the idiom keeps
    # the API safe against accidental shared state).
    if chapter_pool is None:
        chapter_pool = {}
    metadata = {
        "name": chapter.name,
        "author": chapter.author,
        "id": chapter.id,
        "question": chapter.question,
    }
    if not os.path.exists(dest_dir):
        os.mkdir(dest_dir)
    metadata["choices"] = [choice[0] for choice in chapter.choices]
    # Two-pass interpolation: title/body first, then header and links
    # through the %%s slots left by HTML_TEMPLATE.
    html_data = format_html(HTML_TEMPLATE, chapter.name, chapter.text)
    html_data = html_data % (generate_chapter_header(chapter),
                             generate_chapter_links(chapter, chapter_pool))
    metadata_path = os.path.join(dest_dir, "%s.json" % chapter.id)
    html_path = os.path.join(dest_dir, "%s.html" % chapter.id)
    with codecs.open(metadata_path, "w", "utf-8") as fh:
        json.dump(metadata, fh)
    with codecs.open(html_path, "w", "utf-8") as fh:
        fh.write(html_data)
def write_tar(story, dest_file, compression="", is_story=True):
    """Pack a story (or a single chapter) into a tar archive.

    :param story: story or chapter object to serialize
    :param dest_file: path of the archive to create
    :param compression: '' for plain tar, or e.g. 'gz'/'bz2'/'xz'
    :param is_story: serialize via write_story when True, else write_chapter
    """
    temp_dir = tempfile.TemporaryDirectory()
    if is_story:
        write_story(story, temp_dir.name)
    else:
        write_chapter(story, temp_dir.name)
    # Fix: add files by absolute path with arcname instead of os.chdir(),
    # which permanently changed the process working directory; the file
    # handle is also now closed deterministically via the with-block.
    with tarfile.open(dest_file, "w:%s" % compression.lower()) as tarfh:
        for fn in os.listdir(temp_dir.name):
            tarfh.add(os.path.join(temp_dir.name, fn), arcname=fn)
def write_zip(story, dest_file, is_story=True):
    """Pack a story (or a single chapter) into a zip archive.

    :param story: story or chapter object to serialize
    :param dest_file: path of the archive to create
    :param is_story: serialize via write_story when True, else write_chapter
    """
    temp_dir = tempfile.TemporaryDirectory()
    if is_story:
        write_story(story, temp_dir.name)
    else:
        write_chapter(story, temp_dir.name)
    with ZipFile(dest_file, "w") as zipfh:
        # Fix: write by absolute path with arcname instead of os.chdir(),
        # which permanently changed the process working directory.
        for fn in os.listdir(temp_dir.name):
            zipfh.write(os.path.join(temp_dir.name, fn), arcname=fn)
| {
"repo_name": "sexypants/chyoa-scraper",
"path": "chyoa/serial.py",
"copies": "1",
"size": "3724",
"license": "mit",
"hash": -1789131175695059700,
"line_mean": 23.6622516556,
"line_max": 109,
"alpha_frac": 0.6114393126,
"autogenerated": false,
"ratio": 3.2186689714779604,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43301082840779603,
"avg_score": null,
"num_lines": null
} |
__all__ = ['W', 'WSP']
__author__ = "Sergio J. Rey <srey@asu.edu> "
import pysal
from pysal.common import ROD
import math
import numpy as np
import scipy.sparse
import gc
from pysal.weights import util
class W(object):
"""
Spatial weights
Parameters
----------
neighbors : dictionary
key is region ID, value is a list of neighbor IDS
Example: {'a':['b'],'b':['a','c'],'c':['b']}
weights = None : dictionary
key is region ID, value is a list of edge weights
If not supplied all edge weights are assumed to have a weight of 1.
Example: {'a':[0.5],'b':[0.5,1.5],'c':[1.5]}
id_order = None : list
An ordered list of ids, defines the order of
observations when iterating over W if not set,
lexicographical ordering is used to iterate and the
id_order_set property will return False. This can be
set after creation by setting the 'id_order' property.
Attributes
----------
asymmetries
cardinalities
diagW2
diagWtW
diagWtW_WW
histogram
id2i
id_order
id_order_set
islands
max_neighbors
mean_neighbors
min_neighbors
n
neighbor_offsets
nonzero
pct_nonzero
s0
s1
s2
s2array
sd
sparse
trcW2
trcWtW
trcWtW_WW
transform
Examples
--------
>>> from pysal import W, lat2W
>>> neighbors = {0: [3, 1], 1: [0, 4, 2], 2: [1, 5], 3: [0, 6, 4], 4: [1, 3, 7, 5], 5: [2, 4, 8], 6: [3, 7], 7: [4, 6, 8], 8: [5, 7]}
>>> weights = {0: [1, 1], 1: [1, 1, 1], 2: [1, 1], 3: [1, 1, 1], 4: [1, 1, 1, 1], 5: [1, 1, 1], 6: [1, 1], 7: [1, 1, 1], 8: [1, 1]}
>>> w = W(neighbors, weights)
>>> w.pct_nonzero
0.29629629629629628
Read from external gal file
>>> import pysal
>>> w = pysal.open(pysal.examples.get_path("stl.gal")).read()
>>> w.n
78
>>> w.pct_nonzero
0.065417488494411577
Set weights implicitly
>>> neighbors = {0: [3, 1], 1: [0, 4, 2], 2: [1, 5], 3: [0, 6, 4], 4: [1, 3, 7, 5], 5: [2, 4, 8], 6: [3, 7], 7: [4, 6, 8], 8: [5, 7]}
>>> w = W(neighbors)
>>> w.pct_nonzero
0.29629629629629628
>>> w = lat2W(100, 100)
>>> w.trcW2
39600.0
>>> w.trcWtW
39600.0
>>> w.transform='r'
>>> w.trcW2
2530.7222222222586
>>> w.trcWtW
2533.6666666666774
Cardinality Histogram
>>> w=pysal.rook_from_shapefile(pysal.examples.get_path("sacramentot2.shp"))
>>> w.histogram
[(1, 1), (2, 6), (3, 33), (4, 103), (5, 114), (6, 73), (7, 35), (8, 17), (9, 9), (10, 4), (11, 4), (12, 3), (13, 0), (14, 1)]
"""
def __init__(self, neighbors, weights=None, id_order=None):
self.transformations = {}
self.neighbors = ROD(neighbors)
if not weights:
weights = {}
for key in neighbors:
weights[key] = [1.] * len(neighbors[key])
self.weights = ROD(weights)
self.transformations['O'] = self.weights # original weights
self.transform = 'O'
if id_order is None:
self._id_order = self.neighbors.keys()
self._id_order.sort()
self._id_order_set = False
else:
self._id_order = id_order
self._id_order_set = True
self._reset()
self._n = len(self.weights)
def _reset(self):
    """Invalidate every cached derived statistic.

    Called after any change that alters the weights or their ordering.
    """
    self._cache = dict()
@property
def sparse(self):
    """
    Sparse matrix object
    For any matrix manipulations required for w, w.sparse should be
    used. This is based on scipy.sparse.
    """
    if 'sparse' in self._cache:
        return self._sparse
    self._sparse = self._build_sparse()
    self._cache['sparse'] = self._sparse
    return self._sparse
def _build_sparse(self):
    """
    construct the sparse attribute
    Builds an n x n CSR matrix from the neighbor/weight dictionaries,
    using the current id_order for row/column indexing.
    """
    row = []
    col = []
    data = []
    # disable gc while the (potentially large) temporary lists grow;
    # re-enabled immediately after the loop
    gc.disable()
    id2i = self.id2i
    for id, neigh_list in self.neighbor_offsets.iteritems():  # Python 2 dict API
        card = self.cardinalities[id]
        row.extend([id2i[id]] * card)  # one row entry per neighbor of id
        col.extend(neigh_list)         # neighbor positions already in offset form
        data.extend(self.weights[id])
    gc.enable()
    row = np.array(row)
    col = np.array(col)
    data = np.array(data)
    s = scipy.sparse.csr_matrix((data, (row, col)), shape=(self.n, self.n))
    return s
@property
def id2i(self):
    """
    Dictionary where the key is an ID and the value is that ID's
    index in W.id_order.
    """
    if 'id2i' not in self._cache:
        mapping = {}
        for idx, obj_id in enumerate(self._id_order):
            mapping[obj_id] = idx
        self._id2i = ROD(mapping)
        self._cache['id2i'] = self._id2i
    return self._id2i
@property
def n(self):
    """
    number of units
    """
    if "n" in self._cache:
        return self._n
    self._n = len(self.neighbors)
    self._cache['n'] = self._n
    return self._n
@property
def s0(self):
    r"""
    float
    .. math::
        s0=\sum_i \sum_j w_{i,j}
    """
    if 's0' in self._cache:
        return self._s0
    total = self.sparse.sum()
    self._s0 = total
    self._cache['s0'] = total
    return self._s0
@property
def s1(self):
    r"""
    float
    .. math::
        s1=1/2 \sum_i \sum_j (w_{i,j} + w_{j,i})^2
    """
    if 's1' in self._cache:
        return self._s1
    sym = self.sparse.transpose() + self.sparse
    squared = sym.multiply(sym)  # element-wise square
    self._s1 = squared.sum() / 2.
    self._cache['s1'] = self._s1
    return self._s1
@property
def s2array(self):
    """
    individual elements comprising s2
    See Also
    --------
    s2
    """
    if 's2array' in self._cache:
        return self._s2array
    s = self.sparse
    row_plus_col = s.sum(1) + s.sum(0).transpose()
    self._s2array = np.array(row_plus_col) ** 2
    self._cache['s2array'] = self._s2array
    return self._s2array
@property
def s2(self):
    r"""
    float
    .. math::
        s2=\sum_j (\sum_i w_{i,j} + \sum_i w_{j,i})^2
    """
    if 's2' in self._cache:
        return self._s2
    self._s2 = self.s2array.sum()
    self._cache['s2'] = self._s2
    return self._s2
@property
def trcW2(self):
    """
    Trace of :math:`WW` : float
    See Also
    --------
    diagW2
    """
    if 'trcW2' not in self._cache:
        self._trcW2 = self.diagW2.sum()
        # BUGFIX: was cached under 'trcw2' (lower-case w) while the guard
        # checked 'trcW2', so the cache never hit and the trace was
        # recomputed on every access
        self._cache['trcW2'] = self._trcW2
    return self._trcW2
@property
def diagW2(self):
    """
    Diagonal of :math:`WW` : array
    See Also
    --------
    trcW2
    """
    # BUGFIX: the guard checked 'diagw2' (lower-case) while the value was
    # stored under 'diagW2', so the cached diagonal was never reused and
    # the sparse matrix product ran on every access
    if 'diagW2' not in self._cache:
        self._diagW2 = (self.sparse * self.sparse).diagonal()
        self._cache['diagW2'] = self._diagW2
    return self._diagW2
@property
def diagWtW(self):
    """
    Diagonal of :math:`W^{'}W` : array
    See Also
    --------
    trcWtW
    """
    if 'diagWtW' in self._cache:
        return self._diagWtW
    wt = self.sparse.transpose()
    self._diagWtW = (wt * self.sparse).diagonal()
    self._cache['diagWtW'] = self._diagWtW
    return self._diagWtW
@property
def trcWtW(self):
    """
    Trace of :math:`W^{'}W` : float
    See Also
    --------
    diagWtW
    """
    if 'trcWtW' in self._cache:
        return self._trcWtW
    self._trcWtW = self.diagWtW.sum()
    self._cache['trcWtW'] = self._trcWtW
    return self._trcWtW
@property
def diagWtW_WW(self):
    """
    diagonal of :math:`W^{'}W + WW`
    """
    if 'diagWtW_WW' in self._cache:
        return self._diagWtW_WW
    w = self.sparse
    wt = w.transpose()
    self._diagWtW_WW = (wt * w + w * w).diagonal()
    self._cache['diagWtW_WW'] = self._diagWtW_WW
    return self._diagWtW_WW
@property
def trcWtW_WW(self):
    """
    trace of :math:`W^{'}W + WW`
    """
    if 'trcWtW_WW' in self._cache:
        return self._trcWtW_WW
    self._trcWtW_WW = self.diagWtW_WW.sum()
    self._cache['trcWtW_WW'] = self._trcWtW_WW
    return self._trcWtW_WW
@property
def pct_nonzero(self):
    """
    percentage of nonzero weights (as a fraction of the n*n cells)
    """
    if 'pct_nonzero' in self._cache:
        return self._pct_nonzero
    self._pct_nonzero = self.sparse.nnz / (1. * self._n ** 2)
    self._cache['pct_nonzero'] = self._pct_nonzero
    return self._pct_nonzero
@property
def cardinalities(self):
    """
    number of neighbors for each observation : dict
    """
    if 'cardinalities' in self._cache:
        return self._cardinalities
    self._cardinalities = dict(
        (obs, len(self.neighbors[obs])) for obs in self._id_order)
    self._cache['cardinalities'] = self._cardinalities
    return self._cardinalities
@property
def max_neighbors(self):
    """
    largest number of neighbors
    """
    if 'max_neighbors' in self._cache:
        return self._max_neighbors
    cards = self.cardinalities
    self._max_neighbors = max(cards.values())
    self._cache['max_neighbors'] = self._max_neighbors
    return self._max_neighbors
@property
def mean_neighbors(self):
    """
    average number of neighbors : float
    """
    if 'mean_neighbors' not in self._cache:
        # list(...) is a no-op copy on Python 2 but keeps np.mean working
        # on Python 3, where dict.values() is a view numpy cannot reduce
        self._mean_neighbors = np.mean(list(self.cardinalities.values()))
        self._cache['mean_neighbors'] = self._mean_neighbors
    return self._mean_neighbors
@property
def min_neighbors(self):
    """
    minimum number of neighbors
    """
    if 'min_neighbors' in self._cache:
        return self._min_neighbors
    cards = self.cardinalities
    self._min_neighbors = min(cards.values())
    self._cache['min_neighbors'] = self._min_neighbors
    return self._min_neighbors
@property
def nonzero(self):
    """
    number of nonzero weights
    """
    if 'nonzero' in self._cache:
        return self._nonzero
    self._nonzero = self.sparse.nnz
    self._cache['nonzero'] = self._nonzero
    return self._nonzero
@property
def sd(self):
    """
    standard deviation of number of neighbors : float
    """
    if 'sd' not in self._cache:
        # list(...) is a no-op copy on Python 2 but keeps np.std working
        # on Python 3, where dict.values() is a view numpy cannot reduce
        self._sd = np.std(list(self.cardinalities.values()))
        self._cache['sd'] = self._sd
    return self._sd
@property
def asymmetries(self):
    """
    list of id pairs with asymmetric weights
    """
    if 'asymmetries' in self._cache:
        return self._asymmetries
    self._asymmetries = self.asymmetry()
    self._cache['asymmetries'] = self._asymmetries
    return self._asymmetries
@property
def islands(self):
    """
    list of ids without any neighbors
    """
    if 'islands' in self._cache:
        return self._islands
    self._islands = [obs for obs, card in self.cardinalities.items()
                     if card == 0]
    self._cache['islands'] = self._islands
    return self._islands
@property
def histogram(self):
    """
    cardinality histogram : list of (cardinality, count) tuples covering
    the range from min_neighbors to max_neighbors.

    Note: despite the historical docstring, this is a list of tuples,
    not a dictionary keyed by id.
    """
    if 'histogram' not in self._cache:
        # list(...) around values() and zip() is a no-op on Python 2 but
        # required on Python 3 (dict view / lazy zip); behavior preserved
        ct, bin = np.histogram(list(self.cardinalities.values()),
                               range(self.min_neighbors, self.max_neighbors + 2))
        self._histogram = list(zip(bin, ct))
        self._cache['histogram'] = self._histogram
    return self._histogram
def __getitem__(self, key):
    """Dictionary-style access: w[id] maps each neighbor of `id` to its
    weight, e.g. ``w[0] -> {1: 1.0, 5: 1.0}``.
    """
    neigh = self.neighbors[key]
    wts = self.weights[key]
    return dict(zip(neigh, wts))
def __iter__(self):
    """
    Support iteration over weights
    Examples
    --------
    >>> import pysal
    >>> w=pysal.lat2W(3,3)
    >>> for i,wi in enumerate(w):
    ...     print i,wi
    ...
    0 {1: 1.0, 3: 1.0}
    1 {0: 1.0, 2: 1.0, 4: 1.0}
    2 {1: 1.0, 5: 1.0}
    3 {0: 1.0, 4: 1.0, 6: 1.0}
    4 {1: 1.0, 3: 1.0, 5: 1.0, 7: 1.0}
    5 {8: 1.0, 2: 1.0, 4: 1.0}
    6 {3: 1.0, 7: 1.0}
    7 {8: 1.0, 4: 1.0, 6: 1.0}
    8 {5: 1.0, 7: 1.0}
    >>>
    """
    class _W_iter:
        # Iterator yielding one {neighbor: weight} dict per unit, in the
        # current id_order. Uses the Python 2 iterator protocol (next(),
        # not __next__).
        def __init__(self, w):
            self.w = w
            self.n = len(w._id_order)
            self._idx = 0
        def next(self):
            if self._idx >= self.n:
                # rewind so the exhausted iterator object can be reused
                self._idx = 0
                raise StopIteration
            value = self.w.__getitem__(self.w._id_order[self._idx])
            self._idx += 1
            return value
    return _W_iter(self)
def __set_id_order(self, ordered_ids):
    """
    Set the iteration order in w.
    W can be iterated over. On construction the iteration order is set to
    the lexicographic order of the keys in the w.weights dictionary. If a specific order
    is required it can be set with this method.
    Parameters
    ----------
    ordered_ids : sequence
        identifiers for observations in specified order
    Notes
    -----
    ordered_ids is checked against the ids implied by the keys in
    w.weights. If they are not equivalent sets an exception is raised and
    the iteration order is not changed.
    Examples
    --------
    >>> import pysal
    >>> w=pysal.lat2W(3,3)
    >>> for i,wi in enumerate(w):
    ...     print i,wi
    ...
    0 {1: 1.0, 3: 1.0}
    1 {0: 1.0, 2: 1.0, 4: 1.0}
    2 {1: 1.0, 5: 1.0}
    3 {0: 1.0, 4: 1.0, 6: 1.0}
    4 {1: 1.0, 3: 1.0, 5: 1.0, 7: 1.0}
    5 {8: 1.0, 2: 1.0, 4: 1.0}
    6 {3: 1.0, 7: 1.0}
    7 {8: 1.0, 4: 1.0, 6: 1.0}
    8 {5: 1.0, 7: 1.0}
    >>> w.id_order
    [0, 1, 2, 3, 4, 5, 6, 7, 8]
    >>> w.id_order=range(8,-1,-1)
    >>> w.id_order
    [8, 7, 6, 5, 4, 3, 2, 1, 0]
    >>> for i,w_i in enumerate(w):
    ...     print i,w_i
    ...
    0 {5: 1.0, 7: 1.0}
    1 {8: 1.0, 4: 1.0, 6: 1.0}
    2 {3: 1.0, 7: 1.0}
    3 {8: 1.0, 2: 1.0, 4: 1.0}
    4 {1: 1.0, 3: 1.0, 5: 1.0, 7: 1.0}
    5 {0: 1.0, 4: 1.0, 6: 1.0}
    6 {1: 1.0, 5: 1.0}
    7 {0: 1.0, 2: 1.0, 4: 1.0}
    8 {1: 1.0, 3: 1.0}
    >>>
    """
    # accept the new order only if it covers exactly the same set of ids;
    # adopting it invalidates every cached derived statistic
    if set(self._id_order) == set(ordered_ids):
        self._id_order = ordered_ids
        self._idx = 0
        self._id_order_set = True
        self._reset()
    else:
        raise Exception('ordered_ids do not align with W ids')
def __get_id_order(self):
    """returns the ids for the observations in the order in which they
    would be encountered if iterating over the weights."""
    return self._id_order
# public accessor pair: assigning w.id_order validates the new order and
# resets cached statistics via __set_id_order
id_order = property(__get_id_order, __set_id_order)
@property
def id_order_set(self):
    """True when the user has explicitly set id_order, False otherwise.

    On construction this is False unless an id_order argument was given;
    assigning w.id_order afterwards flips it to True.
    """
    return self._id_order_set
@property
def neighbor_offsets(self):
    """
    Given the current id_order, neighbor_offsets[id] is the offsets of the
    id's neighbors in id_order
    Examples
    --------
    >>> from pysal import W
    >>> neighbors={'c': ['b'], 'b': ['c', 'a'], 'a': ['b']}
    >>> weights ={'c': [1.0], 'b': [1.0, 1.0], 'a': [1.0]}
    >>> w=W(neighbors,weights)
    >>> w.id_order = ['a','b','c']
    >>> w.neighbor_offsets['b']
    [2, 0]
    >>> w.id_order = ['b','a','c']
    >>> w.neighbor_offsets['b']
    [2, 1]
    """
    if "neighbors_0" not in self._cache:
        self.__neighbors_0 = {}
        id2i = self.id2i
        # translate each neighbor id into its positional index under the
        # current id_order (Python 2 dict iteration API)
        for id, neigh_list in self.neighbors.iteritems():
            self.__neighbors_0[id] = [id2i[neigh] for neigh in neigh_list]
        self._cache['neighbors_0'] = self.__neighbors_0
    return self.__neighbors_0
def get_transform(self):
    """Getter for the transform property.

    Returns
    -------
    transformation : string (or none)
        The code of the currently applied weights transformation, e.g.
        'O' (original), 'R' (row-standardized), 'B' (binary).
    """
    current = self._transform
    return current
def set_transform(self, value="B"):
    """
    Transformations of weights.
    Notes
    -----
    Transformations are applied only to the value of the weights at
    instantiation. Chaining of transformations cannot be done on a W
    instance.
    Parameters
    ----------
    transform : string (not case sensitive)
        B: Binary
        R: Row-standardization (global sum=n)
        D: Double-standardization (global sum=1)
        V: Variance stabilizing
        O: Restore original transformation (from instantiation)
    Examples
    --------
    >>> from pysal import lat2W
    >>> w=lat2W()
    >>> w.weights[0]
    [1.0, 1.0]
    >>> w.transform
    'O'
    >>> w.transform='r'
    >>> w.weights[0]
    [0.5, 0.5]
    >>> w.transform='b'
    >>> w.weights[0]
    [1.0, 1.0]
    >>>
    """
    value = value.upper()
    # NOTE(review): _transform is set before validation, so an unsupported
    # code still changes the reported transform -- confirm intended.
    self._transform = value
    if value in self.transformations:
        # transformation already computed once: reuse the cached weights
        self.weights = self.transformations[value]
        self._reset()
    else:
        if value == "R":
            # row standardized weights: each row sums to 1
            weights = {}
            self.weights = self.transformations['O']
            for i in self.weights:
                wijs = self.weights[i]
                row_sum = sum(wijs) * 1.0
                weights[i] = [wij / row_sum for wij in wijs]
            weights = ROD(weights)
            self.transformations[value] = weights
            self.weights = weights
            self._reset()
        elif value == "D":
            # doubly-standardized weights: global sum equals 1
            # update current chars before doing global sum
            self._reset()
            s0 = self.s0
            ws = 1.0 / s0
            weights = {}
            self.weights = self.transformations['O']
            for i in self.weights:
                wijs = self.weights[i]
                weights[i] = [wij * ws for wij in wijs]
            weights = ROD(weights)
            self.transformations[value] = weights
            self.weights = weights
            self._reset()
        elif value == "B":
            # binary transformation: every link gets weight 1.0
            weights = {}
            self.weights = self.transformations['O']
            for i in self.weights:
                wijs = self.weights[i]
                weights[i] = [1.0 for wij in wijs]
            weights = ROD(weights)
            self.transformations[value] = weights
            self.weights = weights
            self._reset()
        elif value == "V":
            # variance stabilizing: rows scaled by the root of their
            # sum of squares, then globally rescaled so the sum equals n
            weights = {}
            q = {}
            k = self.cardinalities
            s = {}
            Q = 0.0
            self.weights = self.transformations['O']
            for i in self.weights:
                wijs = self.weights[i]
                q[i] = math.sqrt(sum([wij * wij for wij in wijs]))
                s[i] = [wij / q[i] for wij in wijs]
                Q += sum([si for si in s[i]])
            nQ = self.n / Q
            for i in self.weights:
                weights[i] = [w * nQ for w in s[i]]
            weights = ROD(weights)
            self.transformations[value] = weights
            self.weights = weights
            self._reset()
        elif value == "O":
            # put weights back to original transformation
            weights = {}
            original = self.transformations[value]
            self.weights = original
            self._reset()
        else:
            # Python 2 print statement; unsupported codes are reported
            # but do not raise
            print 'unsupported weights transformation'
# read/write property wrapping the two accessors above
transform = property(get_transform, set_transform)
def asymmetry(self, intrinsic=True):
    r"""
    Asymmetry check.

    Parameters
    ----------
    intrinsic : boolean (default=True)
        If True, asymmetry means :math:`w_{i,j} \ne w_{j,i}` for some
        pair. If False, the weights are first made binary, so only
        membership asymmetry is detected (:math:`i \in N_j` but
        :math:`j \notin N_i`).

    Returns
    -------
    asymmetries : list, or tuple of arrays
        Empty list when no asymmetries are found; otherwise the
        (row indices, column indices) of the asymmetric cells.
    """
    if intrinsic:
        wd = self.sparse.transpose() - self.sparse
    else:
        # compare pure adjacency: switch to binary weights, take the
        # difference, then restore the caller's transformation
        saved_transform = self.transform
        self.transform = 'b'
        wd = self.sparse.transpose() - self.sparse
        self.transform = saved_transform
    ids = np.nonzero(wd)
    return [] if len(ids[0]) == 0 else ids
def full(self):
    """
    Generate a full numpy array
    Returns
    -------
    implicit : tuple
        first element being the full numpy array and second element
        keys being the ids associated with each row in the array.
    Examples
    --------
    >>> from pysal import W
    >>> neighbors={'first':['second'],'second':['first','third'],'third':['second']}
    >>> weights={'first':[1],'second':[1,1],'third':[1]}
    >>> w=W(neighbors,weights)
    >>> wf,ids=w.full()
    >>> wf
    array([[ 0., 1., 0.],
           [ 1., 0., 1.],
           [ 0., 1., 0.]])
    >>> ids
    ['first', 'second', 'third']
    See also
    --------
    full
    """
    # thin delegation to the module-level pysal.weights.util.full helper
    return util.full(self)
class WSP(object):
    """
    Thin W class for spreg
    Parameters
    ----------
    sparse : scipy sparse object
        NxN object from scipy.sparse
    id_order : list
        An ordered list of ids, assumed to match the ordering in
        sparse.
    Attributes
    ----------
    n
    s0
    trcWtW_WW
    Examples
    --------
    From GAL information
    >>> import scipy.sparse
    >>> import pysal
    >>> rows = [0, 1, 1, 2, 2, 3]
    >>> cols = [1, 0, 2, 1, 3, 3]
    >>> weights = [1, 0.75, 0.25, 0.9, 0.1, 1]
    >>> sparse = scipy.sparse.csr_matrix((weights, (rows, cols)), shape=(4,4))
    >>> w = pysal.weights.WSP(sparse)
    >>> w.s0
    4.0
    >>> w.trcWtW_WW
    6.3949999999999996
    >>> w.n
    4
    """
    def __init__(self, sparse, id_order=None):
        # validate input: must be a square scipy sparse matrix
        if not scipy.sparse.issparse(sparse):
            raise ValueError("must pass a scipy sparse object")
        rows, cols = sparse.shape
        if rows != cols:
            raise ValueError("Weights object must be square")
        self.sparse = sparse.tocsr()  # normalize to CSR for fast products
        self.n = sparse.shape[0]
        if id_order:
            if len(id_order) != self.n:
                raise ValueError("Number of values in id_order must match shape of sparse")
            self.id_order = id_order
        self._cache = {}  # memo store for the derived statistics below
    @property
    def s0(self):
        """
        float
        .. math::
            s0=\sum_i \sum_j w_{i,j}
        """
        # global sum of all weights, computed once and cached
        if 's0' not in self._cache:
            self._s0 = self.sparse.sum()
            self._cache['s0'] = self._s0
        return self._s0
    @property
    def trcWtW_WW(self):
        """
        trace of :math:`W^{'}W + WW`
        """
        if 'trcWtW_WW' not in self._cache:
            self._trcWtW_WW = self.diagWtW_WW.sum()
            self._cache['trcWtW_WW'] = self._trcWtW_WW
        return self._trcWtW_WW
    @property
    def diagWtW_WW(self):
        """
        diagonal of :math:`W^{'}W + WW`
        """
        if 'diagWtW_WW' not in self._cache:
            wt = self.sparse.transpose()
            w = self.sparse
            self._diagWtW_WW = (wt * w + w * w).diagonal()
            self._cache['diagWtW_WW'] = self._diagWtW_WW
        return self._diagWtW_WW
| {
"repo_name": "AlanZatarain/pysal",
"path": "pysal/weights/weights.py",
"copies": "5",
"size": "26309",
"license": "bsd-3-clause",
"hash": 5641957238633307000,
"line_mean": 26.810782241,
"line_max": 137,
"alpha_frac": 0.465810179,
"autogenerated": false,
"ratio": 3.6064427690198766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016802370322543387,
"num_lines": 946
} |
__all__ = ["XwalkImpl"]
from xwalk import Xwalk
from status import *
from devtools_http_client import WebViewsInfo
from devtools_http_client import WebViewInfo
from web_view_impl import WebViewImpl
from base.log import VLOG
class XwalkImpl(Xwalk):
    """Concrete Xwalk implementation backed by a DevTools HTTP client.

    Tracks the set of open web views (as WebViewImpl instances) and
    proxies browser-level operations through the client.
    """
    def __init__(self, client, devtools_event_listeners, port_reservation):
        Xwalk.__init__(self)
        self.quit = False  # set True once Quit() succeeds
        self.devtools_http_client = client
        self.web_views = []  # cached WebViewImpl list, refreshed in GetWebViewIds
        self.devtools_event_listeners = devtools_event_listeners
        self.port_reservation = port_reservation
    def Update(self, other):
        # Copy all state from another XwalkImpl instance in place.
        self.quit = other.quit
        self.devtools_http_client = other.devtools_http_client
        self.web_views = other.web_views
        self.devtools_event_listeners = other.devtools_event_listeners
        self.port_reservation = other.port_reservation
    def __del__(self):
        # If we never quit cleanly, deliberately leak the reserved port so
        # a new process cannot collide with the still-running browser.
        if not self.quit:
            self.port_reservation.Leak()
    # Overridden from Xwalk:
    def GetVersion(self):
        return self.devtools_http_client.version
    def GetAsDesktop(self):
        return None
    def GetBuildNo(self):
        return self.devtools_http_client.build_no
    def HasCrashedWebView(self):
        # True when any tracked web view reports a crash.
        for item in self.web_views:
            if item.WasCrashed():
                return True
        return False
    def GetWebViewIds(self, web_view_ids=[]):
        # NOTE(review): mutable default argument used as an out-parameter
        # (results are written via web_view_ids[:] = ...); callers sharing
        # the default list will see each other's results -- verify intended.
        views_info = WebViewsInfo()
        status = self.devtools_http_client.GetWebViewsInfo(views_info)
        if status.IsError():
            return status
        # Check if some web views are closed
        it = 0
        while it != len(self.web_views):
            if views_info.GetForId((self.web_views)[it].GetId()) == None:
                del (self.web_views)[it]
            else:
                it = it + 1
        # Check for newly-opened web views
        for view in views_info.views_info:
            # NOTE(review): attribute is spelled 'typer' here -- confirm it
            # matches the field name on WebViewInfo.
            if view.typer != WebViewInfo.kPage:
                continue
            found = False
            for web_view_iter in self.web_views:
                if web_view_iter.GetId() == view.sid:
                    found = True
                    break
            if not found:
                # new view: create a client, attach all listeners, track it
                client = self.devtools_http_client.CreateClient(view.sid)
                for listener in self.devtools_event_listeners:
                    client.AddListener(listener)
                self.web_views.append(WebViewImpl(view.sid, self.GetBuildNo(), client))
        web_view_ids_tmp = []
        for web_view_iter in self.web_views:
            web_view_ids_tmp.append(web_view_iter.GetId())
        web_view_ids[:] = web_view_ids_tmp
        return Status(kOk)
    def GetWebViewById(self, sid, web_view):
        # Out-parameter style: copies the matching view into `web_view`.
        for item in self.web_views:
            if item.GetId() == sid:
                web_view.Update(item)
                return Status(kOk)
        return Status(kUnknownError, "webview not found")
    def CloseWebView(self, sid):
        status = self.devtools_http_client.CloseWebView(sid)
        if status.IsError():
            return status
        # drop the closed view from our local cache
        for item in self.web_views:
            if item.GetId() == sid:
                self.web_views.remove(item)
                break
        return Status(kOk)
    def ActivateWebView(self, sid):
        return self.devtools_http_client.ActivateWebView(sid)
    def Quit(self):
        # NOTE(review): returns kOk even when QuitImpl fails; only the
        # `quit` flag reflects the real outcome -- confirm intended.
        status = self.QuitImpl()
        if status.IsOk():
            self.quit = True
        return Status(kOk)
    def QuitImpl(self):
        # Hook for subclasses; base implementation always succeeds.
        return Status(kOk)
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "browser/xwalk_impl.py",
"copies": "1",
"size": "3122",
"license": "bsd-3-clause",
"hash": 300599243435553400,
"line_mean": 28.1775700935,
"line_max": 79,
"alpha_frac": 0.6604740551,
"autogenerated": false,
"ratio": 3.503928170594837,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4664402225694837,
"avg_score": null,
"num_lines": null
} |
__all__ = ["YamlReaderError", "yaml_load"]
__version__ = "3.0.1"
from yaml import MarkedYAMLError, safe_load, safe_dump
import glob
import os
import logging
class NoDefault(object):
    """Sentinel type marking that no default data was supplied."""

    def __str__(self):
        return "No default data"


# Module-level singleton used as the default for yaml_load(defaultdata=...)
NO_DEFAULT = NoDefault()
class YamlReaderError(Exception):
    """Raised for YAML parse failures and unmergeable data combinations."""
    pass
def data_merge(a, b):
    """merges b into a and return merged result
    based on http://stackoverflow.com/questions/7204805/python-dictionaries-of-dictionaries-merge
    and extended to also merge arrays and to replace the content of keys with the same name
    NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen

    Merge rules: primitives (and None) in `a` are replaced by `b`; lists
    are extended (or appended to); dicts are merged recursively, with keys
    from `b` winning. `a` may be mutated and is also returned.
    """
    key = None
    # ## debug output
    # sys.stderr.write("DEBUG: %s to %s\n" %(b,a))
    try:
        # Python 2 type names (unicode, long) -- this module targets py2
        if a is None or isinstance(a, str) or isinstance(a, unicode) or isinstance(a, int) or isinstance(a, long) or isinstance(a, float):
            # border case for first run or if a is a primitive
            a = b
        elif isinstance(a, list):
            # lists can be only appended
            if isinstance(b, list):
                # merge lists
                a.extend(b)
            else:
                # append to list
                a.append(b)
        elif isinstance(a, dict):
            # dicts must be merged
            if isinstance(b, dict):
                for key in b:
                    if key in a:
                        a[key] = data_merge(a[key], b[key])
                    else:
                        a[key] = b[key]
            else:
                raise YamlReaderError('Cannot merge non-dict "%s" into dict "%s"' % (b, a))
        else:
            raise YamlReaderError('NOT IMPLEMENTED "%s" into "%s"' % (b, a))
    except TypeError, e:
        # `key` still holds the dict key being merged when the error hit
        raise YamlReaderError('TypeError "%s" in key "%s" when merging "%s" into "%s"' % (e, key, b, a))
    return a
def yaml_load(source, defaultdata=NO_DEFAULT):
    """merge YAML data from files found in source
    Always returns a dict. The YAML files are expected to contain some kind of
    key:value structures, possibly deeply nested. When merging, lists are
    appended and dict keys are replaced. The YAML files are read with the
    yaml.safe_load function.
    source can be a file, a dir, a list/tuple of files or a string containing
    a glob expression (with ?*[]).
    For a directory, all *.yaml files will be read in alphabetical order.
    defaultdata can be used to initialize the data.

    Raises YamlReaderError on parse errors, or when nothing was read and
    no defaultdata was given.
    """
    logger = logging.getLogger(__name__)
    logger.debug("initialized with source=%s, defaultdata=%s" % (source, defaultdata))
    if defaultdata is NO_DEFAULT:
        data = None
    else:
        data = defaultdata
    files = []
    if type(source) is not str and len(source) == 1:
        # when called from __main source is always a list, even if it contains only one item.
        # turn into a string if it contains only one item to support our different call modes
        source = source[0]
    if type(source) is list or type(source) is tuple:
        # got a list, assume to be files
        files = source
    elif os.path.isdir(source):
        # got a dir, read all *.yaml files
        files = sorted(glob.glob(os.path.join(source, "*.yaml")))
    elif os.path.isfile(source):
        # got a single file, turn it into list to use the same code
        files = [source]
    else:
        # try to use the source as a glob
        files = sorted(glob.glob(source))
    if files:
        logger.debug("Reading %s\n" % ", ".join(files))
        for yaml_file in files:
            try:
                # NOTE(review): if open() itself fails, `f` is unbound and
                # the finally clause raises NameError -- confirm acceptable.
                try:
                    f = open(yaml_file)
                    new_data = safe_load(f)
                    logger.debug("YAML LOAD: %s" % new_data)
                finally:
                    f.close()
            except MarkedYAMLError, e:
                logger.error("YAML Error: %s" % str(e))
                raise YamlReaderError("YAML Error: %s" % str(e))
            if new_data is not None:
                # skip empty documents; merge everything else in order
                data = data_merge(data, new_data)
    else:
        if defaultdata is NO_DEFAULT:
            logger.error("No YAML data found in %s and no default data given" % source)
            raise YamlReaderError("No YAML data found in %s" % source)
    return data
def __main():
    """Command-line entry point: merge the given YAML sources and dump
    the result to stdout as YAML."""
    import optparse
    parser = optparse.OptionParser(usage="%prog [options] source...",
                                   description="Merge YAML data from given files, dir or file glob",
                                   version="%" + "prog %s" % __version__,
                                   prog="yamlreader")
    parser.add_option("--debug", dest="debug", action="store_true", default=False, help="Enable debug logging [%default]")
    options, args = parser.parse_args()
    if options.debug:
        # wire up a stderr handler so yaml_load's debug output is visible
        logger = logging.getLogger()
        loghandler = logging.StreamHandler()
        loghandler.setFormatter(logging.Formatter('yamlreader: %(levelname)s: %(message)s'))
        logger.addHandler(loghandler)
        logger.setLevel(logging.DEBUG)
    if not args:
        parser.error("Need at least one argument")
    try:
        # Python 2 print statement; errors surface as parser.error exits
        print safe_dump(yaml_load(args, defaultdata={}),
                        indent=4, default_flow_style=False, canonical=False)
    except Exception, e:
        parser.error(e)
# Allow running as a script: yamlreader [--debug] <files|dir|glob>
if __name__ == "__main__":
    __main()
| {
"repo_name": "pombredanne/yamlreader",
"path": "src/yamlreader.py",
"copies": "1",
"size": "5353",
"license": "apache-2.0",
"hash": -2794685030680740400,
"line_mean": 36.4335664336,
"line_max": 138,
"alpha_frac": 0.5791145152,
"autogenerated": false,
"ratio": 4.0614567526555385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016618440357175674,
"num_lines": 143
} |
# One-off Django data-import script: wipes the Signs/Locations tables and
# reloads them from CSV dumps.
import sys,os
# NOTE(review): `your_djangoproject_home` is not defined anywhere in this
# script -- it must be edited/injected before running. TODO confirm.
sys.path.append(your_djangoproject_home)
os.environ['DJANGO_SETTINGS_MODULE'] ='mysite.settings'
from sweetspot.models import Locations, Signs
# Destructive: clear all existing rows before re-importing.
Signs.objects.all().delete()
Locations.objects.all().delete()
import csv
# NOTE(review): `csv_filepathname` is used here but only assigned further
# below (for the signs file); as written this line raises NameError unless
# the name is defined externally -- verify the intended locations CSV path.
dataReader = csv.reader(open(csv_filepathname), delimiter=',')
for row in dataReader:
    # columns: borough code, location code, main street, two cross
    # streets, geographic direction
    location = Locations()
    location.boroughCode = row[0]
    location.locationCode = row[1]
    location.mainStreet = row[2]
    location.crossStreet1 = row[3]
    location.crossStreet2 = row[4]
    location.geoDirection = row[5]
    location.save()
#csv_filepathname="/Users/sair/Downloads/signs.csv"
csv_filepathname="/Users/sair/Downloads/signs_top1000_locations.csv"
dataReader = csv.reader(open(csv_filepathname), delimiter=',', quotechar='"')
for row in dataReader:
    # columns: borough code, location code, location id, feet from curb,
    # geographic direction, sign description
    sign = Signs()
    sign.boroughCode = row[0]
    sign.locationCode = row[1]
    sign.locID = row[2]
    sign.FtFromCurb = row[3]
    sign.geoDirection = row[4]
    sign.desc = row[5]
    # link the sign to its Locations row via the shared location code
    sign_location = Locations.objects.get(locationCode = row[1])
    sign.location = sign_location
    sign.save()
| {
"repo_name": "psiCode/shoutserver",
"path": "mysite/sweetspot/dataImport.py",
"copies": "1",
"size": "1530",
"license": "bsd-2-clause",
"hash": -6989562442786302000,
"line_mean": 33.7727272727,
"line_max": 81,
"alpha_frac": 0.7091503268,
"autogenerated": false,
"ratio": 3.3260869565217392,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4535237283321739,
"avg_score": null,
"num_lines": null
} |
__all__ = ["ZeroInflatedPoisson", "ZeroInflatedGeneralizedPoisson",
"ZeroInflatedNegativeBinomialP"]
import warnings
import numpy as np
import statsmodels.base.model as base
import statsmodels.base.wrapper as wrap
import statsmodels.regression.linear_model as lm
from statsmodels.discrete.discrete_model import (DiscreteModel, CountModel,
Poisson, Logit, CountResults,
L1CountResults, Probit,
_discrete_results_docs,
_validate_l1_method,
GeneralizedPoisson,
NegativeBinomialP)
from statsmodels.distributions import zipoisson, zigenpoisson, zinegbin
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from statsmodels.compat.pandas import Appender
# Shared docstring fragment describing the extra zero-inflation parameters;
# spliced into each zero-inflated model's class docstring below.
_doc_zi_params = """
exog_infl : array_like or None
Explanatory variables for the binary inflation model, i.e. for
mixing probability model. If None, then a constant is used.
offset : array_like
Offset is added to the linear prediction with coefficient equal to 1.
exposure : array_like
Log(exposure) is added to the linear prediction with coefficient
equal to 1.
inflation : {'logit', 'probit'}
The model for the zero inflation, either Logit (default) or Probit
"""
class GenericZeroInflated(CountModel):
__doc__ = """
Generic Zero Inflated Model
%(params)s
%(extra_params)s
Attributes
----------
endog : ndarray
A reference to the endogenous response variable
exog : ndarray
A reference to the exogenous design.
exog_infl : ndarray
A reference to the zero-inflated exogenous design.
""" % {'params' : base._model_params_doc,
'extra_params' : _doc_zi_params + base._missing_param_doc}
def __init__(self, endog, exog, exog_infl=None, offset=None,
             inflation='logit', exposure=None, missing='none', **kwargs):
    # endog/exog: count response and main-model design; exog_infl: design
    # for the zero-inflation (mixing probability) model, defaulting to a
    # constant column when not supplied.
    super(GenericZeroInflated, self).__init__(endog, exog, offset=offset,
                                              exposure=exposure,
                                              missing=missing, **kwargs)
    if exog_infl is None:
        # no inflation regressors given: intercept-only inflation model
        self.k_inflate = 1
        self.exog_infl = np.ones((endog.size, self.k_inflate),
                                 dtype=np.float64)
    else:
        self.exog_infl = exog_infl
        self.k_inflate = exog_infl.shape[1]
    if len(exog.shape) == 1:
        self.k_exog = 1
    else:
        self.k_exog = exog.shape[1]
    self.infl = inflation
    # the inflation part is a binary model on a dummy all-zero response;
    # only its link/predict machinery is used, never its own fit
    if inflation == 'logit':
        self.model_infl = Logit(np.zeros(self.exog_infl.shape[0]),
                                self.exog_infl)
        self._hessian_inflate = self._hessian_logit
    elif inflation == 'probit':
        self.model_infl = Probit(np.zeros(self.exog_infl.shape[0]),
                                 self.exog_infl)
        self._hessian_inflate = self._hessian_probit
    else:
        raise ValueError("inflation == %s, which is not handled"
                         % inflation)
    self.inflation = inflation
    self.k_extra = self.k_inflate
    if len(self.exog) != len(self.exog_infl):
        raise ValueError('exog and exog_infl have different number of'
                         'observation. `missing` handling is not supported')
    # expose inflation coefficients with an 'inflate_' name prefix, placed
    # before the main-model parameter names
    infl_names = ['inflate_%s' % i for i in self.model_infl.data.param_names]
    self.exog_names[:] = infl_names + list(self.exog_names)
    self.exog_infl = np.asarray(self.exog_infl, dtype=np.float64)
    self._init_keys.extend(['exog_infl', 'inflation'])
    self._null_drop_keys = ['exog_infl']
def loglike(self, params):
    """
    Loglikelihood of Generic Zero Inflated model.

    Parameters
    ----------
    params : array_like
        The parameters of the model.

    Returns
    -------
    loglike : float
        The log-likelihood at `params`: the sum of the per-observation
        contributions from :meth:`loglikeobs`.

    Notes
    -----
    .. math:: \\ln L=\\sum_{y_{i}=0}\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
        \\sum_{y_{i}>0}(\\ln(1-w_{i})+L_{main\\_model})

    where P - pdf of main model, L - loglike function of main model.
    """
    obs_loglike = self.loglikeobs(params)
    return np.sum(obs_loglike)
def loglikeobs(self, params):
    """
    Loglikelihood for observations of Generic Zero Inflated model.
    Parameters
    ----------
    params : array_like
        The parameters of the model: inflation-model coefficients first
        (k_inflate of them), then the main count-model parameters.
    Returns
    -------
    loglike : ndarray
        The log likelihood for each observation of the model evaluated
        at `params`. See Notes for definition.
    Notes
    --------
    .. math:: \\ln L=\\ln(w_{i}+(1-w_{i})*P_{main\\_model})+
        \\ln(1-w_{i})+L_{main\\_model}
    where P - pdf of main model, L - loglike function of main model.
    for observations :math:`i=1,...,n`
    """
    params_infl = params[:self.k_inflate]
    params_main = params[self.k_inflate:]
    y = self.endog
    # w: per-observation mixing probability of the zero-inflation regime,
    # clipped away from 0/1 to keep the logs finite
    w = self.model_infl.predict(params_infl)
    w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
    llf_main = self.model_main.loglikeobs(params_main)
    zero_idx = np.nonzero(y == 0)[0]
    nonzero_idx = np.nonzero(y)[0]
    llf = np.zeros_like(y, dtype=np.float64)
    # zeros can come from either regime: mixture of w and the count model
    llf[zero_idx] = (np.log(w[zero_idx] +
                            (1 - w[zero_idx]) * np.exp(llf_main[zero_idx])))
    # positive counts can only come from the count model
    llf[nonzero_idx] = np.log(1 - w[nonzero_idx]) + llf_main[nonzero_idx]
    return llf
@Appender(DiscreteModel.fit.__doc__)
def fit(self, start_params=None, method='bfgs', maxiter=35,
        full_output=1, disp=1, callback=None,
        cov_type='nonrobust', cov_kwds=None, use_t=None, **kwargs):
    if start_params is None:
        # NOTE(review): the offset computed here is not passed on;
        # _get_start_params is expected to handle offset/exposure itself.
        offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
        if np.size(offset) == 1 and offset == 0:
            offset = None
        start_params = self._get_start_params()
    if callback is None:
        # work around perfect separation callback #3895
        callback = lambda *x: x
    mlefit = super(GenericZeroInflated, self).fit(start_params=start_params,
                                                  maxiter=maxiter, disp=disp, method=method,
                                                  full_output=full_output, callback=callback,
                                                  **kwargs)
    # wrap the raw MLE results in the zero-inflated results classes
    zipfit = self.result_class(self, mlefit._results)
    result = self.result_class_wrapper(zipfit)
    if cov_kwds is None:
        cov_kwds = {}
    # apply the requested covariance estimator (robust/cluster/...)
    result._get_robustcov_results(cov_type=cov_type,
                                  use_self=True, use_t=use_t, **cov_kwds)
    return result
@Appender(DiscreteModel.fit_regularized.__doc__)
def fit_regularized(self, start_params=None, method='l1',
                    maxiter='defined_by_method', full_output=1, disp=1, callback=None,
                    alpha=0, trim_mode='auto', auto_trim_tol=0.01, size_trim_tol=1e-4,
                    qc_tol=0.03, **kwargs):
    _validate_l1_method(method)
    # broadcast a scalar penalty weight across all parameters
    if np.size(alpha) == 1 and alpha != 0:
        k_params = self.k_exog + self.k_inflate
        alpha = alpha * np.ones(k_params)
    # alpha_p: penalty weights for the main count model only (the
    # inflation coefficients are excluded from the warm-start fit)
    extra = self.k_extra - self.k_inflate
    alpha_p = alpha[:-(self.k_extra - extra)] if (self.k_extra
        and np.size(alpha) > 1) else alpha
    if start_params is None:
        # NOTE(review): offset computed but not forwarded, as in fit().
        offset = getattr(self, "offset", 0) + getattr(self, "exposure", 0)
        if np.size(offset) == 1 and offset == 0:
            offset = None
        # warm start: regularized fit of the main model, then prepend
        # ones for the inflation coefficients
        start_params = self.model_main.fit_regularized(
            start_params=start_params, method=method, maxiter=maxiter,
            full_output=full_output, disp=0, callback=callback,
            alpha=alpha_p, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
            size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs).params
        start_params = np.append(np.ones(self.k_inflate), start_params)
    cntfit = super(CountModel, self).fit_regularized(
        start_params=start_params, method=method, maxiter=maxiter,
        full_output=full_output, disp=disp, callback=callback,
        alpha=alpha, trim_mode=trim_mode, auto_trim_tol=auto_trim_tol,
        size_trim_tol=size_trim_tol, qc_tol=qc_tol, **kwargs)
    discretefit = self.result_class_reg(self, cntfit)
    return self.result_class_reg_wrapper(discretefit)
def score_obs(self, params):
    """
    Generic Zero Inflated model score (gradient) vector of the log-likelihood

    Parameters
    ----------
    params : array_like
        The parameters of the model

    Returns
    -------
    score : ndarray, 1-D
        The score vector of the model, i.e. the first derivative of the
        loglikelihood function, evaluated at `params`
    """
    # Split the joint parameter vector into inflation and main parts.
    params_infl = params[:self.k_inflate]
    params_main = params[self.k_inflate:]

    y = self.endog
    # w: probability of the always-zero regime, clipped away from 0 and 1
    # so the likelihood ratios below stay finite.
    w = self.model_infl.predict(params_infl)
    w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
    score_main = self.model_main.score_obs(params_main)
    llf_main = self.model_main.loglikeobs(params_main)
    llf = self.loglikeobs(params)
    zero_idx = np.nonzero(y == 0)[0]
    nonzero_idx = np.nonzero(y)[0]

    # NOTE(review): `mu` appears unused in this method -- confirm leftover.
    mu = self.model_main.predict(params_main)

    # dldp: derivative w.r.t. count-model params; dldw: w.r.t. inflation params.
    dldp = np.zeros((self.exog.shape[0], self.k_exog), dtype=np.float64)
    dldw = np.zeros_like(self.exog_infl, dtype=np.float64)

    # Zero observations mix both regimes; nonzero observations can only
    # come from the count model, so its score passes through unchanged.
    dldp[zero_idx, :] = (score_main[zero_idx].T *
                         (1 - (w[zero_idx]) / np.exp(llf[zero_idx]))).T
    dldp[nonzero_idx, :] = score_main[nonzero_idx]

    if self.inflation == 'logit':
        dldw[zero_idx, :] = (self.exog_infl[zero_idx].T * w[zero_idx] *
                             (1 - w[zero_idx]) *
                             (1 - np.exp(llf_main[zero_idx])) /
                             np.exp(llf[zero_idx])).T
        dldw[nonzero_idx, :] = -(self.exog_infl[nonzero_idx].T *
                                 w[nonzero_idx]).T
    elif self.inflation == 'probit':
        # No analytic score for probit inflation; fall back to a
        # numerical derivative of the observation log-likelihood.
        return approx_fprime(params, self.loglikeobs)

    # Inflation derivatives first, matching the params layout.
    return np.hstack((dldw, dldp))
def score(self, params):
    """Score (gradient) of the log-likelihood, summed over observations."""
    obs_scores = self.score_obs(params)
    return np.sum(obs_scores, axis=0)
def _hessian_main(self, params):
    # Hook for subclasses; returning None makes ``hessian`` fall back to
    # a numerical approximation.
    pass
def _hessian_logit(self, params):
    # Analytic Hessian blocks for logit inflation: the (inflation x
    # inflation) block plus the (inflation x main) cross block, stacked
    # into a (k_inflate, k_inflate + k_exog) array.  Only the lower
    # triangle of the square block is filled; ``hessian`` mirrors it.
    params_infl = params[:self.k_inflate]
    params_main = params[self.k_inflate:]

    y = self.endog
    w = self.model_infl.predict(params_infl)
    w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
    score_main = self.model_main.score_obs(params_main)
    llf_main = self.model_main.loglikeobs(params_main)
    llf = self.loglikeobs(params)
    zero_idx = np.nonzero(y == 0)[0]
    nonzero_idx = np.nonzero(y)[0]

    hess_arr = np.zeros((self.k_inflate, self.k_exog + self.k_inflate))

    # pmf: per-observation likelihood of the mixture model.
    pmf = np.exp(llf)

    # d2l/dw2
    for i in range(self.k_inflate):
        for j in range(i, -1, -1):
            hess_arr[i, j] = ((
                self.exog_infl[zero_idx, i] * self.exog_infl[zero_idx, j] *
                (w[zero_idx] * (1 - w[zero_idx]) * ((1 -
                np.exp(llf_main[zero_idx])) * (1 - 2 * w[zero_idx]) *
                np.exp(llf[zero_idx]) - (w[zero_idx] - w[zero_idx]**2) *
                (1 - np.exp(llf_main[zero_idx]))**2) /
                pmf[zero_idx]**2)).sum() -
                (self.exog_infl[nonzero_idx, i] * self.exog_infl[nonzero_idx, j] *
                w[nonzero_idx] * (1 - w[nonzero_idx])).sum())

    # d2l/dpdw: cross derivatives, nonzero only for zero observations.
    for i in range(self.k_inflate):
        for j in range(self.k_exog):
            hess_arr[i, j + self.k_inflate] = -(score_main[zero_idx, j] *
                w[zero_idx] * (1 - w[zero_idx]) *
                self.exog_infl[zero_idx, i] / pmf[zero_idx]).sum()

    return hess_arr
def _hessian_probit(self, params):
    # No analytic Hessian for probit inflation; ``hessian`` falls back to
    # a numerical approximation when this returns None.
    pass
def hessian(self, params):
    """
    Generic Zero Inflated model Hessian matrix of the loglikelihood

    Parameters
    ----------
    params : array_like
        The parameters of the model

    Returns
    -------
    hess : ndarray, (k_vars, k_vars)
        The Hessian, second derivative of loglikelihood function,
        evaluated at `params`

    Notes
    -----
    Falls back to a numerical Hessian when either analytic block is
    unavailable (e.g. for probit inflation).
    """
    hess_arr_main = self._hessian_main(params)
    hess_arr_infl = self._hessian_inflate(params)

    if hess_arr_main is None or hess_arr_infl is None:
        return approx_hess(params, self.loglike)

    dim = self.k_exog + self.k_inflate

    hess_arr = np.zeros((dim, dim))

    # Top rows: inflation block (including the cross block); bottom-right:
    # count-model block.  Both are lower-triangular at this point.
    hess_arr[:self.k_inflate, :] = hess_arr_infl
    hess_arr[self.k_inflate:, self.k_inflate:] = hess_arr_main

    # Symmetrize: copy the lower triangle into the upper triangle.
    tri_idx = np.triu_indices(self.k_exog + self.k_inflate, k=1)
    hess_arr[tri_idx] = hess_arr.T[tri_idx]

    return hess_arr
def predict(self, params, exog=None, exog_infl=None, exposure=None,
            offset=None, which='mean'):
    """
    Predict response variable of a count model given exogenous variables.

    Parameters
    ----------
    params : array_like
        The parameters of the model
    exog : ndarray, optional
        A reference to the exogenous design.
        If not assigned, will be used exog from fitting.
    exog_infl : ndarray, optional
        A reference to the zero-inflated exogenous design.
        If not assigned, will be used exog from fitting.
    offset : ndarray, optional
        Offset is added to the linear prediction with coefficient equal to 1.
    exposure : ndarray, optional
        Log(exposure) is added to the linear prediction with coefficient
        equal to 1. If exposure is specified, then it will be logged by the method.
        The user does not need to log it first.
    which : str, optional
        Define values that will be predicted.
        'mean', 'mean-main', 'linear', 'mean-nonzero', 'prob-zero, 'prob', 'prob-main'
        Default is 'mean'.

    Notes
    -----
    """
    if exog is None:
        exog = self.exog

    if exog_infl is None:
        exog_infl = self.exog_infl

    if exposure is None:
        exposure = getattr(self, 'exposure', 0)
    else:
        exposure = np.log(exposure)

    if offset is None:
        offset = 0

    params_infl = params[:self.k_inflate]
    params_main = params[self.k_inflate:]

    # prob_main: probability of NOT being in the always-zero regime.
    prob_main = 1 - self.model_infl.predict(params_infl, exog_infl)

    lin_pred = np.dot(exog, params_main[:self.exog.shape[1]]) + exposure + offset

    # Refactor: This is pretty hacky,
    # there should be an appropriate predict method in model_main
    # this is just prob(y=0 | model_main)
    # Temporarily swap the main model's data so loglikeobs evaluates
    # P(y=0) at the requested exog/offset/exposure, then restore the
    # original attributes below.  ['no'] is a sentinel for "attribute
    # was absent" (chosen so len()/indexing work on arrays too).
    tmp_exog = self.model_main.exog
    tmp_endog = self.model_main.endog
    tmp_offset = getattr(self.model_main, 'offset', ['no'])
    tmp_exposure = getattr(self.model_main, 'exposure', ['no'])
    self.model_main.exog = exog
    self.model_main.endog = np.zeros((exog.shape[0]))
    self.model_main.offset = offset
    self.model_main.exposure = exposure
    llf = self.model_main.loglikeobs(params_main)
    self.model_main.exog = tmp_exog
    self.model_main.endog = tmp_endog
    # tmp_offset might be an array with elementwise equality testing
    if len(tmp_offset) == 1 and tmp_offset[0] == 'no':
        del self.model_main.offset
    else:
        self.model_main.offset = tmp_offset
    if len(tmp_exposure) == 1 and tmp_exposure[0] == 'no':
        del self.model_main.exposure
    else:
        self.model_main.exposure = tmp_exposure
    # end hack

    # P(y=0) = P(zero regime) + P(count regime) * P(count model yields 0)
    prob_zero = (1 - prob_main) + prob_main * np.exp(llf)

    if which == 'mean':
        return prob_main * np.exp(lin_pred)
    elif which == 'mean-main':
        return np.exp(lin_pred)
    elif which == 'linear':
        return lin_pred
    elif which == 'mean-nonzero':
        return prob_main * np.exp(lin_pred) / (1 - prob_zero)
    elif which == 'prob-zero':
        return prob_zero
    elif which == 'prob-main':
        return prob_main
    elif which == 'prob':
        return self._predict_prob(params, exog, exog_infl, exposure, offset)
    else:
        raise ValueError('which = %s is not available' % which)
class ZeroInflatedPoisson(GenericZeroInflated):
    __doc__ = """
    Poisson Zero Inflated Model

    %(params)s
    %(extra_params)s

    Attributes
    ----------
    endog : ndarray
        A reference to the endogenous response variable
    exog : ndarray
        A reference to the exogenous design.
    exog_infl : ndarray
        A reference to the zero-inflated exogenous design.
    """ % {'params' : base._model_params_doc,
           'extra_params' : _doc_zi_params + base._missing_param_doc}

    def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
                 inflation='logit', missing='none', **kwargs):
        super(ZeroInflatedPoisson, self).__init__(endog, exog, offset=offset,
                                                  inflation=inflation,
                                                  exog_infl=exog_infl,
                                                  exposure=exposure,
                                                  missing=missing, **kwargs)
        # The count (main) submodel shares offset/exposure with the wrapper.
        self.model_main = Poisson(self.endog, self.exog, offset=offset,
                                  exposure=exposure)
        self.distribution = zipoisson
        self.result_class = ZeroInflatedPoissonResults
        self.result_class_wrapper = ZeroInflatedPoissonResultsWrapper
        self.result_class_reg = L1ZeroInflatedPoissonResults
        self.result_class_reg_wrapper = L1ZeroInflatedPoissonResultsWrapper

    def _hessian_main(self, params):
        """Analytic (k_exog, k_exog) Hessian block of the count parameters.

        Only the lower triangle is filled; ``hessian`` symmetrizes it.
        """
        params_infl = params[:self.k_inflate]
        params_main = params[self.k_inflate:]

        y = self.endog
        w = self.model_infl.predict(params_infl)
        w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
        # Fixed: removed an unused (and expensive) ``self.score(params)``
        # call that contributed nothing to the result.
        zero_idx = np.nonzero(y == 0)[0]
        nonzero_idx = np.nonzero(y)[0]

        mu = self.model_main.predict(params_main)

        hess_arr = np.zeros((self.k_exog, self.k_exog))

        coeff = (1 + w[zero_idx] * (np.exp(mu[zero_idx]) - 1))

        # d2l/dp2
        for i in range(self.k_exog):
            for j in range(i, -1, -1):
                hess_arr[i, j] = ((
                    self.exog[zero_idx, i] * self.exog[zero_idx, j] *
                    mu[zero_idx] * (w[zero_idx] - 1) * (1 / coeff -
                    w[zero_idx] * mu[zero_idx] * np.exp(mu[zero_idx]) /
                    coeff**2)).sum() - (mu[nonzero_idx] * self.exog[nonzero_idx, i] *
                    self.exog[nonzero_idx, j]).sum())

        return hess_arr

    def _predict_prob(self, params, exog, exog_infl, exposure, offset):
        """PMF of counts 0..max(endog) under the zero-inflated Poisson."""
        params_infl = params[:self.k_inflate]
        params_main = params[self.k_inflate:]

        counts = np.atleast_2d(np.arange(0, np.max(self.endog)+1))

        if len(exog_infl.shape) < 2:
            transform = True
            w = np.atleast_2d(
                self.model_infl.predict(params_infl, exog_infl))[:, None]
        else:
            transform = False
            w = self.model_infl.predict(params_infl, exog_infl)[:, None]

        w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
        # Fixed: forward ``exposure`` to the count submodel.  It was
        # accepted but silently ignored here, although the sibling classes
        # (ZeroInflatedGeneralizedPoisson, ZeroInflatedNegativeBinomialP)
        # pass it through.
        mu = self.model_main.predict(params_main, exog,
                                     exposure=exposure, offset=offset)[:, None]
        result = self.distribution.pmf(counts, mu, w)
        return result[0] if transform else result

    def _get_start_params(self):
        """Start values: Nelder-Mead Poisson fit, inflation params at 0.1."""
        start_params = self.model_main.fit(disp=0, method="nm").params
        start_params = np.append(np.ones(self.k_inflate) * 0.1, start_params)
        return start_params
class ZeroInflatedGeneralizedPoisson(GenericZeroInflated):
    __doc__ = """
    Zero Inflated Generalized Poisson Model

    %(params)s
    %(extra_params)s

    Attributes
    ----------
    endog : ndarray
        A reference to the endogenous response variable
    exog : ndarray
        A reference to the exogenous design.
    exog_infl : ndarray
        A reference to the zero-inflated exogenous design.
    p : scalar
        P denotes parametrizations for ZIGP regression.
    """ % {'params' : base._model_params_doc,
           'extra_params' : _doc_zi_params +
           """p : float
        dispersion power parameter for the GeneralizedPoisson model.  p=1 for
        ZIGP-1 and p=2 for ZIGP-2. Default is p=2
    """ + base._missing_param_doc}

    def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
                 inflation='logit', p=2, missing='none', **kwargs):
        super(ZeroInflatedGeneralizedPoisson, self).__init__(endog, exog,
                                                             offset=offset,
                                                             inflation=inflation,
                                                             exog_infl=exog_infl,
                                                             exposure=exposure,
                                                             missing=missing, **kwargs)
        self.model_main = GeneralizedPoisson(self.endog, self.exog,
            offset=offset, exposure=exposure, p=p)
        self.distribution = zigenpoisson
        # The dispersion parameter alpha is an extra (non-design) parameter.
        self.k_exog += 1
        self.k_extra += 1
        self.exog_names.append("alpha")
        self.result_class = ZeroInflatedGeneralizedPoissonResults
        self.result_class_wrapper = ZeroInflatedGeneralizedPoissonResultsWrapper
        self.result_class_reg = L1ZeroInflatedGeneralizedPoissonResults
        self.result_class_reg_wrapper = L1ZeroInflatedGeneralizedPoissonResultsWrapper

    def _get_init_kwds(self):
        kwds = super(ZeroInflatedGeneralizedPoisson, self)._get_init_kwds()
        # NOTE(review): GeneralizedPoisson appears to store p - 1 in
        # `parameterization`, hence the + 1 to recover the user-facing p --
        # confirm against GeneralizedPoisson.__init__.
        kwds['p'] = self.model_main.parameterization + 1
        return kwds

    def _predict_prob(self, params, exog, exog_infl, exposure, offset):
        # PMF of counts 0..max(endog) under the zero-inflated model.
        params_infl = params[:self.k_inflate]
        params_main = params[self.k_inflate:]

        p = self.model_main.parameterization
        counts = np.atleast_2d(np.arange(0, np.max(self.endog)+1))

        if len(exog_infl.shape) < 2:
            transform = True
            w = np.atleast_2d(
                self.model_infl.predict(params_infl, exog_infl))[:, None]
        else:
            transform = False
            w = self.model_infl.predict(params_infl, exog_infl)[:, None]

        # Guard only against w == 1 here (siblings clip both ends).
        w[w == 1.] = np.nextafter(1, 0)
        mu = self.model_main.predict(params_main, exog,
            exposure=exposure, offset=offset)[:, None]

        # params_main[-1] is the dispersion parameter alpha.
        result = self.distribution.pmf(counts, mu, params_main[-1], p, w)
        return result[0] if transform else result

    def _get_start_params(self):
        # Start values: fit a plain ZIP model (warnings suppressed), then
        # append a small positive dispersion start value.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=ConvergenceWarning)
            start_params = ZeroInflatedPoisson(self.endog, self.exog,
                exog_infl=self.exog_infl).fit(disp=0).params
        start_params = np.append(start_params, 0.1)
        return start_params
class ZeroInflatedNegativeBinomialP(GenericZeroInflated):
    __doc__ = """
    Zero Inflated Generalized Negative Binomial Model

    %(params)s
    %(extra_params)s

    Attributes
    ----------
    endog : ndarray
        A reference to the endogenous response variable
    exog : ndarray
        A reference to the exogenous design.
    exog_infl : ndarray
        A reference to the zero-inflated exogenous design.
    p : scalar
        P denotes parametrizations for ZINB regression. p=1 for ZINB-1 and
        p=2 for ZINB-2. Default is p=2
    """ % {'params' : base._model_params_doc,
           'extra_params' : _doc_zi_params +
           """p : float
        dispersion power parameter for the NegativeBinomialP model. p=1 for
        ZINB-1 and p=2 for ZINM-2. Default is p=2
    """ + base._missing_param_doc}

    def __init__(self, endog, exog, exog_infl=None, offset=None, exposure=None,
                 inflation='logit', p=2, missing='none', **kwargs):
        super(ZeroInflatedNegativeBinomialP, self).__init__(endog, exog,
                                                            offset=offset,
                                                            inflation=inflation,
                                                            exog_infl=exog_infl,
                                                            exposure=exposure,
                                                            missing=missing, **kwargs)
        self.model_main = NegativeBinomialP(self.endog, self.exog,
            offset=offset, exposure=exposure, p=p)
        self.distribution = zinegbin
        # The dispersion parameter alpha is an extra (non-design) parameter.
        self.k_exog += 1
        self.k_extra += 1
        self.exog_names.append("alpha")
        self.result_class = ZeroInflatedNegativeBinomialResults
        self.result_class_wrapper = ZeroInflatedNegativeBinomialResultsWrapper
        self.result_class_reg = L1ZeroInflatedNegativeBinomialResults
        self.result_class_reg_wrapper = L1ZeroInflatedNegativeBinomialResultsWrapper

    def _get_init_kwds(self):
        kwds = super(ZeroInflatedNegativeBinomialP, self)._get_init_kwds()
        # NOTE(review): unlike the ZIGP sibling, no +1 adjustment here --
        # NegativeBinomialP presumably stores p directly; confirm.
        kwds['p'] = self.model_main.parameterization
        return kwds

    def _predict_prob(self, params, exog, exog_infl, exposure, offset):
        # PMF of counts 0..max(endog) under the zero-inflated model.
        params_infl = params[:self.k_inflate]
        params_main = params[self.k_inflate:]

        p = self.model_main.parameterization
        counts = np.arange(0, np.max(self.endog)+1)

        if len(exog_infl.shape) < 2:
            transform = True
            w = np.atleast_2d(
                self.model_infl.predict(params_infl, exog_infl))[:, None]
        else:
            transform = False
            w = self.model_infl.predict(params_infl, exog_infl)[:, None]

        w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
        mu = self.model_main.predict(params_main, exog,
            exposure=exposure, offset=offset)[:, None]

        # params_main[-1] is the dispersion parameter alpha.
        result = self.distribution.pmf(counts, mu, params_main[-1], p, w)
        return result[0] if transform else result

    def _get_start_params(self):
        # Start values: Nelder-Mead fit of the count submodel (warnings
        # suppressed), inflation parameters start at zero.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=ConvergenceWarning)
            start_params = self.model_main.fit(disp=0, method='nm').params
        start_params = np.append(np.zeros(self.k_inflate), start_params)
        return start_params
class ZeroInflatedPoissonResults(CountResults):
    __doc__ = _discrete_results_docs % {
        "one_line_description": "A results class for Zero Inflated Poisson",
        "extra_attr": ""}

    @cache_readonly
    def _dispersion_factor(self):
        """Variance inflation factor of the ZIP mean: 1 + w * exp(mu)."""
        lin_pred = self.predict(which='linear')
        mean_main = np.exp(lin_pred)
        # Implied probability of the always-zero regime.
        prob_infl = 1 - self.predict() / mean_main
        return 1 + prob_infl * mean_main

    def get_margeff(self, at='overall', method='dydx', atexog=None,
                    dummy=False, count=False):
        """Get marginal effects of the fitted model.

        Not yet implemented for Zero Inflated Models
        """
        raise NotImplementedError("not yet implemented for zero inflation")
# Results/wrapper boilerplate pairing each results class with an
# lm.RegressionResultsWrapper subclass.
class L1ZeroInflatedPoissonResults(L1CountResults, ZeroInflatedPoissonResults):
    pass

class ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(ZeroInflatedPoissonResultsWrapper,
                      ZeroInflatedPoissonResults)

class L1ZeroInflatedPoissonResultsWrapper(lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(L1ZeroInflatedPoissonResultsWrapper,
                      L1ZeroInflatedPoissonResults)
class ZeroInflatedGeneralizedPoissonResults(CountResults):
    __doc__ = _discrete_results_docs % {
        "one_line_description": "A results class for Zero Inflated Generalized Poisson",
        "extra_attr": ""}

    @cache_readonly
    def _dispersion_factor(self):
        # Variance inflation factor of the ZIGP mean:
        # (1 + alpha * mu**p)**2 + w * mu
        p = self.model.model_main.parameterization
        # Last main-model parameter is the dispersion alpha.
        alpha = self.params[self.model.k_inflate:][-1]
        mu = np.exp(self.predict(which='linear'))
        # Implied probability of the always-zero regime.
        w = 1 - self.predict() / mu
        return ((1 + alpha * mu**p)**2 + w * mu)

    def get_margeff(self, at='overall', method='dydx', atexog=None,
                    dummy=False, count=False):
        """Get marginal effects of the fitted model.

        Not yet implemented for Zero Inflated Models
        """
        raise NotImplementedError("not yet implemented for zero inflation")
# Results/wrapper boilerplate for the generalized Poisson variant.
class L1ZeroInflatedGeneralizedPoissonResults(L1CountResults,
        ZeroInflatedGeneralizedPoissonResults):
    pass

class ZeroInflatedGeneralizedPoissonResultsWrapper(
        lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(ZeroInflatedGeneralizedPoissonResultsWrapper,
                      ZeroInflatedGeneralizedPoissonResults)

class L1ZeroInflatedGeneralizedPoissonResultsWrapper(
        lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(L1ZeroInflatedGeneralizedPoissonResultsWrapper,
                      L1ZeroInflatedGeneralizedPoissonResults)
class ZeroInflatedNegativeBinomialResults(CountResults):
    __doc__ = _discrete_results_docs % {
        "one_line_description": "A results class for Zero Inflated Generalized Negative Binomial",
        "extra_attr": ""}

    @cache_readonly
    def _dispersion_factor(self):
        # Variance inflation factor of the ZINB mean:
        # 1 + alpha * mu**(p-1) + w * mu
        p = self.model.model_main.parameterization
        # Last main-model parameter is the dispersion alpha.
        alpha = self.params[self.model.k_inflate:][-1]
        mu = np.exp(self.predict(which='linear'))
        # Implied probability of the always-zero regime.
        w = 1 - self.predict() / mu
        return (1 + alpha * mu**(p-1) + w * mu)

    def get_margeff(self, at='overall', method='dydx', atexog=None,
                    dummy=False, count=False):
        """Get marginal effects of the fitted model.

        Not yet implemented for Zero Inflated Models
        """
        raise NotImplementedError("not yet implemented for zero inflation")
# Results/wrapper boilerplate for the negative binomial variant.
class L1ZeroInflatedNegativeBinomialResults(L1CountResults,
        ZeroInflatedNegativeBinomialResults):
    pass

class ZeroInflatedNegativeBinomialResultsWrapper(
        lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(ZeroInflatedNegativeBinomialResultsWrapper,
                      ZeroInflatedNegativeBinomialResults)

class L1ZeroInflatedNegativeBinomialResultsWrapper(
        lm.RegressionResultsWrapper):
    pass
wrap.populate_wrapper(L1ZeroInflatedNegativeBinomialResultsWrapper,
                      L1ZeroInflatedNegativeBinomialResults)
| {
"repo_name": "jseabold/statsmodels",
"path": "statsmodels/discrete/count_model.py",
"copies": "4",
"size": "31310",
"license": "bsd-3-clause",
"hash": -7951373604745448000,
"line_mean": 37.4171779141,
"line_max": 98,
"alpha_frac": 0.5762056851,
"autogenerated": false,
"ratio": 3.717202896830108,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6293408581930108,
"avg_score": null,
"num_lines": null
} |
__all__ = ["zeroSR1"]
import numpy as np
import scipy.linalg
import datetime
def zeroSR1(fcnGrad, h, prox, options):
    """Solve min_x f(x) + h(x) with the zero-memory SR1 proximal quasi-Newton method.

    Parameters
    ----------
    fcnGrad : callable
        Returns the tuple (f(x), grad_f(x)).
    h : callable
        The nonsmooth term; used here only to report objective values.
    prox : callable
        prox(x0, d, v[, sigma]) computes the scaled proximal operator of h,
        y = argmin_x h(x) + 1/2 ||x - x0||^2_B with B = inv(diag(d) + sigma*v*v').
    options : dict
        'tol' (relative objective tolerance), 'grad_tol' (step-norm
        tolerance), 'nmax' (max iterations), 'L' (Lipschitz estimate),
        'verbose' (print period, 0 = silent), and either 'N' (signal
        length, start from zeros) or 'x0' (explicit starting point).

    Returns
    -------
    xk : ndarray
        Final iterate, shape (N, 1).
    nIteration : int
        Index of the last iteration performed.
    stepSizes : ndarray, shape (nmax, 2)
        Column 0: diagonal step size per iteration; column 1: squared norm
        of the rank-1 correction vector (0 if none was applied).

    Notes
    -----
    Translated from zeroSR1.m by Stephen Becker and Jalal Fadili.
    Fixes relative to the previous translation: the 'x0' branch referenced
    an undefined name `x0`; supplying both 'N' and 'x0' wrongly raised an
    error; a missing size specification now raises ValueError instead of
    calling sys.exit (sys was never imported); the stagnation-based early
    termination was only evaluated when verbose printing was enabled, so
    quiet runs always performed all nmax iterations; print statements now
    use the function form (valid on Python 2.6+ and Python 3).
    """
    start = datetime.datetime.now()

    # Starting point: prefer an explicit 'x0', else zeros of length 'N'.
    if 'x0' in options:
        xk = options['x0'].copy()
        N = len(xk)
    elif 'N' in options:
        N = options['N']
        xk = np.zeros((N, 1))
    else:
        raise ValueError("I have no way to know the size of the signal to "
                         "retrieve. Please, set options['N'] or options['x0']")

    maxStag = 10          # stop after this many stagnant iterations
    SR1 = True            # apply the zero-memory rank-1 correction
    BB = True             # use a Barzilai-Borwein diagonal scaling
    nMax = options['nmax']
    L = options['L']
    Sigma = 1
    BB_type = 2

    if SR1 and BB_type == 1:
        print("zeroSR1:experimental - With zero-memory SR1, BB_type=1 is an untested feature")
        Sigma = -1

    SR1_diagWeight = 0.8 * (BB_type == 2) + 1.0 * (BB_type == 1)

    if SR1 and BB_type == 2 and SR1_diagWeight > 1:
        Sigma = -1

    skipBB = False
    stag = 0
    fxOld = np.inf
    t = 1.0 / L
    stepSizes = np.zeros((nMax, 1 + SR1))

    # Initialization
    xk_old = xk
    f, gradient = fcnGrad(xk)
    gradientOld = gradient.copy()

    # Begin algorithm
    for nIteration in range(nMax):
        # "sk"/"yk" are the quasi-Newton difference vectors (also used by
        # the BB step, which is itself a quasi-Newton method).
        sk = xk - xk_old
        yk = gradient - gradientOld

        if nIteration > 0 and np.linalg.norm(yk) < 1e-13:
            print("zeroSR1:zeroChangeInGradient. Gradient isn't changing, try changing L")
            yk = np.asarray([])
            skipBB = True

        # --- initial (diagonal) step size ---
        if BB and nIteration > 0 and not skipBB:
            if BB_type == 1:
                t = np.linalg.norm(sk)**2 / (sk.T.dot(yk))  # eq (1.6) in Dai/Fletcher (longer)
            else:
                t = sk.T.dot(yk) / np.linalg.norm(yk)**2    # eq (1.7) in Dai/Fletcher (shorter)

            if t < 1e-14:
                print("Curvature condition violated!")
                stag = np.inf

            if SR1:
                # Shrink the BB step; a full BB step satisfies the secant
                # equation exactly and leaves no room for a rank-1 term.
                t = SR1_diagWeight * t

            H0 = lambda x: t * x
            diagH = t * np.ones((N, 1))
        else:
            t = 1.0 / L
            H0 = lambda x: t * x
            diagH = t * np.ones((N, 1))
        skipBB = False

        stepSizes[nIteration, 0] = t

        # ---------------------------------------------------------------------
        # -- Quasi-Newton -- Requires: H0, and builds H = H0 + Sigma*vk*vk'
        # ---------------------------------------------------------------------
        if SR1 and nIteration > 0 and yk.size != 0:
            gs = yk.T.dot(sk)
            if gs < 0:
                print("Serious curvature condition problem!")
                stag = np.inf
            H0 = lambda x: diagH * x
            vk = sk - H0(yk)
            vkyk = vk.T.dot(yk)
            Sigma_local = np.sign(vkyk[0])
            if (Sigma_local * vkyk) <= 0:
                print("Warning: violated curvature conditions")
                vk = []
                H = H0
                stepSizes[nIteration, 1] = 0
            else:
                vk /= np.sqrt(Sigma_local * vkyk)
                H = lambda x: H0(x) + Sigma_local * vk.dot(vk.T.dot(x))
                stepSizes[nIteration, 1] = vk.T.dot(vk)
        else:
            Sigma_local = Sigma
            H = H0
            vk = []

        # ---------------------------------
        # Make the proximal update
        # ---------------------------------
        p = H(-gradient)  # scaled descent direction; step size folded into H
        xk_old = xk.copy()
        if Sigma_local != 1:
            xk = prox(xk_old + p, diagH, vk, Sigma_local)
        else:
            xk = prox(xk_old + p, diagH, vk)
        norm_grad = np.linalg.norm(xk - xk_old)
        if np.any(np.isnan(xk)) or np.linalg.norm(xk) > 1.e10:
            stag = np.inf
            xk = xk_old
            print("Prox algorithm failed, probably due to numerical cancellations")

        # Update function and gradient
        gradientOld = gradient.copy()
        f_xk, gradient = fcnGrad(xk)
        fx = f_xk + h(xk)
        df = np.abs(fx - fxOld) / np.abs(fxOld)
        fxOld = fx.copy()

        # Count stagnant iterations (small relative progress / tiny step).
        if (df < options['tol']) or (t < 1e-10) or np.any(np.isnan(fx)) \
                or (norm_grad < options['grad_tol']):
            stag += 1

        # Periodic progress printing.
        if options['verbose'] != 0 and \
                (((nIteration + 1) % options['verbose'] == 0) or (stag > maxStag)):
            try:
                print("Iter: {0:5d}, f: {1:.3e}, df: {2:.2e}, ||grad||: {3:.2e}, step: {4:.2e}".format(
                    nIteration + 1, fx, df, norm_grad, t[0, 0]))
            except Exception:
                print("Iter: {0:5d}".format(nIteration + 1))

        # Early termination, independent of verbosity (bug fix).
        if stag > maxStag:
            delta = datetime.datetime.now() - start
            print("Quitting. Reached tolerance. Ellapsed time: {0:2f} s".format(delta.total_seconds()))
            break

    return xk, nIteration, stepSizes
"repo_name": "aasensio/pyzeroSR1",
"path": "pyzeroSR1/zeroSR1.py",
"copies": "1",
"size": "6583",
"license": "mit",
"hash": 7341081257128205000,
"line_mean": 29.7663551402,
"line_max": 132,
"alpha_frac": 0.6041318548,
"autogenerated": false,
"ratio": 2.6935351882160394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.804814828910998,
"avg_score": 0.149903750781212,
"num_lines": 214
} |
# Almost complete solution to Advent of Code 2017 Problem 9.
from mini_parse import exact, Grammar, string_grammar, one_char, none_char, always, foldl
parser = Grammar(string_grammar)

# A stream is either garbage or '{' <comma-separated children> '}'.
parser.main = parser.garbage | exact('{').and_second(
    parser.main.join(parser.comma).optional([])).and_first(exact('}'))
parser.comma = exact(',')
# Garbage: '<' ... '>'; inside it, '!' cancels the next character.
parser.garbage = exact('<').and_second(parser.char.times().concat_all()).and_first(exact('>'))
parser.char = none_char('!>') | ((exact('!') + one_char()) >> always(''))
# Now let's do parts 1 and 2 with a parser for fun. We can reuse stuff from
# our old parser, of course.

# Part 1 is tricky. Ultimately it's probably just simpler to recurse
# through a list, but if you don't want to do that, you can go into
# a mess of lambdas and >>.
# Each parse result is a function depth -> score: garbage scores 0, a
# group scores its own depth plus its children evaluated one level deeper.
parser_1 = Grammar(string_grammar)
parser_1.main = (parser.garbage >> always(always(0))) | (exact('{').and_second(
    parser_1.main.join(parser.comma).optional([])).and_first(exact('}'))) >> \
    (lambda l: lambda d: d + sum(i(d + 1) for i in l))
# With Part 2, on the other hand, it makes sense to use a parser.
# Total garbage length: garbage maps to its length, a group to the sum of
# its children.
parser_2 = Grammar(string_grammar)
parser_2.main = parser_2.garbage | (exact('{').and_second(
    parser_2.main.join(parser.comma).optional([])).and_first(exact('}')) >> sum)
parser_2.garbage = parser.garbage >> len
# Alternatively...
def part_1(p, x):
    """Score of a parsed group `p` at depth `x`.

    A group is worth its depth, and each nested group is one level deeper;
    garbage strings contribute nothing.
    """
    nested_scores = (part_1(child, x + 1) for child in p if type(child) == list)
    return x + sum(nested_scores)
def part_2(x):
    """Total length of all garbage (string leaves) in a parsed structure.

    Fixed: the original body referenced the undefined names ``p`` and
    ``g_score`` instead of the parameter ``x`` and a recursive call, so it
    always raised NameError.
    """
    if type(x) == str:
        return len(x)
    return sum(part_2(item) for item in x)
# Or alternatively, if you are against recursion...
# Tokenize into garbage lengths and the characters '{', '}', ',' and fold
# over the stream with state (current_depth, total_score, garbage_length):
# an int adds to the garbage total; '{' scores the depth at entry and
# descends, '}' ascends, ',' is neutral.  The result drops the depth and
# yields (score, garbage_length).
parser_3 = Grammar(string_grammar)
depth = {'{': 1, '}': -1, ',': 0}
update = {'{': True, '}': False, ',': False}
fold_f = lambda c, new: (c[0], c[1], c[2] + new) if type(new) == int else \
    (c[0] + depth[new], c[1] + c[0] * update[new], c[2])
parser_3.main = \
    ((parser.garbage >> len) | exact('{') | exact('}') | exact(',')).times() >> \
    (lambda x: foldl(fold_f, (1, 0, 0), x)[1:])
| {
"repo_name": "dan-simon/mini-parse",
"path": "stream_processing.py",
"copies": "1",
"size": "1951",
"license": "mit",
"hash": -3091177606665218600,
"line_mean": 32.6379310345,
"line_max": 94,
"alpha_frac": 0.6253203485,
"autogenerated": false,
"ratio": 2.8607038123167157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3986024160816716,
"avg_score": null,
"num_lines": null
} |
#Almost equilateral triangles
#Problem 94
#It is easily proved that no equilateral triangle exists with integral length
#sides and integral area. However, the almost equilateral triangle 5-5-6 has an
# area of 12 square units.
#
#We shall define an almost equilateral triangle to be a triangle for which two
#sides are equal and the third differs by no more than one unit.
#
#Find the sum of the perimeters of all almost equilateral triangles with integral
#side lengths and area and whose perimeters do not exceed one billion (1,000,000,000).
#x = equal sides, y = different side
import math
def isPerfectSquare(n):
    """Return True if n is a perfect square.

    Fixed: uses math.isqrt (exact integer square root, Python 3.8+)
    instead of int(math.sqrt(n)); float sqrt loses precision above 2**53,
    and euler94 feeds in squared areas far beyond that.  Negative inputs
    now return False instead of raising ValueError.
    """
    if n < 0:
        return False
    r = math.isqrt(n)
    return r * r == n
def isAlmostIsosceles(x, y):
    """Return True if the isosceles triangle with sides (x, x, y) has integral area.

    By Heron's formula, 16*A^2 = y^2 * (4*x^2 - y^2), so the area A is an
    integer iff that quantity is divisible by 16 and the quotient is a
    perfect square.

    Fixed: the square test now uses math.isqrt for exact integer
    arithmetic; the previous float-sqrt route (via isPerfectSquare) is
    inexact for the ~1e43 values produced when euler94 runs with side
    lengths near 1e11.  Degenerate "triangles" with y >= 2*x (no positive
    area) now return False instead of True/ValueError.
    """
    #a = math.sqrt(y*y * (x*x - y*y/4) / 4)
    #a2 = y*y * (x*x - y*y/4) / 4
    #a2times4 = y*y * (x*x - y*y/4)
    a2times16 = y * y * (4 * x * x - y * y)
    if a2times16 <= 0:
        # y >= 2x: not a valid triangle with positive area.
        return False
    if a2times16 % 16 != 0:
        return False
    a2 = a2times16 // 16
    r = math.isqrt(a2)
    return r * r == a2
def euler94(n):
    # Sum of perimeters of almost-equilateral triangles (x, x, x-1) and
    # (x, x, x+1) with integral area, for x up to n.
    # NOTE(review): `sum` shadows the builtin of the same name.
    sum = 0
    i = 5
    while i <= n:
        if isAlmostIsosceles(i, i - 1):
            print (i, i, i-1, sum)
            p = 3 * i - 1
            sum += p
            # Jump ahead after a hit: consecutive solution side lengths
            # appear to grow by a factor approaching 2 + sqrt(3)
            # (~3.7320508); the multiplier is refined as i grows so the
            # jump undershoots and the trailing i += 1 catches the next
            # solution.  TODO confirm against the Pell-equation recurrence.
            if i <= 100:
                i *= 3
            elif i <= 10000:
                i = int(i * 3.7)
            elif i <= 1000000:
                i = int(i * 3.732)
            else:
                i = int(i * 3.73205)
        if isAlmostIsosceles(i, i + 1):
            print (i, i, i+1, sum)
            p = 3 * i + 1
            sum += p
            # Same jump heuristic as above.
            if i <= 100:
                i *= 3
            elif i <= 10000:
                i = int(i * 3.7)
            elif i <= 1000000:
                i = int(i * 3.732)
            else:
                i = int(i * 3.73205)
        i += 1
    return sum
#print(isAlmostIsosceles(5,6))
#print(euler94(333333333))
# NOTE(review): the problem statement caps perimeters at 1,000,000,000,
# but this runs with a far larger side-length bound (1e11) -- confirm
# which limit is intended.
print(euler94(100000000000))
| {
"repo_name": "feliposz/project-euler-solutions",
"path": "python/euler94.py",
"copies": "1",
"size": "1836",
"license": "mit",
"hash": 2353011293944688000,
"line_mean": 26.8181818182,
"line_max": 86,
"alpha_frac": 0.5217864924,
"autogenerated": false,
"ratio": 3.06,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40817864923999997,
"avg_score": null,
"num_lines": null
} |
"""A local settings template.
Be careful changing this file as it will affect all development users.
"""
import fnmatch
# * imports should really never be used. Given this is passing settings around,
# this particular setup is getting a special pass.
from default import *
# local settings
# Development-only defaults; do not use in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

# Set this to wherever you want to connect to. It is currently setup to
# run against sqlite.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # For linux/mac hosts.
        'NAME': '/tmp/dev.db',
        # If you are working in windows, setup with a writable directory path.
        # 'NAME': 'C:\TEMP\dev.db',
    }
}

if DEBUG:
    # set INTERNAL_IPS to entire local network
class CheckLocalNetwork(list):
    """A list of IP patterns whose membership test does wildcard matching."""

    def __contains__(self, key):
        # fnmatch provides shell-style wildcards, e.g. '192.168.*.*'.
        return any(fnmatch.fnmatch(key, pattern) for pattern in self)
# Wildcard membership lets debug features work across the whole LAN.
INTERNAL_IPS = CheckLocalNetwork(['127.0.0.1', '192.168.*.*'])

# Additive installed apps.
INSTALLED_APPS += (
)

# Additive middleware classes
MIDDLEWARE_CLASSES += (
)
| {
"repo_name": "iamkelroy/CS673_G1_T3_BUGTRACKER",
"path": "issue_tracker/settings/local.py",
"copies": "1",
"size": "1159",
"license": "mit",
"hash": 3294472964532480500,
"line_mean": 24.1956521739,
"line_max": 79,
"alpha_frac": 0.6289905091,
"autogenerated": false,
"ratio": 4.0524475524475525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 46
} |
"""A local settings template."""
import fnmatch
# * imports should really never be used. Given this is passing settings around,
# this particular setup is getting a special pass.
from default import *
def _CheckLocalNetwork(addresses):
"""Check whether the current IP matches the addresses provided.
Args:
addresses: A list of IP addresses which can be exanded via unix fnmatch.
Returns:
Boolean as to whether the current IP matches the provided IPs in the
list.
"""
for address in self:
if fnmatch.fnmatch(key, address):
return True
return False
# local settings
# Development-only defaults; do not use in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

# Set this to wherever you want to connect to. It is currently setup to
# run against sqlite.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': '/tmp/dev.db',
    }
}

if DEBUG:
    # NOTE(review): _CheckLocalNetwork returns the membership helper used
    # for INTERNAL_IPS -- confirm it yields a container, not a boolean.
    INTERNAL_IPS = _CheckLocalNetwork(['127.0.0.1', '192.168.*.*'])

# Additive installed apps for debugging.
INSTALLED_APPS += (
)

# Additive middleware classes for debugging.
MIDDLEWARE_CLASSES += (
)
| {
"repo_name": "iamkelroy/CS673_G1_T3_BUGTRACKER",
"path": "issue_tracker/settings/local_template.py",
"copies": "1",
"size": "1123",
"license": "mit",
"hash": 1883811751080711200,
"line_mean": 23.4130434783,
"line_max": 79,
"alpha_frac": 0.659839715,
"autogenerated": false,
"ratio": 4.113553113553113,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5273392828553113,
"avg_score": null,
"num_lines": null
} |
""" A location-aware script to manage ringer volume """
__author__ = 'Marco Bonifacio <bonifacio.marco@gmail.com>'
__license__ = 'MIT License'
import android
import time
# Parameters
# Map of known wireless SSIDs to a named environment.
SSID = {'bonix-lan': 'casa',
        'ZENIT SECURED WPA': 'lavoro'}
# Ringer volume per environment ('sconosciuto' = unknown location).
RINGER = {'casa': 5,
          'lavoro': 2,
          'sconosciuto': 5}
# Functions
def check_ssid(droid):
    """Check if a visible wireless network SSID is known.

    Args:
        droid: an Android instance.

    Returns:
        A string naming a known environment (a value of SSID), or
        'sconosciuto' ("unknown") when no scanned SSID is recognised.
    """
    state = 'sconosciuto'
    try:
        lwifi = droid.wifiGetScanResults().result
        lssid = [w['ssid'] for w in lwifi]
        for s in lssid:
            if s in SSID:
                state = SSID[s]
    except Exception as e:  # 'as' form works on Python 2.6+ and 3 (was 'except Exception, e')
        droid.notify('PyLocale', 'Errore: {}'.format(e))
    # NOTE: the original returned from inside 'finally', which would also
    # swallow SystemExit/KeyboardInterrupt raised in the 'try' body.
    return state
def check_state(droid, state, stateold):
    """Report whether the environment changed since the previous check.

    Args:
        droid: an Android instance.
        state: a string, the present state.
        stateold: a string, the former state.

    Returns:
        True when the environment changed (also vibrates the device and
        shows a toast describing the change), False otherwise.
    """
    if state == stateold:
        return False
    droid.vibrate()
    if state == 'sconosciuto':
        announcement = 'Sei uscito da {}'.format(stateold)
    else:
        announcement = 'Sei a {}'.format(state)
    droid.makeToast(announcement)
    return True
def set_ringer(droid, state):
    """Apply the ringer volume configured for *state*.

    Args:
        droid: an Android instance.
        state: a string, the present state (a key of RINGER).

    Returns:
        nothing.
    """
    volume = RINGER[state]
    droid.setRingerVolume(volume)
    droid.makeToast('Volume: {}'.format(volume))
if __name__ == '__main__':
    # Poll the wifi environment forever, adjusting ringer volume on change.
    droid = android.Android()
    state = 'sconosciuto'
    while True:
        stateold = state
        state = check_ssid(droid)
        changed = check_state(droid, state, stateold)
        if changed is True:
            set_ringer(droid, state)
time.sleep(300) | {
"repo_name": "mbonix/yapr",
"path": "android/PyLocale.py",
"copies": "1",
"size": "2023",
"license": "mit",
"hash": 3825181656120846300,
"line_mean": 27.1111111111,
"line_max": 65,
"alpha_frac": 0.5872466634,
"autogenerated": false,
"ratio": 3.5060658578856154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4593312521285615,
"avg_score": null,
"num_lines": null
} |
"""A "Location" is a collecion of responses from geocoding services, each one a
distinct attempt to either find a string address given a point (reverse geocode)
or an attempt to find a point that best matches a string address (forward
geocode). A Location is a collection, because fundamentally *ErrorGeoPy* is
oriented to working across providers, and considering all of their results as a
related set of responses.
A "LocationClusters" object, also defined here, is also a collection of
addresses. but is slightly less abstract in that the members of the collection
are organised into clusters, based on some clustering algorithm.
Heavy use is made of shapely in return values of methods for these classes.
.. moduleauthor Richard Law <richard.m.law@gmail.com>
"""
from functools import wraps
import geopy
from shapely.geometry import MultiPoint, GeometryCollection
from shapely.ops import transform
from errorgeopy import utils
def _check_points_exist(func):
    """Decorator: short-circuit to None unless the first argument has a
    non-empty ``points`` property.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        target = args[0]
        if target.points:
            return func(*args, **kwargs)
        return None
    return inner
def _check_polygonisable(func):
    """Decorator: short-circuit to None unless the first argument's
    ``_polygonisable()`` method returns True.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        target = args[0]
        if target._polygonisable():
            return func(*args, **kwargs)
        return None
    return inner
def _check_concave_hull_calcuable(func):
    """Decorator: short-circuit to None unless there are at least four
    candidates, the minimum needed to compute a concave hull.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        if len(args[0]) >= 4:
            return func(*args, **kwargs)
        return None
    return inner
def _check_convex_hull_calcuable(func):
    """Decorator: short-circuit to None unless there are at least three
    candidates, the minimum needed to compute a convex hull.

    (The original docstring said "concave"; this guard is applied to the
    convex hull property.)
    """
    @wraps(func)
    def inner(*args, **kwargs):
        if len(args[0]) >= 3:
            return func(*args, **kwargs)
        return None
    return inner
def _check_cluster_calculable(func):
    """Decorator: short-circuit to an empty list unless there are enough
    locations for clustering.

    NOTE(review): the original docstring said "mininum of 2", but the code
    requires at least 3 locations; that threshold is preserved here.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        if len(args[0]._location) >= 3:
            return func(*args, **kwargs)
        return []
    return inner
class Location(object):
    """Represents a collection of parsed geocoder responses, each of which
    are geopy.Location objects, representing the results of different
    geocoding services for the same query.
    """

    @utils.check_location_type
    def __init__(self, locations):
        self._locations = locations or []

    def __unicode__(self):
        return '\n'.join(self.addresses)

    def __str__(self):
        return self.__unicode__()

    def __repr__(self):
        return '\n'.join([repr(l) for l in self._locations])

    def __getitem__(self, index):
        return self._locations[index]

    def __setitem__(self, index, value):
        if not isinstance(value, geopy.Location):
            raise TypeError
        # BUG FIX: assign into the backing list. The original assigned into
        # the 'locations' property, which silently discarded the new value
        # whenever _locations was not already a list.
        self._locations[index] = value

    def __eq__(self, other):
        if not isinstance(other, Location):
            return False
        # BUG FIX: compare lengths first; zip() truncates to the shorter
        # sequence, so the original treated a strict prefix as equal.
        if len(self.locations) != len(other):
            return False
        for l, o in zip(self.locations, other):
            if not l == o:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __len__(self):
        return len(self._locations)

    def _polygonisable(self):
        # At least two locations are needed to form any polygonal geometry.
        if not self._locations or len(self.locations) <= 1:
            return False
        return True

    @property
    def locations(self):
        """A sequence of geopy.Location objects.
        """
        if not isinstance(self._locations, list):
            return [self._locations]
        else:
            return self._locations

    @property
    def addresses(self):
        """geopy.Location.address properties for all candidate locations as a
        sequence of strings.
        """
        return [l.address for l in self.locations]

    @property
    def points(self):
        """An array of geopy.Point objects representing the candidate locations
        physical positions. These are geodetic points, with latitude, longitude,
        and altitude (in kilometres, when supported by providers; defaults to 0).
        """
        return [l.point for l in self.locations]

    @property
    @_check_points_exist
    def multipoint(self):
        """A shapely.geometry.MultiPoint of the Location members.
        """
        return MultiPoint(self._shapely_points())

    @property
    @_check_points_exist
    def centroid(self):
        """A shapely.geometry.Point of the centre of all candidate address
        locations (centre of the multipoint).
        """
        return self.multipoint.centroid

    @property
    @_check_points_exist
    def most_central_location(self):
        """A shapely.geometry.Point representing the geometry of the candidate
        location that is nearest to the geometric centre of all of the candidate
        locations.
        """
        return utils.point_nearest_point(self._shapely_points(), self.centroid)

    @property
    @_check_points_exist
    def mbc(self):
        """A shapely.geometry.Polygon representing the minimum bounding circle
        of the candidate locations.
        """
        return utils.minimum_bounding_circle(
            [p[0:2] for p in self._tuple_points()])

    @property
    @_check_concave_hull_calcuable
    @_check_polygonisable
    def concave_hull(self, alpha=0.15):
        """A concave hull of the Location, as a shapely.geometry.Polygon object.
        Needs at least four candidates, or else this property is None.

        NOTE(review): as a property this can never receive a caller-supplied
        ``alpha``; the default of 0.15 is always used.
        """
        return utils.concave_hull([p[0:2] for p in self._tuple_points()],
                                  alpha)

    @property
    @_check_convex_hull_calcuable
    @_check_polygonisable
    def convex_hull(self):
        """A convex hull of the Location, as a shapely.geometry.Polygon
        object. Needs at least three candidates, or else this property is None.
        """
        return utils.convex_hull(self._tuple_points())

    @property
    @_check_points_exist
    def clusters(self):
        """Clusters that have been identified in the Location's candidate
        addresses, as an errorgeopy.location.LocationClusters object.
        """
        return LocationClusters(self)

    def _shapely_points(self, epsg=None):
        # NOTE(review): the projected 'points' computed here are never used;
        # the return value always converts the unprojected self.points
        # (pre-existing behaviour, deliberately preserved).
        if epsg:
            projection = utils.get_proj(epsg)
            points = [transform(projection, p) for p in self.points]
        return utils.array_geopy_points_to_shapely_points(self.points)

    def _tuple_points(self, epsg=None):
        if epsg:
            projection = utils.get_proj(epsg)
            points = [transform(projection, p) for p in self.points]
        return utils.array_geopy_points_to_xyz_tuples(self.points
                                                      if not epsg else points)
# TODO it'd be nice to have the names of the geocoder that produced each cluster member; this would require extending geopy.Location to include this information
class LocationClusters(object):
    """Represents clusters of addresses identified from an errorgeopy.Location
    object, which itself is one coherent collection of responses from multiple
    geocoding services for the same query.
    """

    def __init__(self, location):
        # The source errorgeopy.Location whose members will be clustered.
        self._location = location

    def __len__(self):
        return len(self.clusters)

    def __str__(self):
        return self.__unicode__()

    def __unicode__(self):
        # NOTE(review): assumes each cluster exposes a '.location' attribute;
        # cluster objects come from utils.get_clusters — confirm their shape.
        return '\n'.join([str(c.location) for c in self.clusters])

    def __getitem__(self, index):
        return self.clusters[index]

    @property
    @_check_cluster_calculable
    def clusters(self):
        """A sequence of clusters identified from the input. May have length 0
        if no clusters can be determined.
        """
        return utils.get_clusters(self._location, Location)

    @property
    def geometry_collection(self):
        """GeometryCollection of clusters as multipoint geometries.
        """
        return GeometryCollection(
            [c.location.multipoint for c in self.clusters])

    @property
    def cluster_centres(self):
        """Multipoint of cluster geometric centroids.
        """
        # NOTE(review): uses c.centroid here but c.location.multipoint above —
        # verify whether cluster objects expose geometry directly or via
        # their .location attribute.
        return MultiPoint([c.centroid for c in self.clusters])
| {
"repo_name": "alpha-beta-soup/errorgeopy",
"path": "errorgeopy/location.py",
"copies": "1",
"size": "8754",
"license": "mit",
"hash": -9085602439079721000,
"line_mean": 29.2906574394,
"line_max": 160,
"alpha_frac": 0.6320539182,
"autogenerated": false,
"ratio": 4.251578436134046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5383632354334046,
"avg_score": null,
"num_lines": null
} |
"""A location is a place within the game board that has a row and column
Locations are used to reference different board elements"""
def make_location(row, column):
    """Build a location value for the given row and column."""
    return (row, column)
def get_row(location):
    """Return the row component of *location*."""
    row = location[0]
    return row
def get_column(location):
    """Return the column component of *location*."""
    column = location[1]
    return column
def get_translate(location, rows, columns):
    """Return a new location shifted by *rows* and *columns*."""
    shifted_row = get_row(location) + rows
    shifted_column = get_column(location) + columns
    return make_location(shifted_row, shifted_column)
def get_adjacent(location):
    """Return the eight locations surrounding *location*, starting at the
    upper-left neighbour and proceeding clockwise."""
    row = get_row(location)
    column = get_column(location)
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1),
               (1, 1), (1, 0), (1, -1), (0, -1)]
    return [make_location(row + dr, column + dc) for dr, dc in offsets]
def get_adjacent_within_bounds(location, max_row, max_column, min_row=0, min_column=0):
    """Return the adjacent locations of *location*, keeping only those that
    pass is_within_bounds for the given limits."""
    candidates = get_adjacent(location)
    return [loc for loc in candidates
            if is_within_bounds(loc, max_row, max_column, min_row, min_column)]
def get_orthogonal(location):
    """Return the four orthogonally adjacent locations (up, right, down,
    left in that order)."""
    row = get_row(location)
    column = get_column(location)
    return [make_location(row + dr, column + dc)
            for dr, dc in ((-1, 0), (0, 1), (1, 0), (0, -1))]
def get_double_orthogonal(location):
    """Return the four orthogonal locations two squares away from *location*.

    Used for the well special points."""
    row = get_row(location)
    column = get_column(location)
    return [make_location(row + dr, column + dc)
            for dr, dc in ((-2, 0), (0, 2), (2, 0), (0, -2))]
def get_orthogonal_within_bounds(location, max_row, max_column, min_row=0, min_column=0):
    """Return the orthogonally adjacent locations of *location*, keeping only
    those that pass is_within_bounds for the given limits."""
    candidates = get_orthogonal(location)
    return [loc for loc in candidates
            if is_within_bounds(loc, max_row, max_column, min_row, min_column)]
def is_within_bounds(location, max_row, max_column, min_row=0, min_column=0):
    """Report whether *location* lies inside the given bounds.

    Inclusive on the min bound and exclusive on the max bound:
    min_row <= row < max_row and min_column <= column < max_column.
    """
    row = get_row(location)
    column = get_column(location)
    return (min_row <= row < max_row) and (min_column <= column < max_column)
| {
"repo_name": "nicholas-maltbie/Medina",
"path": "Location.py",
"copies": "1",
"size": "2990",
"license": "mit",
"hash": -5143512161455073000,
"line_mean": 45.4603174603,
"line_max": 93,
"alpha_frac": 0.6618729097,
"autogenerated": false,
"ratio": 3.705080545229244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.982132584310702,
"avg_score": 0.009125522364444501,
"num_lines": 63
} |
"""A location is the representation of a given point on a glove."""
import json
class Location(object):
    """A representation of a given point on a globe."""

    def __init__(self, x_coord, y_coord):
        # Private storage for the coordinate pair; plant-life flag starts off.
        self._x = x_coord
        self._y = y_coord
        self._has_plant = 0

    @property
    def x(self):  # pylint: disable=invalid-name
        """The abscissa of the location's coordinate.

        :rtype: int
        """
        return self._x

    @property
    def y(self):  # pylint: disable=invalid-name
        """The ordinate of the location's coordinate.

        :rtype: int
        """
        return self._y

    @property
    def coordinate(self):
        """The (x, y) coordinate of this location.

        :rtype: tuple (int, int)
        """
        return (self.x, self.y)

    @property
    def plant(self):
        """1 when there is plant life at this location, otherwise 0."""
        return self._has_plant

    @plant.setter
    def plant(self, plant):
        """Record whether this location has a plant."""
        self._has_plant = plant

    def __repr__(self):
        state = {'x': self.x, 'y': self.y, 'plant': self.plant}
        return json.dumps(state)

    __str__ = __repr__
| {
"repo_name": "neoinsanity/existenz",
"path": "existenz/location.py",
"copies": "1",
"size": "1198",
"license": "apache-2.0",
"hash": 3001780642927884300,
"line_mean": 23.4489795918,
"line_max": 74,
"alpha_frac": 0.5634390651,
"autogenerated": false,
"ratio": 3.8274760383386583,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9890915103438658,
"avg_score": 0,
"num_lines": 49
} |
"""A logger. Because apparently the default one isn't good enough."""
from collections import defaultdict
import datetime
# Defaults used by writers unless explicitly overridden.
DEFAULT_DATE_FORMAT = "%Y-%m-%d %H:%M:%S%z"
DEFAULT_FORMAT = "[{datetime}] {level}: {message}"
DEFAULT_LEVEL = "INFO"
NEWLINE = '\n'

# Numeric severity for each named level (higher == more severe).
LEVELS = {"NOTSET": 00,
          "DEBUG": 10,
          "INFO": 20,
          "NOTICE": 30,
          "WARNING": 40,
          "ERROR": 50,
          "CRITICAL": 60}
class Writer(object):
    """Formats Messages and writes them, one per line, to an output stream.

    A writer carries the tag list and minimum level that Logger.log uses to
    decide whether a message should reach it.
    """

    def __init__(self, output, tags=None, level=DEFAULT_LEVEL,
                 format=DEFAULT_FORMAT, date_format=DEFAULT_DATE_FORMAT):
        self.output = output
        self.tags = ['*'] if tags is None else tags
        self.level = level
        self.int_level = LEVELS.get(level, 0)
        self.format = format
        self.date_format = date_format

    def write(self, line):
        # Emit one line and flush immediately so nothing sits in a buffer.
        self.output.write(line)
        self.output.write(NEWLINE)
        self.output.flush()

    def _do_write(self, message):
        self.write(self._pre_write(message))

    def _pre_write(self, message):
        # Render the message's fields through the configured format string,
        # stringifying the timestamp first.
        fields = message.args()
        fields['datetime'] = fields['datetime'].strftime(self.date_format)
        return self.format.format(**fields)
## IRC ERRORS:
class NoHandlerError(NotImplementedError):
    # Raised by IRCWriter.write when no IRC handler has been attached.
    pass
class IRCWriter(Writer):
    """A Writer that relays log lines to IRC through a handler object
    instead of a file-like stream; ``output`` is the IRC target passed to
    the handler's ``send_message``.
    """

    def __init__(self, output, tags=None, level=DEFAULT_LEVEL,
                 format=DEFAULT_FORMAT, date_format=DEFAULT_DATE_FORMAT,
                 irc_handler=None):
        Writer.__init__(self, output, tags, level, format, date_format)
        # BUG FIX: the original discarded the irc_handler argument by
        # unconditionally assigning None.
        self.irc_handler = irc_handler

    def write(self, line):
        """Send *line* to IRC; raises NoHandlerError when no handler is set."""
        if self.irc_handler is None:
            raise NoHandlerError
        # BUG FIX: the original referenced the undefined name 'message'.
        self.irc_handler.send_message(self.output, line)

    def add_irc_handler(self, handler):
        # Attach the object used to deliver lines to IRC.
        self.irc_handler = handler
class Message(object):
    """One log record: pre-formatted text plus level, tags, and creation
    timestamp.
    """

    def __init__(self, message, level=DEFAULT_LEVEL,
                 tags=None, *args, **kwargs):
        self.tags = tags if tags is not None else []
        self.raw_message = message
        # Render the text immediately with the caller-supplied arguments.
        self.message = message.format(*args, **kwargs)
        self.level = level
        self.datetime = datetime.datetime.today()

    def args(self):
        """Return a shallow copy of this message's attributes, suitable for
        passing to str.format via **kwargs."""
        return dict(self.__dict__)
class Logger(object):
    """A tag- and level-aware logger that fans messages out to Writers.

    Instances are memoised by name: constructing Logger("x") twice returns
    the same object (see __new__).
    """

    # Registry of named singleton instances.
    instances = {}

    def __new__(cls, name="k-eight", *args, **kwargs):
        if name in cls.instances:
            return cls.instances[name]
        else:
            # NOTE(review): object.__new__ with extra arguments raises
            # TypeError on Python 3 — confirm the target interpreter.
            new = object.__new__(cls, *args, **kwargs)
            new.name = name
            cls.instances[name] = new
            return new

    def __init__(self, name="k-eight", writers=None):
        # Guard so re-running __init__ on a memoised instance does not
        # clobber writers that were added earlier.
        if not hasattr(self, 'writers'):
            self.writers = [] if writers is None else writers

    def log(self, message, tags=None, level=DEFAULT_LEVEL, *args, **kwargs):
        # Wrap the raw text in a Message (note: 'message' is rebound here).
        message = Message(message, level, tags, *args, **kwargs)
        if tags is None:
            # Untagged messages go only to catch-all ('*') writers.
            for writer in self.writers:
                if '*' in writer.tags:
                    if writer.int_level <= LEVELS.get(message.level, 0):
                        writer._do_write(message)
            tags = []
        # NOTE(review): a writer matching several tags receives the message
        # once per matching tag — confirm whether duplicates are intended.
        for tag in tags:
            for writer in self.writers:
                if tag in writer.tags or '*' in writer.tags:
                    if writer.int_level <= LEVELS.get(message.level, 0):
                        writer._do_write(message)

    # Convenience wrappers, one per severity level.
    def debug(self, message, tags=None, *args, **kwargs):
        self.log(message, tags, level="DEBUG", *args, **kwargs)

    def info(self, message, tags=None, *args, **kwargs):
        self.log(message, tags, level="INFO", *args, **kwargs)

    def notice(self, message, tags=None, *args, **kwargs):
        self.log(message, tags, level="NOTICE", *args, **kwargs)

    def warning(self, message, tags=None, *args, **kwargs):
        self.log(message, tags, level="WARNING", *args, **kwargs)

    def error(self, message, tags=None, *args, **kwargs):
        self.log(message, tags, level="ERROR", *args, **kwargs)

    def critical(self, message, tags=None, *args, **kwargs):
        self.log(message, tags, level="CRITICAL", *args, **kwargs)

    def add_writers(self, *writers):
        self.writers.extend(writers)
def add_writer(self, writer):
self.writers.append(writer) | {
"repo_name": "MrJohz/K-Eight",
"path": "tools/log.py",
"copies": "1",
"size": "4401",
"license": "bsd-2-clause",
"hash": 6447473397821390000,
"line_mean": 32.6030534351,
"line_max": 76,
"alpha_frac": 0.5655532833,
"autogenerated": false,
"ratio": 3.81038961038961,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.487594289368961,
"avg_score": null,
"num_lines": null
} |
""" a ``logging`` Formatter that escapes newline chars to avoid CRLF log injection (CWE-93)
Defines the class ``AntiCrlfFormatter``
"""
from __future__ import unicode_literals
import logging
import warnings
from anticrlf.types import SubstitutionMap
class LogFormatter(logging.Formatter):
    """logging Formatter to sanitize CRLF errors (CWE-93)

    This class is a drop-in replacement for ``logging.Formatter``, and has the
    exact same construction arguments. However, as a final step of formatting a
    log line, it escapes carriage returns (\r) and linefeeds (\n).

    By default, these are replaced with their escaped equivalents (see
    `Examples`_), but the ``replacements`` dictionary can be modified to change
    this behavior.

    Examples:
        ::

            import anticrlf

            handler = logging.StreamHandler(sys.stderr)
            handler.setFormatter(anticrlf.LogFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))

            logger = logging.getLogger(__name__)
            logger.addHandler(handler)
            logger.setLevel(logging.INFO)

            logger.info("Example text with a newline\nhere")

        This results in::

            2017-02-03 08:43:52,557 - __main__ - INFO - Example text with a newline\nhere

        Whereas with the default ``Formatter``, it would be::

            2017-02-03 08:43:52,557 - __main__ - INFO - Example text with a newline
            here

        If you wanted newlines to be replaced with \x0A instead, you could::

            formatter = anticrlf.LogFormatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            formatter.replacements["\n"] = "\\x0A"  # Note the double backslash for literal!
            handler.setFormatter(formatter)
    """

    def __init__(self, fmt=None, datefmt=None):
        # BUG FIX: super(self.__class__, ...) recurses infinitely when this
        # class is subclassed; name the class explicitly instead.
        super(LogFormatter, self).__init__(fmt=fmt, datefmt=datefmt)
        self.replacements = SubstitutionMap()  # defaults to mapping \n: \\n and \r: \\r

    def format(self, record):
        """calls logging.Formatter.format, then removes CR and LF from the resulting message before returning it"""
        # Exact-type check is deliberate: any tampered/invalid mapping is
        # replaced with a fresh default after warning the user.
        if type(self.replacements) != SubstitutionMap:
            warnings.warn(UserWarning("replacements invalid: resetting to defaults"))
            self.replacements = SubstitutionMap()

        formatted_message = super(LogFormatter, self).format(record)

        for repl in self.replacements:
            formatted_message = formatted_message.replace(repl, self.replacements[repl])

        return formatted_message
| {
"repo_name": "darrenpmeyer/logging-formatter-anticrlf",
"path": "anticrlf/__init__.py",
"copies": "1",
"size": "2553",
"license": "bsd-2-clause",
"hash": 5895916451900739000,
"line_mean": 36.5441176471,
"line_max": 114,
"alpha_frac": 0.6529573051,
"autogenerated": false,
"ratio": 4.199013157894737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001915830493532671,
"num_lines": 68
} |
"""A logging module for use with elsapy.
Additional resources:
* https://github.com/ElsevierDev/elsapy
* https://dev.elsevier.com
* https://api.elsevier.com"""
import time, logging
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
## Following adapted from https://docs.python.org/3/howto/logging-cookbook.html
def get_logger(name):
    """Return a configured logger for *name*: DEBUG+ goes to a dated file
    under ./logs, ERROR+ goes to the console.

    Repeated calls with the same name return the same logger without
    attaching duplicate handlers.
    """
    # TODO: add option to disable logging, without stripping logger out of all modules
    #   - e.g. by simply not writing to file if logging is disabled. See
    #   https://github.com/ElsevierDev/elsapy/issues/26
    # create logger with module name
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # BUG FIX: logging.getLogger memoises by name, so re-running the setup
    # below used to attach duplicate handlers (duplicated log lines).
    if logger.handlers:
        return logger
    # create log path, if not already there
    log_path = Path('logs')
    if not log_path.exists():
        log_path.mkdir()
    # create file handler which logs even debug messages
    fh = logging.FileHandler('logs/elsapy-%s.log' % time.strftime('%Y%m%d'))
    fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.ERROR)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    logger.info("Module loaded.")
    return logger
"repo_name": "ElsevierDev/elsapy",
"path": "elsapy/log_util.py",
"copies": "1",
"size": "1504",
"license": "bsd-3-clause",
"hash": 7153691644284171000,
"line_mean": 34.7317073171,
"line_max": 89,
"alpha_frac": 0.673537234,
"autogenerated": false,
"ratio": 3.926892950391645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008515154360183783,
"num_lines": 41
} |
""" a login and sign up tab widget"""
from PyQt5.QtWidgets import QTabWidget, QFormLayout, QLineEdit, QPushButton, QWidget
class LoginSignup(QTabWidget):
    """A two-tab widget offering "Sign Up" and "Login" forms.

    NOTE(review): 'hight' is a typo for 'height' but is part of the public
    constructor signature, so it is kept. Assigning self.width / self.hight
    also shadows QWidget.width()/height() — confirm no callers rely on those
    methods.
    """

    def __init__(self, width, hight, parent=None):
        super(LoginSignup, self).__init__(parent)
        # set width and hight
        self.width = width
        self.hight = hight
        # TODO: forgot password?
        # declare page
        self.signUpPage = QWidget()
        self.loginPage = QWidget()
        # declare LineEdit
        self.signUpID = QLineEdit()
        self.signUpPW = QLineEdit()
        self.signUpPW.setEchoMode(QLineEdit.Password)  # mask password input
        self.loginID = QLineEdit()
        self.loginPW = QLineEdit()
        self.loginPW.setEchoMode(QLineEdit.Password)
        # declare check button
        self.signUPB = QPushButton("Sign Up", self.signUpPage)
        self.loginB = QPushButton("Login", self.loginPage)
        self.initUI()

    def initUI(self):
        """Lay out both tabs and position the action buttons."""
        # set LoginSignup size
        self.setFixedSize(self.width, self.hight)
        # set signUP dialog
        signUp = QFormLayout()
        signUp.addRow("User ID", self.signUpID)
        signUp.addRow("Password", self.signUpPW)
        # set login dialog
        login = QFormLayout()
        login.addRow("User ID", self.loginID)
        login.addRow("Password", self.loginPW)
        # add layout
        self.signUpPage.setLayout(signUp)
        self.loginPage.setLayout(login)
        # move signup and login button (roughly centred near the bottom edge)
        self.signUPB.move(self.width / 2 - 45, self.hight - 70)
        self.loginB.move(self.width / 2 - 45, self.hight - 70)
        # add tabs
        self.addTab(self.signUpPage, "Sign Up")
        self.addTab(self.loginPage, "Login")
| {
"repo_name": "whuang001/cts",
"path": "gui/LoginSignup.py",
"copies": "1",
"size": "1721",
"license": "mit",
"hash": -3055161290917496300,
"line_mean": 29.1929824561,
"line_max": 84,
"alpha_frac": 0.6147588611,
"autogenerated": false,
"ratio": 3.607966457023061,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47227253181230605,
"avg_score": null,
"num_lines": null
} |
"""A login frame."""
import application, wx
from simpleconf.dialogs.wx import SimpleConfWxDialog
from config import config
from gmusicapi.exceptions import AlreadyLoggedIn
class LoginFrame(SimpleConfWxDialog):
    """Dialog for entering login credentials; runs *callback* after a
    successful login."""

    def __init__(self, callback):
        # Invoked with no arguments once login succeeds.
        self.callback = callback
        super(LoginFrame, self).__init__(config.login)
        self.Bind(wx.EVT_CLOSE, self.on_close)

    def on_close(self, event):
        """The window is about to close, reset application.logging_in."""
        application.logging_in = False
        if application.stream:
            # Nudge playback back one unit and stop the stream.
            # NOTE(review): whether get_position is in seconds or another
            # unit is not visible here — confirm against the stream API.
            application.stream.set_position(max(0, application.stream.get_position() - 1))
            application.frame.SetTitle()
            application.stream.stop()
        event.Skip()

    def on_ok(self, event):
        """Try to login."""
        if super(LoginFrame, self).on_ok(event):
            uid = self.controls['uid'].GetValue()
            pwd = self.controls['pwd'].GetValue()
            remember = self.controls['remember'].GetValue()
            if not remember:
                # Clear stored credentials when "remember" is unticked.
                self.section['uid'] = ''
                self.section['pwd'] = ''
            try:
                res = application.api.login(uid, pwd, application.api.FROM_MAC_ADDRESS)
            except AlreadyLoggedIn:
                return self.on_error('You are already logged in.')
            if not res:
                self.on_error('Failed to login.')
            else:
                self.callback()
| {
"repo_name": "chrisnorman7/gmp3",
"path": "gui/login_frame.py",
"copies": "1",
"size": "1264",
"license": "mpl-2.0",
"hash": 6006298768973509000,
"line_mean": 30.4102564103,
"line_max": 81,
"alpha_frac": 0.6764240506,
"autogenerated": false,
"ratio": 3.632183908045977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48086079586459773,
"avg_score": null,
"num_lines": null
} |
# A lot of failures in these tests on Mac OS X.
# Byte order related?
import os, sys, unittest
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test
class CFunctions(unittest.TestCase):
    """Round-trip tests for ctypes argument/return conversion.

    Each tf_<code> function in the _ctypes_test extension returns its last
    argument divided by 3; the tf_b<code> variants take a leading byte that
    is ignored. The extension records the last argument it received in the
    globals last_tf_arg_s / last_tf_arg_u, read back via S() and U().
    """

    _dll = CDLL(_ctypes_test.__file__)

    def S(self):
        # Last signed argument seen by the test DLL.
        return c_longlong.in_dll(self._dll, "last_tf_arg_s").value
    def U(self):
        # Last unsigned argument seen by the test DLL.
        return c_ulonglong.in_dll(self._dll, "last_tf_arg_u").value

    def test_byte(self):
        self._dll.tf_b.restype = c_byte
        self._dll.tf_b.argtypes = (c_byte,)
        self.assertEqual(self._dll.tf_b(-126), -42)
        self.assertEqual(self.S(), -126)

    def test_byte_plus(self):
        self._dll.tf_bb.restype = c_byte
        self._dll.tf_bb.argtypes = (c_byte, c_byte)
        self.assertEqual(self._dll.tf_bb(0, -126), -42)
        self.assertEqual(self.S(), -126)

    def test_ubyte(self):
        self._dll.tf_B.restype = c_ubyte
        self._dll.tf_B.argtypes = (c_ubyte,)
        self.assertEqual(self._dll.tf_B(255), 85)
        self.assertEqual(self.U(), 255)

    def test_ubyte_plus(self):
        self._dll.tf_bB.restype = c_ubyte
        self._dll.tf_bB.argtypes = (c_byte, c_ubyte)
        self.assertEqual(self._dll.tf_bB(0, 255), 85)
        self.assertEqual(self.U(), 255)

    def test_short(self):
        self._dll.tf_h.restype = c_short
        self._dll.tf_h.argtypes = (c_short,)
        self.assertEqual(self._dll.tf_h(-32766), -10922)
        self.assertEqual(self.S(), -32766)

    def test_short_plus(self):
        self._dll.tf_bh.restype = c_short
        self._dll.tf_bh.argtypes = (c_byte, c_short)
        self.assertEqual(self._dll.tf_bh(0, -32766), -10922)
        self.assertEqual(self.S(), -32766)

    def test_ushort(self):
        self._dll.tf_H.restype = c_ushort
        self._dll.tf_H.argtypes = (c_ushort,)
        self.assertEqual(self._dll.tf_H(65535), 21845)
        self.assertEqual(self.U(), 65535)

    def test_ushort_plus(self):
        self._dll.tf_bH.restype = c_ushort
        self._dll.tf_bH.argtypes = (c_byte, c_ushort)
        self.assertEqual(self._dll.tf_bH(0, 65535), 21845)
        self.assertEqual(self.U(), 65535)

    def test_int(self):
        self._dll.tf_i.restype = c_int
        self._dll.tf_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tf_i(-2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_int_plus(self):
        self._dll.tf_bi.restype = c_int
        self._dll.tf_bi.argtypes = (c_byte, c_int)
        self.assertEqual(self._dll.tf_bi(0, -2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_uint(self):
        self._dll.tf_I.restype = c_uint
        self._dll.tf_I.argtypes = (c_uint,)
        self.assertEqual(self._dll.tf_I(4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_uint_plus(self):
        self._dll.tf_bI.restype = c_uint
        self._dll.tf_bI.argtypes = (c_byte, c_uint)
        self.assertEqual(self._dll.tf_bI(0, 4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_long(self):
        self._dll.tf_l.restype = c_long
        self._dll.tf_l.argtypes = (c_long,)
        self.assertEqual(self._dll.tf_l(-2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_long_plus(self):
        self._dll.tf_bl.restype = c_long
        self._dll.tf_bl.argtypes = (c_byte, c_long)
        self.assertEqual(self._dll.tf_bl(0, -2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_ulong(self):
        self._dll.tf_L.restype = c_ulong
        self._dll.tf_L.argtypes = (c_ulong,)
        self.assertEqual(self._dll.tf_L(4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_ulong_plus(self):
        self._dll.tf_bL.restype = c_ulong
        # Note: leading argument is a c_char here, passed as ' '.
        self._dll.tf_bL.argtypes = (c_char, c_ulong)
        self.assertEqual(self._dll.tf_bL(' ', 4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_longlong(self):
        self._dll.tf_q.restype = c_longlong
        self._dll.tf_q.argtypes = (c_longlong, )
        self.assertEqual(self._dll.tf_q(-9223372036854775806), -3074457345618258602)
        self.assertEqual(self.S(), -9223372036854775806)

    def test_longlong_plus(self):
        self._dll.tf_bq.restype = c_longlong
        self._dll.tf_bq.argtypes = (c_byte, c_longlong)
        self.assertEqual(self._dll.tf_bq(0, -9223372036854775806), -3074457345618258602)
        self.assertEqual(self.S(), -9223372036854775806)

    def test_ulonglong(self):
        self._dll.tf_Q.restype = c_ulonglong
        self._dll.tf_Q.argtypes = (c_ulonglong, )
        self.assertEqual(self._dll.tf_Q(18446744073709551615), 6148914691236517205)
        self.assertEqual(self.U(), 18446744073709551615)

    def test_ulonglong_plus(self):
        self._dll.tf_bQ.restype = c_ulonglong
        self._dll.tf_bQ.argtypes = (c_byte, c_ulonglong)
        self.assertEqual(self._dll.tf_bQ(0, 18446744073709551615), 6148914691236517205)
        self.assertEqual(self.U(), 18446744073709551615)

    def test_float(self):
        self._dll.tf_f.restype = c_float
        self._dll.tf_f.argtypes = (c_float,)
        self.assertEqual(self._dll.tf_f(-42.), -14.)
        self.assertEqual(self.S(), -42)

    def test_float_plus(self):
        self._dll.tf_bf.restype = c_float
        self._dll.tf_bf.argtypes = (c_byte, c_float)
        self.assertEqual(self._dll.tf_bf(0, -42.), -14.)
        self.assertEqual(self.S(), -42)

    def test_double(self):
        self._dll.tf_d.restype = c_double
        self._dll.tf_d.argtypes = (c_double,)
        self.assertEqual(self._dll.tf_d(42.), 14.)
        self.assertEqual(self.S(), 42)

    def test_double_plus(self):
        self._dll.tf_bd.restype = c_double
        self._dll.tf_bd.argtypes = (c_byte, c_double)
        self.assertEqual(self._dll.tf_bd(0, 42.), 14.)
        self.assertEqual(self.S(), 42)

    @unittest.skipIf(sys.platform=='cli' and os.name=='posix', 'Long doubles - https://github.com/IronLanguages/ironpython2/issues/408')
    def test_longdouble(self):
        self._dll.tf_D.restype = c_longdouble
        self._dll.tf_D.argtypes = (c_longdouble,)
        self.assertEqual(self._dll.tf_D(42.), 14.)
        self.assertEqual(self.S(), 42)

    @unittest.skipIf(sys.platform=='cli' and os.name=='posix', 'Long doubles - https://github.com/IronLanguages/ironpython2/issues/408')
    def test_longdouble_plus(self):
        self._dll.tf_bD.restype = c_longdouble
        self._dll.tf_bD.argtypes = (c_byte, c_longdouble)
        self.assertEqual(self._dll.tf_bD(0, 42.), 14.)
        self.assertEqual(self.S(), 42)

    def test_callwithresult(self):
        # restype may be a callable: it post-processes the raw c_int result.
        def process_result(result):
            return result * 2
        self._dll.tf_i.restype = process_result
        self._dll.tf_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tf_i(42), 28)
        self.assertEqual(self.S(), 42)
        self.assertEqual(self._dll.tf_i(-42), -28)
        self.assertEqual(self.S(), -42)

    def test_void(self):
        # restype=None means the function returns no value (C void).
        self._dll.tv_i.restype = None
        self._dll.tv_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tv_i(42), None)
        self.assertEqual(self.S(), 42)
        self.assertEqual(self._dll.tv_i(-42), None)
        self.assertEqual(self.S(), -42)
# The following repeats the above tests with stdcall functions (where
# they are available)
try:
    WinDLL
except NameError:
    # Not Windows: provide a no-op placeholder so the name below exists.
    def stdcall_dll(*_): pass
else:
    class stdcall_dll(WinDLL):
        # Resolve attribute lookups to the "s_"-prefixed stdcall exports of
        # the test DLL, caching the function pointer on first access.
        def __getattr__(self, name):
            if name[:2] == '__' and name[-2:] == '__':
                raise AttributeError(name)
            func = self._FuncPtr(("s_" + name, self))
            setattr(self, name, func)
            return func

@need_symbol('WinDLL')
class stdcallCFunctions(CFunctions):
    # Re-run every CFunctions test against the stdcall variants.
    _dll = stdcall_dll(_ctypes_test.__file__)

if __name__ == '__main__':
    unittest.main()
| {
"repo_name": "slozier/ironpython2",
"path": "Src/StdLib/Lib/ctypes/test/test_cfuncs.py",
"copies": "2",
"size": "7962",
"license": "apache-2.0",
"hash": 6299011925261424000,
"line_mean": 36.2056074766,
"line_max": 136,
"alpha_frac": 0.6070082894,
"autogenerated": false,
"ratio": 3.083656080557707,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4690664369957707,
"avg_score": null,
"num_lines": null
} |
# A lot of failures in these tests on Mac OS X.
# Byte order related?
import unittest
from ctypes import *
import _ctypes_test
class CFunctions(unittest.TestCase):
    """Round-trip every C scalar type through the _ctypes_test helper DLL.

    Each ``tf_<typecode>`` function returns its argument divided by three
    and stores the argument in a DLL global, which is read back through
    :meth:`S` (signed) / :meth:`U` (unsigned) to verify the value survived
    the call unmangled.  The ``tf_b<typecode>`` variants take an extra
    leading byte so the payload lands in a different register/stack slot.

    Fixes: the deprecated ``failUnlessEqual`` alias (removed in Python 3.12)
    is replaced with ``assertEqual``, and ``test_ulong_plus`` passes a bytes
    object to the ``c_char`` parameter as Python 3 requires.
    """
    _dll = CDLL(_ctypes_test.__file__)

    def S(self):
        # Last signed tf_* argument, as recorded by the DLL.
        return c_longlong.in_dll(self._dll, "last_tf_arg_s").value

    def U(self):
        # Last unsigned tf_* argument, as recorded by the DLL.
        return c_ulonglong.in_dll(self._dll, "last_tf_arg_u").value

    def test_byte(self):
        self._dll.tf_b.restype = c_byte
        self._dll.tf_b.argtypes = (c_byte,)
        self.assertEqual(self._dll.tf_b(-126), -42)
        self.assertEqual(self.S(), -126)

    def test_byte_plus(self):
        self._dll.tf_bb.restype = c_byte
        self._dll.tf_bb.argtypes = (c_byte, c_byte)
        self.assertEqual(self._dll.tf_bb(0, -126), -42)
        self.assertEqual(self.S(), -126)

    def test_ubyte(self):
        self._dll.tf_B.restype = c_ubyte
        self._dll.tf_B.argtypes = (c_ubyte,)
        self.assertEqual(self._dll.tf_B(255), 85)
        self.assertEqual(self.U(), 255)

    def test_ubyte_plus(self):
        self._dll.tf_bB.restype = c_ubyte
        self._dll.tf_bB.argtypes = (c_byte, c_ubyte)
        self.assertEqual(self._dll.tf_bB(0, 255), 85)
        self.assertEqual(self.U(), 255)

    def test_short(self):
        self._dll.tf_h.restype = c_short
        self._dll.tf_h.argtypes = (c_short,)
        self.assertEqual(self._dll.tf_h(-32766), -10922)
        self.assertEqual(self.S(), -32766)

    def test_short_plus(self):
        self._dll.tf_bh.restype = c_short
        self._dll.tf_bh.argtypes = (c_byte, c_short)
        self.assertEqual(self._dll.tf_bh(0, -32766), -10922)
        self.assertEqual(self.S(), -32766)

    def test_ushort(self):
        self._dll.tf_H.restype = c_ushort
        self._dll.tf_H.argtypes = (c_ushort,)
        self.assertEqual(self._dll.tf_H(65535), 21845)
        self.assertEqual(self.U(), 65535)

    def test_ushort_plus(self):
        self._dll.tf_bH.restype = c_ushort
        self._dll.tf_bH.argtypes = (c_byte, c_ushort)
        self.assertEqual(self._dll.tf_bH(0, 65535), 21845)
        self.assertEqual(self.U(), 65535)

    def test_int(self):
        self._dll.tf_i.restype = c_int
        self._dll.tf_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tf_i(-2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_int_plus(self):
        self._dll.tf_bi.restype = c_int
        self._dll.tf_bi.argtypes = (c_byte, c_int)
        self.assertEqual(self._dll.tf_bi(0, -2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_uint(self):
        self._dll.tf_I.restype = c_uint
        self._dll.tf_I.argtypes = (c_uint,)
        self.assertEqual(self._dll.tf_I(4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_uint_plus(self):
        self._dll.tf_bI.restype = c_uint
        self._dll.tf_bI.argtypes = (c_byte, c_uint)
        self.assertEqual(self._dll.tf_bI(0, 4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_long(self):
        self._dll.tf_l.restype = c_long
        self._dll.tf_l.argtypes = (c_long,)
        self.assertEqual(self._dll.tf_l(-2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_long_plus(self):
        self._dll.tf_bl.restype = c_long
        self._dll.tf_bl.argtypes = (c_byte, c_long)
        self.assertEqual(self._dll.tf_bl(0, -2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_ulong(self):
        self._dll.tf_L.restype = c_ulong
        self._dll.tf_L.argtypes = (c_ulong,)
        self.assertEqual(self._dll.tf_L(4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_ulong_plus(self):
        self._dll.tf_bL.restype = c_ulong
        self._dll.tf_bL.argtypes = (c_char, c_ulong)
        # c_char requires a one-character bytes object on Python 3; the str
        # ' ' used previously raises a ctypes ArgumentError there.
        self.assertEqual(self._dll.tf_bL(b' ', 4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_longlong(self):
        self._dll.tf_q.restype = c_longlong
        self._dll.tf_q.argtypes = (c_longlong, )
        self.assertEqual(self._dll.tf_q(-9223372036854775806), -3074457345618258602)
        self.assertEqual(self.S(), -9223372036854775806)

    def test_longlong_plus(self):
        self._dll.tf_bq.restype = c_longlong
        self._dll.tf_bq.argtypes = (c_byte, c_longlong)
        self.assertEqual(self._dll.tf_bq(0, -9223372036854775806), -3074457345618258602)
        self.assertEqual(self.S(), -9223372036854775806)

    def test_ulonglong(self):
        self._dll.tf_Q.restype = c_ulonglong
        self._dll.tf_Q.argtypes = (c_ulonglong, )
        self.assertEqual(self._dll.tf_Q(18446744073709551615), 6148914691236517205)
        self.assertEqual(self.U(), 18446744073709551615)

    def test_ulonglong_plus(self):
        self._dll.tf_bQ.restype = c_ulonglong
        self._dll.tf_bQ.argtypes = (c_byte, c_ulonglong)
        self.assertEqual(self._dll.tf_bQ(0, 18446744073709551615), 6148914691236517205)
        self.assertEqual(self.U(), 18446744073709551615)

    def test_float(self):
        self._dll.tf_f.restype = c_float
        self._dll.tf_f.argtypes = (c_float,)
        self.assertEqual(self._dll.tf_f(-42.), -14.)
        self.assertEqual(self.S(), -42)

    def test_float_plus(self):
        self._dll.tf_bf.restype = c_float
        self._dll.tf_bf.argtypes = (c_byte, c_float)
        self.assertEqual(self._dll.tf_bf(0, -42.), -14.)
        self.assertEqual(self.S(), -42)

    def test_double(self):
        self._dll.tf_d.restype = c_double
        self._dll.tf_d.argtypes = (c_double,)
        self.assertEqual(self._dll.tf_d(42.), 14.)
        self.assertEqual(self.S(), 42)

    def test_double_plus(self):
        self._dll.tf_bd.restype = c_double
        self._dll.tf_bd.argtypes = (c_byte, c_double)
        self.assertEqual(self._dll.tf_bd(0, 42.), 14.)
        self.assertEqual(self.S(), 42)

    def test_longdouble(self):
        self._dll.tf_D.restype = c_longdouble
        self._dll.tf_D.argtypes = (c_longdouble,)
        self.assertEqual(self._dll.tf_D(42.), 14.)
        self.assertEqual(self.S(), 42)

    def test_longdouble_plus(self):
        self._dll.tf_bD.restype = c_longdouble
        self._dll.tf_bD.argtypes = (c_byte, c_longdouble)
        self.assertEqual(self._dll.tf_bD(0, 42.), 14.)
        self.assertEqual(self.S(), 42)

    def test_callwithresult(self):
        # restype may be a plain callable that post-processes the raw int.
        def process_result(result):
            return result * 2
        self._dll.tf_i.restype = process_result
        self._dll.tf_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tf_i(42), 28)
        self.assertEqual(self.S(), 42)
        self.assertEqual(self._dll.tf_i(-42), -28)
        self.assertEqual(self.S(), -42)

    def test_void(self):
        # restype = None maps a void C function to a Python None return.
        self._dll.tv_i.restype = None
        self._dll.tv_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tv_i(42), None)
        self.assertEqual(self.S(), 42)
        self.assertEqual(self._dll.tv_i(-42), None)
        self.assertEqual(self.S(), -42)
# The following repeats the above tests with stdcall functions (where
# they are available)
try:
    WinDLL
except NameError:
    # Not on Windows: there is no stdcall calling convention, skip silently.
    pass
else:
    class stdcall_dll(WinDLL):
        # Redirect attribute access to the "s_"-prefixed stdcall exports of
        # the test DLL instead of the plain cdecl names.
        def __getattr__(self, name):
            if name[:2] == '__' and name[-2:] == '__':
                raise AttributeError(name)
            func = self._FuncPtr(("s_" + name, self))
            setattr(self, name, func)  # cache so __getattr__ runs once per name
            return func
    # Re-run every CFunctions test against the stdcall variants.
    class stdcallCFunctions(CFunctions):
        _dll = stdcall_dll(_ctypes_test.__file__)
    pass
if __name__ == '__main__':
    unittest.main()
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-3.0/Lib/ctypes/test/test_cfuncs.py",
"copies": "53",
"size": "7861",
"license": "mit",
"hash": 2983729085066756000,
"line_mean": 36.2559241706,
"line_max": 92,
"alpha_frac": 0.6123902811,
"autogenerated": false,
"ratio": 3.1838801134062376,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# A lot of failures in these tests on Mac OS X.
# Byte order related?
import unittest
from ctypes import *
import _ctypes_test
class CFunctions(unittest.TestCase):
    """Round-trip every C scalar type through the _ctypes_test helper DLL.

    Each ``tf_<typecode>`` function returns its argument divided by three
    and stores the argument in a DLL global, which is read back through
    :meth:`S` (signed) / :meth:`U` (unsigned) to verify the value survived
    the call unmangled.  The ``tf_b<typecode>`` variants take an extra
    leading byte so the payload lands in a different register/stack slot.

    Fixes: the deprecated ``failUnlessEqual`` alias is replaced with
    ``assertEqual`` (available since unittest's first release), and
    ``test_ulong_plus`` passes ``b' '`` for the ``c_char`` parameter —
    identical to ``' '`` on Python 2 and required on Python 3.
    """
    _dll = CDLL(_ctypes_test.__file__)

    def S(self):
        # Last signed tf_* argument, as recorded by the DLL.
        return c_longlong.in_dll(self._dll, "last_tf_arg_s").value

    def U(self):
        # Last unsigned tf_* argument, as recorded by the DLL.
        return c_ulonglong.in_dll(self._dll, "last_tf_arg_u").value

    def test_byte(self):
        self._dll.tf_b.restype = c_byte
        self._dll.tf_b.argtypes = (c_byte,)
        self.assertEqual(self._dll.tf_b(-126), -42)
        self.assertEqual(self.S(), -126)

    def test_byte_plus(self):
        self._dll.tf_bb.restype = c_byte
        self._dll.tf_bb.argtypes = (c_byte, c_byte)
        self.assertEqual(self._dll.tf_bb(0, -126), -42)
        self.assertEqual(self.S(), -126)

    def test_ubyte(self):
        self._dll.tf_B.restype = c_ubyte
        self._dll.tf_B.argtypes = (c_ubyte,)
        self.assertEqual(self._dll.tf_B(255), 85)
        self.assertEqual(self.U(), 255)

    def test_ubyte_plus(self):
        self._dll.tf_bB.restype = c_ubyte
        self._dll.tf_bB.argtypes = (c_byte, c_ubyte)
        self.assertEqual(self._dll.tf_bB(0, 255), 85)
        self.assertEqual(self.U(), 255)

    def test_short(self):
        self._dll.tf_h.restype = c_short
        self._dll.tf_h.argtypes = (c_short,)
        self.assertEqual(self._dll.tf_h(-32766), -10922)
        self.assertEqual(self.S(), -32766)

    def test_short_plus(self):
        self._dll.tf_bh.restype = c_short
        self._dll.tf_bh.argtypes = (c_byte, c_short)
        self.assertEqual(self._dll.tf_bh(0, -32766), -10922)
        self.assertEqual(self.S(), -32766)

    def test_ushort(self):
        self._dll.tf_H.restype = c_ushort
        self._dll.tf_H.argtypes = (c_ushort,)
        self.assertEqual(self._dll.tf_H(65535), 21845)
        self.assertEqual(self.U(), 65535)

    def test_ushort_plus(self):
        self._dll.tf_bH.restype = c_ushort
        self._dll.tf_bH.argtypes = (c_byte, c_ushort)
        self.assertEqual(self._dll.tf_bH(0, 65535), 21845)
        self.assertEqual(self.U(), 65535)

    def test_int(self):
        self._dll.tf_i.restype = c_int
        self._dll.tf_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tf_i(-2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_int_plus(self):
        self._dll.tf_bi.restype = c_int
        self._dll.tf_bi.argtypes = (c_byte, c_int)
        self.assertEqual(self._dll.tf_bi(0, -2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_uint(self):
        self._dll.tf_I.restype = c_uint
        self._dll.tf_I.argtypes = (c_uint,)
        self.assertEqual(self._dll.tf_I(4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_uint_plus(self):
        self._dll.tf_bI.restype = c_uint
        self._dll.tf_bI.argtypes = (c_byte, c_uint)
        self.assertEqual(self._dll.tf_bI(0, 4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_long(self):
        self._dll.tf_l.restype = c_long
        self._dll.tf_l.argtypes = (c_long,)
        self.assertEqual(self._dll.tf_l(-2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_long_plus(self):
        self._dll.tf_bl.restype = c_long
        self._dll.tf_bl.argtypes = (c_byte, c_long)
        self.assertEqual(self._dll.tf_bl(0, -2147483646), -715827882)
        self.assertEqual(self.S(), -2147483646)

    def test_ulong(self):
        self._dll.tf_L.restype = c_ulong
        self._dll.tf_L.argtypes = (c_ulong,)
        self.assertEqual(self._dll.tf_L(4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_ulong_plus(self):
        self._dll.tf_bL.restype = c_ulong
        self._dll.tf_bL.argtypes = (c_char, c_ulong)
        # bytes literal: same object as ' ' on Python 2, required on Python 3.
        self.assertEqual(self._dll.tf_bL(b' ', 4294967295), 1431655765)
        self.assertEqual(self.U(), 4294967295)

    def test_longlong(self):
        self._dll.tf_q.restype = c_longlong
        self._dll.tf_q.argtypes = (c_longlong, )
        self.assertEqual(self._dll.tf_q(-9223372036854775806), -3074457345618258602)
        self.assertEqual(self.S(), -9223372036854775806)

    def test_longlong_plus(self):
        self._dll.tf_bq.restype = c_longlong
        self._dll.tf_bq.argtypes = (c_byte, c_longlong)
        self.assertEqual(self._dll.tf_bq(0, -9223372036854775806), -3074457345618258602)
        self.assertEqual(self.S(), -9223372036854775806)

    def test_ulonglong(self):
        self._dll.tf_Q.restype = c_ulonglong
        self._dll.tf_Q.argtypes = (c_ulonglong, )
        self.assertEqual(self._dll.tf_Q(18446744073709551615), 6148914691236517205)
        self.assertEqual(self.U(), 18446744073709551615)

    def test_ulonglong_plus(self):
        self._dll.tf_bQ.restype = c_ulonglong
        self._dll.tf_bQ.argtypes = (c_byte, c_ulonglong)
        self.assertEqual(self._dll.tf_bQ(0, 18446744073709551615), 6148914691236517205)
        self.assertEqual(self.U(), 18446744073709551615)

    def test_float(self):
        self._dll.tf_f.restype = c_float
        self._dll.tf_f.argtypes = (c_float,)
        self.assertEqual(self._dll.tf_f(-42.), -14.)
        self.assertEqual(self.S(), -42)

    def test_float_plus(self):
        self._dll.tf_bf.restype = c_float
        self._dll.tf_bf.argtypes = (c_byte, c_float)
        self.assertEqual(self._dll.tf_bf(0, -42.), -14.)
        self.assertEqual(self.S(), -42)

    def test_double(self):
        self._dll.tf_d.restype = c_double
        self._dll.tf_d.argtypes = (c_double,)
        self.assertEqual(self._dll.tf_d(42.), 14.)
        self.assertEqual(self.S(), 42)

    def test_double_plus(self):
        self._dll.tf_bd.restype = c_double
        self._dll.tf_bd.argtypes = (c_byte, c_double)
        self.assertEqual(self._dll.tf_bd(0, 42.), 14.)
        self.assertEqual(self.S(), 42)

    def test_longdouble(self):
        self._dll.tf_D.restype = c_longdouble
        self._dll.tf_D.argtypes = (c_longdouble,)
        self.assertEqual(self._dll.tf_D(42.), 14.)
        self.assertEqual(self.S(), 42)

    def test_longdouble_plus(self):
        self._dll.tf_bD.restype = c_longdouble
        self._dll.tf_bD.argtypes = (c_byte, c_longdouble)
        self.assertEqual(self._dll.tf_bD(0, 42.), 14.)
        self.assertEqual(self.S(), 42)

    def test_callwithresult(self):
        # restype may be a plain callable that post-processes the raw int.
        def process_result(result):
            return result * 2
        self._dll.tf_i.restype = process_result
        self._dll.tf_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tf_i(42), 28)
        self.assertEqual(self.S(), 42)
        self.assertEqual(self._dll.tf_i(-42), -28)
        self.assertEqual(self.S(), -42)

    def test_void(self):
        # restype = None maps a void C function to a Python None return.
        self._dll.tv_i.restype = None
        self._dll.tv_i.argtypes = (c_int,)
        self.assertEqual(self._dll.tv_i(42), None)
        self.assertEqual(self.S(), 42)
        self.assertEqual(self._dll.tv_i(-42), None)
        self.assertEqual(self.S(), -42)
# The following repeats the above tests with stdcall functions (where
# they are available)
try:
    WinDLL
except NameError:
    # Not on Windows: there is no stdcall calling convention, skip silently.
    pass
else:
    class stdcall_dll(WinDLL):
        # Redirect attribute access to the "s_"-prefixed stdcall exports of
        # the test DLL instead of the plain cdecl names.
        def __getattr__(self, name):
            if name[:2] == '__' and name[-2:] == '__':
                raise AttributeError(name)
            func = self._FuncPtr(("s_" + name, self))
            setattr(self, name, func)  # cache so __getattr__ runs once per name
            return func
    # Re-run every CFunctions test against the stdcall variants.
    class stdcallCFunctions(CFunctions):
        _dll = stdcall_dll(_ctypes_test.__file__)
    pass
if __name__ == '__main__':
    unittest.main()
| {
"repo_name": "babyliynfg/cross",
"path": "tools/project-creator/Python2.6.6/Lib/ctypes/test/test_cfuncs.py",
"copies": "1",
"size": "8072",
"license": "mit",
"hash": -1015857975819339800,
"line_mean": 36.2559241706,
"line_max": 92,
"alpha_frac": 0.596382557,
"autogenerated": false,
"ratio": 3.2614141414141415,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43577966984141414,
"avg_score": null,
"num_lines": null
} |
"""A lot of logic to parse out what is the registry, the image, and the tag"""
import re
class Repository:
    """Holds the pieces of a docker image reference.

    Constructor-supplied parts:
    repo.domain --- registry host name, e.g. "docker.example.com"
    repo.port   --- registry port, if any
    repo.image  --- image name, e.g. "ubuntu"
    repo.tag    --- tag, e.g. "14.04"

    Derived conveniences:
    repo.registry --- "host[:port]" endpoint ready for 'https://{}/' calls,
                      or None when no domain was given
    repo.repo     --- the combined form as written on a FROM line

    Repository.match(str) parses a combined string without instantiating
    anything yourself.
    """
    # Deliberately permissive: it accepts slashes in the registry portion and
    # adjacent periods, which are not valid repo names -- but it never rejects
    # a valid one.
    matcher = re.compile(r'(?:'
                         r'(?P<domain>localhost|(?:[-_\w]+\.[-_\w]+)+|[-_\w]+(?=:))'
                         r'(?::(?P<port>\d{1,5}))?'
                         r'/)?'
                         r'(?P<image>[-_./\w]+)'
                         r'(?::(?P<tag>[-_.\w]+))?')

    def __init__(self, image, tag='latest', domain=None, port=None):
        """Store the parts and precompute the combined forms.

        Prefer Repository.match(text) when starting from a combined string
        (such as the FROM line of a Dockerfile).

        Usage:
            Repository('ubuntu')
            Repository('ubuntu', '14.04')
            Repository('my-image', 'dev', 'docker.example.com')
            Repository('my-image', 'dev', 'docker.example.com', '5000')
            Repository('my-image', domain='docker.example.com', port='5002')
        """
        self.image = image
        self.tag = tag
        self.domain = domain
        self.port = port
        # Registry endpoint: host[:port]; None when no domain was supplied.
        if domain and port:
            self.registry = '{}:{}'.format(self.domain, self.port)
        else:
            self.registry = self.domain if domain else None
        # Combined reference, exactly as it would appear in a FROM line.
        if self.registry is None:
            self.repo = '{}:{}'.format(self.image, self.tag)
        else:
            self.repo = '{}/{}:{}'.format(self.registry, self.image, self.tag)

    def __str__(self):
        return self.repo

    def get_pull_image_name(self):
        """Return the image name in the form docker.Client.pull() expects.

        pull() takes no registry argument and instead treats
        ({registry}/){image} as the image name itself.
        """
        if not self.registry:
            return self.image
        return "{}/{}".format(self.registry, self.image)

    @classmethod
    def match(cls, text):
        """Build a Repository from a combined string via the class regex.
        Matches more than is valid, but never less (see matcher above).

        Repository.match('docker.example.com/my-image:dev')
        Repository.match('docker.example.com:5000/my-image:dev')
        Repository.match('docker.example.com/my-image')
        """
        found = Repository.matcher.search(text)
        return Repository(image=found.group('image'),
                          tag=found.group('tag') or 'latest',
                          domain=found.group('domain'),
                          port=found.group('port'))
| {
"repo_name": "PetroDE/control",
"path": "control/repository.py",
"copies": "1",
"size": "3567",
"license": "mit",
"hash": 4986103093417899000,
"line_mean": 37.7717391304,
"line_max": 84,
"alpha_frac": 0.579758901,
"autogenerated": false,
"ratio": 4.21133412042503,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005224276040276616,
"num_lines": 92
} |
# a lot of stuff is copied over from savitar1, but we still need to port more of it
import numpy as np
from scipy.special import expit as sigmoid
from collections import namedtuple
import munkres
from scipy.spatial.distance import cdist
import pycocotools.mask as cocomask
from cv2 import remap, INTER_NEAREST
# Internal track record: carries the timestep "t" and the re-identification
# embedding "reid", both of which are stripped before results are returned.
TrackElement_ = namedtuple("TrackElement", ["t", "box", "reid", "track_id", "class_", "mask", "score"])
# Public track record handed back to callers (no timestep, no reid vector).
TrackElement = namedtuple("TrackElement", ["box", "track_id", "class_", "mask", "score"])
# Shared Hungarian-assignment solver instance used by the trackers below.
munkres_obj = munkres.Munkres()
def track_single_sequence(tracker_options, boxes, scores, reids, classes, masks, optical_flow=None):
    """Track one sequence: run the per-class tracker for every class id that
    occurs and merge the per-class results timestep by timestep.

    Args:
        tracker_options: flat dict holding shared keys ("tracker",
            "reid_comp", "box_offset", "box_scale", "new_reid") plus
            per-class keys suffixed "_car" (class id 1) and "_pedestrian"
            (class id 2).
        boxes, scores, reids, classes, masks: per-timestep lists of
            per-detection values.
        optical_flow: optional per-frame flow, forwarded to the tracker.

    Returns:
        A list with one entry per timestep, each a list of TrackElement
        namedtuples; track ids are unique across classes.
    """
    classes_flat = [c for cs in classes for c in cs]
    unique_classes = np.unique(classes_flat)
    start_track_id = 1
    class_tracks = []
    # Options shared by every class.
    shared_options = {"tracker": tracker_options["tracker"], "reid_comp": tracker_options["reid_comp"],
                      "box_offset": tracker_options["box_offset"],
                      "box_scale": tracker_options["box_scale"]}
    # Every per-class option is stored under "<key>_<class name>"; copying
    # them in a loop replaces the two near-identical hand-written blocks.
    per_class_keys = ("detection_confidence_threshold", "reid_weight", "mask_iou_weight",
                      "bbox_iou_weight", "bbox_center_weight", "association_threshold",
                      "keep_alive", "new_reid_threshold", "reid_euclidean_offset",
                      "reid_euclidean_scale")
    class_names = {1: "car", 2: "pedestrian"}
    for class_ in unique_classes:
        assert class_ in class_names, "unknown class"
        suffix = class_names[class_]
        # Fresh dict per class; the trackers only read from it, so this is
        # behaviorally equivalent to the original's reused dict and safer.
        tracker_options_class = dict(shared_options)
        for key in per_class_keys:
            tracker_options_class[key] = tracker_options["{}_{}".format(key, suffix)]
        if tracker_options["new_reid"]:
            tracks = tracker_per_class_new_reid(tracker_options_class, boxes, scores, reids, classes, masks, class_,
                                                start_track_id, optical_flow=optical_flow)
        else:
            tracks = tracker_per_class(tracker_options_class, boxes, scores, reids, classes, masks, class_, start_track_id,
                                       optical_flow=optical_flow)
        class_tracks.append(tracks)
        # Continue numbering after the highest id this class handed out so
        # ids stay globally unique.
        track_ids_flat = [track.track_id for tracks_t in tracks for track in tracks_t]
        track_ids_flat.append(start_track_id)
        start_track_id = max(track_ids_flat) + 1
    # Merge the per-class track lists timestep by timestep.
    n_timesteps = len(boxes)
    tracks_combined = [[] for _ in range(n_timesteps)]
    for tracks_c in class_tracks:
        for t, tracks_c_t in enumerate(tracks_c):
            tracks_combined[t].extend(tracks_c_t)
    return tracks_combined
def tracker_per_class(tracker_options, boxes, scores, reids, classes, masks, class_to_track, start_track_id,
                      optical_flow=None):
    """Greedy/Hungarian association of one class's detections into tracks.

    Per timestep: filter detections to ``class_to_track`` (dropping tiny
    masks and low-confidence scores), build a detection-vs-active-track
    similarity matrix from a weighted sum of reid similarity, warped mask
    IoU, box-center distance and warped box IoU, then assign greedily or via
    the Hungarian algorithm.  Unmatched detections open new tracks; tracks
    unmatched for more than ``keep_alive`` frames are retired.

    Returns a list (one entry per timestep) of lists of TrackElement.

    NOTE(review): when mask_iou_weight or bbox_iou_weight is nonzero,
    flow_tm1_t must be a real flow array — with optical_flow=None it is None
    and the warping code below would fail; confirm callers always pass flow
    in that configuration.  warp_flow, warp_box and bbox_iou are defined
    elsewhere in this file.
    """
    max_track_id = start_track_id
    all_tracks = []
    active_tracks = []
    # Pad the flow list so index t aligns with frame t (flow maps t-1 -> t;
    # frame 0 has no predecessor).
    if optical_flow is None:
        optical_flow = [None for _ in masks]
    else:
        optical_flow = [None] + optical_flow
    assert len(boxes) == len(scores) == len(reids) == len(classes) == len(masks) == len(optical_flow)
    for t, (boxes_t, scores_t, reids_t, classes_t, masks_t, flow_tm1_t) in enumerate(zip(boxes, scores, reids,
                                                                                         classes, masks, optical_flow)):
        # Keep only confident, non-degenerate detections of the target class.
        detections_t = []
        for box, score, reid, class_, mask in zip(boxes_t, scores_t, reids_t, classes_t, masks_t):
            if class_ != class_to_track:
                continue
            if mask is not None and cocomask.area(mask) <= 10:
                continue
            if score >= tracker_options["detection_confidence_threshold"]:
                detections_t.append((box, reid, mask, class_, score))
            else:
                continue
        if len(detections_t) == 0:
            curr_tracks = []
        elif len(active_tracks) == 0:
            # Nothing to associate with: every detection starts a new track.
            curr_tracks = []
            for det in detections_t:
                curr_tracks.append(TrackElement_(t=t, box=det[0], reid=det[1], mask=det[2], class_=det[3],
                                                 track_id=max_track_id, score=det[4]))
                max_track_id += 1
        else:
            # Weighted sum of similarity cues; rows = detections, cols = tracks.
            association_similarities = np.zeros((len(detections_t), len(active_tracks)))
            if tracker_options["reid_weight"] != 0:
                curr_reids = np.array([x[1] for x in detections_t], dtype="float64")
                last_reids = np.array([x.reid for x in active_tracks], dtype="float64")
                if tracker_options["reid_comp"] == "sigmoid_dot":
                    reid_similarities = sigmoid(np.dot(curr_reids, last_reids.T))
                elif tracker_options["reid_comp"] == "cosine":
                    reid_similarities = np.dot(curr_reids/np.linalg.norm(curr_reids, axis=1, ord=2)[:, np.newaxis],
                                               (last_reids/np.linalg.norm(last_reids, axis=1, ord=2)[:, np.newaxis]).T)
                elif tracker_options["reid_comp"] == "euclidean":
                    # Affine map turns a distance into a similarity score.
                    reid_dists = cdist(curr_reids, last_reids, "euclidean")
                    reid_similarities = tracker_options["reid_euclidean_scale"] *\
                                        (tracker_options["reid_euclidean_offset"] - reid_dists)
                elif tracker_options["reid_comp"] == "normalized_euclidean":
                    reid_dists = cdist(curr_reids/np.linalg.norm(curr_reids, axis=1, ord=2)[:, np.newaxis],
                                       last_reids/np.linalg.norm(last_reids, axis=1, ord=2)[:, np.newaxis], "euclidean")
                    reid_similarities = 1 - reid_dists
                else:
                    assert False
                association_similarities += tracker_options["reid_weight"] * reid_similarities
            if tracker_options["mask_iou_weight"] != 0:
                # Prepare flow: negate and convert to an absolute sampling map
                # so last frame's masks can be warped into the current frame.
                h, w = flow_tm1_t.shape[:2]
                flow_tm1_t = -flow_tm1_t
                flow_tm1_t[:, :, 0] += np.arange(w)
                flow_tm1_t[:, :, 1] += np.arange(h)[:, np.newaxis]
                masks_t = [v[2] for v in detections_t]
                masks_tm1 = [v.mask for v in active_tracks]
                masks_tm1_warped = [warp_flow(mask, flow_tm1_t) for mask in masks_tm1]
                mask_ious = cocomask.iou(masks_t, masks_tm1_warped, [False] * len(masks_tm1_warped))
                association_similarities += tracker_options["mask_iou_weight"] * mask_ious
            if tracker_options["bbox_center_weight"] != 0:
                # Box centers from (x1, y1, x2, y2) corners.
                centers_t = [v[0][0:2] + (v[0][2:4] - v[0][0:2]) / 2 for v in detections_t]
                centers_tm1 = [v.box[0:2] + (v.box[2:4] - v.box[0:2]) / 2 for v in active_tracks]
                box_dists = cdist(np.array(centers_t), np.array(centers_tm1), "euclidean")
                box_similarities = tracker_options["box_scale"] *\
                                   (tracker_options["box_offset"] - box_dists)
                association_similarities += tracker_options["bbox_center_weight"] * box_similarities
            if tracker_options["bbox_iou_weight"] != 0:
                # NOTE(review): relies on flow_tm1_t already being converted to
                # a sampling map by the mask branch above — presumably both
                # weights are enabled together; confirm against configs.
                bboxes_t = [v[0] for v in detections_t]
                bboxes_tm1 = [v.box for v in active_tracks]
                bboxes_tm1_warped = [warp_box(box, flow_tm1_t) for box in bboxes_tm1]
                bbox_ious = np.array([[bbox_iou(box1, box2) for box1 in bboxes_tm1_warped] for box2 in bboxes_t])
                assert (0 <= bbox_ious).all() and (bbox_ious <= 1).all()
                association_similarities += tracker_options["bbox_iou_weight"] * bbox_ious
            curr_tracks = []
            detections_assigned = [False for _ in detections_t]
            if tracker_options["tracker"] == "greedy":
                # Repeatedly take the best remaining pair above the threshold,
                # then blank out its row and column.
                while True:
                    idx = association_similarities.argmax()
                    idx = np.unravel_index(idx, association_similarities.shape)
                    val = association_similarities[idx]
                    if val < tracker_options["association_threshold"]:
                        break
                    det = detections_t[idx[0]]
                    te = TrackElement_(t=t, box=det[0], reid=det[1], mask=det[2], class_=det[3],
                                       track_id=active_tracks[idx[1]].track_id, score=det[4])
                    curr_tracks.append(te)
                    detections_assigned[idx[0]] = True
                    association_similarities[idx[0], :] = -1e10
                    association_similarities[:, idx[1]] = -1e10
            elif tracker_options["tracker"] == "hungarian":
                # Globally optimal assignment; pairs below the threshold are
                # priced out with a 1e9 cost and skipped afterwards.
                cost_matrix = munkres.make_cost_matrix(association_similarities)
                disallow_indices = np.argwhere(association_similarities <= tracker_options["association_threshold"])
                for ind in disallow_indices:
                    cost_matrix[ind[0]][ind[1]] = 1e9
                indexes = munkres_obj.compute(cost_matrix)
                for row, column in indexes:
                    value = cost_matrix[row][column]
                    if value == 1e9:
                        continue
                    det = detections_t[row]
                    te = TrackElement_(t=t, box=det[0], reid=det[1], mask=det[2], class_=det[3],
                                       track_id=active_tracks[column].track_id, score=det[4])
                    curr_tracks.append(te)
                    detections_assigned[row] = True
            else:
                assert False
            # Any detection the matcher left unassigned opens a new track.
            for det, assigned in zip(detections_t, detections_assigned):
                if not assigned:
                    curr_tracks.append(TrackElement_(t=t, box=det[0], reid=det[1], mask=det[2], class_=det[3],
                                                     track_id=max_track_id, score=det[4]))
                    max_track_id += 1
        all_tracks.append(curr_tracks)
        # Keep older observations of still-alive tracks, but drop entries that
        # were just updated (replaced by curr_tracks) or expired (keep_alive).
        newly_active_ids = {track.track_id for track in curr_tracks}
        active_tracks = [track for track in active_tracks
                         if track.track_id not in newly_active_ids and track.t >= t - tracker_options["keep_alive"]]
        active_tracks.extend(curr_tracks)
    # remove the reid values, since they are an implementation detail of the tracker and should not be part of the result
    result = [[TrackElement(box=track.box, track_id=track.track_id, mask=track.mask, class_=track.class_, score=track.score)
               for track in tracks_t] for tracks_t in all_tracks]
    return result
def tracker_per_class_new_reid(tracker_options, boxes, scores, reids, classes, masks, class_to_track, start_track_id,
                               optical_flow=None):
    """Variant of tracker_per_class with an extra re-identification stage.

    Stage 1 associates high-confidence detections to last frame's tracks via
    Hungarian matching on calculate_association_similarities.  Stage 2 then
    tries to revive *older* unassigned tracks by greedily matching the
    remaining detections (low-confidence ones plus unassigned high-confidence
    ones) on reid-embedding distance.  Leftover high-confidence detections
    open new tracks.  Returns the same structure as tracker_per_class.

    NOTE(review): ``reided_dets`` is collected but never used, and
    ``detections_assigned`` is not updated when a high-confidence detection is
    matched in the reid stage — such a detection appears to get BOTH a revived
    track and a brand-new track below.  Looks like a latent bug; confirm
    intent before changing.
    """
    # This variant hard-codes euclidean reid comparison + Hungarian matching.
    assert tracker_options["reid_comp"] == "euclidean"
    assert tracker_options["tracker"] == "hungarian"
    max_track_id = start_track_id
    all_tracks = []
    last_tracks = []
    # Pad the flow list so index t aligns with frame t (frame 0 has no flow).
    if optical_flow is None:
        optical_flow = [None for _ in masks]
    else:
        optical_flow = [None] + optical_flow
    assert len(boxes) == len(scores) == len(reids) == len(classes) == len(masks) == len(optical_flow)
    for t, (boxes_t, scores_t, reids_t, classes_t, masks_t, flow_tm1_t) in enumerate(zip(boxes, scores, reids,
                                                                                         classes, masks, optical_flow)):
        curr_tracks = []
        assigned_track_ids = []
        all_detections_t = []
        ### build all_detections_t: target class only, tiny masks dropped,
        ### but (unlike tracker_per_class) low-confidence detections are kept.
        for box, score, reid, class_, mask in zip(boxes_t, scores_t, reids_t, classes_t, masks_t):
            if class_ != class_to_track:
                continue
            if mask is not None and cocomask.area(mask) <= 10:
                continue
            all_detections_t.append((box, reid, mask, class_, score))
        # assign high confidence dets by association scores
        high_confidence_detections_t = [d for d in all_detections_t if
                                        d[4] >= tracker_options["detection_confidence_threshold"]]
        detections_assigned = [False for _ in high_confidence_detections_t]
        if len(high_confidence_detections_t) > 0 and len(last_tracks) > 0:
            association_similarities = calculate_association_similarities(high_confidence_detections_t, last_tracks,
                                                                          flow_tm1_t, tracker_options)
            # Hungarian assignment; below-threshold pairs are priced out at 1e9.
            cost_matrix = munkres.make_cost_matrix(association_similarities)
            disallow_indices = np.argwhere(association_similarities <= tracker_options["association_threshold"])
            for ind in disallow_indices:
                cost_matrix[ind[0]][ind[1]] = 1e9
            indexes = munkres_obj.compute(cost_matrix)
            for row, column in indexes:
                value = cost_matrix[row][column]
                if value == 1e9:
                    continue
                det = high_confidence_detections_t[row]
                track_id = last_tracks[column].track_id
                te = TrackElement_(t=t, box=det[0], reid=det[1], mask=det[2], class_=det[3],
                                   track_id=track_id, score=det[4])
                assigned_track_ids.append(track_id)
                curr_tracks.append(te)
                detections_assigned[row] = True
        #### begin reid stuff ####
        # Candidate pool for revival: every historical observation of a track
        # that was not matched this frame.
        old_tracks = []
        for tracks_in_time_step in all_tracks:
            for track_obj in tracks_in_time_step:
                if track_obj.track_id not in assigned_track_ids:
                    old_tracks.append(track_obj)
        old_reids = np.array([x.reid for x in old_tracks], dtype="float32")
        # low conf dets
        dets_for_reid = [d for d in all_detections_t if d[4] < tracker_options["detection_confidence_threshold"]]
        # use unassigned high conf dets as well?
        for det, assigned in zip(high_confidence_detections_t, detections_assigned):
            if not assigned:
                dets_for_reid.append(det)
        curr_reids = np.array([d[1] for d in dets_for_reid], dtype="float32")
        reided_dets = []  # NOTE(review): filled but never read — see docstring.
        if old_reids.size > 0 and curr_reids.size > 0:
            # Greedy nearest-embedding matching below new_reid_threshold.
            reid_dists = cdist(curr_reids, old_reids, "euclidean")
            while True:
                idx = reid_dists.argmin()
                idx = np.unravel_index(idx, reid_dists.shape)
                val = reid_dists[idx]
                if val > tracker_options["new_reid_threshold"]:
                    break
                #print("reided", class_to_track, val)
                det = dets_for_reid[idx[0]]
                reided_dets.append(det)
                track = old_tracks[idx[1]]
                te = TrackElement_(t=t, box=det[0], reid=det[1], mask=det[2], class_=det[3],
                                   track_id=track.track_id, score=det[4])
                curr_tracks.append(te)
                # Block this detection, and every observation of the revived
                # track, from further matches.  NOTE(review): the loop variable
                # shadows the outer ``idx``; harmless here since ``idx`` is
                # recomputed each iteration, but fragile.
                reid_dists[idx[0], :] = 1e10
                for idx, track2 in enumerate(old_tracks):
                    if track.track_id == track2.track_id:
                        reid_dists[:, idx] = 1e10
        ### end reid stuff ###
        # assign every high confidence det which has neither been propagated nor reided to a new track
        for det, assigned in zip(high_confidence_detections_t, detections_assigned):
            if not assigned:
                curr_tracks.append(TrackElement_(t=t, box=det[0], reid=det[1], mask=det[2], class_=det[3],
                                                 track_id=max_track_id, score=det[4]))
                max_track_id += 1
        all_tracks.append(curr_tracks)
        last_tracks = curr_tracks
    # remove the reid values, since they are an implementation detail of the tracker and should not be part of the result
    result = [[TrackElement(box=track.box, track_id=track.track_id, mask=track.mask, class_=track.class_, score=track.score)
               for track in tracks_t] for tracks_t in all_tracks]
    return result
def calculate_association_similarities(detections_t, last_tracks, flow_tm1_t, tracker_options):
    """Build the (num detections x num tracks) similarity matrix used for
    detection-to-track assignment.

    Each enabled cue (reid-embedding distance, warped-mask IoU, bbox-center
    distance, warped-bbox IoU) is scaled by its weight from tracker_options
    and accumulated into the result.

    Args:
        detections_t: detections at time t; each indexable as
            (box, reid, mask, class, score).
        last_tracks: track objects from t-1 exposing .reid, .mask and .box.
        flow_tm1_t: dense optical flow from frame t-1 to t, shape (H, W, 2).
        tracker_options: dict of cue weights / scales / offsets.

    Returns:
        np.ndarray of shape (len(detections_t), len(last_tracks)).
    """
    association_similarities = np.zeros((len(detections_t), len(last_tracks)))
    if tracker_options["reid_weight"] != 0:
        curr_reids = np.array([x[1] for x in detections_t], dtype="float64")
        last_reids = np.array([x.reid for x in last_tracks], dtype="float64")
        reid_dists = cdist(curr_reids, last_reids, "euclidean")
        reid_similarities = tracker_options["reid_euclidean_scale"] * \
            (tracker_options["reid_euclidean_offset"] - reid_dists)
        association_similarities += tracker_options["reid_weight"] * reid_similarities
    if tracker_options["mask_iou_weight"] != 0:
        # Build the cv2.remap-style sampling grid (pixel coordinate minus
        # flow) used to warp the t-1 masks into frame t.
        # BUG FIX: this used to overwrite flow_tm1_t in place, so the
        # bbox-IoU branch below warped boxes with the remap grid instead of
        # the raw flow. Keep the raw flow untouched for warp_box.
        h, w = flow_tm1_t.shape[:2]
        remap_grid = -flow_tm1_t  # negation allocates a fresh array
        remap_grid[:, :, 0] += np.arange(w)
        remap_grid[:, :, 1] += np.arange(h)[:, np.newaxis]
        masks_t = [v[2] for v in detections_t]
        masks_tm1 = [v.mask for v in last_tracks]
        masks_tm1_warped = [warp_flow(mask, remap_grid) for mask in masks_tm1]
        mask_ious = cocomask.iou(masks_t, masks_tm1_warped, [False] * len(masks_tm1_warped))
        association_similarities += tracker_options["mask_iou_weight"] * mask_ious
    if tracker_options["bbox_center_weight"] != 0:
        centers_t = [v[0][0:2] + (v[0][2:4] - v[0][0:2]) / 2 for v in detections_t]
        centers_tm1 = [v.box[0:2] + (v.box[2:4] - v.box[0:2]) / 2 for v in last_tracks]
        box_dists = cdist(np.array(centers_t), np.array(centers_tm1), "euclidean")
        box_similarities = tracker_options["box_scale"] * \
            (tracker_options["box_offset"] - box_dists)
        association_similarities += tracker_options["bbox_center_weight"] * box_similarities
    if tracker_options["bbox_iou_weight"] != 0:
        bboxes_t = [v[0] for v in detections_t]
        bboxes_tm1 = [v.box for v in last_tracks]
        # warp_box takes the median of the *raw* flow inside each box.
        bboxes_tm1_warped = [warp_box(box, flow_tm1_t) for box in bboxes_tm1]
        bbox_ious = np.array([[bbox_iou(box1, box2) for box1 in bboxes_tm1_warped] for box2 in bboxes_t])
        assert (0 <= bbox_ious).all() and (bbox_ious <= 1).all()
        association_similarities += tracker_options["bbox_iou_weight"] * bbox_ious
    return association_similarities
def warp_flow(mask_as_rle, flow):
    """Warp an RLE-encoded COCO mask into the current frame.

    Decodes the mask, resamples it with the given cv2.remap-style sampling
    grid, and re-encodes the result as RLE (Fortran order, as cocomask
    requires).
    """
    decoded = cocomask.decode([mask_as_rle])
    warped_mask = _warp(decoded, flow)
    return cocomask.encode(np.asfortranarray(warped_mask))
def _warp(img, flow):
    """Resample img with the given sampling grid and binarize to uint8.

    Nearest-neighbour interpolation is used deliberately: INTER_LINEAR
    produced an all-zero result for these inputs.
    """
    resampled = remap(img, flow, None, INTER_NEAREST)
    return (resampled == 1).astype(np.uint8)
def bbox_iou(box1, box2):
    """Overlap measure between two (x0, y0, x1, y1) boxes in [0, 1].

    NOTE(review): the denominator is the area of the smallest rectangle
    enclosing both boxes, not the classic union |A| + |B| - |I| --
    presumably intentional here; confirm before reusing as a standard IoU.
    Returns 0.0 when the enclosing rectangle is degenerate.
    """
    inner_x0 = max(box1[0], box2[0])
    inner_y0 = max(box1[1], box2[1])
    inner_x1 = min(box1[2], box2[2])
    inner_y1 = min(box1[3], box2[3])
    outer_x0 = min(box1[0], box2[0])
    outer_y0 = min(box1[1], box2[1])
    outer_x1 = max(box1[2], box2[2])
    outer_y1 = max(box1[3], box2[3])
    intersection = max(inner_x1 - inner_x0, 0) * max(inner_y1 - inner_y0, 0)
    enclosing = (outer_x1 - outer_x0) * (outer_y1 - outer_y0)
    if enclosing == 0:
        return 0.0
    return intersection / enclosing
def warp_box(box, flow):
    """Translate a box by the median optical flow observed inside it.

    Args:
        box: (x0, y0, x1, y1) as a float numpy array.
        flow: raw dense flow, shape (H, W, 2), channel 0 = dx, channel 1 = dy.

    Returns:
        The box shifted by the per-axis median flow.
    NOTE(review): a degenerate (empty) crop makes np.median return NaN,
    and coordinates are only clamped at 0, not at the image border.
    """
    x0, y0, x1, y1 = np.maximum(box.round().astype("int32"), 0)
    region = flow[y0:y1, x0:x1]
    median_dx = np.median(region[:, :, 0])
    median_dy = np.median(region[:, :, 1])
    return box + [median_dx, median_dy, median_dx, median_dy]
| {
"repo_name": "VisualComputingInstitute/TrackR-CNN",
"path": "forwarding/tracking/Util_tracking.py",
"copies": "1",
"size": "19326",
"license": "mit",
"hash": -3424239823979874300,
"line_mean": 49.328125,
"line_max": 122,
"alpha_frac": 0.6244437545,
"autogenerated": false,
"ratio": 3.190688459633482,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9242092412801732,
"avg_score": 0.014607960266350035,
"num_lines": 384
} |
# A lot of the expressions and terminology come
# from Hu et al. 2015 and Cowan & Agol 2011, whom I
# would like to thank.
#
# Written by Tiffany Jansen
# Columbia University Astronomy
# Last updated April 2017
#
# For any questions about the code,
# please contact Tiffany at jansent@astro.columbia.edu
#
# For any questions about the science, please consult the
# following papers:
#
# Jansen, T. & Kipping, D. submitted to MNRAS
# Hu, R., Demory, B.-O., Seager, S., Lewis, N., & Showman, A. P.2015, ApJ, 802, 51
# Cowan, N. B., & Agol, E. 2011, ApJ, 726, 82
import os, sys
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
from scipy.integrate import odeint
import astropy.constants as const
from scipy.interpolate import interp1d
# Kepler spectral response curve: wavelength column and throughput column.
# NOTE(review): kepler_hires.dat must live in the current working directory.
wls, K_response = np.genfromtxt(os.getcwd() + '/kepler_hires.dat', usecols=(0,1), unpack=True)
wls *= 1e-6  # convert wavelengths to metres (file presumably in micrometres)
# Physical constants (SI), pulled once from astropy for reuse below.
h = const.h.value
c = const.c.value
hc = h*c
k_B = const.k_B.value
R_earth = const.R_earth.value
R_sun = const.R_sun.value
au = const.au.value
numer = 2 * const.h.value * const.c.value**2  # 2hc^2, Planck-law numerator
# Interpolate the planck function for the planet:
# pre-tabulated band-integrated Planck values vs effective temperature.
int_planck_f = os.getcwd() + '/integrated_planck_table.dat'
T_eff_lib, int_planck = np.genfromtxt(int_planck_f, usecols=(0,1), unpack=True)
interpolate_planck = interp1d(T_eff_lib, int_planck)
### SYMMETRIC ###
def sym_reflection(Rp, semi_a, phase, Ab):
    """Symmetric (Lambertian) reflection component of the phase curve.

    Args:
        Rp (float): planet radius [m]
        semi_a (float): semi-major axis [m]
        phase (array): phase angles [rad]
        Ab (float): Bond albedo; converted internally to a geometric albedo
            via Ag = (2/3) Ab (Lambertian-sphere approximation)

    Returns:
        array: reflected flux normalized by the host-star flux
    """
    geometric_albedo = 2 / 3 * Ab
    amplitude = (Rp / semi_a) ** 2 * geometric_albedo
    abs_phase = np.abs(phase)
    lambert_phase = (np.sin(abs_phase) + (np.pi - abs_phase) * np.cos(abs_phase)) / np.pi
    return amplitude * lambert_phase
### ANTI-SYMMETRIC ###
def q_Phi_arrays(alpha, xi1, xi2):
    """Return the phase integral q and the phase function Phi for one angle.

    Args:
        alpha (float): phase angle of the planet [rad]
        xi1 (float): start longitude of the low-reflectivity region [rad]
        xi2 (float): end longitude of the low-reflectivity region [rad]

    Returns:
        tuple: (q, Phi)
    """
    integral_value = phase_integral(xi1, xi2)[0]  # drop quad's error estimate
    phi_value = phase_function(alpha, xi1, xi2)
    return integral_value, phi_value
def plot_q_xi1xi2():
    """ Plots the phase integral q as a function of xi2, one curve per xi1.
    """
    xi1 = np.arange(-5 * np.pi / 10, 5 * np.pi / 10, np.pi / 10)
    xi2 = np.linspace(-np.pi/2, np.pi/2, 50)
    for i in range(len(xi1)):
        # BUG FIX: phase_integral returns quad's (value, abserr) tuple; the
        # original appended the whole tuple, so each plt.plot call drew two
        # curves (value and error) instead of one. Plot the value only.
        q = [phase_integral(xi1[i], xi2[j])[0] for j in range(len(xi2))]
        plt.plot(xi2, q, color='black', lw=2)
    plt.xlabel(r'$\xi_{2}$')
    plt.ylabel('q')
    plt.ylim([0, 1.5])
    plt.xlim([-np.pi/2, np.pi/2])
    plt.show()
    return
def reflectivity(Ab, kappa, q):
    """Reflectivity parameters for the patchy-atmosphere scenario.

    Args:
        Ab (float): Bond albedo
        kappa (float): reflectivity boosting factor; < 1 for a dark patch,
            > 1 for a bright patch
        q (float): value of the phase integral

    Returns:
        tuple of floats: (r0, r1), the background and patch reflectivities
    """
    denominator = 1 + (2 / 3) * q * kappa
    background = Ab / denominator
    patch = kappa * background
    return background, patch
def phase_function(alpha, xi1, xi2):
    """Determines a value of the phase curve for a given phase angle
    considering the patchy regions in the patchy cloud scenario.
    Equations A.3 - A.8 in Hu et al. 2015.
    Note:
        -pi/2 < xi1 <= xi2 < pi/2
        -pi <= alpha <= pi
        The inner conditionals overlap at their shared boundaries, so the
        evaluation order matters. If alpha is outside [-pi, pi] a
        ValueError is raised; an inner-branch miss inside a valid outer
        branch would fall through and implicitly return None.
    Args:
        alpha (float): phase angle of the planet in radians
        xi1 (float): local longitude in radians designating
            the start of the region with low reflectivity
        xi2 (float): local longitude in radians designating
            the end of the region with low reflectivity
    Returns:
        float
    """
    a = -alpha
    if (-np.pi <= a) & (a <= 0):
        # Which sub-expression applies depends on where the terminator
        # (a + pi/2) falls relative to the patch [xi1, xi2].
        if (-np.pi / 2 <= a + np.pi / 2) & (a + np.pi / 2 <= xi1):
            return 1 / np.pi * (np.cos(a) * (np.pi + a) + np.sin(np.pi + a))
        elif (xi1 <= a + np.pi / 2) & (a + np.pi / 2 <= xi2):
            return 1 / np.pi * (np.cos(a) * (np.pi / 2 + xi1) \
                + 0.5 * (np.sin(np.pi + a) + np.sin(2 * xi1 - a)))
        elif (xi2 <= a + np.pi / 2) & (a + np.pi / 2 <= np.pi / 2):
            return 1 / np.pi * (np.cos(a) * (np.pi + a + xi1 - xi2) \
                + np.sin(np.pi + a) + np.cos(xi1 + xi2 - a) * np.sin(xi1 - xi2))
    elif (0 <= a) & (a <= np.pi):
        # Mirror cases with the terminator at (a - pi/2).
        if (-np.pi / 2 <= a - np.pi / 2) & (a - np.pi / 2 <= xi1):
            return 1 / np.pi * (np.cos(a) * (np.pi - a + xi1 - xi2) \
                + np.sin(a) + np.cos(xi1 + xi2 - a) * np.sin(xi1 - xi2))
        elif (xi1 <= a - np.pi / 2) & (a - np.pi / 2 <= xi2):
            return 1 / np.pi * (np.cos(a) * (np.pi / 2 - xi2) \
                + 0.5 * (np.sin(a) - np.sin(2 * xi2 - a)))
        elif (xi2 <= a - np.pi / 2) & (a - np.pi / 2 <= np.pi / 2):
            return 1 / np.pi * (np.cos(a) * (np.pi - a) + np.sin(a))
    else:
        raise ValueError("D'oh! None of the possible conditionals passed. Check" \
            + " that alpha and xi1,xi2 are within the right ranges.")
def phase_integral(xi1, xi2):
    """Integrates the phase function with respect to phase angle to account for
    the additional asymmetric reflection in the patchy cloud scenario.
    Args:
        xi1 (float): local longitude in radians designating
            the start of the region with low reflectivity
        xi2 (float): local longitude in radians designating
            the end of the region with low reflectivity
    Returns:
        tuple: (integral value, absolute error estimate), as returned by
        scipy.integrate.quad -- callers index [0] for the value.
    """
    integrand = lambda alpha: phase_function(alpha, xi1, xi2) * np.sin(abs(alpha))
    result = integrate.quad(integrand, -np.pi, np.pi)
    return result
def antisym_reflection(Ag, kappa, phase, xi1, xi2, Rp, semi_a):
    """Antisymmetric reflection component of the phase curve.
    Combines the symmetric Lambertian term (scaled by the background
    reflectivity r0) with the patch term (scaled by r1).
    Args:
        Ag (float): geometric albedo of the planet
        kappa (float): reflectivity boosting factor; < 1 dark patch, > 1 bright
        phase (array): phase angles of the planet in radians
        xi1 (float): start longitude of the low-reflectivity region [rad]
        xi2 (float): end longitude of the low-reflectivity region [rad]
        Rp (float): planet radius [m]
        semi_a (float): semi-major axis [m]
    Returns:
        array, normalized by host flux
    """
    q = phase_integral(xi1, xi2)[0]
    Ab = q * Ag  # Bond albedo from the geometric albedo via the phase integral
    r0, r1 = reflectivity(Ab, kappa, q)
    sym_refl = sym_reflection(Rp, semi_a, phase, Ag) * 2 * r0 / 3
    Phi = []
    for alpha in phase:
        Phi += [phase_function(alpha, xi1, xi2)]
    patchy_refl = (Rp / semi_a) ** 2 * 2 * r1 / 3 * np.array(Phi)
    return sym_refl + patchy_refl
### THERMAL ###
def dP_dXi(P, xi, eps):
    """Right-hand side of dP/dxi for the thermal phase function ODE.

    The insolation term 0.5*(cos(xi) + |cos(xi)|) equals max(cos(xi), 0):
    full illumination on the dayside, zero on the nightside.

    Args:
        P (array/float): thermal phase function value(s)
        xi (array/float): local longitude [rad]
        eps (float): thermal redistribution factor

    Returns:
        array/float: the derivative dP/dxi
    """
    insolation = 0.5 * (np.cos(xi) + np.abs(np.cos(xi)))
    return (insolation - P ** 4) / eps
def thermal_phase_func(eps, phase):
    """Thermal phase function P(xi) on a grid xi in [-pi/2, 3pi/2].

    For eps == 0 (instant re-radiation) the analytic dayside solution is
    used and the nightside is zero; otherwise dP_dXi is integrated with
    odeint starting from the dawn boundary condition.

    Args:
        eps (float): thermal redistribution factor (non-negative)
        phase (array): phase angles; only its length sets the xi resolution

    Returns:
        np.ndarray of len(phase) values of P
    """
    xi = np.linspace(-np.pi/2, 3*np.pi/2, len(phase))
    if eps == 0.0:
        P = np.zeros_like(xi)
        dayside = (xi > -np.pi / 2) & (xi < np.pi / 2)
        P[dayside] = 0.5 * (np.cos(xi[dayside])**(1/4) +
                            np.abs(np.cos(xi[dayside]))**(1/4))
        return P
    g = (3 * np.pi/eps) ** 4
    dawn_value = (np.pi + g**(1/3))**(-1/4)  # initial condition for P
    solution = odeint(dP_dXi, dawn_value, xi, args=(eps,))
    return np.asarray(solution).ravel()
def analytic_P(phase, eps):
    """Analytic thermal phase function from Cowan & Agol 2011.

    Evaluates the closed-form day/night solutions on an interior xi grid and
    prepends/appends the dawn and dusk boundary values, so the returned list
    is (number of interior day points) + (interior night points) + 3 long.

    Args:
        phase (array): only its length sets the xi resolution
        eps (float): thermal redistribution factor

    Returns:
        list of floats
    """
    xi = np.linspace(-np.pi/2, 3*np.pi/2, len(phase))
    T0 = np.pi**(-1/4)
    gamma = 4 * T0**3 / eps
    day_mask = (xi > -np.pi / 2) & (xi < np.pi / 2)
    night_mask = (xi > np.pi / 2) & (xi < 3 * np.pi / 2)
    day_vals = 3 / 4 * T0 + (gamma * np.cos(xi[day_mask]) \
        + np.sin(xi[day_mask]))/(eps * (1 + gamma**2)) \
        + np.exp(-gamma * xi[day_mask]) / \
        (2 * eps * (1 + gamma ** 2) * np.sinh(np.pi * gamma / 2))
    night_vals = 3 / 4 * T0 + np.exp(-gamma * (xi[night_mask] - np.pi)) \
        / (2 * eps * (1 + gamma ** 2) * np.sinh(np.pi * gamma / 2))
    dawn = [(np.pi + (3 * np.pi / eps)**(4/3))**(-1/4)]
    # Fit coefficients for the dusk approximation (Cowan & Agol 2011).
    y0 = 0.69073
    y1 = 7.5534
    dusk = [(np.pi**2 * (1 + y0/eps)**(-8) + y1 * eps**(-8/7))**(-1/8)]
    return dawn + day_vals.tolist() + dusk + night_vals.tolist() + dawn
def P_phi(P, alpha, res=12):
    """Sample the thermal phase function over the visible hemisphere.

    Note:
        phi is the observer-frame longitude, defined by xi = phi - alpha.

    Args:
        P (array): thermal phase function on the xi grid [-pi/2, 3pi/2]
        alpha (float): phase angle [rad]
        res (int): long/lat resolution; the visible hemisphere is returned
            at res + 1 equally spaced longitudes

    Returns:
        np.ndarray of res + 1 values of P over phi in [-pi/2, pi/2]
    """
    xi = np.linspace(-np.pi/2, 3*np.pi/2, len(P))
    window_lo = -np.pi/2 - alpha
    window_hi = np.pi/2 - alpha
    if alpha > 0:
        # The visible window wraps around the end of the xi grid.
        wrapped_part = P[xi >= window_lo + 2 * np.pi]
        leading_part = P[xi <= window_hi]
        visible = np.append(wrapped_part, leading_part)
    elif alpha <= 0:
        visible = P[(xi >= window_lo) & (xi <= window_hi)]
    # Interpolate onto a fine grid, then keep every 40th sample so the
    # output always has res + 1 points regardless of len(visible).
    sample_idx = np.arange(0, res*40 + 1, 40)
    coarse_grid = np.linspace(-np.pi/2, np.pi/2, len(visible))
    fine_grid = np.linspace(-np.pi/2, np.pi/2, res*40 + 1)
    fine_values = np.interp(fine_grid, coarse_grid, visible)
    return fine_values[sample_idx]
def planck_function(T_eff):
    """Planck function convolved with the Kepler bandpass.

    Note:
        Relies on the module-level `wls`, `K_response`, `numer`, `hc` and
        `k_B` loaded from kepler_hires.dat at import time.

    Args:
        T_eff: effective temperature in Kelvin (scalar or broadcastable array)

    Returns:
        Radiance (W m-2 sr-1) integrated over the Kepler bandpass.
    """
    boltzmann_term = np.exp(hc / (wls * k_B * T_eff)) - 1
    spectral_radiance = numer / (wls**5) / boltzmann_term
    weighted = spectral_radiance * K_response
    return np.trapz(weighted, wls)
def find_nearest_planck(T_eff):
    """ Looks up the band-integrated Planck value for the tabulated
    temperature nearest to each entry of T_eff.
    NOTE(review): argmin(axis=2) assumes |T_eff_lib - T_eff| broadcasts to a
    3-d array (lat x lon x table entries) -- confirm against T_eff()'s output.
    """
    idx = np.abs(T_eff_lib-T_eff).argmin(axis=2)
    return int_planck[idx]
def T_eff(f, P, alpha, Ts, Rs, semi_a, Ab, res=12):
    """ Effective temperature of the planet as a function of
    alpha, planetary longitude, and planetary latitude.
    i.e., the temperature distribution across the planet's surface.
    Args:
        f (float): greenhouse factor
        P (array): thermal phase function on the xi grid (thermal_phase_func)
        alpha (float): phase angle [rad]
        Ts (float): stellar effective temperature [K]
        Rs (float): stellar radius [m]
        semi_a (float): semi-major axis [m]
        Ab (float): bond albedo
        res (int): long/lat resolution. splits planetary surface up into
            [180 / res]-square degree grids. default = 15 degree^2 grids
    Returns:
        array: temperature [K] per (latitude, longitude) cell; latitude as a
        column vector broadcasts against the res + 1 longitude samples.
    """
    P_eps_alpha = P_phi(P, alpha, res)
    # Latitude grid as a column vector so the product broadcasts lat x lon.
    theta = np.linspace(np.pi / 2, -np.pi / 2, res + 1)[:, None]
    # Substellar equilibrium temperature, modulated by latitude.
    T0_theta = Ts * np.sqrt(Rs / semi_a) * (1 - Ab)**(1/4) * np.cos(theta)**(1/4)
    return f * T0_theta * P_eps_alpha
def thermal_integrand(theta, phi, alpha, P, Ab, eps, f, Ts, Rs, semi_a, res=12):
    """Integrand of the thermal phase dependency of the planet's
    thermal emission.
    Note:
        This function's theta is NOT the mcmc parameter tuple. See Args.
        eps is accepted for signature symmetry but not used here -- its
        effect is already baked into P.
    Args:
        theta (array): planetary latitude in radians
        phi (array): planetary longitude from observer's POV
        alpha (float): phase angle of the planet in radians
        P (array): thermal phase function on the xi grid
        Ab (float): bond albedo
        eps (float): thermal redistribution factor (unused, see Note)
        f (float): greenhouse factor
        Ts (float): effective temperature of the star
        Rs (float): radius of the star
        semi_a (float): semi major axis of the planet
        res (int): long/lat resolution. splits planetary surface up into
            [180 / res]-square degree grids. default = 15 degree^2 grids
    Returns:
        array in units normalized by the host star
    """
    # temperature distribution across the surface for this phase angle
    T = T_eff(f, P, alpha, Ts, Rs, semi_a, Ab, res)
    # planck function for the planet integrated over the Kepler bandpass
    Bk = interpolate_planck(T)
    return Bk * np.cos(theta)**2 * np.cos(phi)
def thermal(phase, Ab, eps, f, Rp, Rs, Ts, semi_a, res=12):
    """Thermal component of the phase curve.
    Args:
        phase (array): phase angles of the planet [radians]
        Ab (float): bond albedo
        eps (float): thermal redistribution factor; a negative value flips
            the resulting curve (reversed hot-spot offset)
        f (float): greenhouse factor
        Rp (float): radius of the planet [m]
        Rs (float): radius of the star [m]
        Ts (float): effective temperature of the star [K]
        semi_a (float): semi major axis of the planet [m]
        res (int): long/lat resolution. Must satisfy 180 mod res = 0.
            Splits planetary surface up into
            [180 / res]-square degree grids. default = 15 degree^2 grids
    Returns:
        array, normalized by host flux
    """
    P = thermal_phase_func(abs(eps), phase)
    # res + 1 grid points per axis (e.g. 19 for 10 x 10 degree cells).
    theta = np.linspace(np.pi / 2, -np.pi / 2, res + 1)[:, None]
    phi = np.linspace(-np.pi / 2, np.pi / 2, res + 1)
    # PERF FIX: the host star's band-integrated Planck function does not
    # depend on alpha -- compute it once instead of once per phase angle.
    Bs = planck_function(Ts)
    F_T_norm = []
    for alpha in phase:
        Bk = thermal_integrand(theta, phi, alpha, P, Ab, abs(eps), f, Ts, Rs, semi_a, res)
        inner = np.trapz(Bk, phi)  # integrate over longitude
        F_T = np.trapz(inner, theta.ravel()[::-1])  # integrate over latitude
        F_T_norm += [F_T / (Bs * np.pi * Rs**2)]
    if eps < 0.0:
        # Negative eps encodes a reversed hot-spot offset: mirror the curve.
        return Rp**2 * np.array(F_T_norm[::-1])
    return Rp**2 * np.array(F_T_norm)
### BRING IT AROUND TOWN ###
def therm_sref(n_samples, kepid, phase, Ab, eps, f, Rp, Rs, Ts, semi_a):
    """Thermal + symmetric-reflection phase-curve model, in ppm.

    Args:
        n_samples, kepid: accepted but unused here (kept for caller
            compatibility -- NOTE(review): confirm before removing)
        phase (array): phase angles [rad]
        Ab (float): bond albedo
        eps (float): thermal redistribution factor
        f (float): greenhouse factor
        Rp (float): planet radius [Earth radii]
        Rs (float): stellar radius [Solar radii]
        Ts (float): stellar effective temperature [K]
        semi_a (float): semi-major axis [AU]
    Returns:
        array [ppm]
    """
    Rp_m = R_earth * Rp
    Rs_m = R_sun * Rs
    a_m = au * semi_a
    therm = thermal(phase, Ab, eps, f, Rp_m, Rs_m, Ts, a_m) * 1e6
    # BUG FIX: sym_reflection was previously passed Rp in Earth radii and
    # semi_a in AU, so its (Rp/a)^2 factor mixed units; run() converts both
    # to metres before calling it -- do the same here for consistency.
    s_ref = sym_reflection(Rp_m, a_m, phase, Ab) * 1e6
    return therm + s_ref
def run(phase, Ab, eps, f, kappa, xi1, xi2, Rp, Rs, Ts, semi_a, \
        res=12, therm=True, s_reflection=True, a_reflection=True):
    """ Returns the total phase curve model and any components of the model.
    i.e., if therm = True, s_reflection = True, a_reflection = False, this function
    will return the thermal component, symmetric component, and the total thermal +
    symmetric model.
    Notes:
        Returns the full model on default.
        If a_reflection==False, set kappa, xi1, xi2 to None.
        With exactly one component enabled, only that component (in ppm) is
        returned; with several, each component plus their sum is returned.
    Args:
        phase (array): phase angles in radians
        Ab (float): bond albedo
        eps (float): thermal redistribution factor
        f (float): greenhouse factor
        kappa (float): reflectivity boosting factor. < 1 for dark patch,
            >1 for bright patch. Set to None if a_reflection==False
        xi1 (float): local longitude in radians designating
            the start of the region with low reflectivity. Set to None
            if a_reflection==False
        xi2 (float): local longitude in radians designating
            the end of the region with low reflectivity. Set to None
            if a_reflection==False
        Rp (float): radius of the planet [Earth radii]
        Rs (float): radius of the star [Solar radii]
        Ts (float): effective temperature of the star [K]
        semi_a (float): semi major axis of the planet [AU]
        res (int): long/lat resolution. splits planetary surface up into
            [180 / res]-square degree grids. default = 15 degree^2 grids
        therm (bool): Returns the thermal component of the model if True
        s_reflection (bool): Returns the symmetric reflection component of
            the model if True
        a_reflection (bool): Returns the asymmetric reflection component of
            the model if True
    Returns:
        array or tuple of arrays, or None if no component is enabled
    """
    Rp = R_earth * Rp
    Rs = R_sun * Rs
    semi_a = au * semi_a
    # BUG FIX: every asymmetric-reflection branch referenced an undefined
    # name `Ag` (NameError at runtime). Derive the geometric albedo from the
    # Bond albedo via the Lambertian-sphere relation Ag = (2/3) Ab, exactly
    # as sym_reflection does internally.
    # NOTE(review): confirm this is the intended albedo conversion.
    Ag = 2 / 3 * Ab
    components = []
    if therm:
        components.append(thermal(phase, Ab, eps, f, Rp, Rs, Ts, semi_a, res=res) * 1e6)
    if s_reflection:
        components.append(sym_reflection(Rp, semi_a, phase, Ab) * 1e6)
    if a_reflection:
        components.append(antisym_reflection(Ag, kappa, phase, xi1, xi2, Rp, semi_a) * 1e6)
    if not components:
        # BUG FIX: the original then called f.close(), but f is the
        # greenhouse factor (a float), which raised AttributeError.
        print("You must set at least one component of the model to True")
        return
    if len(components) == 1:
        return components[0]
    total = sum(components)
    return tuple(components) + (total,)
if __name__ == "__main__":
    # Sanity check: a zero-width patch (xi1 == xi2) must give q == 3/2.
    # BUG FIX: exact float equality on a quad() result is brittle; compare
    # with a tolerance instead.
    assert np.isclose(phase_integral(np.pi/4, np.pi/4)[0], 3/2), \
        "Houston, we've had a problem..."
| {
"repo_name": "tcjansen/phasecurve_model",
"path": "forward_model.py",
"copies": "1",
"size": "16492",
"license": "mit",
"hash": 86861062405145780,
"line_mean": 30.7764932563,
"line_max": 94,
"alpha_frac": 0.6465559059,
"autogenerated": false,
"ratio": 2.644219977553311,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3790775883453311,
"avg_score": null,
"num_lines": null
} |
# A lot of this code come from http://docs.sympy.org/dev/_modules/sympy/parsing/mathematica.html
# http://sympy.org/en/index.html
from .mathematicaYacc import mathematicaParser
from .mathematicaLex import mathematicaLexer
from .calchasPreprocessing import calchasToSympy
from .latexPreprocessing import isLatex
from .latexYacc import latexToCalchas
from re import match, sub
class Parser():
    """Normalizes a user-supplied expression (LaTeX, Mathematica or Calchas
    syntax) into a Calchas `simplify` call string."""

    def __init__(self, expr):
        self.expr = expr

    def fromCalchas(self):
        """Translate Calchas syntax to sympy syntax, if the pass succeeds."""
        converted = calchasToSympy('(%s)' % self.expr)
        if converted:
            self.expr = converted

    def fromMathematica(self):
        """Translate Mathematica syntax to Calchas syntax, if parseable."""
        parsed = mathematicaParser.parse('(%s)' % self.expr, lexer=mathematicaLexer)
        if parsed:
            self.expr = parsed.toCalchas()

    def fromLatex(self):
        """Translate LaTeX input to Calchas syntax; no-op for non-LaTeX."""
        if not isLatex(self.expr):
            return
        converted = latexToCalchas('(%s)' % self.expr)
        if converted:
            self.expr = converted

    def normalize(self):
        """Apply the three translation passes in order and wrap the result."""
        self.fromLatex()
        self.fromMathematica()
        self.fromCalchas()
        return 'simplify(%s,2)' % self.expr
| {
"repo_name": "iScienceLuvr/PPP-CAS",
"path": "ppp_cas/parser.py",
"copies": "1",
"size": "1073",
"license": "mit",
"hash": -6537421588664015000,
"line_mean": 29.6571428571,
"line_max": 96,
"alpha_frac": 0.6533084809,
"autogenerated": false,
"ratio": 3.4837662337662336,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46370747146662333,
"avg_score": null,
"num_lines": null
} |
# A lot of this code come from https://github.com/sympy/sympy_gamma/blob/master/app/logic/logic.py
# http://www.sympygamma.com/
import sys
import queue
import collections
from sympy import latex
from sympy.parsing.sympy_parser import stringify_expr, eval_expr, standard_transformations
from .parser import Parser
from .config import Config
from .supyprocess import process
PREEXEC = """from sympy import *"""
def evaluate(s):
    """Normalize the input string and evaluate it in a sandboxed subprocess.

    Args:
        s (str): user-supplied formula (LaTeX, Mathematica or Calchas syntax)

    Returns:
        tuple: (string representation, LaTeX representation) of the result.
    """
    # Removed dead `result = None`, and avoid shadowing sympy's `latex`
    # import with the unpacked result name.
    parser = Parser(s)
    input_formula = parser.normalize()
    expr_str, latex_str = process(eval_input, input_formula,
                                  timeout=Config().timeout, heap_size=Config().max_heap)
    return expr_str, latex_str
def eval_input(s):
    """Evaluate a sympy expression string in a minimal namespace.
    Args:
        s (str): expression in sympy syntax
    Returns:
        tuple: (str(result), latex(result))
    Raises:
        SyntaxError: propagated unchanged from sympy's parser
        ValueError: any other evaluation failure, wrapped with its message
    """
    namespace = {}
    # Populate the evaluation namespace with all sympy names.
    exec(PREEXEC, {}, namespace)
    def plot(f=None, **kwargs):
        pass
    namespace.update({
        'plot': plot, # prevent textplot from printing stuff
        'help': lambda f: f
    })
    transformations = list(standard_transformations)
    parsed = stringify_expr(s, {}, namespace, transformations)
    try:
        evaluated = eval_expr(parsed, {}, namespace)
    except SyntaxError:
        raise
    except Exception as e:
        # Normalize every other failure to ValueError for callers.
        raise ValueError(str(e))
    return str(evaluated), latex(evaluated)
| {
"repo_name": "iScienceLuvr/PPP-CAS",
"path": "ppp_cas/evaluator.py",
"copies": "1",
"size": "1198",
"license": "mit",
"hash": 8495906587013659000,
"line_mean": 26.2272727273,
"line_max": 106,
"alpha_frac": 0.6861435726,
"autogenerated": false,
"ratio": 3.83974358974359,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0038561068549876217,
"num_lines": 44
} |
# A lot of this code come from https://github.com/sympy/sympy_gamma/blob/master/app/logic/logic.py
# http://www.sympygamma.com/
from sympy import latex
from sympy.parsing.sympy_parser import stringify_expr, eval_expr, standard_transformations
from .parser import Parser
PREEXEC = """from sympy import *"""
def evaluate(inputFormulaString, debug=False):
    """Normalize a formula and evaluate it with sympy.

    Args:
        inputFormulaString (str): raw user input
        debug (bool): when True, print the normalized form

    Returns:
        tuple: (string representation, LaTeX representation) of the result.
    """
    normalized = Parser(inputFormulaString).normalize(debug=debug)
    if debug:
        print(normalized)
    if isinstance(normalized, str):
        return eval_input(normalized)
    # Already a sympy object: render it directly.
    return str(normalized), latex(normalized)
def eval_input(inputTree):
    """Evaluate a sympy expression string in a minimal namespace.
    Args:
        inputTree (str): expression in sympy syntax
    Returns:
        tuple: (str(result), latex(result))
    Raises:
        SyntaxError: propagated unchanged from sympy's parser
        ValueError: any other evaluation failure, wrapped with its message
    """
    namespace = {}
    # Populate the evaluation namespace with all sympy names.
    exec(PREEXEC, {}, namespace)
    def plot(f=None, **kwargs):
        pass
    namespace.update({
        'plot': plot, # prevent textplot from printing stuff
        'help': lambda f: f
    })
    transformations = list(standard_transformations)
    parsed = stringify_expr(inputTree, {}, namespace, transformations)
    try:
        evaluated = eval_expr(parsed, {}, namespace)
    except SyntaxError:
        raise
    except Exception as e:
        # Normalize every other failure to ValueError for callers.
        raise ValueError(str(e))
    return str(evaluated), latex(evaluated)
| {
"repo_name": "ProjetPP/PPP-CAS",
"path": "ppp_cas/evaluator.py",
"copies": "1",
"size": "1319",
"license": "mit",
"hash": -8167058103365730000,
"line_mean": 30.4047619048,
"line_max": 98,
"alpha_frac": 0.6959818044,
"autogenerated": false,
"ratio": 3.913946587537092,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0024379085623539385,
"num_lines": 42
} |
# A lot of this code exists to deal w/ the broken ECS connect_to_region
# function, and will be removed once this pull request is accepted:
# https://github.com/boto/boto/pull/3143
import logging
logger = logging.getLogger(__name__)
from boto.regioninfo import get_regions
from boto.ec2containerservice.layer1 import EC2ContainerServiceConnection
def regions():
    """Return every RegionInfo for the ECS service, each wired to
    EC2ContainerServiceConnection (works around boto's broken ECS
    connect_to_region)."""
    service_regions = get_regions('ec2containerservice',
                                  connection_cls=EC2ContainerServiceConnection)
    return service_regions
def connect_to_region(region_name, **kw_params):
    """Connect to the named ECS region.

    Returns an EC2ContainerServiceConnection for the first region whose
    name matches, or None when the name is unknown.
    """
    matching = (r for r in regions() if r.name == region_name)
    region = next(matching, None)
    if region is None:
        return None
    return region.connect(**kw_params)
def create_clusters(region, namespace, mappings, parameters, **kwargs):
    """ Creates ECS clusters.
    Expects a 'clusters' argument, which should contain a list of cluster
    names to create.
    Returns False when the 'clusters' hook argument is missing, True otherwise.
    NOTE(review): connect_to_region returns None for an unknown region name,
    which would make conn.create_cluster raise AttributeError -- presumably
    the region is validated upstream; confirm.
    """
    conn = connect_to_region(region)
    try:
        clusters = kwargs['clusters']
    except KeyError:
        logger.error("setup_clusters hook missing 'clusters' argument")
        return False
    # Python 2 idiom: also accept a single cluster name instead of a list.
    if isinstance(clusters, basestring):
        clusters = [clusters]
    for cluster in clusters:
        logger.debug("Creating ECS cluster: %s", cluster)
        conn.create_cluster(cluster)
    return True
| {
"repo_name": "federicobaldo/stacker",
"path": "stacker/hooks/ecs.py",
"copies": "2",
"size": "1284",
"license": "bsd-2-clause",
"hash": 412234270778504100,
"line_mean": 28.8604651163,
"line_max": 73,
"alpha_frac": 0.6884735202,
"autogenerated": false,
"ratio": 4.196078431372549,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.588455195157255,
"avg_score": null,
"num_lines": null
} |
# a lot of this code is mostly from django.contrib.auth.forms
from woodstock.models import Participant, Invitee, Salutation
from woodstock import settings
from woodstock.fields import EventPartsChoiceField, EventPartsMultipleChoiceField
from pennyblack import send_newsletter
from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import SetPasswordForm as AuthSetPasswordForm
from django.core import exceptions
from django.db.models.query import QuerySet
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
class ParticipantForm(forms.ModelForm):
    """Signup form for a Participant.

    Must be constructed with a ``request`` keyword argument. Optionally
    accepts ``autoattend_parts`` (event parts every registrant implicitly
    attends) and ``event_parts_queryset`` (selectable event parts rendered
    as a choice field).
    """
    salutation = forms.ModelChoiceField(label=_('Salutation'), queryset=[])

    class Meta:
        model = Participant
        fields = settings.PARTICIPANT_FORM_FIELDS

    def __init__(self, *args, **kwargs):
        if not 'request' in kwargs:
            raise exceptions.ImproperlyConfigured('ParticipantForm needs the request.')
        self.request = kwargs.pop('request')
        if 'autoattend_parts' in kwargs:
            self.autoattend_parts = kwargs.pop('autoattend_parts')
        # IDIOM FIX: a sentinel replaces the original `in locals()` check,
        # making "was the kwarg passed at all?" explicit (an explicitly
        # passed None is still treated as a provided value, as before).
        _missing = object()
        event_parts_queryset = kwargs.pop('event_parts_queryset', _missing)
        if isinstance(self.request.user, Invitee) and 'instance' not in kwargs \
                and settings.PARTICIPANT_FORM_PREPOPULATE:
            # Pre-fill from the invitee; dict.update() accepts these pairs.
            kwargs['initial'] = tuple((field, getattr(self.request.user, field, None)) for field in self._meta.fields)
        super(ParticipantForm, self).__init__(*args, **kwargs)
        if not 'salutation' in self._meta.fields:
            del self.fields['salutation']
        else:
            self.fields['salutation'].queryset = Salutation.objects.localized()
        if event_parts_queryset is not _missing:
            if settings.SUBSCRIPTION_ALLOW_MULTIPLE_EVENTPARTS:
                self.fields['event_part'] = EventPartsMultipleChoiceField(queryset=event_parts_queryset, label=settings.PARTICIPANT_FORM_PART_FIELD_LABEL)
            else:
                self.fields['event_part'] = EventPartsChoiceField(queryset=event_parts_queryset, label=settings.PARTICIPANT_FORM_PART_FIELD_LABEL)

    def save(self):
        """Persist the participant, copy invitee data when present, and
        attend the selected plus auto-attend event parts.

        Returns the saved instance, or False (after deleting the row) when
        attend_events reports failure.
        """
        super(ParticipantForm, self).save(commit=False)
        if isinstance(self.request.user, Invitee):
            invitation = self.request.user
            self.instance.invitee = invitation
            self.instance.language = translation.get_language()
            for field_name in settings.PARTICIPANT_FORM_COPY_FIELDS:
                setattr(self.instance, field_name, getattr(invitation, field_name))
        self.instance.save()
        parts = []
        if 'event_part' in self.fields:
            if isinstance(self.cleaned_data['event_part'], QuerySet):
                parts += self.cleaned_data['event_part']
            else:
                parts += [self.cleaned_data['event_part']]
        if hasattr(self, 'autoattend_parts'):
            parts += self.autoattend_parts
        result = self.instance.attend_events(parts)
        if not result:
            self.instance.delete()
            return False
        return self.instance
class LostPasswordForm(forms.Form):
    """Password-reset request form: looks up participants by e-mail address
    and sends each match a one-time reset newsletter."""
    email = forms.EmailField(label=_("E-mail"), max_length=75)

    def clean_email(self):
        """
        Validates that a user exists with the given e-mail address.
        """
        email = self.cleaned_data["email"]
        self.users_cache = Participant.objects.filter(email__iexact=email)
        # IDIOM FIX: queryset truthiness instead of len(...) == 0.
        if not self.users_cache:
            raise forms.ValidationError(_("That e-mail address doesn't have an associated user account. Are you sure you've registered?"))
        return email

    def save(self):
        """
        Generates a one-use only link for resetting password and sends to the user
        """
        for user in self.users_cache:
            send_newsletter(settings.LOST_PASSWORD_NEWSLETTER, user)
class SetPasswordForm(AuthSetPasswordForm):
    """
    A form that lets a user change set his/her password without
    entering the old password. Adds a configurable minimum-length rule on
    top of django's SetPasswordForm.
    """
    def clean_old_password(self):
        """
        Validates that the old_password field is correct.
        NOTE(review): dead code -- this form declares no `old_password`
        field, so django never invokes this hook (and cleaned_data would
        lack the key). Kept only for backwards compatibility with any
        direct callers; PasswordChangeForm defines its own working copy.
        """
        old_password = self.cleaned_data["old_password"]
        if not self.user.check_password(old_password):
            raise forms.ValidationError(_("Your old password was entered incorrectly. Please enter it again."))
        return old_password

    def clean_new_password1(self):
        # BUG FIX: this hook was named clean_password1, which django never
        # calls (validation hooks must be named clean_<fieldname> and the
        # field is new_password1), so the minimum-length rule was silently
        # skipped.
        password1 = self.cleaned_data["new_password1"]
        if len(password1) < settings.PARTICIPANT_MIN_PASSWORD_LENGTH:
            raise forms.ValidationError(_("The password needs to be %d characters long.") % settings.PARTICIPANT_MIN_PASSWORD_LENGTH)
        return password1

    def clean_new_password2(self):
        # Both password entries must match.
        password1 = self.cleaned_data.get('new_password1')
        password2 = self.cleaned_data.get('new_password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return password2

    def save(self, commit=True):
        """Set the new password on the user and optionally persist it."""
        self.user.set_password(self.cleaned_data['new_password1'])
        if commit:
            self.user.save()
        return self.user
class PasswordChangeForm(SetPasswordForm):
    """
    A form that lets a user change his/her password by entering
    their old password.
    """
    old_password = forms.CharField(label=_("Old password"), widget=forms.PasswordInput)

    def clean_old_password(self):
        """
        Validates that the old_password field is correct.
        """
        old_password = self.cleaned_data["old_password"]
        if not self.user.check_password(old_password):
            raise forms.ValidationError(_("Your old password was entered incorrectly. Please enter it again."))
        return old_password

# Put the old-password field first. keyOrder is the pre-Django-1.7 API;
# NOTE(review): newer Django uses field_order instead -- confirm the
# project's Django version.
PasswordChangeForm.base_fields.keyOrder = ['old_password', 'new_password1', 'new_password2']
class RegisterForm(forms.ModelForm):
    """Self-registration form for new participants.

    Adds password + confirmation fields on top of the Participant model
    fields, enforces a minimum password length and, depending on settings,
    creates the account inactive and sends an activation newsletter.
    """
    password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password confirmation"), widget=forms.PasswordInput)
    # queryset is a placeholder; the localized set is assigned in __init__.
    salutation = forms.ModelChoiceField(queryset=[],
        label=_('Salutation'), empty_label=None, widget=forms.RadioSelect)

    class Meta:
        model = Participant
        exclude = ('last_login', 'language', 'is_active', 'invitee', 'password', 'event_parts')

    def __init__(self, *args, **kwargs):
        super(RegisterForm, self).__init__(*args, **kwargs)
        self.fields['salutation'].queryset = Salutation.objects.localized()

    def clean_password1(self):
        # Enforce the configured minimum password length.
        password1 = self.cleaned_data["password1"]
        if len(password1) < settings.PARTICIPANT_MIN_PASSWORD_LENGTH:
            raise forms.ValidationError(_("The password needs to be %d characters long.") % settings.PARTICIPANT_MIN_PASSWORD_LENGTH)
        return password1

    def clean_password2(self):
        # Both password entries must match.
        password1 = self.cleaned_data.get("password1", "")
        password2 = self.cleaned_data["password2"]
        if password1 != password2:
            raise forms.ValidationError(_("The two password fields didn't match."))
        return password2

    def save(self, commit=True):
        """Hash the password, honour the activation setting and, for
        accounts created inactive, send the activation newsletter."""
        participant = super(RegisterForm, self).save(commit=False)
        participant.set_password(self.cleaned_data["password1"])
        participant.is_active = not settings.SUBSCRIPTION_NEEDS_ACTIVATION
        if commit:
            participant.save()
            if not participant.is_active:
                send_newsletter(settings.ACTIVATION_NEWSLETTER, participant)
        return participant
class CodeAuthenticationForm(forms.Form):
    """
    Base class for authenticating users by password only. Extend this to get
    a form that accepts username/password logins.
    """
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)

    def __init__(self, request=None, *args, **kwargs):
        """
        If request is passed in, the form will validate that cookies are
        enabled. Note that the request (a HttpRequest object) must have set a
        cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
        running this validation.
        """
        self.request = request
        self.user_cache = None
        super(CodeAuthenticationForm, self).__init__(*args, **kwargs)

    def clean(self):
        """Authenticate against the backend; also verify the test cookie if a request was supplied."""
        code = self.cleaned_data.get('password')
        if code:
            self.user_cache = authenticate(username=None, password=code)
            if self.user_cache is None:
                raise forms.ValidationError(_("Please enter a correct username and password. Note that both fields are case-sensitive."))
            if not self.user_cache.is_active:
                raise forms.ValidationError(_("This account is inactive."))
        if self.request and not self.request.session.test_cookie_worked():
            raise forms.ValidationError(_("Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in."))
        return self.cleaned_data

    def get_user_id(self):
        """Return the authenticated user's id, or None."""
        return self.user_cache.id if self.user_cache else None

    def get_user(self):
        """Return the authenticated user object, or None."""
        return self.user_cache
| {
"repo_name": "allink/woodstock",
"path": "woodstock/forms.py",
"copies": "1",
"size": "9421",
"license": "bsd-3-clause",
"hash": 2910563610756769300,
"line_mean": 40.8711111111,
"line_max": 154,
"alpha_frac": 0.6582103811,
"autogenerated": false,
"ratio": 4.245606128886886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013533940133843374,
"num_lines": 225
} |
"""Alpenhorn client interface for operations on `ArchiveAcq`s."""
import re
import sys
import click
import peewee as pw
import alpenhorn.acquisition as ac
import alpenhorn.archive as ar
import alpenhorn.storage as st
from .connect_db import config_connect
RE_LOCK_FILE = re.compile(r"^\..*\.lock$")
# Top-level Click command group for acquisition operations; subcommands below
# attach themselves via @cli.command(). The docstring doubles as CLI help text.
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
def cli():
    """Commands operating on archival data products. Use to list acquisitions, their contents, and locations of copies."""
@cli.command(name="list")
@click.argument("node_name", required=False)
def acq_list(node_name):
    """List known acquisitions. With NODE specified, list acquisitions with files on NODE."""
    config_connect()
    import tabulate

    if not node_name:
        # All acquisitions with a (possibly zero) count of registered files.
        query = (
            ac.ArchiveAcq.select(ac.ArchiveAcq.name, pw.fn.COUNT(ac.ArchiveFile.id))
            .join(ac.ArchiveFile, pw.JOIN.LEFT_OUTER)
            .group_by(ac.ArchiveAcq.name)
        )
    else:
        try:
            node = st.StorageNode.get(name=node_name)
        except pw.DoesNotExist:
            print("No such storage node:", node_name)
            sys.exit(1)
        # Only acquisitions that have at least one file copy on the given node.
        query = (
            ar.ArchiveFileCopy.select(
                ac.ArchiveAcq.name, pw.fn.count(ar.ArchiveFileCopy.id)
            )
            .join(ac.ArchiveFile)
            .join(ac.ArchiveAcq)
            .where(ar.ArchiveFileCopy.node == node)
            .group_by(ac.ArchiveAcq.id)
        )

    rows = query.tuples()
    if not rows:
        print("No matching acquisitions")
    else:
        print(tabulate.tabulate(rows, headers=["Name", "Files"]))
@cli.command()
@click.argument("acquisition")
@click.argument("node_name", required=False)
def files(acquisition, node_name):
    """List files that are in the ACQUISITION. With NODE specified, list acquisitions with files on NODE."""
    config_connect()
    import tabulate

    try:
        acq = ac.ArchiveAcq.get(name=acquisition)
    except pw.DoesNotExist:
        print("No such acquisition:", acquisition)
        sys.exit(1)
    if node_name:
        try:
            node = st.StorageNode.get(name=node_name)
        except pw.DoesNotExist:
            print("No such storage node:", node_name)
            sys.exit(1)
        # Per-node view: report each copy's on-disk size and has/wants flags.
        query = (
            ac.ArchiveFile.select(
                ac.ArchiveFile.name,
                ar.ArchiveFileCopy.size_b,
                ar.ArchiveFileCopy.has_file,
                ar.ArchiveFileCopy.wants_file,
            )
            .join(ar.ArchiveFileCopy)
            .where(
                ac.ArchiveFile.acq == acq,
                ar.ArchiveFileCopy.node == node,
            )
        )
        headers = ["Name", "Size", "Has", "Wants"]
    else:
        # Global view: registered size and checksum for every file in the acq.
        # Consistency fix: compare the model field to the model instance
        # (ac.ArchiveFile.acq == acq) as everywhere else in this module,
        # rather than the raw foreign-key id (was: acq_id == acq.id).
        query = ac.ArchiveFile.select(
            ac.ArchiveFile.name, ac.ArchiveFile.size_b, ac.ArchiveFile.md5sum
        ).where(ac.ArchiveFile.acq == acq)
        headers = ["Name", "Size", "MD5"]
    data = query.tuples()
    if data:
        print(tabulate.tabulate(data, headers=headers))
    else:
        print("No registered archive files.")
@cli.command()
@click.argument("acquisition")
def where(acquisition):
    """List locations of files that are in the ACQUISITION."""
    config_connect()
    import tabulate

    try:
        acq = ac.ArchiveAcq.get(name=acquisition)
    except pw.DoesNotExist:
        print("No such acquisition:", acquisition)
        sys.exit(1)
    # Every node holding at least one copy of a file from this acquisition.
    nodes = (
        st.StorageNode.select()
        .join(ar.ArchiveFileCopy)
        .join(ac.ArchiveFile)
        .where(ac.ArchiveFile.acq == acq)
        .distinct()
    ).execute()
    if not nodes:
        print("No registered archive files.")
        return
    for node in nodes:
        print("Storage node:", node.name)
        copies = (
            ac.ArchiveFile.select(
                ac.ArchiveFile.name,
                ar.ArchiveFileCopy.size_b,
                ar.ArchiveFileCopy.has_file,
                ar.ArchiveFileCopy.wants_file,
            )
            .join(ar.ArchiveFileCopy)
            .where(
                ac.ArchiveFile.acq == acq,
                ar.ArchiveFileCopy.node == node,
            )
        )
        print(tabulate.tabulate(copies.tuples(), headers=["Name", "Size", "Has", "Wants"]))
        print()
@cli.command()
@click.argument("acquisition")
@click.argument("source_node")
@click.argument("destination_group")
def syncable(acquisition, source_node, destination_group):
    """List all files that are in the ACQUISITION that still need to be moved to DESTINATION_GROUP and are available on SOURCE_NODE."""
    config_connect()
    import tabulate

    try:
        acq = ac.ArchiveAcq.get(name=acquisition)
    except pw.DoesNotExist:
        print("No such acquisition:", acquisition)
        # Bug fix: bail out like the other subcommands do. Previously
        # execution fell through with `acq` unbound, raising a NameError
        # when the queries below referenced it.
        sys.exit(1)
    try:
        src = st.StorageNode.get(name=source_node)
    except pw.DoesNotExist:
        print("No such storage node:", source_node)
        sys.exit(1)
    try:
        dest = st.StorageGroup.get(name=destination_group)
    except pw.DoesNotExist:
        print("No such storage group:", destination_group)
        sys.exit(1)
    # First get the nodes at the destination...
    nodes_at_dest = st.StorageNode.select().where(st.StorageNode.group == dest)
    # Then use this to get a list of all files at the destination...
    files_at_dest = (
        ac.ArchiveFile.select()
        .join(ar.ArchiveFileCopy)
        .where(
            ac.ArchiveFile.acq == acq,
            ar.ArchiveFileCopy.node << nodes_at_dest,
            ar.ArchiveFileCopy.has_file == "Y",
        )
    )
    # Then combine to get all file(copies) that are available at the source but
    # not at the destination...
    query = (
        ac.ArchiveFile.select(
            ac.ArchiveFile.name,
            ac.ArchiveFile.size_b,
        )
        .where(ac.ArchiveFile.acq == acq)
        .join(ar.ArchiveFileCopy)
        .where(
            ar.ArchiveFileCopy.node == src,
            ar.ArchiveFileCopy.has_file == "Y",
            ~(ar.ArchiveFile.id << files_at_dest),
        )
    )
    data = query.tuples()
    if data:
        print(tabulate.tabulate(data, headers=["Name", "Size"]))
    else:
        print(
            "No files to copy from node '",
            source_node,
            "' to group '",
            destination_group,
            "'.",
            sep="",
        )
| {
"repo_name": "radiocosmology/alpenhorn",
"path": "alpenhorn/client/acq.py",
"copies": "1",
"size": "6420",
"license": "mit",
"hash": 8116609060596728000,
"line_mean": 27.281938326,
"line_max": 135,
"alpha_frac": 0.5809968847,
"autogenerated": false,
"ratio": 3.7369033760186263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48179002607186266,
"avg_score": null,
"num_lines": null
} |
"""Alpenhorn client interface for operations on `StorageGroup`s."""
import click
import peewee as pw
import alpenhorn.storage as st
from .connect_db import config_connect
# Top-level Click command group for storage-group operations; subcommands below
# attach themselves via @cli.command(). The docstring doubles as CLI help text.
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
def cli():
    """Commands operating on storage groups. Use to create, modify, and list groups."""
@cli.command()
@click.argument("group_name", metavar="GROUP")
@click.option("--notes", metavar="NOTES")
def create(group_name, notes):
    """Create a storage GROUP and add to database."""
    config_connect()
    try:
        # Probe for a name collision; DoesNotExist is the success path here.
        st.StorageGroup.get(name=group_name)
    except pw.DoesNotExist:
        st.StorageGroup.create(name=group_name, notes=notes)
        print('Added group "%s" to database.' % group_name)
    else:
        print('Group name "%s" already exists! Try a different name!' % group_name)
        exit(1)
@cli.command(name="list")
def group_list():
    """List known storage groups."""
    config_connect()
    import tabulate

    # Renamed from `list` so the builtin is not shadowed; the CLI command is
    # still invoked as "list", matching the acq_list/node_list convention used
    # by the sibling client modules.
    data = st.StorageGroup.select(st.StorageGroup.name, st.StorageGroup.notes).tuples()
    if data:
        print(tabulate.tabulate(data, headers=["Name", "Notes"]))
@cli.command()
@click.argument("group_name", metavar="GROUP")
@click.argument("new_name", metavar="NEW-NAME")
def rename(group_name, new_name):
    """Change the name of a storage GROUP to NEW-NAME."""
    config_connect()
    try:
        group = st.StorageGroup.get(name=group_name)
    except pw.DoesNotExist:
        print('Group "%s" does not exist!' % group_name)
        exit(1)
    try:
        st.StorageGroup.get(name=new_name)
    except pw.DoesNotExist:
        # The target name is free; apply the rename.
        group.name = new_name
        group.save()
        print("Updated.")
    else:
        print('Group "%s" already exists.' % new_name)
        exit(1)
@cli.command()
@click.argument("group_name", metavar="GROUP")
@click.option("--notes", help="Value for the notes field", metavar="NOTES")
def modify(group_name, notes):
    """Change the properties of a storage GROUP."""
    config_connect()
    try:
        group = st.StorageGroup.get(name=group_name)
    except pw.DoesNotExist:
        print('Group "%s" does not exist!' % group_name)
        exit(1)
    if notes is None:
        print("Nothing to do.")
    else:
        # Passing an empty string clears the notes field.
        group.notes = notes or None
        group.save()
        print("Updated.")
| {
"repo_name": "radiocosmology/alpenhorn",
"path": "alpenhorn/client/group.py",
"copies": "1",
"size": "2419",
"license": "mit",
"hash": -3725861690557179400,
"line_mean": 27.4588235294,
"line_max": 87,
"alpha_frac": 0.6138900372,
"autogenerated": false,
"ratio": 3.7620528771384136,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9873905735830647,
"avg_score": 0.0004074357015533486,
"num_lines": 85
} |
"""Alpenhorn client interface for operations on `StorageNode`s."""
import os
import re
import sys
from collections import defaultdict
import click
import peewee as pw
import alpenhorn.acquisition as ac
import alpenhorn.archive as ar
import alpenhorn.storage as st
import alpenhorn.util as util
from alpenhorn import db
from .connect_db import config_connect
RE_LOCK_FILE = re.compile(r"^\..*\.lock$")
# Top-level Click command group for storage-node operations; subcommands below
# attach themselves via @cli.command(). The docstring doubles as CLI help text.
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
def cli():
    """Commands operating on storage nodes. Use to create, modify, mount drives, etc."""
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.argument("root", metavar="ROOT")
@click.argument("hostname", metavar="HOSTNAME")
@click.argument("group", metavar="GROUP", type=str, default=None)
@click.option(
    "--address",
    help="Domain name or IP address for the host \
              (if network accessible).",
    metavar="ADDRESS",
    type=str,
    default=None,
)
@click.option(
    "--active", help="Is the node active?", metavar="BOOL", type=bool, default=False
)
@click.option(
    "--auto_import",
    help="Should files that appear on this node be \
              automatically added?",
    metavar="BOOL",
    type=bool,
    default=False,
)
@click.option(
    "--suspect",
    help="Is this node corrupted?",
    metavar="BOOL",
    type=bool,
    default=False,
)
@click.option(
    "--storage_type",
    help="What is the type of storage? Options:\
                A - archive for the data, T - for transiting data \
                F - for data in the field (i.e acquisition machines)",
    type=click.Choice(["A", "T", "F"]),
    default="A",
)
@click.option(
    "--max_total_gb",
    help="The maximum amount of storage we should \
              use.",
    metavar="FLOAT",
    type=float,
    default=-1.0,
)
@click.option(
    "--min_avail_gb",
    help="What is the minimum amount of free space \
              we should leave on this node?",
    metavar="FLOAT",
    type=float,
    default=-1.0,
)
@click.option(
    "--min_delete_age_days",
    help="What is the minimum amount of time \
              a file must remain on the node before we are allowed to delete \
              it?",
    metavar="FLOAT",
    type=float,
    default=30,
)
@click.option(
    "--notes", help="Any notes or comments about this node.", type=str, default=None
)
def create(
    node_name,
    root,
    hostname,
    group,
    address,
    active,
    auto_import,
    suspect,
    storage_type,
    max_total_gb,
    min_avail_gb,
    min_delete_age_days,
    notes,
):
    """Create a storage NODE within storage GROUP with a ROOT directory on
    HOSTNAME.
    """
    config_connect()
    try:
        this_group = st.StorageGroup.get(name=group)
    except pw.DoesNotExist:
        # Typo fix in the error message: "exit" -> "exist".
        print('Requested group "%s" does not exist in DB.' % group)
        exit(1)
    try:
        # Probe only for a name collision; the fetched node itself is unused.
        st.StorageNode.get(name=node_name)
        print('Node name "%s" already exists! Try a different name!' % node_name)
        exit(1)
    except pw.DoesNotExist:
        st.StorageNode.create(
            name=node_name,
            root=root,
            host=hostname,
            address=address,
            group=this_group.id,
            active=active,
            auto_import=auto_import,
            suspect=suspect,
            storage_type=storage_type,
            max_total_gb=max_total_gb,
            min_avail_gb=min_avail_gb,
            min_delete_age_days=min_delete_age_days,
            notes=notes,
        )
        print(
            'Added node "%(node)s" belonging to group "%(group)s" in the directory '
            '"%(root)s" at host "%(host)s" to database.'
            % dict(node=node_name, root=root, group=group, host=hostname)
        )
@cli.command(name="list")
def node_list():
    """List known storage nodes."""
    config_connect()
    import tabulate

    # One row per node, with its owning group name resolved via the join.
    rows = (
        st.StorageNode.select(
            st.StorageNode.name,
            st.StorageGroup.name,
            st.StorageNode.storage_type,
            st.StorageNode.host,
            st.StorageNode.root,
            st.StorageNode.notes,
        )
        .join(st.StorageGroup)
        .tuples()
    )
    if not rows:
        return
    print(
        tabulate.tabulate(
            rows, headers=["Name", "Group", "Type", "Host", "Root", "Notes"]
        )
    )
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.argument("new_name", metavar="NEW-NAME")
def rename(node_name, new_name):
    """Change the name of a storage NODE to NEW-NAME."""
    config_connect()
    try:
        node = st.StorageNode.get(name=node_name)
    except pw.DoesNotExist:
        print('Node "%s" does not exist!' % node_name)
        exit(1)
    try:
        st.StorageNode.get(name=new_name)
    except pw.DoesNotExist:
        # The target name is free; apply the rename.
        node.name = new_name
        node.save()
        print("Updated.")
    else:
        print('Node "%s" already exists.' % new_name)
        exit(1)
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.option(
    "--max_total_gb",
    help="New maximum amount of storage to use.",
    metavar="FLOAT",
    type=float,
)
@click.option(
    "--min_avail_gb",
    help="New minimum amount of free space to " "leave on the node",
    metavar="FLOAT",
    type=float,
)
@click.option(
    "--min_delete_age_days",
    help="New minimum amount of time "
    "a file must remain on the node before we are allowed to delete "
    "it.",
    metavar="FLOAT",
    type=float,
)
@click.option("--notes", help="New value for the notes field", metavar="NOTES")
def modify(node_name, max_total_gb, min_avail_gb, min_delete_age_days, notes):
    """Change the properties of a storage NODE."""
    config_connect()
    try:
        node = st.StorageNode.get(name=node_name)
    except pw.DoesNotExist:
        print('Node "%s" does not exist!' % node_name)
        exit(1)
    # Apply only the options that were actually supplied.
    updates = False
    if max_total_gb is not None:
        node.max_total_gb = max_total_gb
        updates = True
    if min_avail_gb is not None:
        node.min_avail_gb = min_avail_gb
        updates = True
    if min_delete_age_days is not None:
        node.min_delete_age_days = min_delete_age_days
        updates = True
    if notes is not None:
        # Passing an empty string clears the notes field.
        node.notes = notes or None
        updates = True
    if updates:
        node.save()
        print("Updated.")
    else:
        print("Nothing to do.")
@cli.command()
@click.argument("name")
@click.option("--path", help="Root path for this node", type=str, default=None)
@click.option("--user", help="username to access this node.", type=str, default=None)
@click.option(
    "--address", help="address for remote access to this node.", type=str, default=None
)
@click.option(
    "--hostname",
    type=str,
    default=None,
    help="hostname running the alpenhornd instance for this node (set to this hostname by default).",
)
def activate(name, path, user, address, hostname):
    """Interactive routine for activating a storage node located at ROOT."""
    config_connect()
    try:
        node = st.StorageNode.get(name=name)
    except pw.DoesNotExist:
        click.echo('Storage node "%s" does not exist. I quit.' % name)
        exit(1)
    if node.active:
        click.echo('Node "%s" is already active.' % name)
        return
    # --path overrides the stored root before the sanity check below.
    if path is not None:
        node.root = path
    # Sanity check that the on-disk node matches the database record.
    if not util.alpenhorn_node_check(node):
        click.echo('Node "{}" does not match ALPENHORN_NODE'.format(node.name))
        exit(1)
    # Set the default hostname if required
    if hostname is None:
        hostname = util.get_short_hostname()
        click.echo('I will set the host to "%s".' % hostname)
    # Set the parameters of this node
    # NOTE(review): username and address are overwritten even when the
    # corresponding options were omitted (they default to None), so every
    # activation clears any previously stored values — confirm intended.
    node.username = user
    node.address = address
    node.active = True
    node.host = hostname
    node.save()
    click.echo('Successfully activated "%s".' % name)
@cli.command()
@click.argument("root_or_name")
def deactivate(root_or_name):
    """Deactivate a storage node with location or named ROOT_OR_NAME."""
    # Note: `os` is imported at module level; the redundant function-local
    # import that used to be here has been removed.
    config_connect()
    try:
        node = st.StorageNode.get(name=root_or_name)
    except pw.DoesNotExist:
        # Not a node name; try interpreting the argument as a local root path.
        if root_or_name[-1] == "/":
            root_or_name = root_or_name[:-1]
        if not os.path.exists(root_or_name):
            click.echo("That is neither a node name, nor a path on this host. I quit.")
            exit(1)
        try:
            node = st.StorageNode.get(root=root_or_name, host=util.get_short_hostname())
        except pw.DoesNotExist:
            click.echo(
                "That is neither a node name, nor a root name that is known. I quit."
            )
            exit(1)
    if not node.active:
        click.echo("There is no active node there any more.")
    else:
        node.active = False
        node.save()
        print("Node successfully deactivated.")
@cli.command()
@click.option(
    "--host",
    "-H",
    help="Use specified host rather than local machine",
    type=str,
    default=None,
)
def active(host):
    """List the nodes active on this, or another specified, machine"""
    config_connect()
    if host is None:
        host = util.get_short_hostname()
    found_any = False
    active_nodes = st.StorageNode.select().where(
        st.StorageNode.host == host, st.StorageNode.active
    )
    for node in active_nodes:
        # Count only copies actually present on the node.
        n_file = (
            ar.ArchiveFileCopy.select()
            .where(
                (ar.ArchiveFileCopy.node == node) & (ar.ArchiveFileCopy.has_file == "Y")
            )
            .count()
        )
        print("%-25s %-30s %5d files" % (node.name, node.root, n_file))
        found_any = True
    if not found_any:
        print("No nodes are active on host %s." % host)
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.option("-v", "--verbose", count=True)
@click.option(
    "--acq",
    help="Limit import to specified acquisition directories.",
    multiple=True,
    default=None,
)
@click.option(
    "--register-new", help="Register new files instead of ignoring them.", is_flag=True
)
@click.option("--dry", "-d", help="Dry run. Do not modify database.", is_flag=True)
def scan(node_name, verbose, acq, register_new, dry):
    """Scan the current directory for known acquisition files and add them into the database for NODE.
    This command is useful for manually maintaining an archive where we cannot
    run alpenhornd in the usual manner.
    """
    config_connect()
    # Keep track of state as we process the files
    added_files = []  # Files we have added to the database
    corrupt_files = []  # Known files which are corrupt
    registered_files = []  # Files already registered in the database
    unknown_files = []  # Files not known in the database
    known_acqs = []  # Directories which are known acquisitions
    new_acqs = []  # Directories which were newly registered acquisitions
    not_acqs = []  # Directories which were not known acquisitions
    # Fetch a reference to the node
    try:
        this_node = (
            st.StorageNode.select().where(st.StorageNode.name == node_name).get()
        )
    except pw.DoesNotExist:
        # Bug fix: click.echo() takes a single message argument; the second
        # positional argument used to land in its `file` parameter and crash.
        click.echo("Unknown node: %s" % node_name)
        exit(1)
    cwd = os.getcwd()
    # Construct a dictionary of directories that might be acquisitions and the of
    # list files that they contain
    db_acqs = ac.ArchiveAcq.select(ac.ArchiveAcq.name)
    acq_files = defaultdict(list)
    if len(acq) == 0:
        tops = [cwd]
    else:
        db_acqs = db_acqs.where(ac.ArchiveAcq.name >> acq)
        tops = []
        for acq_name in acq:
            acq_dir = os.path.join(this_node.root, acq_name)
            if not os.path.isdir(acq_dir):
                print(
                    'Aquisition "%s" does not exist in this node. Ignoring.' % acq_name,
                    file=sys.stderr,
                )
                continue
            if acq_dir == cwd:
                # the current directory is one of the limiting acquisitions, so
                # we can ignore all others in the `--acq` list
                tops = [acq_dir]
                break
            elif cwd.startswith(acq_dir):
                # the current directory is inside one of the limiting
                # acquisitions, so we can just walk its subtree
                tops = [cwd]
                break
            elif acq_dir.startswith(cwd):
                # the acquisition is inside the current directory, so we can
                # just walk its subtree
                tops.append(acq_dir)
            else:
                print(
                    'Acquisition "%s" is outside the current directory and will be ignored.'
                    % acq_name,
                    file=sys.stderr,
                )
    for top in tops:
        for d, ds, fs in os.walk(top):
            d = os.path.relpath(d, this_node.root)
            if d == ".":  # skip the node root directory
                continue
            acq_type_name = ac.AcqType.detect(d, this_node)
            if acq_type_name:
                _, acq_name = acq_type_name
                if d == acq_name:
                    # the directory is the acquisition
                    # NOTE(review): the lock-file isfile() check below uses the
                    # root-relative path `d`, which only resolves correctly
                    # when the process cwd is the node root — confirm.
                    acq_files[acq_name] += [
                        f
                        for f in fs
                        if not RE_LOCK_FILE.match(f)
                        and not os.path.isfile(os.path.join(d, ".{}.lock".format(f)))
                    ]
                if d.startswith(acq_name + "/"):
                    # the directory is inside an acquisition
                    acq_dirname = os.path.relpath(d, acq_name)
                    acq_files[acq_name] += [
                        (acq_dirname + "/" + f)
                        for f in fs
                        if not RE_LOCK_FILE.match(f)
                        and not os.path.isfile(os.path.join(d, ".{}.lock".format(f)))
                    ]
            else:
                not_acqs.append(d)
    with click.progressbar(acq_files, label="Scanning acquisitions") as acq_iter:
        for acq_name in acq_iter:
            try:
                # NOTE: rebinds the (no longer needed) `acq` option tuple.
                acq = ac.ArchiveAcq.select().where(ac.ArchiveAcq.name == acq_name).get()
                known_acqs.append(acq_name)
                # Fetch lists of all files in this acquisition, and all
                # files in this acq with local copies
                file_names = [f.name for f in acq.files]
                local_file_names = [
                    f.name
                    for f in acq.files.join(ar.ArchiveFileCopy).where(
                        ar.ArchiveFileCopy.node == this_node
                    )
                ]
            except pw.DoesNotExist:
                if register_new:
                    acq_type, _ = ac.AcqType.detect(acq_name, this_node)
                    acq = ac.ArchiveAcq(name=acq_name, type=acq_type)
                    if not dry:
                        # TODO: refactor duplication with auto_import.add_acq
                        with db.database_proxy.atomic():
                            # insert the archive record
                            acq.save()
                            # and generate the metadata table
                            acq_type.acq_info.new(acq, this_node)
                    new_acqs.append(acq_name)
                    # Because it's a newly imported acquisition, all files within it are new also
                    file_names = []
                    local_file_names = []
                else:
                    not_acqs.append(acq_name)
                    continue
            for f_name in acq_files[acq_name]:
                file_path = os.path.join(acq_name, f_name)
                # Check if file exists in database
                if not register_new and f_name not in file_names:
                    unknown_files.append(file_path)
                    continue
                # Check if file is already registered on this node
                if f_name in local_file_names:
                    registered_files.append(file_path)
                else:
                    abs_path = os.path.join(this_node.root, file_path)
                    if f_name in file_names:
                        # it is a known file
                        archive_file = (
                            ac.ArchiveFile.select()
                            .where(
                                ac.ArchiveFile.name == f_name, ac.ArchiveFile.acq == acq
                            )
                            .get()
                        )
                        # TODO: decide if, when the file is corrupted, we still
                        # register the file as `has_file="X"` or just _continue_
                        if os.path.getsize(abs_path) != archive_file.size_b:
                            corrupt_files.append(file_path)
                            continue
                        else:
                            if verbose > 2:
                                print('Computing md5sum of "{}"'.format(f_name))
                            md5sum = util.md5sum_file(abs_path, cmd_line=False)
                            if md5sum != archive_file.md5sum:
                                corrupt_files.append(file_path)
                                continue
                    else:
                        # not a known file, register the new ArchiveFile instance
                        file_type = ac.FileType.detect(f_name, acq, this_node)
                        if not file_type:
                            unknown_files.append(file_path)
                            continue
                        if verbose > 2:
                            print('Computing md5sum of "{}"'.format(f_name))
                        md5sum = util.md5sum_file(abs_path, cmd_line=False)
                        size_b = os.path.getsize(abs_path)
                        archive_file = ac.ArchiveFile(
                            name=f_name,
                            acq=acq,
                            type=file_type,
                            size_b=size_b,
                            md5sum=md5sum,
                        )
                        if not dry:
                            archive_file.save()
                    added_files.append(file_path)
                    if not dry:
                        # Record the actual on-disk usage (allocated blocks).
                        copy_size_b = os.stat(abs_path).st_blocks * 512
                        ar.ArchiveFileCopy.create(
                            file=archive_file,
                            node=this_node,
                            has_file="Y",
                            wants_file="Y",
                            size_b=copy_size_b,
                        )
    # now find the minimum unknown acqs paths that we can report
    not_acqs_roots = []
    last_acq_root = ""
    for d in sorted(not_acqs):
        common = os.path.commonprefix([last_acq_root, d])
        if common == "":
            for acq_name in known_acqs:
                if acq_name.startswith(d):
                    break
            else:
                for acq_name in new_acqs:
                    if acq_name.startswith(d):
                        break
                else:
                    not_acqs_roots.append(d)
            last_acq_root = d
    print("\n==== Summary ====")
    print()
    if register_new:
        print("Registered %i new acquisitions" % len(new_acqs))
    print("Added %i files" % len(added_files))
    print()
    print("%i corrupt files." % len(corrupt_files))
    print("%i files already registered." % len(registered_files))
    print("%i files not known" % len(unknown_files))
    print("%i directories were not acquisitions." % len(not_acqs_roots))
    if verbose > 0:
        print()
        if register_new:
            print("New acquisitions:")
            for an in sorted(new_acqs):
                print(an)
            print()
        print("Added files:")
        for fn in sorted(added_files):
            print(fn)
        print()
    if verbose > 1:
        print("Corrupt:")
        for fn in sorted(corrupt_files):
            print(fn)
        print()
        print("Unknown files:")
        for fn in sorted(unknown_files):
            print(fn)
        print()
        print("Unknown acquisitions:")
        for fn in sorted(not_acqs_roots):
            print(fn)
        print()
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.option("--md5", help="perform full check against md5sum", is_flag=True)
@click.option(
    "--fixdb", help="fix up the database to be consistent with reality", is_flag=True
)
@click.option(
    "--acq",
    metavar="ACQ",
    multiple=True,
    help="Limit verification to specified acquisitions. Use repeated --acq flags to specify multiple acquisitions.",
)
def verify(node_name, md5, fixdb, acq):
    """Verify the archive on NODE against the database.

    Without --md5, only file sizes are compared; with it, a full checksum of
    every copy is computed. With --fixdb the database is updated to reflect
    what was found on disk (after interactive confirmation).

    If there are no issues with the archive returns with exit status of zero,
    non-zero if there are issues. Specifically:
    `0`
        No problems.
    `1`
        Corrupt files found.
    `2`
        Files missing from archive.
    `3`
        Both corrupt and missing files.
    """
    # NOTE(review): redundant — `os` is already imported at module level.
    import os
    config_connect()
    try:
        this_node = st.StorageNode.get(name=node_name)
    except pw.DoesNotExist:
        click.echo('Storage node "{}" does not exist.'.format(node_name))
        exit(1)
    if not this_node.active:
        click.echo('Node "{}" is not active.'.format(node_name))
        exit(1)
    # The node root must carry the expected ALPENHORN_NODE marker.
    if not util.alpenhorn_node_check(this_node):
        click.echo(
            'Node "{}" does not match ALPENHORN_NODE: {}'.format(
                node_name, this_node.root
            )
        )
        exit(1)
    # Use a complicated query with a tuples construct to fetch everything we
    # need in a single query. This massively speeds up the whole process versus
    # fetching all the FileCopy's then querying for Files and Acqs.
    lfiles = (
        ac.ArchiveFile.select(
            ac.ArchiveFile.name,
            ac.ArchiveAcq.name,
            ac.ArchiveFile.size_b,
            ac.ArchiveFile.md5sum,
            ar.ArchiveFileCopy.id,
        )
        .join(ac.ArchiveAcq)
        .switch(ac.ArchiveFile)
        .join(ar.ArchiveFileCopy)
        .where(ar.ArchiveFileCopy.node == this_node, ar.ArchiveFileCopy.has_file == "Y")
    )
    if acq:
        lfiles = lfiles.where(ac.ArchiveAcq.name << acq)
    missing_files = []
    corrupt_files = []
    missing_ids = []
    corrupt_ids = []
    nfiles = 0
    with click.progressbar(lfiles.tuples(), label="Scanning files") as lfiles_iter:
        for filename, acqname, filesize, md5sum, fc_id in lfiles_iter:
            nfiles += 1
            filepath = this_node.root + "/" + acqname + "/" + filename
            # Check if file is plain missing
            if not os.path.exists(filepath):
                missing_files.append(filepath)
                missing_ids.append(fc_id)
                continue
            # --md5 does a full checksum; otherwise a cheap size comparison.
            if md5:
                file_md5 = util.md5sum_file(filepath)
                corrupt = file_md5 != md5sum
            else:
                corrupt = os.path.getsize(filepath) != filesize
            if corrupt:
                corrupt_files.append(filepath)
                corrupt_ids.append(fc_id)
                continue
    if len(missing_files) > 0:
        click.echo()
        click.echo("=== Missing files ===")
        for fname in missing_files:
            click.echo(fname)
    if len(corrupt_files) > 0:
        print()
        click.echo("=== Corrupt files ===")
        for fname in corrupt_files:
            click.echo(fname)
    click.echo()
    click.echo("=== Summary ===")
    click.echo("  %i total files" % nfiles)
    click.echo("  %i missing files" % len(missing_files))
    click.echo("  %i corrupt files" % len(corrupt_files))
    click.echo()
    # Fix up the database by marking files as missing, and marking
    # corrupt files for verification by alpenhornd.
    if fixdb:
        # TODO: ensure write access to the database
        # # We need to write to the database.
        # di.connect_database(read_write=True)
        if (len(missing_files) > 0) and click.confirm("Fix missing files"):
            missing_count = (
                ar.ArchiveFileCopy.update(has_file="N")
                .where(ar.ArchiveFileCopy.id << missing_ids)
                .execute()
            )
            click.echo("  %i marked as missing" % missing_count)
        if (len(corrupt_files) > 0) and click.confirm("Fix corrupt files"):
            corrupt_count = (
                ar.ArchiveFileCopy.update(has_file="M")
                .where(ar.ArchiveFileCopy.id << corrupt_ids)
                .execute()
            )
            click.echo("  %i corrupt files marked for verification" % corrupt_count)
    else:
        # Set the exit status
        status = 1 if corrupt_files else 0
        status += 2 if missing_files else 0
        exit(status)
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.option(
    "--days", "-d", help="Clean files older than <days>.", type=int, default=None
)
@click.option("--cancel", help="Cancel files marked for cleaning", is_flag=True)
@click.option("--force", "-f", help="Force cleaning on an archive node.", is_flag=True)
@click.option("--now", "-n", help="Force immediate removal.", is_flag=True)
@click.option(
    "--target",
    metavar="TARGET_GROUP",
    default=None,
    type=str,
    help="Only clean files already available in this group.",
)
@click.option(
    "--acq", metavar="ACQ", default=None, type=str, help="Limit removal to acquisition."
)
def clean(node_name, days, cancel, force, now, target, acq):
    """Clean up NODE by marking older files as potentially removable.
    Files will never be removed until they are available on at least two
    archival nodes.
    Normally, files are marked to be removed only if the disk space on the node
    is running low. With the --now flag, they will be made available for
    immediate removal. Either way, they will *never* be actually removed until
    there are sufficient archival copies.
    Using the --cancel option undoes previous cleaning operations by marking
    files that are still on the node and that were marked as available for
    removal as "must keep".
    If --target is specified, the command will only affect files already
    available in the TARGET_GROUP. This is useful for cleaning out intermediate
    locations such as transport disks.
    Using the --days flag will only clean correlator and housekeeping
    files which have a timestamp associated with them. It will not
    touch other types. If no --days flag is given, all files will be
    considered for removal.
    """
    # --cancel restores files; --now schedules them for removal. Both at once
    # is contradictory.
    if cancel and now:
        print("Options --cancel and --now are mutually exclusive.")
        exit(1)
    config_connect()
    try:
        this_node = st.StorageNode.get(st.StorageNode.name == node_name)
    except pw.DoesNotExist:
        print('Storage node "%s" does not exist.' % node_name)
        exit(1)
    # Refuse to clean an archive node unless explicitly forced or confirmed.
    if this_node.storage_type == "A":
        if force or click.confirm(
            'DANGER: run clean on archive node "%s"?' % node_name
        ):
            print('"%s" is an archive node. Forcing clean.' % node_name)
        else:
            print('Cannot clean archive node "%s" without forcing.' % node_name)
            exit(1)
    # Select file copies actually present on this node.
    files = ar.ArchiveFileCopy.select(ar.ArchiveFileCopy.id).where(
        ar.ArchiveFileCopy.node == this_node, ar.ArchiveFileCopy.has_file == "Y"
    )
    if now:
        # In 'now' cleaning, every copy will be set to wants_file="No", if it
        # wasn't already
        files = files.where(ar.ArchiveFileCopy.wants_file != "N")
    elif cancel:
        # Undo any "Maybe" and "No" want_files and reset them to "Yes"
        files = files.where(ar.ArchiveFileCopy.wants_file != "Y")
    else:
        # In regular cleaning, we only mark as "Maybe" want_files that are
        # currently "Yes", but leave "No" unchanged
        files = files.where(ar.ArchiveFileCopy.wants_file == "Y")
    # Limit to acquisition
    if acq is not None:
        try:
            acq = ac.ArchiveAcq.get(name=acq)
        # BUG FIX: was "pw.DoesNotExit", which raised AttributeError instead
        # of catching the missing-acquisition case.
        except pw.DoesNotExist:
            raise RuntimeError("Specified acquisition %s does not exist" % acq)
        files_in_acq = ac.ArchiveFile.select().where(ac.ArchiveFile.acq == acq)
        files = files.where(ar.ArchiveFileCopy.file << files_in_acq)
    # If the target option has been specified, only clean files also available there...
    if target is not None:
        # Fetch a reference to the target group
        try:
            target_group = st.StorageGroup.get(name=target)
        except pw.DoesNotExist:
            raise RuntimeError('Target group "%s" does not exist in the DB.' % target)
        # First get the nodes at the destination...
        nodes_at_target = st.StorageNode.select().where(
            st.StorageNode.group == target_group
        )
        # Then use this to get a list of all files at the destination...
        files_at_target = (
            ac.ArchiveFile.select()
            .join(ar.ArchiveFileCopy)
            .where(
                ar.ArchiveFileCopy.node << nodes_at_target,
                ar.ArchiveFileCopy.has_file == "Y",
            )
        )
        # Only match files that are also available at the target
        files = files.where(ar.ArchiveFileCopy.file << files_at_target)
    # If --days has been set we need to restrict to files older than the given
    # time. This only works for a few particular file types
    if days is not None and days > 0:
        # TODO: the old implementation relied on CHIME-specific file-info
        # tables (CorrFileInfo, HKFileInfo) and has not been ported to the
        # current schema.
        # BUG FIX: raising a plain string is a TypeError on Python 3; raise a
        # real exception instead.
        raise NotImplementedError("'--days' feature has not been implemented yet")
    # If days is not set, then just select all files that meet the requirements so far
    else:
        file_ids = [f for f in files]
        count = files.count()
        if count > 0:
            # Total size of the selection, for the confirmation message.
            size_bytes = (
                ar.ArchiveFileCopy.select()
                .where(ar.ArchiveFileCopy.id << file_ids)
                .join(ac.ArchiveFile)
                .select(pw.fn.Sum(ac.ArchiveFile.size_b))
                .scalar()
            )
            size_gb = int(size_bytes) / 1073741824.0
            print(
                'Mark %i files (%.1f GB) from "%s" %s.'
                % (
                    count,
                    size_gb,
                    node_name,
                    "for keeping" if cancel else "available for removal",
                )
            )
    # If there are any files to clean, ask for confirmation and the mark them in
    # the database for removal
    if len(file_ids) > 0:
        if force or click.confirm("  Are you sure?"):
            print("  Marking...")
            if cancel:
                state = "Y"
            else:
                state = "N" if now else "M"
            update = ar.ArchiveFileCopy.update(wants_file=state).where(
                ar.ArchiveFileCopy.id << file_ids
            )
            n = update.execute()
            if cancel:
                print("Marked %i files for keeping." % n)
            else:
                print("Marked %i files available for removal." % n)
        else:
            print("  Cancelled. Exit without changes.")
    else:
        print("No files selected for cleaning on %s." % node_name)
| {
"repo_name": "radiocosmology/alpenhorn",
"path": "alpenhorn/client/node.py",
"copies": "1",
"size": "33061",
"license": "mit",
"hash": 7336091609572130000,
"line_mean": 31.99500998,
"line_max": 116,
"alpha_frac": 0.546898158,
"autogenerated": false,
"ratio": 3.97893850042123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.502583665842123,
"avg_score": null,
"num_lines": null
} |
"""Alpenhorn client interface for operations on transport disks."""
import os
import subprocess
import time
import click
import peewee as pw
import alpenhorn.storage as st
from . import node
from .connect_db import config_connect
# A few utility routines for dealing with filesystems
MAX_E2LABEL_LEN = 16
# Entry point for the transport command group; the subcommands below register
# themselves on this group via @cli.command().
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
def cli():
    """Commands operating on transport nodes. Use to format, mount, etc."""
@cli.command()
def list():
    """List known transport nodes."""
    config_connect()
    import tabulate
    # "Y"/"-" flag derived from the boolean `active` column.
    active_flag = pw.Case(st.StorageNode.active, [(True, "Y"), (False, "-")])
    # Restrict to transport-type ("T") storage nodes only.
    rows = (
        st.StorageNode.select(
            st.StorageNode.name,
            active_flag,
            st.StorageNode.host,
            st.StorageNode.root,
            st.StorageNode.notes,
        )
        .where(st.StorageNode.storage_type == "T")
        .tuples()
    )
    if not rows:
        return
    print(
        tabulate.tabulate(rows, headers=["Name", "Mounted", "Host", "Root", "Notes"])
    )
@cli.command()
@click.argument("serial_num")
def format(serial_num):
    """Interactive routine for formatting a transport disc as a storage
    node; formats and labels the disc as necessary, the adds to the
    database. The disk is specified using the manufacturers
    SERIAL_NUM, which is printed on the disk.
    """
    import glob
    import os
    config_connect()
    if os.getuid() != 0:
        print("You must be root to run mount on a transport disc. I quit.")
        return
    # Find the disc.
    dev = glob.glob("/dev/disk/by-id/*%s" % serial_num)
    if len(dev) == 0:
        print("No disc with that serial number is attached.")
        return
    elif len(dev) > 1:
        print("Confused: found more than one device matching that serial number:")
        for d in dev:
            # BUG FIX: previously printed the whole list ("dev") on every
            # iteration instead of each candidate device.
            print(" %s" % d)
        print("Aborting.")
        return
    dev = dev[0]
    dev_part = "%s-part1" % dev
    # Figure out if it is formatted.
    print("Checking to see if disc is formatted. Please wait.")
    formatted = False
    try:
        # check if the block device is partitioned
        subprocess.check_output(["blkid", "-p", dev])
        # now check if the partition is formatted
        # BUG FIX: check_output returns bytes on Python 3; decode before
        # searching for the "TYPE=" token.
        if "TYPE=" in subprocess.check_output(["blkid", "-p", dev_part]).decode():
            formatted = True
    except subprocess.CalledProcessError:
        pass
    if not formatted:
        if not click.confirm("Disc is not formatted. Should I format it?"):
            return
        print("Creating partition. Please wait.")
        try:
            subprocess.check_call(
                [
                    "parted",
                    "-s",
                    "-a",
                    "optimal",
                    dev,
                    "mklabel",
                    "gpt",
                    "--",
                    "mkpart",
                    "primary",
                    "0%",
                    "100%",
                ]
            )
        except subprocess.CalledProcessError as e:
            print(
                "Failed to create the partition! Stat = %s. I quit.\n%s"
                % (e.returncode, e.output)
            )
            exit(1)
        # pause to give udev rules time to get updated
        time.sleep(1)
        print("Formatting disc. Please wait.")
        try:
            subprocess.check_call(
                ["mkfs.ext4", dev_part, "-m", "0", "-L", "CH-{}".format(serial_num)]
            )
        except subprocess.CalledProcessError as e:
            print(
                "Failed to format the disk! Stat = %s. I quit.\n%s"
                % (e.returncode, e.output)
            )
            exit(1)
    else:
        print("Disc is already formatted.")
    e2label = _get_e2label(dev_part)
    name = "CH-%s" % serial_num
    if e2label and e2label != name:
        # BUG FIX: the message had a %s placeholder but no argument, and the
        # bare "exit" below it was a no-op (the builtin was referenced, never
        # called), so execution fell through despite a bad label.
        print(
            "Disc label %s does not conform to labelling standard, "
            "which is CH-<serialnum>." % e2label
        )
        exit(1)
    elif not e2label:
        print('Labelling the disc as "%s" (using e2label) ...' % (name))
        assert dev_part is not None
        assert len(name) <= MAX_E2LABEL_LEN
        try:
            subprocess.check_call(["/sbin/e2label", dev_part, name])
        except subprocess.CalledProcessError as e:
            print(
                "Failed to e2label! Stat = %s. I quit.\n%s" % (e.returncode, e.output)
            )
            exit(1)
    # Ensure the mount path exists.
    root = "/mnt/%s" % name
    if not os.path.isdir(root):
        print("Creating mount point %s." % root)
        os.mkdir(root)
    # Check to see if the disc is mounted.
    try:
        # BUG FIX: decode df's bytes output before splitting into lines
        # (bytes.split("\n") raises TypeError on Python 3).
        output = subprocess.check_output(["df"]).decode()
        dev_part_abs = os.path.realpath(dev_part)
        for l in output.split("\n"):
            if l.find(root) > 0:
                # BUG FIX: the first comparison sliced len(dev_part) characters
                # but compared against the shorter "dev", so it could never
                # match; slice len(dev) instead.
                if l[: len(dev)] == dev or l[: len(dev_part_abs)] == dev_part_abs:
                    print("%s is already mounted at %s" % (l.split()[0], root))
                else:
                    # BUG FIX: the message string was being *called* as a
                    # function instead of %-formatted (TypeError at runtime).
                    print(
                        "%s is a mount point, but %s is already mounted there."
                        % (root, l.split()[0])
                    )
    except subprocess.CalledProcessError as e:
        print(
            "Failed to check the mountpoint! Stat = %s. I quit.\n%s"
            % (e.returncode, e.output)
        )
        exit(1)
    try:
        node = st.StorageNode.get(name=name)
    except pw.DoesNotExist:
        print(
            "This disc has not been registered yet as a storage node. "
            "Registering now."
        )
        try:
            group = st.StorageGroup.get(name="transport")
        except pw.DoesNotExist:
            print('Hmmm. Storage group "transport" does not exist. I quit.')
            exit(1)
        # TODO: ensure write access to the database
        # # We need to write to the database.
        # di.connect_database(read_write=True)
        node = st.StorageNode.create(
            name=name, root=root, group=group, storage_type="T", min_avail_gb=1
        )
        print("Successfully created storage node.")
    print("Node created but not activated. Run alpenhorn mount_transport for that.")
@cli.command()
@click.pass_context
@click.argument("node_name", metavar="NODE")
@click.option("--user", help="username to access this node.", type=str, default=None)
@click.option(
    "--address", help="address for remote access to this node.", type=str, default=None
)
def mount(ctx, node_name, user, address):
    """Mount a transport disk into the system and then make it available to alpenhorn."""
    mnt_point = "/mnt/%s" % node_name
    # Only run the OS-level mount if the disk is not mounted already.
    if not os.path.ismount(mnt_point):
        print("Mounting disc at %s" % mnt_point)
        os.system("mount %s" % mnt_point)
    else:
        print(
            "{} is already mounted in the filesystem. Proceeding to activate it.".format(
                node_name
            )
        )
    # Either way, register/activate the node with alpenhorn.
    ctx.invoke(
        node.activate, name=node_name, path=mnt_point, user=user, address=address
    )
@cli.command()
@click.pass_context
@click.argument("node_name", metavar="NODE")
def unmount(ctx, node_name):
    """Unmount a transport disk from the system and then remove it from alpenhorn."""
    # Transport disks always live under /mnt/<node_name>.
    mount_path = "/mnt/%s" % node_name
    print("Unmounting disc at %s" % mount_path)
    os.system("umount %s" % mount_path)
    # Deactivate the node in the database via the node sub-command.
    ctx.invoke(node.deactivate, root_or_name=node_name)
def _get_e2label(dev):
    """Read filesystem label on an Ext{2,3,4}fs device
    Parameters
    ----------
    dev: str
        The path to the device file.
    Returns
    -------
    str or None
        the filesystem label, or None if reading it failed.
    """
    try:
        # BUG FIX: check_output returns bytes on Python 3; decode so callers
        # can compare the label against str values (e.g. "CH-<serial>").
        output = subprocess.check_output(["/sbin/e2label", dev]).decode().strip()
    except subprocess.CalledProcessError:
        return None
    if len(output) < MAX_E2LABEL_LEN:
        return output
    # Longer than an ext2/3/4 label can be: treat as unreadable.
    return None
| {
"repo_name": "radiocosmology/alpenhorn",
"path": "alpenhorn/client/transport.py",
"copies": "1",
"size": "8015",
"license": "mit",
"hash": -886649455953580000,
"line_mean": 28.5756457565,
"line_max": 89,
"alpha_frac": 0.5459762944,
"autogenerated": false,
"ratio": 3.9599802371541504,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.500595653155415,
"avg_score": null,
"num_lines": null
} |
"""Alpenhorn client interface."""
import datetime
import os
import sys
import click
import peewee as pw
from ch_util import data_index as di
from ch_util import ephemeris
# Top-level click group for the legacy alpenhorn client; all subcommands below
# are registered on this group via @cli.command().
@click.group()
def cli():
    """Client interface for alpenhorn. Use to request transfers, mount drives,
    check status etc."""
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.argument("group_name", metavar="GROUP")
@click.option(
    "--acq", help="Sync only this acquisition.", metavar="ACQ", type=str, default=None
)
@click.option("--force", "-f", help="proceed without confirmation", is_flag=True)
@click.option("--nice", "-n", help="nice level for transfer", default=0)
@click.option(
    "--target",
    metavar="TARGET_GROUP",
    default=None,
    type=str,
    help="Only transfer files not available on this group.",
)
@click.option(
    "--transport",
    "-t",
    is_flag=True,
    help="[DEPRECATED] transport mode: only copy if fewer than two archived copies exist.",
)
@click.option("--show_acq", help="Summarise acquisitions to be copied.", is_flag=True)
@click.option("--show_files", help="Show files to be copied.", is_flag=True)
def sync(
    node_name, group_name, acq, force, nice, target, transport, show_acq, show_files
):
    """Copy all files from NODE to GROUP that are not already present.
    We can also use the --target option to only transfer files that are not
    available on both the destination group, and the TARGET_GROUP. This is
    useful for transferring data to a staging location before going to a final
    archive (e.g. HPSS, transport disks).
    """
    # Make sure we connect RW
    di.connect_database(read_write=True)
    try:
        from_node = di.StorageNode.get(name=node_name)
    except pw.DoesNotExist:
        raise Exception('Node "%s" does not exist in the DB.' % node_name)
    try:
        to_group = di.StorageGroup.get(name=group_name)
    except pw.DoesNotExist:
        raise Exception('Group "%s" does not exist in the DB.' % group_name)
    # Construct list of file copies that are available on the source node, and
    # not available on any nodes at the destination. This query is quite complex
    # so I've broken it up...
    # First get the nodes at the destination...
    nodes_at_dest = di.StorageNode.select().where(di.StorageNode.group == to_group)
    # Then use this to get a list of all files at the destination...
    files_at_dest = (
        di.ArchiveFile.select()
        .join(di.ArchiveFileCopy)
        .where(
            di.ArchiveFileCopy.node << nodes_at_dest, di.ArchiveFileCopy.has_file == "Y"
        )
    )
    # Then combine to get all file(copies) that are available at the source but
    # not at the destination...
    copy = di.ArchiveFileCopy.select().where(
        di.ArchiveFileCopy.node == from_node,
        di.ArchiveFileCopy.has_file == "Y",
        ~(di.ArchiveFileCopy.file << files_at_dest),
    )
    # If the target option has been specified, only copy nodes also not
    # available there...
    if target is not None:
        # Fetch a reference to the target group
        try:
            target_group = di.StorageGroup.get(name=target)
        except pw.DoesNotExist:
            raise RuntimeError('Target group "%s" does not exist in the DB.' % target)
        # First get the nodes at the destination...
        nodes_at_target = di.StorageNode.select().where(
            di.StorageNode.group == target_group
        )
        # Then use this to get a list of all files at the destination...
        files_at_target = (
            di.ArchiveFile.select()
            .join(di.ArchiveFileCopy)
            .where(
                di.ArchiveFileCopy.node << nodes_at_target,
                di.ArchiveFileCopy.has_file == "Y",
            )
        )
        # Only match files that are also not available at the target
        copy = copy.where(~(di.ArchiveFileCopy.file << files_at_target))
    # In transport mode (DEPRECATED) we only move files that don't have an
    # archive copy elsewhere...
    if transport:
        import warnings
        warnings.warn("Transport mode is deprecated. Try to use --target instead.")
        # Get list of other archive nodes
        other_archive_nodes = di.StorageNode.select().where(
            di.StorageNode.storage_type == "A", di.StorageNode.id != from_node
        )
        files_in_archive = (
            di.ArchiveFile.select()
            .join(di.ArchiveFileCopy)
            .where(
                di.ArchiveFileCopy.node << other_archive_nodes,
                di.ArchiveFileCopy.has_file == "Y",
            )
        )
        copy = copy.where(~(di.ArchiveFileCopy.file << files_in_archive))
    # Join onto ArchiveFile for later query parts
    copy = copy.join(di.ArchiveFile)
    # If requested, limit query to a specific acquisition...
    if acq is not None:
        # Fetch acq if specified
        try:
            acq = di.ArchiveAcq.get(name=acq)
        except pw.DoesNotExist:
            raise Exception('Acquisition "%s" does not exist in the DB.' % acq)
        # Restrict files to be in the acquisition
        copy = copy.where(di.ArchiveFile.acq == acq)
    if not copy.count():
        print("No files to copy from node %s." % (node_name))
        return
    # Show acquisitions based summary of files to be copied
    if show_acq:
        acqs = [c.file.acq.name for c in copy]
        import collections
        for acq, count in collections.Counter(acqs).items():
            print("%s [%i files]" % (acq, count))
    # Show all files to be copied
    if show_files:
        for c in copy:
            print("%s/%s" % (c.file.acq.name, c.file.name))
    size_bytes = copy.aggregate(pw.fn.Sum(di.ArchiveFile.size_b))
    size_gb = int(size_bytes) / 1073741824.0
    print(
        "Will request that %d files (%.1f GB) be copied from node %s to group %s."
        % (copy.count(), size_gb, node_name, group_name)
    )
    if not (force or click.confirm("Do you want to proceed?")):
        print("Aborted.")
        return
    dtnow = datetime.datetime.now()
    # Perform update in a transaction to avoid any clobbering from concurrent updates
    with di.ArchiveFileCopyRequest._meta.database.atomic():
        # Get a list of all the file ids for the copies we should perform
        files_ids = [c.file_id for c in copy]
        # File ids of existing requests between this node/group pair; a set
        # gives O(1) membership tests below.
        requests = di.ArchiveFileCopyRequest.select().where(
            di.ArchiveFileCopyRequest.group_to == to_group,
            di.ArchiveFileCopyRequest.node_from == from_node,
        )
        req_file_ids = {req.file_id for req in requests}
        # BUG FIX: filter() returns an iterator on Python 3, so the len() calls
        # below raised TypeError; materialise both partitions as lists.
        files_in = [fid for fid in files_ids if fid in req_file_ids]
        files_out = [fid for fid in files_ids if fid not in req_file_ids]
        sys.stdout.write(
            "Updating %i existing requests and inserting %i new ones.\n"
            % (len(files_in), len(files_out))
        )
        # Perform an update of all the existing copy requests
        if len(files_in) > 0:
            update = di.ArchiveFileCopyRequest.update(
                nice=nice,
                completed=False,
                cancelled=False,
                timestamp=dtnow,
                n_requests=di.ArchiveFileCopyRequest.n_requests + 1,
            )
            update = update.where(
                di.ArchiveFileCopyRequest.file << files_in,
                di.ArchiveFileCopyRequest.group_to == to_group,
                di.ArchiveFileCopyRequest.node_from == from_node,
            )
            update.execute()
        # Insert any new requests
        if len(files_out) > 0:
            # Construct a list of all the rows to insert
            insert = [
                {
                    "file": fid,
                    "node_from": from_node,
                    "nice": 0,
                    "group_to": to_group,
                    "completed": False,
                    "n_requests": 1,
                    "timestamp": dtnow,
                }
                for fid in files_out
            ]
            # Do a bulk insert of these new rows
            di.ArchiveFileCopyRequest.insert_many(insert).execute()
@cli.command()
@click.option(
    "--all", help="Show the status of all nodes, not just mounted ones.", is_flag=True
)
def status(all):
    """Summarise the status of alpenhorn storage nodes."""
    import tabulate
    # Columns fetched per node: name, file count, total bytes, host, root.
    fields = (
        di.StorageNode.name,
        pw.fn.Count(di.ArchiveFileCopy.id).alias("count"),
        pw.fn.Sum(di.ArchiveFile.size_b).alias("total_size"),
        di.StorageNode.host,
        di.StorageNode.root,
    )
    # Aggregate over file copies actually present on each node.
    node_query = (
        di.StorageNode.select(*fields)
        .join(di.ArchiveFileCopy)
        .where(di.ArchiveFileCopy.has_file == "Y")
        .join(di.ArchiveFile)
        .group_by(di.StorageNode)
        .order_by(di.StorageNode.name)
    )
    if not all:
        node_query = node_query.where(di.StorageNode.mounted)
    # Archive-wide totals, used to express per-node percentages.
    total_count, total_size = di.ArchiveFile.select(
        pw.fn.Count(di.ArchiveFile.id).alias("count"),
        pw.fn.Sum(di.ArchiveFile.size_b).alias("total_size"),
    ).scalar(as_tuple=True)
    rows = []
    for name, count, size, host, root in node_query.tuples():
        rows.append(
            [
                name,
                int(count),
                int(size) / 2 ** 40.0,
                100.0 * int(count) / int(total_count),
                100.0 * int(size) / int(total_size),
                "%s:%s" % (host, root),
            ]
        )
    headers = ["Node", "Files", "Size [TB]", "Files [%]", "Size [%]", "Path"]
    print(tabulate.tabulate(rows, headers=headers, floatfmt=".1f"))
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.option("--md5", help="perform full check against md5sum", is_flag=True)
@click.option(
    "--fixdb", help="fix up the database to be consistent with reality", is_flag=True
)
@click.option(
    "--acq",
    metavar="ACQ",
    multiple=True,
    help="Limit verification to specified acquisitions. Use repeated --acq flags to specify multiple acquisitions.",
)
def verify(node_name, md5, fixdb, acq):
    """Verify the archive on NODE against the database."""
    import os
    try:
        this_node = di.StorageNode.get(di.StorageNode.name == node_name)
    except pw.DoesNotExist:
        print("Specified node does not exist.")
        return
    ## Use a complicated query with a tuples construct to fetch everything we
    ## need in a single query. This massively speeds up the whole process versus
    ## fetching all the FileCopy's then querying for Files and Acqs.
    # NOTE: the order of columns here must match the tuple unpacking in the
    # scanning loop below.
    lfiles = (
        di.ArchiveFile.select(
            di.ArchiveFile.name,
            di.ArchiveAcq.name,
            di.ArchiveFile.size_b,
            di.ArchiveFile.md5sum,
            di.ArchiveFileCopy.id,
        )
        .join(di.ArchiveAcq)
        .switch(di.ArchiveFile)
        .join(di.ArchiveFileCopy)
        .where(di.ArchiveFileCopy.node == this_node, di.ArchiveFileCopy.has_file == "Y")
        .tuples()
    )
    # Accumulators: paths for reporting, copy ids for the optional DB fix-up.
    missing_files = []
    corrupt_files = []
    missing_ids = []
    corrupt_ids = []
    nfiles = 0
    # Walk every copy the database claims is present on this node and compare
    # it against the filesystem.
    with click.progressbar(lfiles, label="Scanning files") as lfiles_iter:
        for filename, acqname, filesize, md5sum, fc_id in lfiles_iter:
            # Skip if not in specified acquisitions
            if len(acq) > 0 and acqname not in acq:
                continue
            nfiles += 1
            filepath = this_node.root + "/" + acqname + "/" + filename
            # Check if file is plain missing
            if not os.path.exists(filepath):
                missing_files.append(filepath)
                missing_ids.append(fc_id)
                continue
            # Full verification compares md5 sums; the cheap default only
            # compares file sizes.
            if md5:
                file_md5 = di.md5sum_file(filepath)
                corrupt = file_md5 != md5sum
            else:
                corrupt = os.path.getsize(filepath) != filesize
            if corrupt:
                corrupt_files.append(filepath)
                corrupt_ids.append(fc_id)
                continue
    if len(missing_files) > 0:
        print()
        print("=== Missing files ===")
        for fname in missing_files:
            print(fname)
    if len(corrupt_files) > 0:
        print()
        print("=== Corrupt files ===")
        for fname in corrupt_files:
            print(fname)
    print()
    print("=== Summary ===")
    print("  %i total files" % nfiles)
    print("  %i missing files" % len(missing_files))
    print("  %i corrupt files" % len(corrupt_files))
    print()
    # Fix up the database by marking files as missing, and marking
    # corrupt files for verification by alpenhornd.
    if fixdb:
        # Make sure we connect RW
        di.connect_database(read_write=True)
        if (len(missing_files) > 0) and click.confirm("Fix missing files"):
            missing_count = (
                di.ArchiveFileCopy.update(has_file="N")
                .where(di.ArchiveFileCopy.id << missing_ids)
                .execute()
            )
            print("  %i marked as missing" % missing_count)
        if (len(corrupt_files) > 0) and click.confirm("Fix corrupt files"):
            corrupt_count = (
                di.ArchiveFileCopy.update(has_file="M")
                .where(di.ArchiveFileCopy.id << corrupt_ids)
                .execute()
            )
            print("  %i corrupt files marked for verification" % corrupt_count)
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.option(
    "--days", "-d", help="clean files older than <days>", type=int, default=None
)
@click.option("--force", "-f", help="force cleaning on an archive node", is_flag=True)
@click.option("--now", "-n", help="force immediate removal", is_flag=True)
@click.option(
    "--target",
    metavar="TARGET_GROUP",
    default=None,
    type=str,
    help="Only clean files already available in this group.",
)
@click.option(
    "--acq", metavar="ACQ", default=None, type=str, help="Limit removal to acquisition"
)
def clean(node_name, days, force, now, target, acq):
    """Clean up NODE by marking older files as potentially removable.
    If --target is specified we will only remove files already available in the
    TARGET_GROUP. This is useful for cleaning out intermediate locations such as
    transport disks.
    Using the --days flag will only clean correlator and housekeeping
    files which have a timestamp associated with them. It will not
    touch other types. If no --days flag is given, all files will be
    considered for removal.
    """
    # We need write access: cleaning updates ArchiveFileCopy rows.
    di.connect_database(read_write=True)
    try:
        this_node = di.StorageNode.get(di.StorageNode.name == node_name)
    except pw.DoesNotExist:
        print("Specified node does not exist.")
        # BUG FIX: previously fell through and hit a NameError on this_node.
        return
    # Check to see if we are on an archive node
    if this_node.storage_type == "A":
        if force or click.confirm("DANGER: run clean on archive node?"):
            print("%s is an archive node. Forcing clean." % node_name)
        else:
            print("Cannot clean archive node %s without forcing." % node_name)
            return
    # Select FileCopys on this node.
    files = di.ArchiveFileCopy.select(di.ArchiveFileCopy.id).where(
        di.ArchiveFileCopy.node == this_node, di.ArchiveFileCopy.wants_file == "Y"
    )
    # Limit to acquisition
    if acq is not None:
        try:
            acq = di.ArchiveAcq.get(name=acq)
        # BUG FIX: was "pw.DoesNotExit", which raised AttributeError instead
        # of catching the missing-acquisition case.
        except pw.DoesNotExist:
            raise RuntimeError("Specified acquisition %s does not exist" % acq)
        files_in_acq = di.ArchiveFile.select().where(di.ArchiveFile.acq == acq)
        files = files.where(di.ArchiveFileCopy.file << files_in_acq)
    # If the target option has been specified, only clean files also available there...
    if target is not None:
        # Fetch a reference to the target group
        try:
            target_group = di.StorageGroup.get(name=target)
        except pw.DoesNotExist:
            raise RuntimeError('Target group "%s" does not exist in the DB.' % target)
        # First get the nodes at the destination...
        nodes_at_target = di.StorageNode.select().where(
            di.StorageNode.group == target_group
        )
        # Then use this to get a list of all files at the destination...
        files_at_target = (
            di.ArchiveFile.select()
            .join(di.ArchiveFileCopy)
            .where(
                di.ArchiveFileCopy.node << nodes_at_target,
                di.ArchiveFileCopy.has_file == "Y",
            )
        )
        # Only match files that are also available at the target
        files = files.where(di.ArchiveFileCopy.file << files_at_target)
    # If --days has been set we need to restrict to files older than the given
    # time. This only works for a few particular file types
    if days is not None and days > 0:
        # Get the time for the oldest files to keep
        oldest = datetime.datetime.now() - datetime.timedelta(days)
        oldest_unix = ephemeris.ensure_unix(oldest)
        # List of filetypes we want to update, needs a human readable name and a
        # FileInfo table.
        filetypes = [["correlation", di.CorrFileInfo], ["housekeeping", di.HKFileInfo]]
        file_ids = []
        # Iterate over file types for cleaning
        for name, infotable in filetypes:
            # Filter to fetch only ones with a start time older than `oldest`
            oldfiles = (
                files.join(di.ArchiveFile)
                .join(infotable)
                .where(infotable.start_time < oldest_unix)
            )
            # NOTE(review): list(oldfiles) yields model instances; the `<< `
            # lookup below relies on peewee coercing them to primary keys.
            local_file_ids = list(oldfiles)
            # Get number of correlation files
            count = oldfiles.count()
            if count > 0:
                size_bytes = (
                    di.ArchiveFileCopy.select()
                    .where(di.ArchiveFileCopy.id << local_file_ids)
                    .join(di.ArchiveFile)
                    .aggregate(pw.fn.Sum(di.ArchiveFile.size_b))
                )
                size_gb = int(size_bytes) / 2 ** 30.0
                print(
                    "Cleaning up %i %s files (%.1f GB) from %s "
                    % (count, name, size_gb, node_name)
                )
            file_ids += local_file_ids
    # If days is not set, then just select all files that meet the requirements so far
    else:
        file_ids = list(files)
        count = files.count()
        if count > 0:
            size_bytes = (
                di.ArchiveFileCopy.select()
                .where(di.ArchiveFileCopy.id << file_ids)
                .join(di.ArchiveFile)
                .aggregate(pw.fn.Sum(di.ArchiveFile.size_b))
            )
            size_gb = int(size_bytes) / 1073741824.0
            print(
                "Cleaning up %i files (%.1f GB) from %s " % (count, size_gb, node_name)
            )
    # If there are any files to clean, ask for confirmation and the mark them in
    # the database for removal
    if len(file_ids) > 0:
        if force or click.confirm("  Are you sure?"):
            print("  Marking files for cleaning.")
            # "N" means removable right now; "M" means removable when space is
            # needed.
            state = "N" if now else "M"
            update = di.ArchiveFileCopy.update(wants_file=state).where(
                di.ArchiveFileCopy.id << file_ids
            )
            n = update.execute()
            print("Marked %i files for cleaning" % n)
        else:
            print("  Cancelled")
    else:
        print("No files selected for cleaning on %s." % node_name)
@cli.command()
@click.option(
    "--host",
    "-H",
    help="use specified host rather than local machine",
    type=str,
    default=None,
)
def mounted(host):
    """list the nodes mounted on this, or another specified, machine"""
    import socket
    # Default to the short name of the local machine.
    if host is None:
        host = socket.gethostname().split(".")[0]
    found_any = False
    node_query = di.StorageNode.select().where(
        di.StorageNode.host == host, di.StorageNode.mounted == True
    )
    for node in node_query:
        # Count all copies recorded on this node.
        n_file = (
            di.ArchiveFileCopy.select().where(di.ArchiveFileCopy.node == node).count()
        )
        print("%-25s %-30s %5d files" % (node.name, node.root, n_file))
        found_any = True
    if not found_any:
        print("No nodes are mounted on host %s." % host)
@cli.command()
@click.argument("serial_num")
def format_transport(serial_num):
    """Interactive routine for formatting a transport disc as a storage
    node; formats and labels the disc as necessary, the adds to the
    database. The disk is specified using the manufacturers
    SERIAL_NUM, which is printed on the disk.
    """
    import glob
    import os
    if os.getuid() != 0:
        print("You must be root to run mount on a transport disc. I quit.")
        return
    # Find the disc.
    dev = glob.glob("/dev/disk/by-id/*%s" % serial_num)
    if len(dev) == 0:
        print("No disc with that serial number is attached.")
        return
    elif len(dev) > 1:
        print("Confused: found more than one device matching that serial number:")
        for d in dev:
            # BUG FIX: previously printed the whole list ("dev") on every
            # iteration instead of each candidate device.
            print(" %s" % d)
        print("Aborting.")
        return
    dev = dev[0]
    dev_part = "%s-part1" % dev
    # Figure out if it is formatted.
    print("Checking to see if disc is formatted. Please wait.")
    fp = os.popen("parted -s %s print" % dev)
    formatted = False
    part_start = False
    # A partition table listing has a "Number ... Start ... File system" header
    # row; any non-blank row after it means at least one partition exists.
    while True:
        l = fp.readline()
        if not l:
            break
        if l.find("Number") == 0 and l.find("Start") > 0 and l.find("File system") > 0:
            part_start = True
        elif l.strip() != "" and part_start:
            formatted = True
    fp.close()
    if not formatted:
        if not click.confirm("Disc is not formatted. Should I format it?"):
            return
        print("Creating partition. Please wait.")
        os.system(
            "parted -s -a optimal %s mklabel gpt -- mkpart primary 0%% 100%%" % dev
        )
        print("Formatting disc. Please wait.")
        os.system("mkfs.ext4 %s -m 0 -L CH-%s" % (dev_part, serial_num))
    else:
        print("Disc is already formatted.")
    # NOTE(review): get_e2label is not defined in the visible part of this
    # module — confirm it is provided elsewhere in the file.
    e2label = get_e2label(dev_part)
    name = "CH-%s" % serial_num
    if e2label and e2label != name:
        # BUG FIX: the message had a %s placeholder but no argument, and the
        # bare "exit" below it was a no-op (the builtin was referenced, never
        # called), so execution fell through despite a bad label.
        print(
            "Disc label %s does not conform to labelling standard, "
            "which is CH-<serialnum>." % e2label
        )
        exit(1)
    elif not e2label:
        print('Labelling the disc as "%s" (using e2label) ...' % (name))
        assert dev_part is not None
        assert len(name) <= MAX_E2LABEL_LEN
        stat = os.system("/sbin/e2label %s %s" % (dev_part, name))
        if stat:
            print("Failed to e2label! Stat = %s. I quit." % (stat))
            # BUG FIX: exit() reported success (status 0) on failure.
            exit(1)
    # Ensure the mount path exists.
    root = "/mnt/%s" % name
    if not os.path.isdir(root):
        print("Creating mount point %s." % root)
        os.mkdir(root)
    # Check to see if the disc is mounted.
    fp = os.popen("df")
    mounted = False
    dev_part_abs = os.path.realpath(dev_part)
    while 1:
        l = fp.readline()
        if not l:
            break
        if l.find(root) > 0:
            if l[: len(dev_part)] == dev or l[: len(dev_part_abs)] == dev_part_abs:
                mounted = True
            else:
                # BUG FIX: the message string was being *called* as a function
                # instead of %-formatted (TypeError at runtime).
                print(
                    "%s is a mount point, but %s is already mounted there."
                    % (root, l.split()[0])
                )
    fp.close()
    try:
        node = di.StorageNode.get(name=name)
    except pw.DoesNotExist:
        print(
            "This disc has not been registered yet as a storage node. "
            "Registering now."
        )
        try:
            group = di.StorageGroup.get(name="transport")
        except pw.DoesNotExist:
            print('Hmmm. Storage group "transport" does not exist. I quit.')
            # BUG FIX: exit() reported success (status 0) on failure.
            exit(1)
        # We need to write to the database.
        di.connect_database(read_write=True)
        node = di.StorageNode.create(
            name=name, root=root, group=group, storage_type="T", min_avail_gb=1
        )
        print("Successfully created storage node.")
    print("Node created but not mounted. Run alpenhorn mount_transport for that.")
@cli.command()
@click.pass_context
@click.argument("node")
@click.option("--user", help="username to access this node.", type=str, default=None)
@click.option(
    "--address", help="address for remote access to this node.", type=str, default=None
)
def mount_transport(ctx, node, user, address):
    """Mount a transport disk into the system and then make it available to alpenhorn."""
    # Transport discs always live under /mnt/<node-name>.
    mount_point = "/mnt/%s" % node

    print("Mounting disc at %s" % mount_point)
    os.system("mount %s" % mount_point)

    # Hand off to the generic `mount` command to register the disc with alpenhorn.
    ctx.invoke(mount, name=node, path=mount_point, user=user, address=address)
@cli.command()
@click.pass_context
@click.argument("node")
def unmount_transport(ctx, node):
    """Unmount a transport disk from the system and unregister it from alpenhorn."""
    # BUG FIX: the docstring (which click shows as the command's --help text)
    # was copy-pasted from mount_transport and described mounting.
    mnt_point = "/mnt/%s" % node
    print("Unmounting disc at %s" % mnt_point)
    os.system("umount %s" % mnt_point)
    # Mark the node as unmounted in the database as well.
    ctx.invoke(unmount, root_or_name=node)
@cli.command()
@click.argument("name")
@click.option("--path", help="Root path for this node", type=str, default=None)
@click.option("--user", help="username to access this node.", type=str, default=None)
@click.option(
    "--address", help="address for remote access to this node.", type=str, default=None
)
@click.option(
    "--hostname",
    help="hostname running the alpenhornd instance for this node (set to this hostname by default).",
    type=str,
    default=None,
)
def mount(name, path, user, address, hostname):
    """Interactive routine for mounting a storage node located at ROOT."""
    import socket

    # We need to write to the database.
    di.connect_database(read_write=True)

    try:
        node = di.StorageNode.get(name=name)
    except pw.DoesNotExist:
        print('Storage node "%s" does not exist. I quit.' % name)
        # BUG FIX: previously execution fell through here and raised a
        # NameError on the undefined `node` below; exit like the other
        # error paths in this file do.
        exit()

    if node.mounted:
        print('Node "%s" is already mounted.' % name)
        return

    # Set the default hostname if required
    if hostname is None:
        hostname = socket.gethostname()
        print('I will set the host to "%s".' % hostname)

    # Set the parameters of this node
    node.username = user
    node.address = address
    node.mounted = True
    node.host = hostname
    if path is not None:
        node.root = path
    node.save()

    print('Successfully mounted "%s".' % name)
@cli.command()
@click.argument("root_or_name")
def unmount(root_or_name):
    """Unmount a storage node with location or named ROOT_OR_NAME."""
    import os
    import socket

    # Unmounting updates the node record, so open the database read-write.
    di.connect_database(read_write=True)

    try:
        node = di.StorageNode.get(name=root_or_name)
    except pw.DoesNotExist:
        # Not a node name; try to interpret the argument as a mount path.
        if root_or_name[-1] == "/":
            root_or_name = root_or_name[:-1]  # drop a single trailing slash
        if not os.path.exists(root_or_name):
            print("That is neither a node name, nor a path on this host. I quit.")
            exit()
        try:
            node = di.StorageNode.get(root=root_or_name, host=socket.gethostname())
        except pw.DoesNotExist:
            print("That is neither a node name nor a root name that is known. I quit.")
            exit()

    if not node.mounted:
        print("There is no node mounted there any more.")
        return
    node.mounted = False
    node.save()
    print("Node successfully unmounted.")
@cli.command()
@click.argument("node_name", metavar="NODE")
@click.option("-v", "--verbose", count=True)
@click.option(
    "--acq",
    help="Limit import to specified acquisition directories",
    multiple=True,
    default=None,
)
@click.option("--dry", "-d", help="Dry run. Do not modify database.", is_flag=True)
def import_files(node_name, verbose, acq, dry):
    """Scan the current directory for known acquisition files and add them into the database for NODE.

    This command is useful for manually maintaining an archive where we can run
    alpenhornd in the usual manner.
    """
    import glob

    from ch_util import data_index as di

    di.connect_database(read_write=True)
    import peewee as pw

    # Construct list of acqs to scan.
    # BUG FIX: a click option with multiple=True yields an empty tuple (not
    # None) when unset, so the original `if acq is None` test never fired
    # and no directories were scanned by default.
    if not acq:
        acqs = glob.glob("*")
    else:
        acqs = acq

    # Keep track of state as we process the files
    added_files = []  # Files we have added to the database
    corrupt_files = []  # Known files which are corrupt
    registered_files = []  # Files already registered in the database
    unknown_files = []  # Files not known in the database
    not_acqs = []  # Directories which were not known acquisitions

    # Fetch a reference to the node
    try:
        node = di.StorageNode.select().where(di.StorageNode.name == node_name).get()
    except pw.DoesNotExist:
        print("Unknown node.")
        return

    with click.progressbar(acqs, label="Scanning acquisitions") as acq_iter:
        for acq_name in acq_iter:
            # Skip directories whose names are not valid acquisition names.
            try:
                di.parse_acq_name(acq_name)
            except di.Validation:
                not_acqs.append(acq_name)
                continue

            # Skip acquisitions the database does not know about.
            try:
                acq = di.ArchiveAcq.select().where(di.ArchiveAcq.name == acq_name).get()
            except pw.DoesNotExist:
                not_acqs.append(acq_name)
                continue

            files = glob.glob(acq_name + "/*")

            # Fetch lists of all files in this acquisition, and all
            # files in this acq with local copies
            file_names = [f.name for f in acq.files]
            local_file_names = [
                f.name
                for f in acq.files.join(di.ArchiveFileCopy).where(
                    di.ArchiveFileCopy.node == node
                )
            ]

            for fn in files:
                f_name = os.path.split(fn)[1]

                # Check if file exists in database
                if f_name not in file_names:
                    unknown_files.append(fn)
                    continue

                # Check if file is already registered on this node
                if f_name in local_file_names:
                    registered_files.append(fn)
                else:
                    archive_file = (
                        di.ArchiveFile.select()
                        .where(di.ArchiveFile.name == f_name, di.ArchiveFile.acq == acq)
                        .get()
                    )

                    # A size mismatch against the database record is treated
                    # as corruption; the copy is not registered.
                    if os.path.getsize(fn) != archive_file.size_b:
                        corrupt_files.append(fn)
                        continue

                    added_files.append(fn)
                    if not dry:
                        di.ArchiveFileCopy.create(
                            file=archive_file, node=node, has_file="Y", wants_file="Y"
                        )

    print("\n==== Summary ====")
    print()
    print("Added %i files" % len(added_files))
    print()
    print("%i corrupt files." % len(corrupt_files))
    print("%i files already registered." % len(registered_files))
    print("%i files not known" % len(unknown_files))
    print("%i directories were not acquisitions." % len(not_acqs))

    if verbose > 0:
        print()
        print("Added files:")
        print()
        for fn in added_files:
            print(fn)

    if verbose > 1:
        print("Corrupt:")
        for fn in corrupt_files:
            print(fn)
        print()

        print("Unknown files:")
        for fn in unknown_files:
            print(fn)
        print()

        print("Unknown acquisitions:")
        for fn in not_acqs:
            print(fn)
        print()
# A few utility routines for dealing with filesystems

# Maximum number of characters in an ext2/3/4 filesystem label.
MAX_E2LABEL_LEN = 16
def get_e2label(dev):
    """Return the filesystem label of block device *dev*, or None.

    Runs ``/sbin/e2label``.  None is returned when the tool is missing,
    exits with an error, writes to stderr, or reports a label longer than
    MAX_E2LABEL_LEN.
    """
    import subprocess

    # BUG FIX: os.popen3 was removed in Python 3 (this file otherwise uses
    # print() functions); use subprocess with an argument list so `dev` is
    # never interpreted by a shell.
    try:
        proc = subprocess.run(
            ["/sbin/e2label", dev], capture_output=True, text=True
        )
    except OSError:
        # e2label itself is unavailable on this system.
        return None
    if proc.returncode != 0 or proc.stderr:
        return None
    label = proc.stdout.strip()
    # NOTE(review): the original compared with `<`, rejecting valid
    # 16-character labels, while mount() asserts len(name) <= MAX_E2LABEL_LEN;
    # use <= for consistency.
    if len(label) <= MAX_E2LABEL_LEN:
        return label
    return None
def get_mount_device(path):
    """Return the device currently mounted at *path*, or None.

    Parses the output of the ``mount`` command; if several entries match,
    the last one wins (matching the original behaviour).
    """
    import os

    with os.popen("mount", "r") as pipe:
        output = pipe.read()

    target = os.path.abspath(path)
    device = None
    for line in output.split("\n"):
        if not len(line):
            continue
        fields = line.split()
        # Every mount(8) line has the form "<dev> on <path> type ...".
        assert fields[1] == "on"
        if fields[2] == target:
            device = fields[0]
    return device
| {
"repo_name": "radiocosmology/alpenhorn",
"path": "alpenhorn/legacy/client.py",
"copies": "1",
"size": "32643",
"license": "mit",
"hash": -4714294905706121000,
"line_mean": 30.6614936954,
"line_max": 116,
"alpha_frac": 0.579297246,
"autogenerated": false,
"ratio": 3.8475954738330977,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49268927198330975,
"avg_score": null,
"num_lines": null
} |
"""Alpenhorn service."""
import logging
import sys
import click
from . import auto_import, config, db, extensions, logger, storage, update, util
log = logging.getLogger(__name__)
# Register Hook to Log Exception
# ==============================
def log_exception(*exc_info):
    """Uncaught-exception hook: record the failure through the module logger."""
    log.error("Fatal error!", exc_info=exc_info)


sys.excepthook = log_exception
@click.command()
def cli():
    """Alpenhorn data management service."""
    # Load the configuration for alpenhorn
    config.load_config()

    # Set up logging
    logger.start_logging()

    # Attempt to load any alpenhorn extensions
    extensions.load_extensions()

    # Connect to the database using the loaded config
    db.config_connect()

    # Register any extension types
    extensions.register_type_extensions()

    # Get the name of this host
    host = util.get_short_hostname()

    # Get the list of currently active nodes
    node_list = list(
        storage.StorageNode.select().where(
            storage.StorageNode.host == host, storage.StorageNode.active
        )
    )

    # Warn if there are no active nodes. We used to exit here, but actually
    # it's useful to keep alpenhornd running for nodes where we exclusively use
    # transport disks (e.g. jingle)
    if len(node_list) == 0:
        # Logger.warn is deprecated; warning() with lazy %-args is the
        # supported spelling.
        log.warning('No nodes on this host ("%s") registered in the DB!', host)

    # Setup the observers to watch the nodes for new files
    auto_import.setup_observers(node_list)

    # Now catch up with the existing files to see if there are any new ones
    # that should be imported
    auto_import.catchup(node_list)

    # Enter main loop performing node updates
    try:
        update.update_loop(host)
    # Exit cleanly on a keyboard interrupt
    except KeyboardInterrupt:
        log.info("Exiting...")

    auto_import.stop_observers()

    # Wait for watchdog threads to terminate
    auto_import.join_observers()
| {
"repo_name": "radiocosmology/alpenhorn",
"path": "alpenhorn/service.py",
"copies": "1",
"size": "1912",
"license": "mit",
"hash": 8931734148231685000,
"line_mean": 24.1578947368,
"line_max": 80,
"alpha_frac": 0.664748954,
"autogenerated": false,
"ratio": 3.991649269311065,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5156398223311065,
"avg_score": null,
"num_lines": null
} |
# 25-letter Playfair alphabet: a standard 5x5 square, with "J" folded into "I".
alpha = ['A','B','C','D','E','F','G','H','I','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
def get_key():
    """Read a keyword from stdin and return a 25-letter Playfair key sequence.

    Valid letters of the keyword come first (duplicates dropped, "J" folded
    into "I"), followed by the rest of the alphabet in order.
    """
    k = raw_input().upper()
    key = []
    for char in k:
        if char in alpha and char not in key: # add the character to the matrix if it's valid and not already in the matrix
            key.append(char)
        # BUG FIX: was `char is "J"` (identity comparison on a string, which
        # only works by CPython interning) and did not guard against "I"
        # already being present, which produced a duplicate "I" in the key.
        elif char == "J" and "I" not in key: # handle the case when the letter J appears in the key
            key.append("I")
    for char in alpha:
        if char not in key: # add the rest of the alphabet not appearing in the key to the matrix
            key.append(char)
    return key
def gen_matrix(key):
    """Build the 5x5 Playfair matrix.

    With the empty string the plain 25-letter alphabet is used; otherwise
    *key* must be a 25-character key sequence (e.g. from get_key()).
    Returns a list of five 5-element row lists.
    """
    # The two original branches were identical except for the source
    # sequence; pick the source once and fill the matrix in a single loop.
    # (alpha is only looked up when key is empty.)
    source = alpha if key == '' else key
    matrix = []
    counter = 0
    for row in range(5):
        cells = []
        for col in range(5):
            cells.append(source[counter])
            counter += 1
        matrix.append(cells)
    return matrix
def print_matrix(matrix):
    """Pretty-print the 5x5 Playfair matrix, one space-separated row per line."""
    for counter in xrange(5):
        print "%c %c %c %c %c" % (matrix[counter][0], matrix[counter][1], matrix[counter][2], matrix[counter][3], matrix[counter][4])
    print "\n"
def get_message():
    """Read a message from stdin and normalize it for Playfair.

    Uppercases the input, folds "J" into "I", replaces "." with "X" and
    drops every other character outside the 25-letter alphabet.
    """
    raw = raw_input()
    cleaned = []
    for char in raw.upper():
        if char == "J": # handle the case when "J" appears in the message
            cleaned.append("I")
        elif char == ".": # swap out the period with an x, for convenience
            cleaned.append("X")
        elif char in alpha: # keep valid characters as-is
            cleaned.append(char)
    return ''.join(cleaned)
def encrypt(message, key_matrix):
    """Playfair-encrypt *message* with *key_matrix* and print the ciphertext.

    Each digraph is located in the matrix via get_coords(); letters in the
    same row shift one column right, letters in the same column shift one
    row down, otherwise the rectangle corners are swapped.
    """
    coords = []
    ciphertext = []
    digraphs = parse_message(message)
    for d in digraphs:
        swap = []
        temp = []  # NOTE(review): unused
        coords = get_coords(d, key_matrix)
        if coords[0][0] == coords[1][0]: # digraph lies on same x axis
            x,y = ((coords[0][0], (coords[0][1] + 1) % 5))
            swap.append((x,y))
            x,y = ((coords[1][0], (coords[1][1] + 1) % 5))
            swap.append((x,y))
        elif coords[0][1] == coords[1][1]: # digraph lies on same y axis
            x,y = (((coords[0][0] + 1) % 5), coords[0][1])
            swap.append((x,y))
            x,y = (((coords[1][0] + 1) % 5), coords[1][1])
            swap.append((x,y))
        else: # digraph lies on different x & y axis
            swap.append((coords[0][0], coords[1][1]))
            swap.append((coords[1][0], coords[0][1]))
        for x,y in swap:
            ciphertext.append(key_matrix[x][y])
    print "Your encrypted message is: %s " % ''.join(ciphertext)
def decrypt(message, key_matrix):
    """Playfair-decrypt *message* with *key_matrix* and print the plaintext.

    Inverse of encrypt(): letters in the same row shift one column left,
    letters in the same column shift one row up, otherwise the rectangle
    corners are swapped.
    """
    coords = []
    plaintext = []
    digraphs = parse_message(message)
    for d in digraphs:
        swap = []
        temp = []  # NOTE(review): unused
        coords = get_coords(d, key_matrix)
        if coords[0][0] == coords[1][0]: # digraph lies on same x axis
            x,y = ((coords[0][0], (coords[0][1] - 1) % 5))
            swap.append((x,y))
            x,y = ((coords[1][0], (coords[1][1] - 1) % 5))
            swap.append((x,y))
        elif coords[0][1] == coords[1][1]: # digraph lies on same y axis
            x,y = (((coords[0][0] - 1) % 5), coords[0][1])
            swap.append((x,y))
            x,y = (((coords[1][0] - 1) % 5), coords[1][1])
            swap.append((x,y))
        else: # digraph lies on different x & y axis
            swap.append((coords[0][0], coords[1][1]))
            swap.append((coords[1][0], coords[0][1]))
        for x,y in swap:
            plaintext.append(key_matrix[x][y])
    print "Your decrypted message is: %s " % ''.join(plaintext)
def parse_message(message):
    """Split *message* into a list of Playfair digraphs.

    A trailing single character is padded with "X"; when a digraph would
    contain a doubled letter, an "X" is inserted after the first letter and
    the message pointer advances by one instead of two.
    """
    digraphs = []
    while len(message) > 0:
        digraph = message[:2]
        if len(digraph) == 1: # trailing single character at the end of the message
            # Fixed the redundant `digraph = digraph = ...` double assignment.
            digraph = "%c%c" % (digraph[0], "X")
            digraphs.append(digraph)
            message = message[1:]
        elif digraph[0] == digraph[1]: # handle double letters appearing in the same digraph
            digraph = "%c%c" % (digraph[0], "X")
            digraphs.append(digraph)
            message = message[1:]
        else: # add the digraph to the list
            digraphs.append(digraph)
            message = message[2:]
    return digraphs
def get_coords(digraph, key_matrix):
    """Return the (row, col) position in *key_matrix* of each character of
    *digraph*, in order."""
    coords = []
    for char in digraph:
        # enumerate() replaces the Python-2-only xrange() index scan and
        # iterates the matrix directly.
        for x, row in enumerate(key_matrix):
            for y, cell in enumerate(row):
                if cell == char:
                    coords.append((x, y))
    return coords
def main():
    """Interactive demo: show the blank and keyed matrices, then encrypt and
    decrypt user-supplied messages."""
    m = gen_matrix('')
    print "Initial PLAYFAIR matrix:"
    print_matrix(m)
    print "Enter a key:"
    k = get_key()
    print "Keyed PLAYFAIR matrix:"
    m = gen_matrix(k)
    print_matrix(m)
    print "Enter the message you would like to encrypt"
    message = get_message()
    print "The message you entered was: %s" % message
    encrypt(message, m)
    print "Enter the message you would like to decrypt"
    message = get_message()
    print "The message you entered was: %s" % message
    decrypt(message, m)
if __name__ == "__main__":
main() | {
"repo_name": "akshitac8/Course-work",
"path": "Crytography/playfair_cipher.py",
"copies": "1",
"size": "4650",
"license": "apache-2.0",
"hash": 8971951797469037000,
"line_mean": 29.0133333333,
"line_max": 127,
"alpha_frac": 0.5903225806,
"autogenerated": false,
"ratio": 2.7744630071599046,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38647855877599047,
"avg_score": null,
"num_lines": null
} |
# Translation tables: MORSE[i] is the Morse code for ALPHABET[i].
ALPHABET = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q',
            'R','S','T','U','V','W','X','Y','Z','0','1','2','3','4','5','6','7',
            '8','9']
MORSE = ['.-','-...','-.-.','-..','.','..-.','--.','....','..','.---','-.-',
         '.-..','--','-.','---','.--.','--.-','.-.','...','-','..-','...-',
         '.--','-..-','-.--','--..','-----','.----','..---','...--','....-',
         '.....','-....','--...','---..','----.']
SPACE = ' '
SLASH = '/'

def Translate(input_, flag):
    '''
    Translate between plain text and Morse code.

    input_ = string: the text (or Morse tokens) to translate
    flag = string: 'to_morse' or 'from_morse'

    An unknown flag yields the empty string; characters/tokens with no
    translation are dropped. Encoding emits a trailing space after every
    symbol and a "/" for word breaks.
    '''
    pieces = []
    if flag == 'to_morse':
        encode_map = dict(zip(ALPHABET, MORSE))
        for char in input_:
            if char in encode_map:
                pieces.append(encode_map[char])
            if char == SPACE:
                pieces.append(SLASH)
            pieces.append(SPACE)
    if flag == 'from_morse':
        decode_map = dict(zip(MORSE, ALPHABET))
        for token in input_.split():
            if token == SLASH:
                pieces.append(SPACE)
            elif token in decode_map:
                pieces.append(decode_map[token])
    return "".join(pieces)
if __name__ == '__main__':
path = ""
while True:
path = input("Enter the path of the text file with the code to be"
"translated")
flag = input("Do you want to translate [to_morse] or [from_morse]?")
try:
with open(path, 'r') as f:
file = f.read()
f.close()
print(Translate(file, flag))
except FileNotFoundError:
print("Could not find {}".format(path)) | {
"repo_name": "JuBzzz/dailyprogrammer-challenges",
"path": "easy/challenge #7/morse_translator.py",
"copies": "1",
"size": "1683",
"license": "mit",
"hash": 3079106707886350300,
"line_mean": 29.6181818182,
"line_max": 80,
"alpha_frac": 0.3784907903,
"autogenerated": false,
"ratio": 3.4068825910931175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9195544116947098,
"avg_score": 0.017965852889204048,
"num_lines": 55
} |
# The 64 base64 digit characters in value order; '=' (padding) sits at index 64.
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="
def to_bin_repr(s):
    """
    Represent each character of a string as an 8-bit binary number.

    The result is a single joined string, e.g. "aa" -> "0110000101100001".
    (The previous docstring example "11000011100001" dropped the leading
    zeros and was wrong -- the function always emits 8 bits per character.)

    :param s: The input string
    :return: A string of '0'/'1' characters, 8 per input character
    """
    return ''.join(format(ord(x), '08b') for x in s)
def encode_base64(inp):
    """
    Encode an input string in base64.
    :param inp: The input string
    :return: Encoded base64 string
    """
    # TODO: Optimize the algorithm using binary operations on numbers; skip using strings -> performance gain
    bin_repr = to_bin_repr(inp)
    # 0, 8 or 16 leftover bits in the final 24-bit group determine padding.
    leftover = len(bin_repr) % 24
    # Zero-pad so the bit string splits evenly into 6-bit groups.
    # (The original padded `leftover` zeros -- up to 16 -- which only worked
    # by accident and relied on trimming bogus trailing symbols afterwards.)
    bin_repr += '0' * ((-len(bin_repr)) % 6)
    out = ''
    for start in range(0, len(bin_repr), 6):
        # Each 6-bit group indexes one symbol of the base64 alphabet.
        out += alphabet[int(bin_repr[start:start + 6], 2)]
    # One leftover byte encodes as two symbols plus '=='; two leftover
    # bytes as three symbols plus '='.
    if leftover == 8:
        out += '=='
    elif leftover == 16:
        out += '='
    return out
def decode_base64(inp):
    """
    Decode a base64 input string
    :param inp: The base64 input string
    :return: Decoded string
    """
    # NOTE(review): Python 2 only -- str.decode('hex') does not exist on a
    # Python 3 str, and xrange is Python 2 only.
    out = ''
    res = ''
    for a in inp:
        # Skip '=' padding characters (index 64 in the alphabet).
        if not int(format(alphabet.find(a))) == 64:
            out += str(format(alphabet.find(a), '06b'))
    i = len(out) % 8
    out += i * '0'
    for a in xrange(0, len(out), 8):
        pair = out[a:a+8]
        # All-zero bytes here are padding artifacts and are dropped.
        if not pair == ('0' * 8):
            # BUG FIX: '%x' produced an odd-length hex string for byte values
            # below 0x10, making .decode('hex') raise; '%02x' always yields
            # two hex digits.
            res += ('%02x' % int(pair, 2)).decode('hex').decode('utf-8')
    return res
def test_functions():
    """
    Exercise the functions provided inside this project against known
    input/output pairs.
    """
    # Test to_bin_repr()
    assert to_bin_repr('aa') == '0110000101100001'
    assert to_bin_repr('5') == '00110101'
    assert to_bin_repr('') == ''
    # Test encode() -- covers all three padding cases ('==', '=', none).
    assert encode_base64('M') == 'TQ=='
    assert encode_base64('Ma') == 'TWE='
    assert encode_base64('pleasure.') == 'cGxlYXN1cmUu'
    assert encode_base64('any carnal pleas') == 'YW55IGNhcm5hbCBwbGVhcw=='
    assert encode_base64('any carnal pleasu') == 'YW55IGNhcm5hbCBwbGVhc3U='
    assert encode_base64('any carnal pleasur') == 'YW55IGNhcm5hbCBwbGVhc3Vy'
    assert encode_base64('any carnal pleasure') == 'YW55IGNhcm5hbCBwbGVhc3VyZQ=='
    assert encode_base64('any carnal pleasure.') == 'YW55IGNhcm5hbCBwbGVhc3VyZS4='
    # Longer round trip on a full paragraph.
    text = """Man is distinguished, not only by his reason, but by this singular passion from other animals, which is a lust of the mind, that by a perseverance of delight in the continued and indefatigable generation of knowledge, exceeds the short vehemence of any carnal pleasure."""
    encoded_text = """TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4="""
    assert encode_base64(text) == encoded_text
    print decode_base64('YW55IGNhcm5hbCBwbGVhcw==')
    print "Tests: Successful."
test_functions() | {
"repo_name": "harrytodorov/py_base64",
"path": "base64.py",
"copies": "1",
"size": "3419",
"license": "mit",
"hash": -2695778515986879000,
"line_mean": 35.3829787234,
"line_max": 385,
"alpha_frac": 0.6493126645,
"autogenerated": false,
"ratio": 2.9964943032427693,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41458069677427695,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.