code
stringlengths 1
199k
|
|---|
# Smoke-test driver (project-specific): builds one pyaf Ozone forecast model
# for the given (transformation, trend, cycle, AR) component combination.
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['Lag1Trend'] , ['Seasonal_Second'] , ['NoAR'] );
|
from __future__ import absolute_import, division, print_function
from future.builtins import zip
import numpy as np
from skbio.io import FileFormatError
from skbio.io.util import open_file
class OrdinationResults(object):
    """Store ordination results

    Attributes
    ----------
    eigvals : 1-D numpy array
        The result eigenvalues
    species : 2-D numpy array
        The result coordinates for each species
    site : 2-D numpy array
        The results coordinates for each site
    biplot : 2-D numpy array
        The result biplot coordinates
    site_constraints : 2-D numpy array
        The result coordinates for each site constraint
    proportion_explained : 1-D numpy array
        The proportion explained by each eigenvector
    species_ids : list of str
        The species identifiers
    site_ids : list of str
        The site identifiers
    """

    def __init__(self, eigvals, species=None, site=None, biplot=None,
                 site_constraints=None, proportion_explained=None,
                 species_ids=None, site_ids=None):
        self.eigvals = eigvals
        self.species = species
        self.site = site
        self.biplot = biplot
        self.site_constraints = site_constraints
        self.proportion_explained = proportion_explained
        self.species_ids = species_ids
        self.site_ids = site_ids

    @classmethod
    def from_file(cls, ord_res_f):
        r"""Load ordination results from text file.

        Creates a `OrdinationResults` instance from serialized results
        stored as text.

        `ord_res_f` must be a file-like object containing text.

        The ord_res_f format should look like::

            Eigvals<tab>2
            0.096<tab>0.040
            Proportion explained<tab>2
            0.512<tab>0.488
            Species<tab>3<tab>2
            Species1<tab>0.408<tab>0.069
            Species2<tab>-0.115<tab>-0.299
            Species3<tab>-0.309<tab>0.187
            Site<tab>3<tab>2
            Site1<tab>-0.848<tab>0.882
            Site2<tab>-0.220<tab>-1.344
            Site3<tab>1.666<tab>0.470
            Biplot<tab>4<tab>3
            0.422<tab>-0.559<tab>-0.713
            0.988<tab>0.150<tab>-0.011
            -0.556<tab>0.817<tab>0.147
            -0.404<tab>-0.905<tab>-0.127
            Site constraints<tab>3<tab>2
            Site1<tab>-0.848<tab>0.882
            Site2<tab>-0.220<tab>-1.344
            Site3<tab>1.666<tab>0.470

        If a given result attribute is not present (e.g. Biplot), it should be
        still defined and declare its dimensions as 0::

            Biplot<tab>0<tab>0

        Parameters
        ----------
        ord_res_f : iterable of str or str
            Iterable of strings (e.g., open file handle, file-like object, list
            of strings, etc.) or a file path (a string) containing the
            serialized ordination results.

        Returns
        -------
        OrdinationResults
            Instance of type `cls` containing the parsed contents of
            `ord_res_f`.

        Raises
        ------
        ValueError
            if the shapes of the different sections of the file are not
            consistent
        FileFormatError
            if the format of the file is not recognized

        Examples
        --------
        Assume we have the following tab-delimited text file storing the
        ordination results::

            Eigvals\t2
            0.0961330159181\t0.0409418140138
            Proportion explained\t0
            Species\t3\t2
            Species1\t0.408869425742\t0.0695518116298
            Species2\t-0.1153860437\t-0.299767683538
            Species3\t-0.309967102571\t0.187391917117
            Site\t3\t2
            Site1\t-0.848956053187\t0.882764759014
            Site2\t-0.220458650578\t-1.34482000302
            Site3\t1.66697179591\t0.470324389808
            Biplot\t0\t0
            Site constraints\t0\t0

        Load the ordination results from the file:

        >>> from StringIO import StringIO
        >>> from skbio.stats.ordination import OrdinationResults
        >>> or_f = StringIO("Eigvals\t2\n"
        ...                 "0.0961330159181\t0.0409418140138\n"
        ...                 "\n"
        ...                 "Proportion explained\t0\n"
        ...                 "\n"
        ...                 "Species\t3\t2\n"
        ...                 "Species1\t0.408869425742\t0.0695518116298\n"
        ...                 "Species2\t-0.1153860437\t-0.299767683538\n"
        ...                 "Species3\t-0.309967102571\t0.187391917117\n"
        ...                 "\n"
        ...                 "Site\t3\t2\n"
        ...                 "Site1\t-0.848956053187\t0.882764759014\n"
        ...                 "Site2\t-0.220458650578\t-1.34482000302\n"
        ...                 "Site3\t1.66697179591\t0.470324389808\n"
        ...                 "\n"
        ...                 "Biplot\t0\t0\n"
        ...                 "\n"
        ...                 "Site constraints\t0\t0\n")
        >>> ord_res = OrdinationResults.from_file(or_f)
        """
        with open_file(ord_res_f, 'U') as fd:
            orf = iter(fd)

            # The sections must appear in a fixed order, each separated by a
            # single empty line.  Starting at line 0, we should find the
            # eigvals.
            eigvals = cls._parse_eigvals(orf)
            cls._check_empty_line(orf)
            # Now we should find the proportion explained section
            prop_expl = cls._parse_proportion_explained(orf)

            if prop_expl is not None:
                if len(prop_expl) != len(eigvals):
                    raise ValueError(
                        'There should be as many proportion explained'
                        ' values as eigvals: %d != %d' %
                        (len(prop_expl), len(eigvals)))

            cls._check_empty_line(orf)
            # Next section should be the species section
            species, species_ids = cls._parse_coords(orf, 'Species')
            if species is not None:
                if len(species[0]) != len(eigvals):
                    raise ValueError(
                        'There should be as many coordinates per'
                        ' species as eigvals: %d != %d' %
                        (len(species[0]), len(eigvals)))

            cls._check_empty_line(orf)
            # Next section should be the site section
            site, site_ids = cls._parse_coords(orf, 'Site')
            if site is not None:
                if len(site[0]) != len(eigvals):
                    raise ValueError(
                        'There should be as many coordinates per'
                        ' site as eigvals: %d != %d' %
                        (len(site[0]), len(eigvals)))

            cls._check_empty_line(orf)
            # Next section should be the biplot section
            biplot = cls._parse_biplot(orf)
            cls._check_empty_line(orf)
            # Next section should be the site constraints section
            cons, cons_ids = cls._parse_coords(orf, 'Site constraints')
            if cons_ids is not None and site_ids is not None:
                if cons_ids != site_ids:
                    raise ValueError(
                        'Site constraints ids and site ids must be'
                        ' equal: %s != %s' % (cons_ids, site_ids))

        return cls(eigvals=eigvals, species=species, site=site, biplot=biplot,
                   site_constraints=cons, proportion_explained=prop_expl,
                   species_ids=species_ids, site_ids=site_ids)

    @staticmethod
    def _parse_eigvals(lines):
        """Parse the eigvals section of lines"""
        # The first line should contain the Eigvals header:
        # Eigvals<tab>NumEigvals
        header = next(lines).strip().split('\t')
        if len(header) != 2 or header[0] != 'Eigvals':
            raise FileFormatError('Eigvals header not found')

        # Parse how many eigvals are we waiting for
        num_eigvals = int(header[1])
        if num_eigvals == 0:
            raise ValueError('At least one eigval should be present')

        # Parse the eigvals, present on the next line
        # Eigval_1<tab>Eigval_2<tab>Eigval_3<tab>...
        eigvals = np.asarray(next(lines).strip().split('\t'),
                             dtype=np.float64)
        if len(eigvals) != num_eigvals:
            raise ValueError('Expected %d eigvals, but found %d.' %
                             (num_eigvals, len(eigvals)))

        return eigvals

    @staticmethod
    def _check_empty_line(lines):
        """Checks that the next line in lines is empty"""
        if next(lines).strip():
            raise FileFormatError('Expected an empty line')

    @staticmethod
    def _parse_proportion_explained(lines):
        """Parse the proportion explained section of lines"""
        # Parse the proportion explained header:
        # Proportion explained<tab>NumPropExpl
        header = next(lines).strip().split('\t')
        if (len(header) != 2 or
                header[0] != 'Proportion explained'):
            raise FileFormatError('Proportion explained header not found')

        # Parse how many prop expl values are we waiting for
        num_prop_expl = int(header[1])
        if num_prop_expl == 0:
            # The ordination method didn't generate the prop explained vector
            # set it to None
            prop_expl = None
        else:
            # Parse the line with the proportion explained values
            prop_expl = np.asarray(next(lines).strip().split('\t'),
                                   dtype=np.float64)
            if len(prop_expl) != num_prop_expl:
                raise ValueError(
                    'Expected %d proportion explained values, but'
                    ' found %d.' % (num_prop_expl, len(prop_expl)))

        return prop_expl

    @staticmethod
    def _parse_coords(lines, header_id):
        """Parse a coordinate section of lines, with header=header_id"""
        # Parse the coords header
        header = next(lines).strip().split('\t')
        if len(header) != 3 or header[0] != header_id:
            raise FileFormatError('%s header not found.' % header_id)

        # Parse the dimensions of the coord matrix
        rows = int(header[1])
        cols = int(header[2])

        if rows == 0 and cols == 0:
            # The ordination method didn't generate the coords for 'header'
            # Set the results to None
            coords = None
            ids = None
        elif (rows == 0 and cols != 0) or (rows != 0 and cols == 0):
            # Both dimensions should be 0 or none of them are zero
            # (report the section name, not the raw header tokens)
            raise ValueError('One dimension of %s is 0: %d x %d' %
                             (header_id, rows, cols))
        else:
            # Parse the coord lines
            coords = np.empty((rows, cols), dtype=np.float64)
            ids = []
            for i in range(rows):
                # Parse the next row of data
                vals = next(lines).strip().split('\t')
                # The +1 comes from the row header (which contains the row id)
                if len(vals) != cols + 1:
                    raise ValueError('Expected %d values, but found %d in row '
                                     '%d.' % (cols, len(vals) - 1, i))
                ids.append(vals[0])
                coords[i, :] = np.asarray(vals[1:], dtype=np.float64)

        return coords, ids

    @staticmethod
    def _parse_biplot(lines):
        """Parse the biplot section of lines"""
        # Parse the biplot header
        header = next(lines).strip().split('\t')
        if len(header) != 3 or header[0] != 'Biplot':
            raise FileFormatError('Biplot header not found.')

        # Parse the dimensions of the Biplot matrix
        rows = int(header[1])
        cols = int(header[2])

        if rows == 0 and cols == 0:
            # The ordination method didn't generate the biplot matrix
            # Set the results to None
            biplot = None
        elif (rows == 0 and cols != 0) or (rows != 0 and cols == 0):
            # Both dimensions should be 0 or none of them are zero
            raise ValueError('One dimension of Biplot is 0: %d x %d' %
                             (rows, cols))
        else:
            # Parse the biplot matrix
            biplot = np.empty((rows, cols), dtype=np.float64)
            for i in range(rows):
                # Parse the next row of data
                vals = next(lines).strip().split('\t')
                # Fixed typo: the message used to read "founf".
                if len(vals) != cols:
                    raise ValueError('Expected %d values, but found %d in row '
                                     '%d.' % (cols, len(vals), i))
                biplot[i, :] = np.asarray(vals, dtype=np.float64)

        return biplot

    def to_file(self, out_f):
        """Save the ordination results to file in text format.

        Parameters
        ----------
        out_f : file-like object or filename
            File-like object to write serialized data to, or name of
            file. If it's a file-like object, it must have a ``write``
            method, and it won't be closed. Else, it is opened and
            closed after writing.

        See Also
        --------
        from_file
        """
        # NOTE: ``np.str`` was a deprecated alias of the builtin ``str``
        # (removed in NumPy 1.24); the builtin is used directly below.
        with open_file(out_f, 'w') as out_f:
            # Write eigvals (``shape`` is a 1-tuple, so it fills the single %d)
            out_f.write("Eigvals\t%d\n" % self.eigvals.shape)
            out_f.write("%s\n\n" % '\t'.join(np.asarray(self.eigvals,
                                                        dtype=str)))

            # Write proportion explained
            if self.proportion_explained is None:
                out_f.write("Proportion explained\t0\n\n")
            else:
                out_f.write("Proportion explained\t%d\n" %
                            self.proportion_explained.shape)
                out_f.write("%s\n\n" % '\t'.join(
                    np.asarray(self.proportion_explained, dtype=str)))

            # Write species
            if self.species is None:
                out_f.write("Species\t0\t0\n\n")
            else:
                out_f.write("Species\t%d\t%d\n" % self.species.shape)
                for id_, vals in zip(self.species_ids, self.species):
                    out_f.write("%s\t%s\n" % (id_, '\t'.join(np.asarray(vals,
                                                             dtype=str))))
                out_f.write("\n")

            # Write site
            if self.site is None:
                out_f.write("Site\t0\t0\n\n")
            else:
                out_f.write("Site\t%d\t%d\n" % self.site.shape)
                for id_, vals in zip(self.site_ids, self.site):
                    out_f.write("%s\t%s\n" % (id_, '\t'.join(
                        np.asarray(vals, dtype=str))))
                out_f.write("\n")

            # Write biplot
            if self.biplot is None:
                out_f.write("Biplot\t0\t0\n\n")
            else:
                out_f.write("Biplot\t%d\t%d\n" % self.biplot.shape)
                for vals in self.biplot:
                    out_f.write("%s\n" % '\t'.join(
                        np.asarray(vals, dtype=str)))
                out_f.write("\n")

            # Write site-constraints (shares ids with the site section)
            if self.site_constraints is None:
                out_f.write("Site constraints\t0\t0\n")
            else:
                out_f.write("Site constraints\t%d\t%d\n" %
                            self.site_constraints.shape)
                for id_, vals in zip(self.site_ids, self.site_constraints):
                    out_f.write("%s\t%s\n" % (id_, '\t'.join(
                        np.asarray(vals, dtype=str))))
class Ordination(object):
    """Base class for ordination methods.

    Subclasses are expected to override `short_method_name` and
    `long_method_name` with the names of the concrete method.
    """
    short_method_name = 'Overwrite in subclass!'
    long_method_name = 'Overwrite in subclass!'
|
import unittest
import IECore
import IECoreGL
import Gaffer
import GafferTest
import GafferUI
import GafferUITest
class StandardStyleTest( GafferUITest.TestCase ) :
    """Tests for GafferUI.StandardStyle accessors and change notification."""

    def testColorAccessors( self ) :
        # Every named colour (except the LastColor sentinel) must
        # round-trip through setColor()/getColor().
        s = GafferUI.StandardStyle()
        i = 0
        for n in GafferUI.StandardStyle.Color.names :
            if n=="LastColor" :
                continue
            c = IECore.Color3f( i )
            v = getattr( GafferUI.StandardStyle.Color, n )
            s.setColor( v, c )
            self.assertEqual( s.getColor( v ), c )
            i += 1

    def testFontAccessors( self ) :
        # Every text type (except the LastText sentinel) must round-trip
        # through setFont()/getFont().
        s = GafferUI.StandardStyle()
        f = IECoreGL.FontLoader.defaultFontLoader().load( "VeraMono.ttf" )
        for n in GafferUI.Style.TextType.names :
            if n=="LastText" :
                continue
            v = getattr( GafferUI.Style.TextType, n )
            s.setFont( v, f )
            # assertTrue replaces the deprecated failUnless alias, which
            # was removed from unittest in Python 3.12.
            self.assertTrue( s.getFont( v ).isSame( f ) )

    def testChangedSignal( self ) :
        # changedSignal() must fire once per actual change, and not at all
        # when a value is re-set to its current value.
        s = GafferUI.StandardStyle()
        cs = GafferTest.CapturingSlot( s.changedSignal() )
        self.assertEqual( len( cs ), 0 )
        s.setColor( GafferUI.StandardStyle.Color.BackgroundColor, IECore.Color3f( 0 ) )
        self.assertEqual( len( cs ), 1 )
        self.assertTrue( cs[0][0].isSame( s ) )
        s.setColor( GafferUI.StandardStyle.Color.BackgroundColor, IECore.Color3f( 1 ) )
        self.assertEqual( len( cs ), 2 )
        self.assertTrue( cs[1][0].isSame( s ) )
        # Setting the same colour again must not emit.
        s.setColor( GafferUI.StandardStyle.Color.BackgroundColor, IECore.Color3f( 1 ) )
        self.assertEqual( len( cs ), 2 )
        f = IECoreGL.FontLoader.defaultFontLoader().load( "VeraMono.ttf" )
        s.setFont( GafferUI.Style.TextType.LabelText, f )
        self.assertEqual( len( cs ), 3 )
        self.assertTrue( cs[2][0].isSame( s ) )
        # Setting the same font again must not emit.
        s.setFont( GafferUI.Style.TextType.LabelText, f )
        self.assertEqual( len( cs ), 3 )
# Allow the test module to be run directly.
if __name__ == "__main__":
    unittest.main()
|
"""Equality-constrained quadratic programming solvers."""
from __future__ import division, print_function, absolute_import
from scipy.sparse import (linalg, bmat, csc_matrix)
from math import copysign
import numpy as np
from numpy.linalg import norm
__all__ = [
'eqp_kktfact',
'sphere_intersections',
'box_intersections',
'box_sphere_intersections',
'inside_box_boundaries',
'modified_dogleg',
'projected_cg'
]
def eqp_kktfact(H, c, A, b):
    """Solve equality-constrained quadratic programming (EQP) problem.

    Solve ``min 1/2 x.T H x + x.t c`` subject to ``A x + b = 0``
    using direct factorization of the KKT system.

    Parameters
    ----------
    H : sparse matrix, shape (n, n)
        Hessian matrix of the EQP problem.
    c : array_like, shape (n,)
        Gradient of the quadratic objective function.
    A : sparse matrix
        Jacobian matrix of the EQP problem.
    b : array_like, shape (m,)
        Right-hand side of the constraint equation.

    Returns
    -------
    x : array_like, shape (n,)
        Solution of the KKT problem.
    lagrange_multipliers : ndarray, shape (m,)
        Lagrange multipliers of the KKT problem.
    """
    n_params, = np.shape(c)   # number of parameters
    n_constr, = np.shape(b)   # number of constraints

    # Assemble the Karush-Kuhn-Tucker coefficient matrix
    # [[H, A.T], [A, 0]] as in Nocedal/Wright, "Numerical
    # Optimization", Eq. (16.4), p. 452.
    kkt_matrix = csc_matrix(bmat([[H, A.T], [A, None]]))
    rhs = np.hstack([-c, -b])

    # TODO: a symmetric indefinite factorization would exploit the
    # symmetry of the KKT matrix and roughly halve the cost.
    factorization = linalg.splu(kkt_matrix)
    kkt_solution = factorization.solve(rhs)

    x = kkt_solution[:n_params]
    lagrange_multipliers = -kkt_solution[n_params:n_params + n_constr]
    return x, lagrange_multipliers
def sphere_intersections(z, d, trust_radius,
                         entire_line=False):
    """Find the intersection between segment (or line) and spherical constraints.

    Find the intersection between the segment (or line) defined by the
    parametric equation ``x(t) = z + t*d`` and the ball
    ``||x|| <= trust_radius``.

    Parameters
    ----------
    z : array_like, shape (n,)
        Initial point.
    d : array_like, shape (n,)
        Direction.
    trust_radius : float
        Ball radius.
    entire_line : bool, optional
        When ``True`` the function returns the intersection between the line
        ``x(t) = z + t*d`` (``t`` can assume any value) and the ball
        ``||x|| <= trust_radius``. When ``False`` returns the intersection
        between the segment ``x(t) = z + t*d``, ``0 <= t <= 1``, and the ball.

    Returns
    -------
    ta, tb : float
        The line/segment ``x(t) = z + t*d`` is inside the ball for
        ``ta <= t <= tb``.
    intersect : bool
        When ``True`` there is a intersection between the line/segment
        and the sphere. On the other hand, when ``False``, there is no
        intersection.
    """
    # A zero direction never moves away from ``z``: report no intersection.
    if norm(d) == 0:
        return 0, 0, False

    # An infinite radius contains every point of the line/segment.
    if np.isinf(trust_radius):
        if entire_line:
            return -np.inf, np.inf, True
        return 0, 1, True

    # Coefficients of the quadratic ||z + t d||^2 = trust_radius^2.
    quad_a = np.dot(d, d)
    quad_b = 2 * np.dot(z, d)
    quad_c = np.dot(z, z) - trust_radius**2

    discriminant = quad_b*quad_b - 4*quad_a*quad_c
    if discriminant < 0:
        return 0, 0, False
    sqrt_discriminant = np.sqrt(discriminant)

    # Numerically stable root computation, mathematically equivalent to
    #     ta = (-b - sqrt_discriminant) / (2*a)
    #     tb = (-b + sqrt_discriminant) / (2*a)
    # but avoiding cancellation between -b and the square root
    # (see "Matrix Computations", p. 97).
    aux = quad_b + copysign(sqrt_discriminant, quad_b)
    ta, tb = sorted([-aux / (2*quad_a), -2*quad_c / aux])

    if entire_line:
        return ta, tb, True

    # Restrict the result to the segment 0 <= t <= 1.
    if tb < 0 or ta > 1:
        return 0, 0, False
    return max(0, ta), min(1, tb), True
def box_intersections(z, d, lb, ub,
                      entire_line=False):
    """Find the intersection between segment (or line) and box constraints.

    Find the intersection between the segment (or line) defined by the
    parametric equation ``x(t) = z + t*d`` and the rectangular box
    ``lb <= x <= ub``.

    Parameters
    ----------
    z : array_like, shape (n,)
        Initial point.
    d : array_like, shape (n,)
        Direction.
    lb : array_like, shape (n,)
        Lower bounds to each one of the components of ``x``. Used
        to delimit the rectangular box.
    ub : array_like, shape (n, )
        Upper bounds to each one of the components of ``x``. Used
        to delimit the rectangular box.
    entire_line : bool, optional
        When ``True`` the function returns the intersection between the line
        ``x(t) = z + t*d`` (``t`` can assume any value) and the rectangular
        box. When ``False`` returns the intersection between the segment
        ``x(t) = z + t*d``, ``0 <= t <= 1``, and the rectangular box.

    Returns
    -------
    ta, tb : float
        The line/segment ``x(t) = z + t*d`` is inside the box for
        ``ta <= t <= tb``.
    intersect : bool
        When ``True`` there is a intersection between the line (or segment)
        and the rectangular box. On the other hand, when ``False``, there is
        no intersection.
    """
    z = np.asarray(z)
    d = np.asarray(d)
    lb = np.asarray(lb)
    ub = np.asarray(ub)

    # A zero direction never moves away from ``z``.
    if norm(d) == 0:
        return 0, 0, False

    # Coordinates with zero direction never move; if any of them already
    # violates its bounds, the whole line misses the box.
    frozen = (d == 0)
    if (z[frozen] < lb[frozen]).any() or (z[frozen] > ub[frozen]).any():
        return 0, 0, False

    # Keep only the moving coordinates.
    moving = np.logical_not(frozen)
    z = z[moving]
    d = d[moving]
    lb = lb[moving]
    ub = ub[moving]

    # Per-coordinate parameter intervals; intersect them all.
    t_lower = (lb - z) / d
    t_upper = (ub - z) / d
    ta = max(np.minimum(t_lower, t_upper))
    tb = min(np.maximum(t_lower, t_upper))
    intersect = bool(ta <= tb)

    if not entire_line:
        # Restrict the result to the segment 0 <= t <= 1.
        if tb < 0 or ta > 1:
            return 0, 0, False
        ta = max(0, ta)
        tb = min(1, tb)

    return ta, tb, intersect
def box_sphere_intersections(z, d, lb, ub, trust_radius,
                             entire_line=False,
                             extra_info=False):
    """Find the intersection between segment (or line) and box/sphere constraints.

    Find the intersection between the segment (or line) defined by the
    parametric equation ``x(t) = z + t*d``, the rectangular box
    ``lb <= x <= ub`` and the ball ``||x|| <= trust_radius``.

    Parameters
    ----------
    z : array_like, shape (n,)
        Initial point.
    d : array_like, shape (n,)
        Direction.
    lb : array_like, shape (n,)
        Lower bounds to each one of the components of ``x``. Used
        to delimit the rectangular box.
    ub : array_like, shape (n, )
        Upper bounds to each one of the components of ``x``. Used
        to delimit the rectangular box.
    trust_radius : float
        Ball radius.
    entire_line : bool, optional
        When ``True`` the function returns the intersection between the line
        ``x(t) = z + t*d`` (``t`` can assume any value) and the constraints.
        When ``False`` returns the intersection between the segment
        ``x(t) = z + t*d``, ``0 <= t <= 1`` and the constraints.
    extra_info : bool, optional
        When ``True`` returns ``intersect_sphere`` and ``intersect_box``.

    Returns
    -------
    ta, tb : float
        The line/segment ``x(t) = z + t*d`` is inside the rectangular box and
        inside the ball for ``ta <= t <= tb``.
    intersect : bool
        When ``True`` there is a intersection between the line (or segment)
        and both constraints. On the other hand, when ``False``, there is no
        intersection.
    sphere_info : dict, optional
        Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]``
        for which the line intercept the ball. And a boolean value indicating
        whether the sphere is intersected by the line.
    box_info : dict, optional
        Dictionary ``{ta, tb, intersect}`` containing the interval ``[ta, tb]``
        for which the line intercept the box. And a boolean value indicating
        whether the box is intersected by the line.
    """
    # Intersect the line/segment with each constraint set separately,
    # then take the overlap of the two parameter intervals.
    ta_box, tb_box, hit_box = box_intersections(z, d, lb, ub, entire_line)
    ta_sph, tb_sph, hit_sph = sphere_intersections(z, d, trust_radius,
                                                   entire_line)
    ta = np.maximum(ta_box, ta_sph)
    tb = np.minimum(tb_box, tb_sph)
    intersect = bool(hit_box and hit_sph and ta <= tb)

    if not extra_info:
        return ta, tb, intersect

    sphere_info = {'ta': ta_sph, 'tb': tb_sph, 'intersect': hit_sph}
    box_info = {'ta': ta_box, 'tb': tb_box, 'intersect': hit_box}
    return ta, tb, intersect, sphere_info, box_info
def inside_box_boundaries(x, lb, ub):
    """Return ``True`` when every component of ``x`` lies in ``[lb, ub]``."""
    above_lower = (lb <= x).all()
    below_upper = (x <= ub).all()
    return above_lower and below_upper
def reinforce_box_boundaries(x, lb, ub):
    """Return ``x`` clipped componentwise to the box ``lb <= x <= ub``.

    Used to push iterates that drifted slightly outside the box (due to
    roundoff) back onto the feasible region.

    Note: the original file defined this function twice back-to-back with
    identical bodies; the redundant duplicate has been removed.
    """
    return np.minimum(np.maximum(x, lb), ub)
def modified_dogleg(A, Y, b, trust_radius, lb, ub):
    """Approximately minimize ``1/2*|| A x + b ||^2`` inside trust-region.

    Approximately solve the problem of minimizing ``1/2*|| A x + b ||^2``
    subject to ``||x|| < Delta`` and ``lb <= x <= ub`` using a modification
    of the classical dogleg approach.

    Parameters
    ----------
    A : LinearOperator (or sparse matrix or ndarray), shape (m, n)
        Matrix ``A`` in the minimization problem. It should have
        dimension ``(m, n)`` such that ``m < n``.
    Y : LinearOperator (or sparse matrix or ndarray), shape (n, m)
        LinearOperator that apply the projection matrix
        ``Q = A.T inv(A A.T)`` to the vector. The obtained vector
        ``y = Q x`` being the minimum norm solution of ``A y = x``.
    b : array_like, shape (m,)
        Vector ``b``in the minimization problem.
    trust_radius: float
        Trust radius to be considered. Delimits a sphere boundary
        to the problem.
    lb : array_like, shape (n,)
        Lower bounds to each one of the components of ``x``.
        It is expected that ``lb <= 0``, otherwise the algorithm
        may fail. If ``lb[i] = -Inf`` the lower
        bound for the i-th component is just ignored.
    ub : array_like, shape (n, )
        Upper bounds to each one of the components of ``x``.
        It is expected that ``ub >= 0``, otherwise the algorithm
        may fail. If ``ub[i] = Inf`` the upper bound for the i-th
        component is just ignored.

    Returns
    -------
    x : array_like, shape (n,)
        Solution to the problem.

    Notes
    -----
    Based on implementations described in p.p. 885-886 from [1]_.

    References
    ----------
    .. [1] Byrd, Richard H., Mary E. Hribar, and Jorge Nocedal.
        "An interior point algorithm for large-scale nonlinear
        programming." SIAM Journal on Optimization 9.4 (1999): 877-900.
    """
    # Minimum-norm minimizer of 1/2*||A x + b||^2; if it is already
    # feasible we are done.
    newton_point = -Y.dot(b)
    if inside_box_boundaries(newton_point, lb, ub) \
            and norm(newton_point) <= trust_radius:
        return newton_point

    # Gradient g = A.T b and Cauchy point
    # cauchy_point = -(g.T g / (g.T A.T A g)) g.
    g = A.T.dot(b)
    A_g = A.dot(g)
    cauchy_point = -np.dot(g, g) / np.dot(A_g, A_g) * g
    origin_point = np.zeros_like(cauchy_point)

    # First candidate: walk along the segment cauchy_point -> newton_point
    # as far as the constraints allow; if that segment misses the feasible
    # region entirely, fall back to the segment origin -> cauchy_point.
    start = cauchy_point
    step = newton_point - cauchy_point
    _, alpha, intersect = box_sphere_intersections(start, step, lb, ub,
                                                   trust_radius)
    if not intersect:
        start = origin_point
        step = cauchy_point
        _, alpha, _ = box_sphere_intersections(start, step, lb, ub,
                                               trust_radius)
    x1 = start + alpha*step

    # Second candidate: walk along the segment origin -> newton_point.
    _, alpha, _ = box_sphere_intersections(origin_point, newton_point,
                                           lb, ub, trust_radius)
    x2 = origin_point + alpha*newton_point

    # Keep whichever candidate yields the smaller residual.
    if norm(A.dot(x1) + b) < norm(A.dot(x2) + b):
        return x1
    return x2
def projected_cg(H, c, Z, Y, b, trust_radius=np.inf,
                 lb=None, ub=None, tol=None,
                 max_iter=None, max_infeasible_iter=None,
                 return_all=False):
    """Solve EQP problem with projected CG method.

    Solve equality-constrained quadratic programming problem
    ``min 1/2 x.T H x + x.t c`` subject to ``A x + b = 0`` and,
    possibly, to trust region constraints ``||x|| < trust_radius``
    and box constraints ``lb <= x <= ub``.

    Parameters
    ----------
    H : LinearOperator (or sparse matrix or ndarray), shape (n, n)
        Operator for computing ``H v``.
    c : array_like, shape (n,)
        Gradient of the quadratic objective function.
    Z : LinearOperator (or sparse matrix or ndarray), shape (n, n)
        Operator for projecting ``x`` into the null space of A.
    Y : LinearOperator, sparse matrix, ndarray, shape (n, m)
        Operator that, for a given a vector ``b``, compute smallest
        norm solution of ``A x + b = 0``.
    b : array_like, shape (m,)
        Right-hand side of the constraint equation.
    trust_radius : float, optional
        Trust radius to be considered. By default uses ``trust_radius=inf``,
        which means no trust radius at all.
    lb : array_like, shape (n,), optional
        Lower bounds to each one of the components of ``x``.
        If ``lb[i] = -Inf`` the lower bound for the i-th
        component is just ignored (default).
    ub : array_like, shape (n, ), optional
        Upper bounds to each one of the components of ``x``.
        If ``ub[i] = Inf`` the upper bound for the i-th
        component is just ignored (default).
    tol : float, optional
        Tolerance used to interrupt the algorithm.
    max_iter : int, optional
        Maximum algorithm iterations. Where ``max_inter <= n-m``.
        By default uses ``max_iter = n-m``.
    max_infeasible_iter : int, optional
        Maximum infeasible (regarding box constraints) iterations the
        algorithm is allowed to take.
        By default uses ``max_infeasible_iter = n-m``.
    return_all : bool, optional
        When ``true`` return the list of all vectors through the iterations.

    Returns
    -------
    x : array_like, shape (n,)
        Solution of the EQP problem.
    info : Dict
        Dictionary containing the following:

            - niter : Number of iterations.
            - stop_cond : Reason for algorithm termination:
                1. Iteration limit was reached;
                2. Reached the trust-region boundary;
                3. Negative curvature detected;
                4. Tolerance was satisfied.
            - allvecs : List containing all intermediary vectors (optional).
            - hits_boundary : True if the proposed step is on the boundary
              of the trust region.

    Notes
    -----
    Implementation of Algorithm 6.2 on [1]_.

    In the absence of spherical and box constraints, for sufficient
    iterations, the method returns a truly optimal result.
    In the presence of those constraints the value returned is only
    a inexpensive approximation of the optimal value.

    References
    ----------
    .. [1] Gould, Nicholas IM, Mary E. Hribar, and Jorge Nocedal.
        "On the solution of equality constrained quadratic
        programming problems arising in optimization."
        SIAM Journal on Scientific Computing 23.4 (2001): 1376-1395.
    """
    CLOSE_TO_ZERO = 1e-25

    n, = np.shape(c)  # Number of parameters
    m, = np.shape(b)  # Number of constraints

    # Initial Values
    x = Y.dot(-b)
    r = Z.dot(H.dot(x) + c)
    g = Z.dot(r)
    p = -g

    # Store ``x`` value
    if return_all:
        allvecs = [x]
    # Values for the first iteration
    H_p = H.dot(p)
    rt_g = norm(g)**2  # g.T g = r.T Z g = r.T g (ref [1]_ p.1389)

    # If x > trust-region the problem does not have a solution.
    tr_distance = trust_radius - norm(x)
    if tr_distance < 0:
        raise ValueError("Trust region problem does not have a solution.")
    # If x == trust_radius, then x is the solution
    # to the optimization problem, since x is the
    # minimum norm solution to Ax=b.
    elif tr_distance < CLOSE_TO_ZERO:
        info = {'niter': 0, 'stop_cond': 2, 'hits_boundary': True}
        if return_all:
            allvecs.append(x)
            info['allvecs'] = allvecs
        return x, info

    # Set default tolerance
    if tol is None:
        tol = max(min(0.01 * np.sqrt(rt_g), 0.1 * rt_g), CLOSE_TO_ZERO)
    # Set default lower and upper bounds
    if lb is None:
        lb = np.full(n, -np.inf)
    if ub is None:
        ub = np.full(n, np.inf)
    # Set maximum iterations (never more than the null-space dimension)
    if max_iter is None:
        max_iter = n-m
    max_iter = min(max_iter, n-m)
    # Set maximum infeasible iterations
    if max_infeasible_iter is None:
        max_infeasible_iter = n-m

    hits_boundary = False
    stop_cond = 1
    counter = 0
    last_feasible_x = np.empty_like(x)
    k = 0
    for i in range(max_iter):
        # Stop criteria - Tolerance : r.T g < tol
        if rt_g < tol:
            stop_cond = 4
            break
        k += 1
        # Compute curvature
        pt_H_p = H_p.dot(p)
        # Stop criteria - Negative curvature
        if pt_H_p <= 0:
            if np.isinf(trust_radius):
                # Fixed typo in error message ("unrestrited").
                raise ValueError("Negative curvature not "
                                 "allowed for unrestricted "
                                 "problems.")
            else:
                # Find intersection with constraints
                _, alpha, intersect = box_sphere_intersections(
                    x, p, lb, ub, trust_radius, entire_line=True)
                # Update solution
                if intersect:
                    x = x + alpha*p
                # Reinforce variables are inside box constraints.
                # This is only necessary because of roundoff errors.
                x = reinforce_box_boundaries(x, lb, ub)
                # Attribute information
                stop_cond = 3
                hits_boundary = True
                break

        # Get next step
        alpha = rt_g / pt_H_p
        x_next = x + alpha*p

        # Stop criteria - Hits boundary
        if np.linalg.norm(x_next) >= trust_radius:
            # Find intersection with box constraints
            _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub,
                                                           trust_radius)
            # Update solution
            if intersect:
                x = x + theta*alpha*p
            # Reinforce variables are inside box constraints.
            # This is only necessary because of roundoff errors.
            x = reinforce_box_boundaries(x, lb, ub)
            # Attribute information
            stop_cond = 2
            hits_boundary = True
            break

        # Check if ``x`` is inside the box and start counter if it is not.
        if inside_box_boundaries(x_next, lb, ub):
            counter = 0
        else:
            counter += 1
        # Whenever outside box constraints keep looking for intersections.
        if counter > 0:
            _, theta, intersect = box_sphere_intersections(x, alpha*p, lb, ub,
                                                           trust_radius)
            if intersect:
                last_feasible_x = x + theta*alpha*p
                # Reinforce variables are inside box constraints.
                # This is only necessary because of roundoff errors.
                last_feasible_x = reinforce_box_boundaries(last_feasible_x,
                                                           lb, ub)
                counter = 0
        # Stop after too many infeasible (regarding box constraints) iteration.
        if counter > max_infeasible_iter:
            break
        # Store ``x_next`` value
        if return_all:
            allvecs.append(x_next)

        # Update residual
        r_next = r + alpha*H_p
        # Project residual g+ = Z r+
        g_next = Z.dot(r_next)
        # Compute conjugate direction step d
        rt_g_next = norm(g_next)**2  # g.T g = r.T g (ref [1]_ p.1389)
        beta = rt_g_next / rt_g
        p = - g_next + beta*p
        # Prepare for next iteration
        x = x_next
        g = g_next
        # NOTE(review): ``r`` is set to the *projected* residual ``g_next``
        # rather than ``r_next`` — this looks like the deliberate
        # re-projection trick to control roundoff suggested by ref [1]_
        # p.1389, not a bug; confirm before "fixing".
        r = g_next
        rt_g = norm(g)**2  # g.T g = r.T Z g = r.T g (ref [1]_ p.1389)
        H_p = H.dot(p)

    if not inside_box_boundaries(x, lb, ub):
        x = last_feasible_x
        hits_boundary = True
    info = {'niter': k, 'stop_cond': stop_cond,
            'hits_boundary': hits_boundary}
    if return_all:
        info['allvecs'] = allvecs
    return x, info
|
from datetime import datetime
from dateutil import rrule
from dateutil.relativedelta import relativedelta
import pytz
from corehq.apps.locations.models import SQLLocation
from corehq.apps.products.models import SQLProduct
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.sqlreport import SqlTabularReport
from corehq.apps.reports.standard import CustomProjectReport, ProjectReportParametersMixin
from corehq.apps.style.decorators import use_nvd3
from couchexport.models import Format
from custom.common import ALL_OPTION
from custom.ilsgateway.models import SupplyPointStatusTypes, OrganizationSummary
from corehq.apps.reports.graph_models import PieChart
from dimagi.utils.dates import DateSpan
from dimagi.utils.decorators.memoized import memoized
from custom.ilsgateway.tanzania.reports.utils import make_url
from dimagi.utils.parsing import ISO_DATE_FORMAT
class ILSPieChart(PieChart):
    """Pie chart that additionally exposes its raw slice values as ``data``."""
    def __init__(self, title, key, values, color=None):
        # NOTE(review): assumes PieChart.__init__ accepts
        # (title, key, values, color) positionally -- confirm upstream.
        super(ILSPieChart, self).__init__(title, key, values, color)
        # Keep the raw slice definitions accessible to report/template code.
        self.data = values
class ILSData(object):
    """Base class for ILSGateway report data providers.

    Subclasses supply ``headers``/``rows``; this class turns the first row
    into a pie chart using the color/label mapping in ``chart_config`` and
    the per-status-type key lists in ``vals_config``.
    """
    show_table = False
    show_chart = True
    title_url = None
    title_url_name = None
    subtitle = None
    default_rows = 10
    searchable = False
    use_datatables = False

    # Display name and slice color for every chartable attribute.
    chart_config = {
        'on_time': {
            'color': 'green',
            'display': 'Submitted On Time'
        },
        'late': {
            'color': 'orange',
            'display': 'Submitted Late'
        },
        'not_submitted': {
            'color': 'red',
            'display': "Haven't Submitted "
        },
        'del_received': {
            'color': 'green',
            'display': 'Delivery Received',
        },
        'del_not_received': {
            'color': 'red',
            'display': 'Delivery Not Received',
        },
        'sup_received': {
            'color': 'green',
            'display': 'Supervision Received',
        },
        'sup_not_received': {
            'color': 'red',
            'display': 'Supervision Not Received',
        },
        'not_responding': {
            'color': '#8b198b',
            'display': "Didn't Respond"
        },
    }

    # Which attributes are charted for each supply point status type.
    vals_config = {
        SupplyPointStatusTypes.SOH_FACILITY: ['on_time', 'late', 'not_submitted', 'not_responding'],
        SupplyPointStatusTypes.DELIVERY_FACILITY: ['del_received', 'del_not_received', 'not_responding'],
        SupplyPointStatusTypes.R_AND_R_FACILITY: ['on_time', 'late', 'not_submitted', 'not_responding'],
        SupplyPointStatusTypes.SUPERVISION_FACILITY: ['sup_received', 'sup_not_received', 'not_responding']
    }

    def __init__(self, config=None, css_class='row_chart'):
        self.config = config or {}
        self.css_class = css_class

    @property
    def headers(self):
        return []

    @property
    def rows(self):
        raise NotImplementedError

    @property
    def charts(self):
        """Build a single pie chart from the first data row.

        Zero/missing attributes are skipped both in the total and as slices.
        """
        row_data = self.rows
        entries = []
        slice_colors = []
        if row_data:
            first_row = row_data[0]
            keys = self.vals_config[first_row.title]
            # Total over all truthy values only, mirroring the slice filter.
            total = sum(getattr(first_row, key) for key in keys
                        if getattr(first_row, key, None))
            for key in keys:
                value = getattr(first_row, key, None)
                if not value:
                    continue
                percent = round(float(value) * 100 / float(total or 1), 1)
                label = self.chart_config[key]['display']
                slice_colors.append(self.chart_config[key]['color'])
                entries.append({
                    'value': percent,
                    'label': label,
                    'description': "%.1f%% (%d) %s (%s)" % (
                        percent, value, label,
                        self.config['startdate'].strftime("%b %Y")
                    ),
                })
        pie = ILSPieChart('', '', entries, color=slice_colors)
        pie.marginLeft = 10
        pie.marginRight = 10
        pie.height = 500
        return [pie]
class ILSMixin(object):
    """URL hooks for the various ILSGateway sub-reports.

    All default to None; subclasses (e.g. DetailsReport) override them with
    properties that build fully-filtered report URLs.
    """
    report_facilities_url = None
    report_stockonhand_url = None
    report_rand_url = None
    report_supervision_url = None
    report_delivery_url = None
class ILSDateSpan(DateSpan):
    """DateSpan factory that understands month/quarter/year period types."""

    @classmethod
    def get_date(cls, type=None, month_or_quarter=None, year=None, format=ISO_DATE_FORMAT,
                 inclusive=True, timezone=pytz.utc):
        """
        Generate a DateSpan object given type, month or quarter and year.

        ``type``: 1 - month (default), 2 - quarter, 3 - year.

        April 2013:    ILSDateSpan.get_date(1, 4, 2013)
        First quarter: ILSDateSpan.get_date(2, 1, 2015)
        Whole of 2015: ILSDateSpan.get_date(3, None, 2015)
        """
        # Bug fix: ``datetime`` here is the class imported from the datetime
        # module, so the previous ``datetime.datetime.date.today()`` raised
        # AttributeError.  Use UTC to match MonthQuarterYearMixin's defaults.
        if month_or_quarter is None:
            month_or_quarter = datetime.utcnow().month
        if year is None:
            year = datetime.utcnow().year
        assert isinstance(month_or_quarter, int) and isinstance(year, int)
        if type == 3:
            # Whole calendar year.
            start = datetime(year, 1, 1)
            end = datetime(year, 12, 31)
        elif type == 2:
            # First day of each quarter plus the first day of the next year,
            # so a quarter's end is the next boundary minus one day.
            quarters = list(rrule.rrule(rrule.MONTHLY,
                                        bymonth=(1, 4, 7, 10),
                                        bysetpos=-1,
                                        dtstart=datetime(year, 1, 1),
                                        count=5))
            start = quarters[month_or_quarter - 1]
            end = quarters[month_or_quarter] - relativedelta(days=1)
        else:
            # Default: a single calendar month.
            start = datetime(year, month_or_quarter, 1)
            end = start + relativedelta(months=1) - relativedelta(days=1)
        return DateSpan(start, end, format, inclusive, timezone)
class MonthQuarterYearMixin(object):
    """Parses datespan_type / datespan_first / datespan_second request
    parameters into an ILSDateSpan.

    datespan_type: 1 = month, 2 = quarter, 3 = year.
    """
    _datespan = None

    @property
    def datespan(self):
        # Built lazily and cached; also mirrored onto the request and
        # template context as the reporting framework expects.
        if self._datespan is None:
            span = ILSDateSpan.get_date(self.type, self.first, self.second)
            self.request.datespan = span
            self.context.update(dict(datespan=span))
            self._datespan = span
        return self._datespan

    @property
    def type(self):
        """Period type: 1 - month (default), 2 - quarter, 3 - year."""
        value = self.request_params.get('datespan_type')
        return int(value) if value else 1

    @property
    def first(self):
        """Month [1-12] for type 1, quarter [1-4] for type 2; unused for
        type 3.  Defaults to the current UTC month."""
        value = self.request_params.get('datespan_first')
        return int(value) if value else datetime.utcnow().month

    @property
    def second(self):
        """Year; defaults to the current UTC year."""
        value = self.request_params.get('datespan_second')
        return int(value) if value else datetime.utcnow().year
class MultiReport(SqlTabularReport, ILSMixin, CustomProjectReport,
                  ProjectReportParametersMixin, MonthQuarterYearMixin):
    """Base class for ILSGateway report pages built from several data
    providers.

    Each element of ``data_providers`` (an ``ILSData``) contributes a table
    and/or chart; ``report_context`` renders them all into one page.
    """
    title = ''
    report_template_path = "ilsgateway/multi_report.html"
    flush_layout = True
    with_tabs = False
    use_datatables = False
    exportable = False
    base_template = 'ilsgateway/base_template.html'
    emailable = False
    @use_nvd3
    def decorator_dispatcher(self, request, *args, **kwargs):
        # Attach nvd3 chart assets to the dispatch chain.
        # NOTE(review): the super() result is not returned -- presumably the
        # framework ignores it here; confirm against the base class.
        super(MultiReport, self).decorator_dispatcher(request, *args, **kwargs)
    @classmethod
    def get_url(cls, domain=None, render_as=None, **kwargs):
        """Build the report URL, pre-filling location and program filters
        from the requesting user's domain membership."""
        # NOTE(review): ``render_as`` is discarded and ``kwargs`` is passed
        # as a single keyword argument rather than expanded -- verify this
        # matches the superclass's expected signature.
        url = super(MultiReport, cls).get_url(domain=domain, render_as=None, kwargs=kwargs)
        request = kwargs.get('request')
        user = getattr(request, 'couch_user', None)
        dm = user.get_domain_membership(domain) if user else None
        if dm:
            if dm.program_id:
                program_id = dm.program_id
            else:
                program_id = 'all'
            # Fall back to the domain's MOHSW (national root) location when
            # the user has no assigned location.
            url = '%s?location_id=%s&filter_by_program=%s' % (
                url,
                dm.location_id if dm.location_id else SQLLocation.objects.get(
                    domain=domain, location_type__name='MOHSW'
                ).location_id,
                program_id if program_id else ''
            )
        return url
    @property
    def location(self):
        """Location from the querystring, else the domain's first MOHSW."""
        if hasattr(self, 'request') and self.request.GET.get('location_id', ''):
            return SQLLocation.objects.get(location_id=self.request.GET.get('location_id', ''))
        else:
            return SQLLocation.objects.filter(location_type__name='MOHSW', domain=self.domain)[0]
    @property
    @memoized
    def rendered_report_title(self):
        return self.title
    @property
    @memoized
    def data_providers(self):
        """Subclasses return the list of ILSData providers to render."""
        return []
    @property
    def title_month(self):
        """Human-readable label for the selected period, e.g. 'March, 2015'
        or 'January - March, 2015' for spans longer than one month."""
        days = self.datespan.enddate - self.datespan.startdate
        if days.days <= 31:
            return self.datespan.startdate.strftime('%B, %Y')
        else:
            return '{0} - {1}'.format(self.datespan.startdate.strftime('%B'),
                                      self.datespan.enddate.strftime('%B, %Y'))
    @property
    def report_config(self):
        """Shared configuration dict handed to every data provider."""
        org_summary = OrganizationSummary.objects.filter(
            date__range=(self.datespan.startdate, self.datespan.enddate),
            location_id=self.location.location_id
        )
        config = dict(
            domain=self.domain,
            org_summary=org_summary if len(org_summary) > 0 else None,
            startdate=self.datespan.startdate,
            enddate=self.datespan.enddate,
            datespan_type=self.type,
            datespan_first=self.first,
            datespan_second=self.second,
            location_id=self.location.location_id,
            soh_month=True if self.request.GET.get('soh_month', '') == 'True' else False,
            products=[],
            program='',
            prd_part_url='',
            timezone=self.timezone
        )
        if 'filter_by_program' in self.request.GET:
            program = self.request.GET.get('filter_by_program', '')
            if program and program != ALL_OPTION:
                products_list = self.request.GET.getlist('filter_by_product')
                if (products_list and products_list[0] == ALL_OPTION) or not products_list:
                    # "All products" within the chosen program.
                    products = SQLProduct.objects.filter(program_id=program, is_archived=False)\
                        .order_by('code')\
                        .values_list('product_id', flat=True)
                    prd_part_url = '&filter_by_product=%s' % ALL_OPTION
                else:
                    # An explicit subset of products was selected.
                    products = SQLProduct.objects.filter(
                        pk__in=products_list,
                        is_archived=False
                    ).order_by('code').values_list('product_id', flat=True)
                    prd_part_url = "".join(["&filter_by_product=%s" % product for product in products_list])
            else:
                # No specific program: all non-archived products in the domain.
                products = SQLProduct.objects.filter(
                    domain=self.domain,
                    is_archived=False
                ).order_by('code').values_list('product_id', flat=True)
                prd_part_url = "&filter_by_product="
            config.update(dict(products=products, program=program, prd_part_url=prd_part_url))
        return config
    @property
    def report_context(self):
        """Template context: one rendered sub-report per data provider."""
        context = {
            'reports': [self.get_report_context(dp) for dp in self.data_providers],
            'title': self.title,
            'report_facilities_url': self.report_facilities_url,
            'location_type': self.location.location_type.name if self.location else None
        }
        return context
    def get_report_context(self, data_provider):
        """Render one data provider into the table/chart dict expected by
        the multi_report template."""
        total_row = []
        self.data_source = data_provider
        headers = []
        rows = []
        # Only hit the database once the required filters are present.
        if not self.needs_filters and data_provider.show_table:
            headers = data_provider.headers
            rows = data_provider.rows
        context = dict(
            report_table=dict(
                title=data_provider.title,
                title_url=data_provider.title_url,
                title_url_name=data_provider.title_url_name,
                datatables=data_provider.use_datatables,
                slug=data_provider.slug,
                headers=headers,
                rows=rows,
                total_row=total_row,
                start_at_row=0,
                subtitle=data_provider.subtitle,
                location=self.location.id if self.location else '',
                default_rows=data_provider.default_rows,
                searchable=data_provider.searchable
            ),
            show_table=data_provider.show_table,
            show_chart=data_provider.show_chart,
            charts=data_provider.charts if data_provider.show_chart else [],
            chart_span=12,
            css_class=data_provider.css_class,
        )
        return context
    @property
    def export_table(self):
        """Export each sub-report that has headers as its own sheet."""
        default_value = [['Sheet1', [[]]]]
        self.export_format_override = self.request.GET.get('format', Format.XLS)
        reports = [r['report_table'] for r in self.report_context['reports']]
        export = [self._export_table(r['title'], r['headers'], r['rows'], total_row=r['total_row'])
                  for r in reports if r['headers']]
        return export if export else default_value
    def _export_table(self, export_sheet_name, headers, formatted_rows, total_row=None):
        """Turn one report table into a ``[sheet_name, rows]`` export pair,
        stripping HTML from cells and filling blank header cells from the
        preceding header."""
        def _unformat_row(row):
            # Prefer the raw sort key over the formatted cell when present.
            return [col.get("sort_key", col) if isinstance(col, dict) else col for col in row]
        table = headers.as_export_table
        rows = [_unformat_row(row) for row in formatted_rows]
        replace = ''
        for row in rows:
            for index, value in enumerate(row):
                row[index] = GenericTabularReport._strip_tags(value)
        # make headers and subheaders consistent
        for k, v in enumerate(table[0]):
            if v != ' ':
                replace = v
            else:
                table[0][k] = replace
        table.extend(rows)
        if total_row:
            table.append(_unformat_row(total_row))
        return [export_sheet_name, table]
class DetailsReport(MultiReport):
    """Drill-down variant of MultiReport that adds per-facility report tabs.

    NOTE: a ``with_tabs = True`` class attribute used to be set here, but it
    was dead code -- the ``with_tabs`` property below shadows it in the
    class namespace, so it has been removed.
    """
    flush_layout = True
    exportable = True

    @classmethod
    def show_in_navigation(cls, domain=None, project=None, user=None):
        return True

    @property
    def with_tabs(self):
        """Show the tab bar only when the selected location is a facility."""
        return self.location and self.location.location_type.name.upper() == 'FACILITY'

    @property
    def report_context(self):
        context = super(DetailsReport, self).report_context
        # Tab URLs only make sense once a concrete location is selected.
        if 'location_id' in self.request_params:
            context.update(
                dict(
                    report_stockonhand_url=self.report_stockonhand_url,
                    report_rand_url=self.report_rand_url,
                    report_supervision_url=self.report_supervision_url,
                    report_delivery_url=self.report_delivery_url,
                    with_tabs=True
                )
            )
        return context

    def ils_make_url(self, cls):
        """Build a report URL preserving location/program/datespan filters."""
        params = '?location_id=%s&filter_by_program=%s&datespan_type=%s&datespan_first=%s&datespan_second=%s'
        return make_url(cls, self.domain, params, (
            self.request.GET.get('location_id'),
            self.request.GET.get('filter_by_program'),
            self.request.GET.get('datespan_type', ''),
            self.request.GET.get('datespan_first', ''),
            self.request.GET.get('datespan_second', ''),
        ))

    @property
    def report_stockonhand_url(self):
        # Imported locally to avoid circular imports with the report modules.
        from custom.ilsgateway.tanzania.reports.stock_on_hand import StockOnHandReport
        return self.ils_make_url(StockOnHandReport)

    @property
    def report_rand_url(self):
        from custom.ilsgateway.tanzania.reports.randr import RRreport
        return self.ils_make_url(RRreport)

    @property
    def report_supervision_url(self):
        from custom.ilsgateway.tanzania.reports.supervision import SupervisionReport
        return self.ils_make_url(SupervisionReport)

    @property
    def report_delivery_url(self):
        from custom.ilsgateway.tanzania.reports.delivery import DeliveryReport
        return self.ils_make_url(DeliveryReport)
|
"""
Incrementally tweak specified axes. Build new faces!
"""
from argparse import ArgumentParser
import pprint
import numpy as np
from pylearn2.gui.patch_viewer import PatchViewer
import theano
from adversarial import sampler, util
parser = ArgumentParser(description=('Experiment with tweaking each '
                                     'axis incrementally.'))
parser.add_argument('-s', '--conditional-sampler', default='random',
                    choices=sampler.conditional_samplers.values(),
                    type=lambda k: sampler.conditional_samplers[k])
parser.add_argument('model_path')
parser.add_argument('embedding_file')
parser.add_argument('-a', '--axes',
                    help='Comma-separated list of axes to modify')
args = parser.parse_args()
# Embeddings are stored as the first array of an .npz archive.
embeddings = np.load(args.embedding_file)['arr_0']
# Default: sweep every axis of the conditional embedding space.
if args.axes is None:
    args.axes = range(embeddings.shape[1])
else:
    args.axes = [int(x) for x in args.axes.strip().split(',')]
condition_dim = embeddings.shape[1]
# m rows of tweaked samples (one per swept axis), n samples per row.
m, n = len(args.axes), 10
# Magnitude of the perturbation applied along each axis.
shift = 7.5
generator = util.load_generator_from_file(args.model_path)
noise_batch = generator.noise_space.make_theano_batch()
conditional_batch = generator.condition_space.make_theano_batch()
# Compiled sampler: (noise, conditional) -> topological image batch.
topo_sample_f = theano.function([noise_batch, conditional_batch],
                                generator.dropout_fprop((noise_batch, conditional_batch))[0])
# Fix one noise batch so rows differ only in the conditional data.
noise_data = generator.get_noise((n, generator.noise_dim)).eval()
base_conditional_data = args.conditional_sampler(generator, n, 1,
                                                 embedding_file=args.embedding_file)
print 'Mean for each axis:'
pprint.pprint(zip(args.axes, base_conditional_data[:, args.axes].mean(axis=1)))
# Center the sweep: start half a shift below, then add one shift per axis.
base_conditional_data[:, args.axes] -= 0.5 * shift
mod_conditional_data = base_conditional_data.copy()
mod_conditional_steps = []
# Cumulatively shift one more axis per row.
for axis in args.axes:
    mod_conditional_data[:, axis] += shift
    mod_conditional_steps.extend(mod_conditional_data.copy())
mod_conditional_steps = np.array(mod_conditional_steps)
# swapaxes(0, 3) reorders the generator's topological batch for the viewer
# -- presumably (b, 0, 1, c) layout; confirm against the model's output space.
samples_orig = topo_sample_f(noise_data, base_conditional_data).swapaxes(0, 3)
samples_mod = topo_sample_f(np.tile(noise_data, (m, 1)), mod_conditional_steps).swapaxes(0, 3)
# First grid row: unmodified samples; each following row: one tweaked axis.
pv = PatchViewer(grid_shape=(m + 1, n), patch_shape=(32,32),
                 is_color=True)
for sample_orig in samples_orig:
    pv.add_patch(sample_orig, activation=1)
for sample_mod in samples_mod:
    pv.add_patch(sample_mod)
pv.show()
|
import base64
import logging
from django.conf import settings
from django.http import HttpResponse, HttpResponseNotAllowed, HttpResponseBadRequest, HttpResponseForbidden
from django.views.decorators.csrf import csrf_exempt
from djzendesk.signals import target_callback_received
def is_authenticated(request, username, password):
    """Authenticate the request using HTTP Basic authorization.

    Returns True only when an ``Authorization: Basic`` header is present,
    well-formed, and its decoded ``user:password`` pair matches the expected
    credentials.  Fixes over the previous version:

    * only the FIRST colon separates user from password, so passwords
      containing ':' work (per RFC 7617 the user-id may not contain a colon
      but the password may);
    * malformed base64 or a credentials blob without a colon yields False
      instead of raising (which would surface as a 500 to the caller).
    """
    auth_header = request.META.get('HTTP_AUTHORIZATION')
    if not auth_header:
        return False
    auth = auth_header.split()
    if len(auth) != 2 or auth[0].lower() != 'basic':
        return False
    try:
        decoded = base64.b64decode(auth[1]).decode('utf-8')
    except (TypeError, ValueError, UnicodeDecodeError):
        # Bad padding / non-base64 / non-UTF-8 payload: reject, don't raise.
        return False
    provided_username, _, provided_password = decoded.partition(':')
    return username == provided_username and password == provided_password
@csrf_exempt
def callback(request, ticket_id):
    """Handle HTTP callback requests from Zendesk"""
    # Only POST callbacks are supported.
    if request.method != 'POST':
        return HttpResponseNotAllowed(['POST'])
    username = getattr(settings, 'ZENDESK_CALLBACK_USERNAME', None)
    password = getattr(settings, 'ZENDESK_CALLBACK_PASSWORD', None)
    # Authentication is enforced only when both credentials are configured.
    credentials_configured = username is not None and password is not None
    if credentials_configured and not is_authenticated(request, username, password):
        return HttpResponseForbidden()
    # A 'message' field is mandatory.
    if 'message' not in request.POST:
        return HttpResponseBadRequest()
    message = request.POST['message']
    logging.info("HTTP callback received from Zendesk for ticket %s: %s", ticket_id, message)
    # Let interested listeners react to the received target callback.
    target_callback_received.send(sender=None, ticket_id=ticket_id, message=message)
    return HttpResponse('OK')
|
"""
Created on Fri Mar 01 14:56:56 2013
Author: Josef Perktold
"""
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal, assert_array_less
from statsmodels.stats.proportion import proportion_confint
import statsmodels.stats.proportion as smprop
from statsmodels.tools.sm_exceptions import HypothesisTestWarning
class Holder(object):
    """Simple attribute container used to transcribe R result objects."""
    pass
def test_confint_proportion():
    """Compare proportion_confint against stored R binom.confint results."""
    from .results.results_proportion import res_binom, res_binom_methods
    # Map statsmodels method names to the labels used in the R results.
    method_map = {'agresti_coull': 'agresti-coull',
                  'normal': 'asymptotic',
                  'beta': 'exact',
                  'wilson': 'wilson',
                  'jeffrey': 'bayes'}
    for case in res_binom:
        count, nobs = case
        for sm_name in method_map:
            idx = res_binom_methods.index(method_map[sm_name])
            expected_low = res_binom[case].ci_low[idx]
            expected_upp = res_binom[case].ci_upp[idx]
            if np.isnan(expected_low) or np.isnan(expected_upp):
                # Reference value unavailable for this case/method.
                continue
            ci = proportion_confint(count, nobs, alpha=0.05, method=sm_name)
            assert_almost_equal(ci, [expected_low, expected_upp], decimal=6,
                                err_msg=repr(case) + sm_name)
def test_samplesize_confidenceinterval_prop():
    """samplesize_confint_proportion should invert proportion_confint."""
    nobs = 20
    count = 12
    ci_low, ci_upp = smprop.proportion_confint(count, nobs, alpha=0.05,
                                               method='normal')
    # Required sample size for this half-length should recover nobs.
    half_length = (ci_upp - ci_low) / 2
    res = smprop.samplesize_confint_proportion(count / float(nobs), half_length)
    assert_almost_equal(res, nobs, decimal=13)
def test_proportion_effect_size():
    """Effect size (Cohen's h) for 0.5 vs 0.4; reference value from blog example."""
    effect = smprop.proportion_effectsize(0.5, 0.4)
    assert_almost_equal(effect, 0.2013579207903309, decimal=13)
class CheckProportionMixin(object):
    """Shared assertions for k-sample proportion tests.

    Subclasses provide ``n_success``, ``nobs`` and the R-derived reference
    results (``res_prop_test``, ``res_prop_test_val``, ``res_prop_test_1``,
    ``res_ppt_pvals_raw``, ``res_ppt_pvals_holm``) in ``setup``.
    """
    def test_proptest(self):
        # equality of k-samples
        pt = smprop.proportions_chisquare(self.n_success, self.nobs, value=None)
        assert_almost_equal(pt[0], self.res_prop_test.statistic, decimal=13)
        assert_almost_equal(pt[1], self.res_prop_test.p_value, decimal=13)
        # several against value
        pt = smprop.proportions_chisquare(self.n_success, self.nobs,
                                          value=self.res_prop_test_val.null_value[0])
        assert_almost_equal(pt[0], self.res_prop_test_val.statistic, decimal=13)
        assert_almost_equal(pt[1], self.res_prop_test_val.p_value, decimal=13)
        # one proportion against value
        pt = smprop.proportions_chisquare(self.n_success[0], self.nobs[0],
                                          value=self.res_prop_test_1.null_value)
        assert_almost_equal(pt[0], self.res_prop_test_1.statistic, decimal=13)
        assert_almost_equal(pt[1], self.res_prop_test_1.p_value, decimal=13)
    def test_pairwiseproptest(self):
        # All-pairs chisquare tests: raw p-values, then Holm correction.
        ppt = smprop.proportions_chisquare_allpairs(self.n_success, self.nobs,
                                                    multitest_method=None)
        assert_almost_equal(ppt.pvals_raw, self.res_ppt_pvals_raw)
        ppt = smprop.proportions_chisquare_allpairs(self.n_success, self.nobs,
                                                    multitest_method='h')
        assert_almost_equal(ppt.pval_corrected(), self.res_ppt_pvals_holm)
        # Pairs-vs-control must agree with the first rows of all-pairs.
        pptd = smprop.proportions_chisquare_pairscontrol(self.n_success,
                                                         self.nobs, multitest_method='hommel')
        assert_almost_equal(pptd.pvals_raw, ppt.pvals_raw[:len(self.nobs) - 1],
                            decimal=13)
    def test_number_pairs_1493(self):
        # Regression test for gh-1493: correct number of pairs for 3 samples.
        ppt = smprop.proportions_chisquare_allpairs(self.n_success[:3],
                                                    self.nobs[:3],
                                                    multitest_method=None)
        assert_equal(len(ppt.pvals_raw), 3)
        # Those three pairs correspond to indices 0, 1, 3 of the 4-sample case.
        idx = [0, 1, 3]
        assert_almost_equal(ppt.pvals_raw, self.res_ppt_pvals_raw[idx])
class TestProportion(CheckProportionMixin):
    """Runs CheckProportionMixin against reference output transcribed from
    R's ``prop.test`` (see the ``#>`` comments for the original R calls)."""
    def setup(self):
        # 4-sample smoker counts and totals used by all checks below.
        self.n_success = np.array([ 73,  90, 114,  75])
        self.nobs = np.array([ 86,  93, 136,  82])
        self.res_ppt_pvals_raw = np.array([
                 0.00533824886503131, 0.8327574849753566, 0.1880573726722516,
                 0.002026764254350234, 0.1309487516334318, 0.1076118730631731
                ])
        self.res_ppt_pvals_holm = np.array([
                 0.02669124432515654, 0.8327574849753566, 0.4304474922526926,
                 0.0121605855261014, 0.4304474922526926, 0.4304474922526926
                ])
        # R: prop.test(smokers2, patients, correct=FALSE)
        res_prop_test = Holder()
        res_prop_test.statistic = 11.11938768628861
        res_prop_test.parameter = 3
        res_prop_test.p_value = 0.011097511366581344
        res_prop_test.estimate = np.array([
             0.848837209302326, 0.967741935483871, 0.838235294117647,
             0.9146341463414634
            ]).reshape(4,1, order='F')
        res_prop_test.null_value = '''NULL'''
        res_prop_test.conf_int = '''NULL'''
        res_prop_test.alternative = 'two.sided'
        res_prop_test.method = '4-sample test for equality of proportions ' + \
                               'without continuity correction'
        res_prop_test.data_name = 'smokers2 out of patients'
        self.res_prop_test = res_prop_test
        #> pt = prop.test(smokers2, patients, p=rep(c(0.9), 4), correct=FALSE)
        #> cat_items(pt, "res_prop_test_val.")
        res_prop_test_val = Holder()
        res_prop_test_val.statistic = np.array([
             13.20305530710751
            ]).reshape(1,1, order='F')
        res_prop_test_val.parameter = np.array([
             4
            ]).reshape(1,1, order='F')
        res_prop_test_val.p_value = 0.010325090041836
        res_prop_test_val.estimate = np.array([
             0.848837209302326, 0.967741935483871, 0.838235294117647,
             0.9146341463414634
            ]).reshape(4,1, order='F')
        res_prop_test_val.null_value = np.array([
             0.9, 0.9, 0.9, 0.9
            ]).reshape(4,1, order='F')
        res_prop_test_val.conf_int = '''NULL'''
        res_prop_test_val.alternative = 'two.sided'
        res_prop_test_val.method = '4-sample test for given proportions without continuity correction'
        res_prop_test_val.data_name = 'smokers2 out of patients, null probabilities rep(c(0.9), 4)'
        self.res_prop_test_val = res_prop_test_val
        #> pt = prop.test(smokers2[1], patients[1], p=0.9, correct=FALSE)
        #> cat_items(pt, "res_prop_test_1.")
        res_prop_test_1 = Holder()
        res_prop_test_1.statistic = 2.501291989664086
        res_prop_test_1.parameter = 1
        res_prop_test_1.p_value = 0.113752943640092
        res_prop_test_1.estimate = 0.848837209302326
        res_prop_test_1.null_value = 0.9
        res_prop_test_1.conf_int = np.array([0.758364348004061,
                                             0.9094787701686766])
        res_prop_test_1.alternative = 'two.sided'
        res_prop_test_1.method = '1-sample proportions test without continuity correction'
        res_prop_test_1.data_name = 'smokers2[1] out of patients[1], null probability 0.9'
        self.res_prop_test_1 = res_prop_test_1
def test_binom_test():
    """Compare binom_test p-values and Clopper-Pearson intervals against
    R's ``binom.test`` for all three alternatives (see ``#>`` R calls)."""
    #> bt = binom.test(51,235,(1/6),alternative="less")
    #> cat_items(bt, "binom_test_less.")
    binom_test_less = Holder()
    binom_test_less.statistic = 51
    binom_test_less.parameter = 235
    binom_test_less.p_value = 0.982022657605858
    binom_test_less.conf_int = [0, 0.2659460862574313]
    binom_test_less.estimate = 0.2170212765957447
    binom_test_less.null_value = 1. / 6
    binom_test_less.alternative = 'less'
    binom_test_less.method = 'Exact binomial test'
    binom_test_less.data_name = '51 and 235'
    #> bt = binom.test(51,235,(1/6),alternative="greater")
    #> cat_items(bt, "binom_test_greater.")
    binom_test_greater = Holder()
    binom_test_greater.statistic = 51
    binom_test_greater.parameter = 235
    binom_test_greater.p_value = 0.02654424571169085
    binom_test_greater.conf_int = [0.1735252778065201, 1]
    binom_test_greater.estimate = 0.2170212765957447
    binom_test_greater.null_value = 1. / 6
    binom_test_greater.alternative = 'greater'
    binom_test_greater.method = 'Exact binomial test'
    binom_test_greater.data_name = '51 and 235'
    #> bt = binom.test(51,235,(1/6),alternative="t")
    #> cat_items(bt, "binom_test_2sided.")
    binom_test_2sided = Holder()
    binom_test_2sided.statistic = 51
    binom_test_2sided.parameter = 235
    binom_test_2sided.p_value = 0.0437479701823997
    binom_test_2sided.conf_int = [0.1660633298083073, 0.2752683640289254]
    binom_test_2sided.estimate = 0.2170212765957447
    binom_test_2sided.null_value = 1. / 6
    binom_test_2sided.alternative = 'two.sided'
    binom_test_2sided.method = 'Exact binomial test'
    binom_test_2sided.data_name = '51 and 235'
    # statsmodels alternative name -> R reference result
    alltests = [('larger', binom_test_greater),
                ('smaller', binom_test_less),
                ('two-sided', binom_test_2sided)]
    for alt, res0 in alltests:
        # only p-value is returned
        res = smprop.binom_test(51, 235, prop=1. / 6, alternative=alt)
        #assert_almost_equal(res[0], res0.statistic)
        assert_almost_equal(res, res0.p_value, decimal=13)
    # R binom_test returns Copper-Pearson confint
    ci_2s = smprop.proportion_confint(51, 235, alpha=0.05, method='beta')
    ci_low, ci_upp = smprop.proportion_confint(51, 235, alpha=0.1,
                                               method='beta')
    assert_almost_equal(ci_2s, binom_test_2sided.conf_int, decimal=13)
    # One-sided R intervals correspond to the two-sided interval at 2*alpha.
    assert_almost_equal(ci_upp, binom_test_less.conf_int[1], decimal=13)
    assert_almost_equal(ci_low, binom_test_greater.conf_int[0], decimal=13)
def test_binom_rejection_interval():
    """Consistency check of binom_test_reject_interval with binom_test.

    For each alternative the returned boundary count must itself be rejected
    (p-value < alpha) while the first count inside the acceptance region must
    not be (p-value > alpha).  Some code duplication, but the limit checks
    differ per alternative.
    """
    alpha = 0.05
    nobs = 200
    prop = 12. / 20

    # one-sided 'smaller': upper limit is unconstrained (== nobs)
    alternative = 'smaller'
    ci_low, ci_upp = smprop.binom_test_reject_interval(prop, nobs, alpha=alpha,
                                                       alternative=alternative)
    assert_equal(ci_upp, nobs)
    pval = smprop.binom_test(ci_low, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_low + 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)

    # one-sided 'larger': lower limit is unconstrained (== 0)
    alternative = 'larger'
    ci_low, ci_upp = smprop.binom_test_reject_interval(prop, nobs, alpha=alpha,
                                                       alternative=alternative)
    assert_equal(ci_low, 0)
    pval = smprop.binom_test(ci_upp, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_upp - 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)

    # two-sided: check BOTH tails.  (The previous version re-tested the
    # upper tail twice and never exercised ci_low -- copy/paste bug.)
    alternative = 'two-sided'
    ci_low, ci_upp = smprop.binom_test_reject_interval(prop, nobs, alpha=alpha,
                                                       alternative=alternative)
    # upper tail
    pval = smprop.binom_test(ci_upp, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_upp - 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)
    # lower tail
    pval = smprop.binom_test(ci_low, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(pval, alpha)
    pval = smprop.binom_test(ci_low + 1, nobs, prop=prop,
                             alternative=alternative)
    assert_array_less(alpha, pval)
def test_binom_tost():
    """Consistency check between binom_tost and proportion_confint.

    proportion_confint is itself tested against R; no external reference
    for binom_tost is available, so we verify that the TOST p-values at
    the confidence limits equal alpha/2.
    """
    for count in (10, 5):
        ci = smprop.proportion_confint(count, 20, method='beta', alpha=0.1)
        tost = smprop.binom_tost(count, 20, *ci)
        assert_almost_equal(tost, [0.05] * 3, decimal=12)
    # vectorized, TODO: observed proportion = 0 returns nan
    counts = np.arange(1, 20)
    ci = smprop.proportion_confint(counts, 20, method='beta', alpha=0.05)
    tost = np.asarray(smprop.binom_tost(counts, 20, *ci))
    assert_almost_equal(tost, 0.025 * np.ones(tost.shape), decimal=12)
def test_power_binom_tost():
    """Check power_binom_tost and binom_tost_reject_interval against
    reference values from the PASS manual."""
    # comparison numbers from PASS manual
    p_alt = 0.6 + np.linspace(0, 0.09, 10)
    power = smprop.power_binom_tost(0.5, 0.7, 500, p_alt=p_alt, alpha=0.05)
    res_power = np.array([0.9965,  0.9940,  0.9815,  0.9482,  0.8783,  0.7583,
                          0.5914,  0.4041,  0.2352,  0.1139])
    assert_almost_equal(power, res_power, decimal=4)
    rej_int = smprop.binom_tost_reject_interval(0.5, 0.7, 500)
    res_rej_int = (269, 332)
    assert_equal(rej_int, res_rej_int)
    # TODO: actual alpha=0.0489  for all p_alt above
    # another case
    nobs = np.arange(20, 210, 20)
    power = smprop.power_binom_tost(0.4, 0.6, nobs, p_alt=0.5, alpha=0.05)
    res_power = np.array([ 0.,     0.,     0.,     0.0889, 0.2356, 0.3517,
                           0.4457, 0.6154, 0.6674, 0.7708])
    # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
    assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)
def test_power_ztost_prop():
    """Check power_ztost_prop against a PASS reference value and regression
    numbers for the continuity-correction variants."""
    power = smprop.power_ztost_prop(0.1, 0.9, 10, p_alt=0.6, alpha=0.05,
                                    discrete=True, dist='binom')[0]
    assert_almost_equal(power, 0.8204, decimal=4) # PASS example
    # The continuous approximation can emit a HypothesisTestWarning; ignore.
    with warnings.catch_warnings():  # python >= 2.6
        warnings.simplefilter("ignore", HypothesisTestWarning)
        power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                        p_alt=0.5, alpha=0.05, discrete=False,
                                        dist='binom')[0]
        res_power = np.array([ 0.,     0.,     0.,     0.0889, 0.2356, 0.4770,
                               0.5530, 0.6154, 0.7365, 0.7708])
        # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
        assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)
        # with critval_continuity correction
        power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                        p_alt=0.5, alpha=0.05, discrete=False,
                                        dist='binom', variance_prop=None,
                                        continuity=2, critval_continuity=1)[0]
        res_power = np.array([0.,     0.,     0.,     0.0889, 0.2356, 0.3517,
                              0.4457, 0.6154, 0.6674, 0.7708])
        # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
        assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)
        power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                        p_alt=0.5, alpha=0.05, discrete=False,
                                        dist='binom', variance_prop=0.5,
                                        critval_continuity=1)[0]
        res_power = np.array([0.,     0.,     0.,     0.0889, 0.2356, 0.3517,
                              0.4457, 0.6154, 0.6674, 0.7112])
        # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
        assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)
def test_ztost():
    """Check zconfint and ztost on binary data against SAS reference output."""
    # 228 successes out of 762 trials, encoded as a 0/1 sample.
    xfair = np.repeat([1, 0], [228, 762-228])
    # comparing to SAS last output at
    # http://support.sas.com/documentation/cdl/en/procstat/63104/HTML/default/viewer.htm#procstat_freq_sect028.htm
    # confidence interval for tost
    # generic ztost is moved to weightstats
    from statsmodels.stats.weightstats import zconfint, ztost
    ci01 = zconfint(xfair, alpha=0.1, ddof=0)
    assert_almost_equal(ci01,  [0.2719, 0.3265], 4)
    res = ztost(xfair, 0.18, 0.38, ddof=0)
    # res = (pvalue, (t_low, ...), (t_upp, ...))
    assert_almost_equal(res[1][0], 7.1865, 4)
    assert_almost_equal(res[2][0], -4.8701, 4)
    assert_array_less(res[0], 0.0001)
def test_power_ztost_prop_norm():
    """Regression tests for power_ztost_prop with the normal distribution
    across continuity/critval/variance option combinations."""
    # regression test for normal distribution
    # from a rough comparison, the results and variations look reasonable
    power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                    p_alt=0.5, alpha=0.05, discrete=False,
                                    dist='norm', variance_prop=0.5,
                                    continuity=0, critval_continuity=0)[0]
    res_power = np.array([0.,         0.,         0.,         0.11450013,
                          0.27752006, 0.41495922, 0.52944621, 0.62382638,
                          0.70092914, 0.76341806])
    # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
    assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)
    # regression test for normal distribution
    power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                    p_alt=0.5, alpha=0.05, discrete=False,
                                    dist='norm', variance_prop=0.5,
                                    continuity=1, critval_continuity=0)[0]
    res_power = np.array([0.,         0.,         0.02667562, 0.20189793,
                          0.35099606, 0.47608598, 0.57981118, 0.66496683,
                          0.73427591, 0.79026127])
    # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
    assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)
    # regression test for normal distribution
    power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                    p_alt=0.5, alpha=0.05, discrete=True,
                                    dist='norm', variance_prop=0.5,
                                    continuity=1, critval_continuity=0)[0]
    res_power = np.array([0.,         0.,         0.,         0.08902071,
                          0.23582284, 0.35192313, 0.55312718, 0.61549537,
                          0.66743625, 0.77066806])
    # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
    assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)
    # regression test for normal distribution
    power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                    p_alt=0.5, alpha=0.05, discrete=True,
                                    dist='norm', variance_prop=0.5,
                                    continuity=1, critval_continuity=1)[0]
    res_power = np.array([0.,         0.,         0.,         0.08902071,
                          0.23582284, 0.35192313, 0.44588687, 0.61549537,
                          0.66743625, 0.71115563])
    # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
    assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)
    # regression test for normal distribution
    power = smprop.power_ztost_prop(0.4, 0.6, np.arange(20, 210, 20),
                                    p_alt=0.5, alpha=0.05, discrete=True,
                                    dist='norm', variance_prop=None,
                                    continuity=0, critval_continuity=0)[0]
    res_power = np.array([0.,         0.,         0.,         0.,
                          0.15851942, 0.41611758, 0.5010377,  0.5708047,
                          0.70328247, 0.74210096])
    # TODO: I currently don't impose power>=0, i.e np.maximum(power, 0)
    assert_almost_equal(np.maximum(power, 0), res_power, decimal=4)
def test_proportion_ztests():
    # currently only a consistency test against proportions_chisquare
    # Note: alternative handling is generic
    # scalar (one-sample) case
    ztest_res = smprop.proportions_ztest(15, 20., value=0.5, prop_var=0.5)
    chisq_res = smprop.proportions_chisquare(15, 20., value=0.5)
    assert_almost_equal(ztest_res[1], chisq_res[1], decimal=13)
    # array (two-sample) case -- compare only the p-values
    ztest_res = smprop.proportions_ztest(np.asarray([15, 10]),
                                         np.asarray([20., 20]),
                                         value=0, prop_var=None)
    chisq_res = smprop.proportions_chisquare(np.asarray([15, 10]),
                                             np.asarray([20., 20]))
    assert_almost_equal(ztest_res[1], chisq_res[1], decimal=13)
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    test_confint_proportion()
|
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import connection, transaction
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from nepal.account.decorators import role_required
from nepal.contrib.shortcuts import render_to_response, add_message, render_exception
from nepal.database.forms import DatabaseForm
from nepal.database.models import Database
from nepal.database.shortcuts import get_db_or_404
from nepal.resources.shortcuts import has_resources
def _sql_query(sql):
    """Execute a raw SQL statement outside the ORM.

    No-op when ``settings.DEVELOPMENT`` is set, so local setups do not
    need MySQL admin privileges.
    """
    if settings.DEVELOPMENT:
        return
    cursor = connection.cursor()
    try:
        cursor.execute(sql)
    finally:
        # Always release the cursor, even when execute() raises.
        cursor.close()
    transaction.commit_unless_managed()
@role_required('C')
def listing(request):
    """Render the list of databases owned by the current user."""
    owned = Database.objects.filter(profile=request.user)
    context = {
        'databases': owned,
        'limit_reached': not has_resources(request.user, 'databases'),
    }
    return render_to_response(request, 'database/list.html', context)
@role_required('C')
def _create_or_edit(request, db_id):
    # Shared implementation for the create and edit views: with a db_id the
    # existing Database row is updated, without one a new row is created for
    # the logged-in user. On success, a matching MySQL database/user is
    # created (or its password reset) and the user is redirected to the list.
    if db_id:
        database = get_db_or_404(request, db_id)
    else:
        database = Database(profile=request.user)
    form = DatabaseForm(request.POST or None, instance=database, request=request)
    if request.method == 'POST' and form.is_valid():
        form.save()
        # MySQL database/user naming convention: "<username>_<dbname>".
        dbname = "%s_%s" % (form.instance.profile.username, form.instance.name)
        # XXX: escaping needed?
        # NOTE(review): dbname and password are interpolated into raw SQL
        # below. They come from validated form fields, but confirm that
        # DatabaseForm restricts them to safe characters (no quotes or
        # backticks) before trusting this.
        if db_id:
            _sql_query("SET PASSWORD FOR '%s'@'localhost' = PASSWORD('%s')" % (dbname, form.instance.password))
        else:
            _sql_query("CREATE DATABASE `%s`" % (dbname,))
            _sql_query("GRANT ALL PRIVILEGES ON `%s`.* TO '%s'@'localhost' IDENTIFIED BY '%s'" % (
                dbname, dbname, form.instance.password))
        add_message(request, 'notice', _('Your changes have been saved to the database.'))
        next = reverse('nepal-database-list')
        return HttpResponseRedirect(next)
    template = 'database/edit.html'
    data = {'form': form, 'edit': db_id}
    return render_to_response(request, template, data)
def create(request):
    """Create a new database, unless the user's quota is already exhausted."""
    if has_resources(request.user, 'databases'):
        return _create_or_edit(request, None)
    msg = _("You've already reached the maximum number of %s, you're allowed to create.")
    return render_exception(request, msg % _('databases'))
def edit(request, db_id):
    """Edit an existing database; thin wrapper around _create_or_edit."""
    return _create_or_edit(request, db_id)
def delete(request, db_id):
    """Delete a Database row and drop the matching MySQL database and user.

    GET renders a confirmation page; POST performs the deletion and
    redirects to the database list.
    """
    database = get_db_or_404(request, db_id)
    if request.method == 'POST':
        try:
            redirect_url = reverse('nepal-database-list')
            database.delete()
            # MySQL database/user naming convention: "<username>_<dbname>".
            dbname = "%s_%s" % (database.profile.username, database.name)
            # XXX: escaping needed?
            # Dropping the user/database is best effort: either may already
            # be gone, and a leftover must not block deleting the model row.
            try:
                _sql_query("DROP USER '%s'@'localhost'" % (dbname,))
            except Exception:
                pass
            try:
                # Backtick-quote the identifier for consistency with
                # CREATE DATABASE in _create_or_edit (names may clash with
                # reserved words or contain special characters).
                _sql_query("DROP DATABASE `%s`" % (dbname,))
            except Exception:
                pass
            add_message(request, 'notice', _('%s has been deleted.') % _('Database'))
            return HttpResponseRedirect(redirect_url)
        except Exception as e:
            return render_exception(request, _("Couldn't delete database."), e)
    data = {'database': database}
    template = 'database/delete.html'
    return render_to_response(request, template, data)
|
from __future__ import unicode_literals
from django.test import SimpleTestCase
from localflavor.lt.forms import (LTIDCodeField, LTMunicipalitySelect,
LTCountySelect)
class LTLocalFlavorTests(SimpleTestCase):
    """Tests for the Lithuanian (LT) localflavor form fields and widgets."""
    def test_LTIDCodeField(self):
        """Valid/invalid personal ID codes: length, checksum and date checks."""
        error_len = ['ID Code consists of exactly 11 decimal digits.']
        error_check = ['Wrong ID Code checksum.']
        error_date = ['ID Code contains invalid date.']
        valid = {
            '33309240064': '33309240064',
            '35002125431': '35002125431',
            '61205010081': '61205010081',
            '48504140959': '48504140959',
        }
        invalid = {
            '3456': error_len,
            '123456789101': error_len,
            '33309240065': error_check,
            'hello': error_len,
            '134535443i2': error_len,
            '48504140956': error_check,
            '48504140953': error_check,
            '50520150003': error_date,
            '50501009554': error_date,
            '80101017318': error_date,
        }
        self.assertFieldOutput(LTIDCodeField, valid, invalid)
    def test_LTCountySelect(self):
        """The county select must render all ten LT counties in order."""
        f = LTCountySelect()
        expected = """
<select name="test">
<option value="alytus">Alytus</option>
<option value="kaunas">Kaunas</option>
<option value="klaipeda">Klaipėda</option>
<option value="mariampole">Mariampolė</option>
<option value="panevezys">Panevėžys</option>
<option value="siauliai">Šiauliai</option>
<option value="taurage">Tauragė</option>
<option value="telsiai">Telšiai</option>
<option value="utena">Utena</option>
<option value="vilnius">Vilnius</option>
</select>
"""
        self.assertHTMLEqual(f.render('test', None), expected)
    def test_LTMunicipalitySelect(self):
        """The municipality select must render the full municipality list."""
        f = LTMunicipalitySelect()
        expected = """
<select name="test">
<option value="akmene">Akmenė district</option>
<option value="alytus_c">Alytus city</option>
<option value="alytus">Alytus district</option>
<option value="anyksciai">Anykščiai district</option>
<option value="birstonas">Birštonas</option>
<option value="birzai">Biržai district</option>
<option value="druskininkai">Druskininkai</option>
<option value="elektrenai">Elektrėnai</option>
<option value="ignalina">Ignalina district</option>
<option value="jonava">Jonava district</option>
<option value="joniskis">Joniškis district</option>
<option value="jurbarkas">Jurbarkas district</option>
<option value="kaisiadorys">Kaišiadorys district</option>
<option value="kalvarija">Kalvarija</option>
<option value="kaunas_c">Kaunas city</option>
<option value="kaunas">Kaunas district</option>
<option value="kazluruda">Kazlų Rūda</option>
<option value="kedainiai">Kėdainiai district</option>
<option value="kelme">Kelmė district</option>
<option value="klaipeda_c">Klaipėda city</option>
<option value="klaipeda">Klaipėda district</option>
<option value="kretinga">Kretinga district</option>
<option value="kupiskis">Kupiškis district</option>
<option value="lazdijai">Lazdijai district</option>
<option value="marijampole">Marijampolė</option>
<option value="mazeikiai">Mažeikiai district</option>
<option value="moletai">Molėtai district</option>
<option value="neringa">Neringa</option>
<option value="pagegiai">Pagėgiai</option>
<option value="pakruojis">Pakruojis district</option>
<option value="palanga">Palanga city</option>
<option value="panevezys_c">Panevėžys city</option>
<option value="panevezys">Panevėžys district</option>
<option value="pasvalys">Pasvalys district</option>
<option value="plunge">Plungė district</option>
<option value="prienai">Prienai district</option>
<option value="radviliskis">Radviliškis district</option>
<option value="raseiniai">Raseiniai district</option>
<option value="rietavas">Rietavas</option>
<option value="rokiskis">Rokiškis district</option>
<option value="skuodas">Skuodas district</option>
<option value="sakiai">Šakiai district</option>
<option value="salcininkai">Šalčininkai district</option>
<option value="siauliai_c">Šiauliai city</option>
<option value="siauliai">Šiauliai district</option>
<option value="silale">Šilalė district</option>
<option value="silute">Šilutė district</option>
<option value="sirvintos">Širvintos district</option>
<option value="svencionys">Švenčionys district</option>
<option value="taurage">Tauragė district</option>
<option value="telsiai">Telšiai district</option>
<option value="trakai">Trakai district</option>
<option value="ukmerge">Ukmergė district</option>
<option value="utena">Utena district</option>
<option value="varena">Varėna district</option>
<option value="vilkaviskis">Vilkaviškis district</option>
<option value="vilnius_c">Vilnius city</option>
<option value="vilnius">Vilnius district</option>
<option value="visaginas">Visaginas</option>
<option value="zarasai">Zarasai district</option>
</select>
"""
        self.assertHTMLEqual(f.render('test', None), expected)
|
from zeit.cms.i18n import MessageFactory as _
from zope.browserpage import ViewPageTemplateFile
import pkg_resources
import zeit.cms.browser.objectdetails
import zeit.edit.browser.form
import zope.formlib.form
import zope.formlib.interfaces
class ReferenceDetailsHeading(zeit.cms.browser.objectdetails.Details):
    """Render the standard object-details heading for a reference's target."""
    template = ViewPageTemplateFile(pkg_resources.resource_filename(
        'zeit.cms.browser', 'object-details-heading.pt'))
    def __init__(self, context, request):
        # Show details of the *referenced* object, not the reference itself.
        super(ReferenceDetailsHeading, self).__init__(context.target, request)
    def __call__(self):
        return self.template()
class Edit(zeit.edit.browser.form.InlineForm):
    """Inline form for editing the biography text of an author reference."""
    legend = ''
    undo_description = _('edit author biography')
    # NOTE(review): zeit.content.author is not among the imports visible in
    # this file -- confirm it is imported (or registered) elsewhere,
    # otherwise this attribute lookup fails at class-definition time.
    form_fields = zope.formlib.form.FormFields(
        zeit.content.author.interfaces.IAuthorBioReference,
        # support read-only mode, see
        # zeit.content.article.edit.browser.form.FormFields
        render_context=zope.formlib.interfaces.DISPLAY_UNWRITEABLE).select(
        'biography')
    @property
    def prefix(self):
        # Unique per referenced object so several inline forms can coexist.
        return 'reference-details-%s' % self.context.target.uniqueId
|
"""
WSGI config for django_export_xls project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# Point Django at the project settings before the application is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_export_xls.settings")
from django.core.wsgi import get_wsgi_application
# Module-level WSGI callable discovered via the WSGI_APPLICATION setting.
application = get_wsgi_application()
|
from math import sqrt
from shapely import affinity
# Golden mean; figures use a W x (W * GM) "golden" aspect ratio.
GM = (sqrt(5) - 1.0) / 2.0
W = 8.0
H = W * GM
SIZE = (W, H)
# Color palette shared by the plotting helpers below.
BLUE = "#6699cc"
GRAY = "#999999"
DARKGRAY = "#333333"
YELLOW = "#ffcc33"
GREEN = "#339933"
RED = "#ff3333"
BLACK = "#000000"
# Geometry validity -> display color.
COLOR_ISVALID = {
    True: BLUE,
    False: RED,
}
def plot_line(ax, ob, color=GRAY, zorder=1, linewidth=3, alpha=1):
    """Draw the line geometry *ob* on *ax* with rounded caps."""
    xs, ys = ob.xy
    ax.plot(xs, ys, color=color, linewidth=linewidth,
            solid_capstyle="round", zorder=zorder, alpha=alpha)
def plot_coords(ax, ob, color=GRAY, zorder=1, alpha=1):
    """Mark each coordinate of the geometry *ob* with a dot."""
    xs, ys = ob.xy
    ax.plot(xs, ys, "o", color=color, zorder=zorder, alpha=alpha)
def color_isvalid(ob, valid=BLUE, invalid=RED):
    """Return *valid* or *invalid* depending on the geometry's validity."""
    return valid if ob.is_valid else invalid
def color_issimple(ob, simple=BLUE, complex=YELLOW):
    """Return *simple* or *complex* depending on whether *ob* is simple."""
    # (parameter name 'complex' kept for backward compatibility with callers)
    return simple if ob.is_simple else complex
def plot_line_isvalid(ax, ob, **kwargs):
    """Plot the line colored by validity (blue = valid, red = invalid)."""
    plot_line(ax, ob, **dict(kwargs, color=color_isvalid(ob)))
def plot_line_issimple(ax, ob, **kwargs):
    """Plot the line colored by simplicity (blue = simple, yellow = complex)."""
    plot_line(ax, ob, **dict(kwargs, color=color_issimple(ob)))
def plot_bounds(ax, ob, zorder=1, alpha=1):
    """Plot the geometry's boundary points as black dots."""
    pts = [(pt.x, pt.y) for pt in ob.boundary]
    xs, ys = zip(*pts)
    ax.plot(xs, ys, "o", color=BLACK, zorder=zorder, alpha=alpha)
def add_origin(ax, geom, origin):
    """Mark the affine-transform origin of *geom* and annotate its coords."""
    # interpret_origin resolves an origin spec (e.g. a keyword or explicit
    # point -- see shapely.affinity docs) into a concrete 2-D coordinate.
    x, y = xy = affinity.interpret_origin(geom, origin, 2)
    ax.plot(x, y, "o", color=GRAY, zorder=1)
    ax.annotate(str(xy), xy=xy, ha="center", textcoords="offset points", xytext=(0, 8))
def set_limits(ax, x0, xN, y0, yN, dx=1, dy=1):
    """Clamp both axes to [x0, xN] x [y0, yN] with evenly spaced integer ticks."""
    for set_lim, set_ticks, lo, hi, step in (
            (ax.set_xlim, ax.set_xticks, x0, xN, dx),
            (ax.set_ylim, ax.set_yticks, y0, yN, dy)):
        set_lim(lo, hi)
        set_ticks(range(lo, hi + 1, step))
    ax.set_aspect("equal")
|
from django.core.exceptions import ValidationError
def validate_approximatedate(date):
    """Reject approximate dates that carry only a year (month == 0)."""
    if date.month != 0:
        return
    raise ValidationError(
        'Event date can\'t be a year only. '
        'Please, provide at least a month and a year.'
    )
|
import pickle
import numpy as np
import os
from vec_hsqc import pred_vec
curdir = os.path.dirname(os.path.abspath(__file__))
# Pickle files are binary: open in 'rb' so loading works with any pickle
# protocol and under Python 3 (text mode corrupts protocol >= 1 streams).
with open(os.path.join(curdir, 'training_eg_01.pickle'), 'rb') as f:
    fdd = pickle.load(f).full_data_dict
# Extract the feature matrices from the stored training example.
a = pred_vec.ProbEst()
a.import_data(fdd)
a.extract_features()
# Persist features (X), labels (Y), the R matrix and the legend matrix.
np.savetxt(os.path.join(curdir, 'pred_eg_01_X'), a.Xtot)
np.savetxt(os.path.join(curdir, 'pred_eg_01_Y'), a.Ytot)
np.savetxt(os.path.join(curdir, 'pred_eg_01_R_matrix.csv'), a.R_matrix)
np.savetxt(os.path.join(curdir, 'pred_eg_01_legmat'), a.legmat, fmt="%s")
|
from enum import IntEnum
import datetime
from project.bl.utils import Resource
from project.extensions import db
from project.lib.orm.types import TypeEnum, GUID
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy.ext.associationproxy import association_proxy
from project.lib.orm.conditions import ConditionDeleted, ConditionHidden
class Vacancy(db.Model):
    """A job vacancy posting, categorized and optionally tied to a city."""
    __tablename__ = 'vacancies'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    short_description = db.Column(db.String(300), nullable=False)
    text = db.Column(db.Text(), nullable=False)
    category_id = db.Column(db.Integer, db.ForeignKey('categories.id'))
    category = db.relationship('Category', backref=db.backref('vacancies'))
    # URL slug; must be unique across vacancies
    name_in_url = db.Column(db.String(50), nullable=False, unique=True)
    # page-view counter
    visits = db.Column(db.Integer, nullable=False, default=0)
    salary = db.Column(db.String(50))
    description = db.Column(db.String(200)) # for search spider
    keywords = db.Column(db.String(1000))
    city_id = db.Column(db.Integer, db.ForeignKey('cities.id'))
    city = db.relationship('City', backref=db.backref('vacancies'))
    is_hidden = db.Column(db.Boolean, nullable=False, default=False)
    is_deleted = db.Column(db.Boolean, nullable=False, default=False)
    updated_at = db.Column(db.DateTime, default=datetime.datetime.now,
                           onupdate=datetime.datetime.now)
    # last editor
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    who_updated = db.relationship('User')
    # query-filter conditions for hidden/deleted rows
    condition_is_hidden = ConditionHidden()
    condition_is_deleted = ConditionDeleted()
    bl = Resource("bl.vacancy")
    def __repr__(self):
        return "[{}] {}".format(self.__class__.__name__, self.title)
class Category(db.Model):
    """A vacancy category (unique name)."""
    __tablename__ = 'categories'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), nullable=False, unique=True)
    bl = Resource('bl.category')
    def __str__(self):
        return self.name
    def __repr__(self):
        return "[{}] {}".format(self.__class__.__name__, self.name)
class User(db.Model):
    """An admin account; ``role`` distinguishes staff from superusers."""
    __tablename__ = 'users'
    # noinspection PyTypeChecker
    ROLE = IntEnum('Role', {
        'staff': 0,
        'superuser': 1,
    })
    id = db.Column(db.Integer, primary_key=True)
    login = db.Column(db.String(30), unique=True, nullable=False)
    password = db.Column(db.String(100), nullable=False)
    name = db.Column(db.String(30))
    surname = db.Column(db.String(30))
    email = db.Column(db.String(320), nullable=False, unique=True)
    role = db.Column(TypeEnum(ROLE), nullable=False, default=ROLE.staff)
    bl = Resource('bl.user')
    def __repr__(self):
        return '{} ({})'.format(self.login, self.get_full_name())
    def get_full_name(self):
        """Return "name surname", omitting missing parts."""
        return '{} {}'.format(self.name or '', self.surname or '').strip()
    def is_superuser(self):
        """Return True when the account has the superuser role."""
        return self.role == self.ROLE.superuser
class City(db.Model):
    """A city a vacancy can be located in (unique name)."""
    __tablename__ = 'cities'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(20), nullable=False, unique=True)
    bl = Resource('bl.city')
    def __str__(self):
        return self.name
    def __repr__(self):
        return "[{}] {}".format(self.__class__.__name__, self.name)
class BlockPageAssociation(db.Model):
    """Ordered many-to-many link between Page and PageBlock.

    ``position`` is maintained by the ordering_list collection on Page.
    """
    __tablename__ = 'block_page_associations'
    page_id = db.Column(
        db.Integer,
        db.ForeignKey('pages.id'),
        primary_key=True
    )
    block_id = db.Column(
        db.Integer,
        db.ForeignKey('pageblocks.id'),
        primary_key=True
    )
    position = db.Column(db.Integer)
    block = db.relationship(
        'PageBlock',
    )
class PageChunk(db.Model):
    """A named, reusable text snippet referenced from templates."""
    __tablename__ = 'pagechunks'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.Text, unique=True, nullable=False) # use in template
    title = db.Column(db.Text, unique=True, nullable=False)
    text = db.Column(db.Text)
    bl = Resource('bl.pagechunk')
class PageBlock(db.Model):
    """A content block (text plus optional image) placed on CMS pages."""
    __tablename__ = 'pageblocks'
    # noinspection PyTypeChecker
    # layout variants: image left/right of the text, or text only
    TYPE = IntEnum(
        'Block_type',
        {
            'img_left': 0,
            'img_right': 1,
            'no_img': 2,
        },
    )
    id = db.Column(db.Integer, primary_key=True)
    block_type = db.Column(
        TypeEnum(TYPE),
        default=TYPE.img_left,
        nullable=False
    )
    # header
    title = db.Column(db.VARCHAR(128), nullable=True)
    text = db.Column(db.Text)
    # used for mainpage
    short_description = db.Column(db.VARCHAR(256), nullable=True)
    image = db.Column(db.Text, nullable=True)
    bl = Resource('bl.pageblock')
    def __str__(self):
        return '%s: %s' % (self.title, self.text or self.short_description)
class Page(db.Model):
    """A singleton-per-type CMS page composed of ordered PageBlocks."""
    __tablename__ = 'pages'
    # noinspection PyTypeChecker
    TYPE = IntEnum('Page_type', {
        'PROJECTS': 1,
        'ABOUT': 2,
        'CONTACTS': 3,
        'MAINPAGE': 4,
    })
    id = db.Column(db.Integer, primary_key=True)
    type = db.Column(TypeEnum(TYPE), unique=True, nullable=False)
    title = db.Column(db.VARCHAR(128))
    # Ordered association rows; 'position' is maintained by ordering_list.
    _blocks = db.relationship(
        "BlockPageAssociation",
        order_by='BlockPageAssociation.position',
        collection_class=ordering_list('position'),
        cascade='save-update, merge, delete, delete-orphan',
    )
    # Proxy exposing PageBlocks directly; appends create association rows.
    blocks = association_proxy(
        '_blocks',
        'block',
        creator=lambda _pb: BlockPageAssociation(block=_pb)
    )
    bl = Resource('bl.page')
    def __str__(self):
        # NOTE(review): no 'url' column/attribute is defined on this model in
        # this file -- confirm 'url' is provided elsewhere (mixin/property),
        # otherwise str(page) raises AttributeError.
        return '%s (%s)' % (self.title, self.url)
class Token(db.Model):
    """An authentication token bound to a user."""
    __tablename__ = 'tokens'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    user = db.relationship(
        'User',
    )
    token = db.Column(db.String, nullable=False)
    bl = Resource('bl.token')
class MailTemplate(db.Model):
    """An editable HTML email template, one per MAIL kind."""
    __tablename__ = 'mailtemplates'
    # noinspection PyTypeChecker
    MAIL = IntEnum('Mail', {
        'CV': 0,
        'REPLY': 1,
    })
    id = db.Column(db.Integer, primary_key=True)
    mail = db.Column(TypeEnum(MAIL), nullable=False)
    title = db.Column(db.String, nullable=False)
    # 79 chars keeps the subject within the RFC-recommended line length
    subject = db.Column(db.String(79), nullable=False)
    html = db.Column(db.Text, nullable=False)
    # editor-facing hint about available placeholders
    help_msg = db.Column(db.Text)
    updated_at = db.Column(db.Date, onupdate=datetime.datetime.now,
                           default=datetime.datetime.now)
    bl = Resource('bl.mailtemplate')
    # last editor
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    who_updated = db.relationship('User')
    def __repr__(self):
        return str(self.title)
class UploadedImage(db.Model):
    """An uploaded image file, stored under a GUID name with its extension."""
    __tablename__ = 'uploaded_images'
    IMG_CATEGORY = IntEnum('ImageCategory', {
        'other': 0,
        'gallery': 1,
    })
    id = db.Column(db.Integer, primary_key=True)
    # file name on disk (GUID) and its extension, tracked separately
    name = db.Column(GUID, nullable=False)
    ext = db.Column(db.VARCHAR, nullable=False)
    img_category = db.Column(
        TypeEnum(IMG_CATEGORY),
        default=IMG_CATEGORY.other,
        nullable=False,
    )
    title = db.Column(db.VARCHAR(32))
    description = db.Column(db.VARCHAR(128))
    # the same file may not be registered twice within one category
    __table_args__ = (
        db.UniqueConstraint(
            'name',
            'ext',
            'img_category',
        ),
    )
    bl = Resource('bl.uploadedimage')
def init_db():
    """Recreate the schema from scratch. WARNING: drops all existing data."""
    db.drop_all()
    db.create_all()
|
import argparse
import numpy as np
import matplotlib.pyplot as plt
argParser = argparse.ArgumentParser()
argParser.add_argument('datafile')
args = argParser.parse_args()
time, signal = [], []
# Context manager guarantees the file is closed even if a line fails to parse.
with open(args.datafile, 'r') as fin:
    for line in fin:
        words = line.split(',')
        # Input is CSV with the sample value in column 0 and the timestamp
        # in column 1.
        time.append(float(words[1]))
        signal.append(float(words[0]))
time, signal = np.array(time), np.array(signal)
# samples per second, assuming timestamps start near zero
samplingRate = time.size / time[-1]
plt.specgram(signal, Fs=samplingRate)
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.colorbar().set_label('Amplitude (Frequency power)')
plt.show()
|
from __future__ import unicode_literals
try:
    from bs4 import BeautifulSoup
except ImportError:
    # fall back to the legacy BeautifulSoup 3 package
    from BeautifulSoup import BeautifulSoup
import lxml.html
import lxml.html.clean
import re
import unicodedata
VERSION = (8,)
__version__ = '.'.join(map(str, VERSION))
__all__ = ('cleanse_html', 'Cleanse')
class Cleanse(object):
    """Whitelist-based HTML sanitizer for copy-pasted rich text.

    Keeps only ``allowed_tags`` (with their allowed attributes), converts
    styled <span>s into <em>/<strong>, drops empty elements, merges adjacent
    identical tags and normalizes whitespace and entities.
    """
    # tag name -> tuple of attributes that survive cleansing
    allowed_tags = {
        'a': ('href', 'name', 'target', 'title'),
        'h2': (),
        'h3': (),
        'strong': (),
        'em': (),
        'p': (),
        'ul': (),
        'ol': (),
        'li': (),
        'span': (),
        'br': (),
        'sub': (),
        'sup': (),
    }
    # tags that may legitimately have no content
    empty_tags = ('br',)
    # adjacent identical tags of these types get merged into one
    merge_tags = ('h2', 'h3', 'strong', 'em', 'ul', 'ol', 'sub', 'sup')
    def validate_href(self, href):
        """
        Verify that a given href is benign and allowed.
        This is a stupid check, which probably should be much more elaborate
        to be safe.
        """
        return href.startswith(
            ('/', 'mailto:', 'http:', 'https:', '#', 'tel:'))
    def clean(self, element):
        """ Hook for your own clean methods. """
        return element
    def cleanse(self, html):
        """
        Clean HTML code from ugly copy-pasted CSS and empty elements
        Removes everything not explicitly allowed in ``self.allowed_tags``.
        Requires ``lxml`` and ``beautifulsoup``.
        """
        # wrap so the fragment has a single root element for the XML parser
        html = '<anything>%s</anything>' % html
        doc = lxml.html.fromstring(html)
        try:
            lxml.html.tostring(doc, encoding='utf-8')
        except UnicodeDecodeError:
            # fall back to slower BeautifulSoup if parsing failed
            from lxml.html import soupparser
            doc = soupparser.fromstring(html)
        cleaner = lxml.html.clean.Cleaner(
            allow_tags=list(self.allowed_tags.keys()) + ['style', 'anything'],
            remove_unknown_tags=False, # preserve surrounding 'anything' tag
            style=False, safe_attrs_only=False, # do not strip out style
                                                # attributes; we still need
                                                # the style information to
                                                # convert spans into em/strong
                                                # tags
        )
        cleaner(doc)
        # walk the tree recursively, because we want to be able to remove
        # previously emptied elements completely
        for element in reversed(list(doc.iterdescendants())):
            if element.tag == 'style':
                element.drop_tree()
                continue
            # convert span elements into em/strong if a matching style rule
            # has been found. strong has precedence, strong & em at the same
            # time is not supported
            elif element.tag == 'span':
                style = element.attrib.get('style')
                if style:
                    if 'bold' in style:
                        element.tag = 'strong'
                    elif 'italic' in style:
                        element.tag = 'em'
                if element.tag == 'span': # still span
                    # remove tag, but preserve children and text
                    element.drop_tag()
                    continue
            # remove empty tags if they are not <br />
            elif (not element.text and
                    element.tag not in self.empty_tags and
                    not len(element)):
                element.drop_tag()
                continue
            elif element.tag == 'li':
                # remove p-in-li tags
                for p in element.findall('p'):
                    if getattr(p, 'text', None):
                        p.text = ' ' + p.text + ' '
                    p.drop_tag()
            # Hook for custom filters:
            element = self.clean(element)
            # remove all attributes which are not explicitly allowed
            allowed = self.allowed_tags.get(element.tag, [])
            for key in element.attrib.keys():
                if key not in allowed:
                    del element.attrib[key]
            # Clean hrefs so that they are benign
            href = element.attrib.get('href', None)
            if href is not None and not self.validate_href(href):
                element.attrib['href'] = ''
        # just to be sure, run cleaner again, but this time with even more
        # strict settings
        cleaner = lxml.html.clean.Cleaner(
            allow_tags=list(self.allowed_tags.keys()) + ['anything'],
            remove_unknown_tags=False, # preserve surrounding 'anything' tag
            style=True, safe_attrs_only=True
        )
        cleaner(doc)
        html = lxml.html.tostring(doc, method='xml').decode('utf-8')
        # remove all sorts of newline characters
        # NOTE(review): some replace() literals below look mojibake-damaged
        # (plausibly NBSP and the U+2028/U+2029 line separators originally)
        # -- verify against version control before changing them.
        html = html.replace('\n', ' ').replace('\r', ' ')
        html = html.replace(' ', ' ').replace(' ', ' ')
        html = html.replace('
', ' ').replace('
', ' ')
        # remove elements containing only whitespace or linebreaks
        whitespace_re = re.compile(
            r'<([a-z0-9]+)>(<br\s*/>|\ |\ |\s)*</\1>')
        while True:
            new = whitespace_re.sub('', html)
            if new == html:
                break
            html = new
        # merge tags
        for tag in self.merge_tags:
            merge_str = '\s*</%s>\s*<%s>\s*' % (tag, tag)
            while True:
                new = re.sub(merge_str, ' ', html)
                if new == html:
                    break
                html = new
        # fix p-in-p tags
        p_in_p_start_re = re.compile(r'<p>(\ |\ |\s)*<p>')
        p_in_p_end_re = re.compile('</p>(\ |\ |\s)*</p>')
        for tag in self.merge_tags:
            merge_start_re = re.compile(
                '<p>(\\ |\\ |\\s)*<%s>(\\ |\\ |\\s)*<p>'
                % tag)
            merge_end_re = re.compile(
                '</p>(\\ |\\ |\\s)*</%s>(\\ |\\ |\\s)*</p>'
                % tag)
            while True:
                new = merge_start_re.sub('<p>', html)
                new = merge_end_re.sub('</p>', new)
                new = p_in_p_start_re.sub('<p>', new)
                new = p_in_p_end_re.sub('</p>', new)
                if new == html:
                    break
                html = new
        # remove list markers with <li> tags before them
        html = re.sub(
            r'<li>(\ |\ |\s)*(-|\*|·)(\ |\ |\s)+',
            '<li>',
            html)
        # add a space before the closing slash in empty tags
        html = re.sub(r'<([^/>]+)/>', r'<\1 />', html)
        # remove wrapping tag needed by XML parser
        html = re.sub(r'</?anything( /)?>', '', html)
        # nicify entities and normalize unicode
        html = '%s' % BeautifulSoup(html, 'lxml')
        html = unicodedata.normalize('NFKC', html)
        html = re.sub(r'^<html><body>', '', html)
        html = re.sub(r'</body></html>$', '', html)
        # add a space before the closing slash in empty tags
        html = re.sub(r'<([^/>]+)/>', r'<\1 />', html)
        return html
def cleanse_html(html):
    """
    Compat shim for older cleanse API
    Equivalent to ``Cleanse().cleanse(html)`` with default settings.
    """
    return Cleanse().cleanse(html)
|
import numpy as np
import pandas as pd
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
from base import TestBase
from PVGeo import interface
from PVGeo._helpers import xml
# relative tolerance for floating-point comparisons in these tests
RTOL = 0.000001
class TestXML(TestBase):
    """
    Test the XML Helpers to make sure no errors are thrown
    """
    def test_simple(self):
        """XML: Make sure no errors arise"""
        # Smoke tests: each helper only has to run without raising.
        _ = xml.get_python_path_property()
        _ = xml.get_reader_time_step_values('txt dat', 'A description')
        m = xml.get_vtk_type_map()
        # vtkUnstructuredGrid's VTK type id is 4
        self.assertEqual(m['vtkUnstructuredGrid'], 4)
        _ = xml.get_property_xml(
            'foo', 'SetFoo', 4, panel_visibility='default', help='foo help'
        )
        _ = xml.get_property_xml(
            'foo', 'SetFoo', True, panel_visibility='default', help='foo help'
        )
        _ = xml.get_file_reader_xml(
            'txt dat', reader_description='desc!!', command="AddFileName"
        )
        _ = xml.get_drop_down_xml(
            'foo', 'SetFoo', ['foo1', 'foo2'], help='Help the foo', values=[1, 2]
        )
        _ = xml.get_input_array_xml(
            labels=['foo'], nInputPorts=1, n_arrays=1, input_names='Input'
        )
        return
class TestDataFrameConversions(TestBase):
    """
    Test the pandas DataFrames conversions to VTK data objects
    """
    def test_df_to_table(self):
        """`table_to_data_frame`: test interface conversion for tables"""
        names = ['x', 'y', 'z', 'a', 'b']
        data = np.random.rand(100, len(names))
        df = pd.DataFrame(data=data, columns=names)
        table = vtk.vtkTable()
        interface.data_frame_to_table(df, table)
        wtbl = dsa.WrapDataObject(table)
        # Now check the vtkTable
        for i, name in enumerate(names):
            # Check data array names
            self.assertEqual(table.GetColumnName(i), name)
            # Check data contents
            arr = wtbl.RowData[name]
            self.assertTrue(np.allclose(arr, df[name].values, rtol=RTOL))
        # Now test backwards compatibility (round-trip back to a DataFrame)
        dfo = interface.table_to_data_frame(table)
        # self.assertTrue(df.equals(dfo)) # Sorting is different on Py2.7 and 3.5
        for name in dfo.keys():
            self.assertTrue(np.allclose(df[name], dfo[name], rtol=RTOL))
        return
if __name__ == '__main__':
    # Allow running this test module directly without a test runner.
    import unittest
    unittest.main()
|
"""
Download articles from RSS feeds
"""
from django.apps import AppConfig
class RssSyncAppConfig(AppConfig):
    """Django app configuration for the RSS synchronization app."""
    name = 'coop_cms.apps.rss_sync'
    verbose_name = "RSS Synchronization"
|
"""Added short name
Revision ID: 1f5a646334e1
Revises: 1e9f6460c977
Create Date: 2014-05-09 10:17:17.150660
"""
# Alembic revision identifiers used by the migration framework.
revision = '1f5a646334e1'
down_revision = '1e9f6460c977'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable 8-char 'short_name' column to the station table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('station', sa.Column('short_name', sa.String(length=8), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Revert the upgrade by dropping 'short_name' from the station table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('station', 'short_name')
    ### end Alembic commands ###
|
import sys
import os
import traceback
import time
import tempfile
import threading
import logging
import platform
import atexit
import shutil
import inspect
from collections import deque, OrderedDict
from ginga import cmap, imap
from ginga.misc import Bunch, Timer, Future
from ginga.util import catalog, iohelper, loader, toolbox
from ginga.util import viewer as gviewer
from ginga.canvas.CanvasObject import drawCatalog
from ginga.canvas.types.layer import DrawingCanvas
from ginga.gw import GwHelp, GwMain, PluginManager
from ginga.gw import Widgets, Viewers, Desktop
from ginga import toolkit
from ginga.fonts import font_asst
from ginga import __version__
from ginga.rv.Channel import Channel
# Optional dependency: docutils is used to render plugin documentation;
# degrade gracefully when it is not installed.
have_docutils = False
try:
    from docutils.core import publish_string
    have_docutils = True
except ImportError:
    pass
# Prefix for plugin configuration files (None = default locations).
pluginconfpfx = None
# Installation directory of the ginga package.
package_home = os.path.split(sys.modules['ginga.version'].__file__)[0]
tkname = toolkit.get_family()
if tkname is not None:
    # TODO: this relies on a naming convention for widget directories!
    # TODO: I think this can be removed, since the widget specific
    # plugin directories have been deleted
    child_dir = os.path.join(package_home, tkname + 'w', 'plugins')
    sys.path.insert(0, child_dir)
icon_path = os.path.abspath(os.path.join(package_home, 'icons'))
class ControlError(Exception):
    """Raised for errors in the reference-viewer control layer."""
    pass
class GingaViewError(Exception):
    """Raised for viewer-related errors in the Ginga shell."""
    pass
class GingaShell(GwMain.GwMain, Widgets.Application):
"""
Main Ginga shell for housing plugins and running the reference
viewer.
"""
    def __init__(self, logger, thread_pool, module_manager, preferences,
                 ev_quit=None):
        """Set up the shell: preferences, threads, timers, channels, plugins.

        Parameters
        ----------
        logger : logger object
            Logger shared by the shell and its subsystems.
        thread_pool : thread pool object
            Pool used by GwMain for worker tasks.
        module_manager : module manager object
            Loader/registry for plugin modules (stored as ``self.mm``).
        preferences : preferences object
            Root preferences; the 'general' category is created from it.
        ev_quit : threading.Event, optional
            Termination event shared by worker threads; a new Event is
            created when not supplied.
        """
        GwMain.GwMain.__init__(self, logger=logger, ev_quit=ev_quit,
                               app=self, thread_pool=thread_pool)
        # Create general preferences
        self.prefs = preferences
        settings = self.prefs.create_category('general')
        settings.add_defaults(fixedFont=None,
                              serifFont=None,
                              sansFont=None,
                              channel_follows_focus=False,
                              scrollbars='off',
                              numImages=10,
                              # Offset to add to numpy-based coords
                              pixel_coords_offset=1.0,
                              # save primary header when loading files
                              inherit_primary_header=True,
                              cursor_interval=0.050,
                              download_folder=None,
                              save_layout=False,
                              channel_prefix="Image")
        settings.load(onError='silent')
        # this will set self.logger and self.settings
        Widgets.Application.__init__(self, logger=logger, settings=settings)
        self.mm = module_manager
        # event for controlling termination of threads executing in this
        # object
        if not ev_quit:
            self.ev_quit = threading.Event()
        else:
            self.ev_quit = ev_quit
        self.tmpdir = tempfile.mkdtemp()
        # remove temporary directory on exit
        atexit.register(_rmtmpdir, self.tmpdir)
        # For callbacks
        for name in ('add-image', 'channel-change', 'remove-image',
                     'add-channel', 'delete-channel', 'field-info',
                     'add-image-info', 'remove-image-info'):
            self.enable_callback(name)
        # Initialize the timer factory
        self.timer_factory = Timer.TimerFactory(ev_quit=self.ev_quit,
                                                logger=self.logger)
        self.timer_factory.wind()
        self.lock = threading.RLock()
        # channel bookkeeping: name -> Channel, plus ordered name list
        self.channel = {}
        self.channel_names = []
        self.cur_channel = None
        self.wscount = 0
        self.statustask = None
        # queue of images to preload ahead of the user (bounded to 4)
        self.preload_lock = threading.RLock()
        self.preload_list = deque([], 4)
        # Load bindings preferences
        bindprefs = self.prefs.create_category('bindings')
        bindprefs.load(onError='silent')
        self.plugins = []
        self._plugin_sort_method = self.get_plugin_menuname
        # some default colormap info
        self.cm = cmap.get_cmap("gray")
        self.im = imap.get_imap("ramp")
        # This plugin manager handles "global" (aka standard) plug ins
        # (unique instances, not per channel)
        self.gpmon = self.get_plugin_manager(self.logger, self,
                                             None, self.mm)
        # Initialize catalog and image server bank
        self.imgsrv = catalog.ServerBank(self.logger)
        # state for implementing field-info callback
        self._cursor_task = self.get_backend_timer()
        self._cursor_task.set_callback('expired', self._cursor_timer_cb)
        self._cursor_last_update = time.time()
        self.cursor_interval = self.settings.get('cursor_interval', 0.050)
        # add user preferred fonts for aliases, if present
        fixed_font = self.settings.get('fixedFont', None)
        if fixed_font is not None:
            font_asst.add_alias('fixed', fixed_font)
        serif_font = self.settings.get('serifFont', None)
        if serif_font is not None:
            font_asst.add_alias('serif', serif_font)
        sans_font = self.settings.get('sansFont', None)
        if sans_font is not None:
            font_asst.add_alias('sans', sans_font)
        # GUI initialization
        self.w = Bunch.Bunch()
        self.iconpath = icon_path
        self.main_wsname = None
        self._lastwsname = None
        self.ds = None
        self.layout = None
        self.layout_file = None
        self._lsize = None
        self._rsize = None
        self.filesel = None
        self.menubar = None
        self.gui_dialog_lock = threading.RLock()
        self.gui_dialog_list = []
        # register the standard viewer types usable in channels
        gviewer.register_viewer(Viewers.CanvasView)
        gviewer.register_viewer(Viewers.TableViewGw)
        gviewer.register_viewer(Viewers.PlotViewGw)
def get_server_bank(self):
    """Return the catalog/image server bank (a catalog.ServerBank)."""
    return self.imgsrv
def get_preferences(self):
    """Return the application preferences object."""
    return self.prefs
def get_timer(self):
    """Return a new timer created by the timer factory."""
    return self.timer_factory.timer()
def get_backend_timer(self):
    """Return a new timer implemented by the GUI back end."""
    return self.make_timer()
def stop(self):
    """Shut down the application: stop the timer factory and signal
    all threads waiting on the quit event.
    """
    self.logger.info("shutting down Ginga...")
    self.timer_factory.quit()
    self.ev_quit.set()
    self.logger.debug("should be exiting now")
def reset_viewer(self):
    """Deactivate the focused operation in the current channel and
    restore the normal window size.
    """
    channel = self.get_current_channel()
    opmon = channel.opmon
    opmon.deactivate_focused()
    self.normalsize()
def get_draw_class(self, drawtype):
    """Return the canvas draw class registered for `drawtype`
    (matched case-insensitively).

    Raises KeyError if there is no such draw type.
    """
    return drawCatalog[drawtype.lower()]
def get_draw_classes(self):
    """Return the catalog of all available canvas draw classes."""
    return drawCatalog
def make_async_gui_callback(self, name, *args, **kwargs):
    """Invoke callback `name` asynchronously in the GUI thread."""
    # NOTE: asynchronous!
    self.gui_do(self.make_callback, name, *args, **kwargs)
def make_gui_callback(self, name, *args, **kwargs):
    """Invoke callback `name` in the GUI thread.

    If called from the GUI thread the callback runs synchronously and
    its result is returned; otherwise it is queued asynchronously and
    None is returned.
    """
    if not self.is_gui_thread():
        # note: this cannot be "gui_call"--locks viewer.
        # so call becomes async when a non-gui thread invokes it
        self.gui_do(self.make_callback, name, *args, **kwargs)
        return None
    return self.make_callback(name, *args, **kwargs)
# PLUGIN MANAGEMENT
def start_operation(self, opname):
    """Start local plugin `opname` in the current channel."""
    return self.start_local_plugin(None, opname, None)
def stop_operation_channel(self, chname, opname):
    """Deprecated--use stop_local_plugin() instead."""
    self.logger.warning(
        "Do not use this method name--it will be deprecated!")
    return self.stop_local_plugin(chname, opname)
def start_local_plugin(self, chname, opname, future):
    """Start local plugin `opname` in channel `chname` (None means
    the current channel), resuming from `future` if one is given.
    """
    channel = self.get_channel(chname)
    opmon = channel.opmon
    opmon.start_plugin_future(channel.name, opname, future)
    # briefly flash the plugin name, if the viewer supports messages
    if hasattr(channel.viewer, 'onscreen_message'):
        channel.viewer.onscreen_message(opname, delay=1.0)
def stop_local_plugin(self, chname, opname):
    """Deactivate local plugin `opname` in channel `chname`."""
    channel = self.get_channel(chname)
    opmon = channel.opmon
    opmon.deactivate(opname)
def call_local_plugin_method(self, chname, plugin_name, method_name,
                             args, kwargs):
    """Call a method of a local plugin, marshalled to the GUI thread.

    Parameters
    ----------
    chname : str
        The name of the channel containing the plugin.

    plugin_name : str
        The name of the local plugin containing the method to call.

    method_name : str
        The name of the method to call.

    args : list or tuple
        The positional arguments to the method

    kwargs : dict
        The keyword arguments to the method

    Returns
    -------
    result : return value from calling the method
    """
    channel = self.get_channel(chname)
    opmon = channel.opmon
    p_obj = opmon.get_plugin(plugin_name)
    method = getattr(p_obj, method_name)
    # run in the GUI thread and wait for the result
    return self.gui_call(method, *args, **kwargs)
def start_global_plugin(self, plugin_name, raise_tab=False):
    """Start the named global plugin, optionally raising its tab."""
    self.gpmon.start_plugin_future(None, plugin_name, None)
    if raise_tab:
        p_info = self.gpmon.get_plugin_info(plugin_name)
        self.ds.raise_tab(p_info.tabname)
def stop_global_plugin(self, plugin_name):
    """Deactivate the named global plugin."""
    self.gpmon.deactivate(plugin_name)
def call_global_plugin_method(self, plugin_name, method_name,
                              args, kwargs):
    """Call a method of a global plugin, marshalled to the GUI thread.

    Parameters
    ----------
    plugin_name : str
        The name of the global plugin containing the method to call.

    method_name : str
        The name of the method to call.

    args : list or tuple
        The positional arguments to the method

    kwargs : dict
        The keyword arguments to the method

    Returns
    -------
    result : return value from calling the method
    """
    p_obj = self.gpmon.get_plugin(plugin_name)
    method = getattr(p_obj, method_name)
    # run in the GUI thread and wait for the result
    return self.gui_call(method, *args, **kwargs)
def start_plugin(self, plugin_name, spec):
    """Start a plugin, dispatching on its spec's 'ptype'
    ('local' starts it in the current channel; anything else is
    treated as global and its tab is raised).
    """
    if spec.get('ptype', 'local') != 'local':
        self.start_global_plugin(plugin_name, raise_tab=True)
    else:
        self.start_operation(plugin_name)
def add_local_plugin(self, spec):
    """Register a local (per-channel) plugin from a specification.

    Parameters
    ----------
    spec : dict-like
        Plugin specification; must have a `module` attribute and may
        contain 'name', 'klass', 'pfx' and 'path' entries.
    """
    # BUGFIX: bind a fallback name up front, otherwise a malformed
    # spec raising before `name` is assigned makes the except clause
    # below die with a NameError, masking the real error.
    name = spec.get('name', repr(spec))
    try:
        spec.setdefault('ptype', 'local')
        name = spec.setdefault('name', spec.get('klass', spec.module))
        pfx = spec.get('pfx', pluginconfpfx)
        path = spec.get('path', None)
        self.mm.load_module(spec.module, pfx=pfx, path=path)

        self.plugins.append(spec)

    except Exception as e:
        self.logger.error("Unable to load local plugin '%s': %s" % (
            name, str(e)))
def add_global_plugin(self, spec):
    """Register a global plugin from a specification and load it into
    the global plugin manager.

    Parameters
    ----------
    spec : dict-like
        Plugin specification; must have a `module` attribute and may
        contain 'name', 'klass', 'pfx' and 'path' entries.
    """
    # BUGFIX: bind a fallback name up front, otherwise a malformed
    # spec raising before `name` is assigned makes the except clause
    # below die with a NameError, masking the real error.
    name = spec.get('name', repr(spec))
    try:
        spec.setdefault('ptype', 'global')
        name = spec.setdefault('name', spec.get('klass', spec.module))
        pfx = spec.get('pfx', pluginconfpfx)
        path = spec.get('path', None)
        self.mm.load_module(spec.module, pfx=pfx, path=path)

        self.plugins.append(spec)
        self.gpmon.load_plugin(name, spec)

    except Exception as e:
        self.logger.error("Unable to load global plugin '%s': %s" % (
            name, str(e)))
def add_plugin(self, spec):
    """Register one plugin spec, dispatching on its 'ptype'
    ('global' vs anything else, which is treated as local).
    Specs with enabled=False are silently ignored.
    """
    if not spec.get('enabled', True):
        # plugin explicitly disabled in configuration
        return
    handler = (self.add_global_plugin
               if spec.get('ptype', 'local') == 'global'
               else self.add_local_plugin)
    handler(spec)
def set_plugins(self, plugins):
    """Replace the plugin list, registering each spec in order."""
    self.plugins = []
    for plugin_spec in plugins:
        self.add_plugin(plugin_spec)
def get_plugins(self):
    """Return the list of registered plugin specs."""
    return self.plugins
def get_plugin_spec(self, name):
    """Get the specification attributes for plugin with name `name`.

    Parameters
    ----------
    name : str
        Plugin name, matched case-insensitively.

    Raises
    ------
    KeyError
        If no registered plugin matches `name`.
    """
    l_name = name.lower()
    for spec in self.plugins:
        # BUGFIX: use a separate variable; the original rebound `name`
        # here, so the KeyError below reported the last plugin
        # examined instead of the name that was requested.
        spec_name = spec.get('name', spec.get('klass', spec.module))
        if spec_name.lower() == l_name:
            return spec
    raise KeyError(name)
def get_plugin_menuname(self, spec):
    """Return the menu label for a plugin spec, prefixed with its
    category (if any) as 'category.menu'.

    Side effect: fills in spec['name'] if it was missing.
    """
    plugin_name = spec.setdefault('name', spec.get('klass', spec.module))
    menu_name = spec.get('menu', spec.get('tab', plugin_name))
    category = spec.get('category', None)
    if category is None:
        return menu_name
    return category + '.' + menu_name
def set_plugin_sortmethod(self, fn):
    """Set the key function used to order plugins (see boot_plugins())."""
    self._plugin_sort_method = fn
def boot_plugins(self):
    """Add menu entries for all non-hidden plugins and start the
    global plugins flagged to start at boot.
    """
    # Sort plugins according to desired order
    self.plugins.sort(key=self._plugin_sort_method)

    for spec in self.plugins:
        name = spec.setdefault('name', spec.get('klass', spec.module))
        hidden = spec.get('hidden', False)
        if not hidden:
            self.add_plugin_menu(name, spec)

        start = spec.get('start', True)
        # for now only start global plugins that have start==True
        # channels are not yet created by this time
        if start and spec.get('ptype', 'local') == 'global':
            # error_wrap reports (not raises) any startup failure
            self.error_wrap(self.start_plugin, name, spec)
def show_error(self, errmsg, raisetab=True):
    """Display `errmsg` in the Errors plugin (if loaded), optionally
    raising its tab to the front.
    """
    if self.gpmon.has_plugin('Errors'):
        obj = self.gpmon.get_plugin('Errors')
        obj.add_error(errmsg)
        if raisetab:
            self.ds.raise_tab('Errors')
def error_wrap(self, method, *args, **kwargs):
    """Call `method` with the given arguments, reporting (but not
    propagating) any exception via the log and the Errors plugin.

    Returns
    -------
    The return value of `method`, or None if it raised.
    """
    try:
        return method(*args, **kwargs)

    except Exception as e:
        errmsg = "\n".join([e.__class__.__name__, str(e)])
        try:
            # BUGFIX: don't shadow the builtin `type` or the outer
            # exception name `e` while capturing the traceback
            (_type, _value, tb) = sys.exc_info()
            tb_str = "\n".join(traceback.format_tb(tb))

        except Exception:
            tb_str = "Traceback information unavailable."

        errmsg += tb_str
        self.logger.error(errmsg)
        self.gui_do(self.show_error, errmsg, raisetab=True)
def help_text(self, name, text, text_kind='plain', trim_pfx=0):
    """
    Provide help text for the user.

    This method will convert the text as necessary with docutils and
    display it in the WBrowser plugin, if available.  If the plugin is
    not available and the text is type 'rst' then the text will be
    displayed in a plain text widget.

    Parameters
    ----------
    name : str
        Category of help to show.

    text : str
        The text to show.  Should be plain, HTML or RST text

    text_kind : str (optional)
        One of 'plain', 'html', 'rst'.  Default is 'plain'.

    trim_pfx : int (optional)
        Number of spaces to trim off the beginning of each line of text.

    Raises
    ------
    ValueError
        If `text_kind` is not one of the three supported kinds.
    """
    if trim_pfx > 0:
        # caller wants to trim some space off the front
        # of each line
        text = toolbox.trim_prefix(text, trim_pfx)

    if text_kind == 'rst':
        # try to convert RST to HTML using docutils
        try:
            overrides = {'input_encoding': 'ascii',
                         'output_encoding': 'utf-8'}
            text_html = publish_string(text, writer_name='html',
                                       settings_overrides=overrides)
            # docutils produces 'bytes' output, but webkit needs
            # a utf-8 string
            text = text_html.decode('utf-8')
            text_kind = 'html'

        except Exception as e:
            self.logger.error("Error converting help text to HTML: %s" % (
                str(e)))
            # revert to showing RST as plain text

    elif text_kind not in ('plain', 'html'):
        # BUGFIX: 'plain' and 'html' are documented kinds and must be
        # accepted; only truly unknown kinds are an error
        raise ValueError(
            "I don't know how to display text of kind '%s'" % (text_kind))

    if text_kind == 'html':
        self.help(text=text, text_kind='html')

    else:
        self.show_help_text(name, text)
def help(self, text=None, text_kind='url'):
    """Show help in the WBrowser plugin.

    `text` may be a URL (text_kind='url') or literal page content;
    with no text, the plugin's default help index is shown.
    """
    if not self.gpmon.has_plugin('WBrowser'):
        return self.show_error("help() requires 'WBrowser' plugin")

    self.start_global_plugin('WBrowser')

    # need to let GUI finish processing, it seems
    self.update_pending()

    obj = self.gpmon.get_plugin('WBrowser')

    if text is not None:
        if text_kind == 'url':
            obj.browse(text)
        else:
            obj.browse(text, url_is_content=True)
    else:
        obj.show_help()
def show_help_text(self, name, help_txt, wsname='right'):
    """
    Show help text in a closeable tab window.  The title of the
    window is set from ``name`` prefixed with 'HELP:'
    """
    tabname = 'HELP: {}'.format(name)
    group = 1
    tabnames = self.ds.get_tabnames(group)
    if tabname in tabnames:
        # tab is already up somewhere
        return

    vbox = Widgets.VBox()
    vbox.set_margins(4, 4, 4, 4)
    vbox.set_spacing(2)

    msg_font = self.get_font('fixed', 12)
    tw = Widgets.TextArea(wrap=False, editable=False)
    tw.set_font(msg_font)
    tw.set_text(help_txt)
    vbox.add_widget(tw, stretch=1)

    btns = Widgets.HBox()
    btns.set_border_width(4)
    btns.set_spacing(3)

    # closing the tab removes it from the desktop
    def _close_cb(w):
        self.ds.remove_tab(tabname)

    btn = Widgets.Button("Close")
    btn.add_callback('activated', _close_cb)
    btns.add_widget(btn, stretch=0)
    btns.add_widget(Widgets.Label(''), stretch=1)
    vbox.add_widget(btns, stretch=0)

    self.ds.add_tab(wsname, vbox, group, tabname)
    self.ds.raise_tab(tabname)
# BASIC IMAGE OPERATIONS
def load_image(self, filespec, idx=None, show_error=True):
    """
    A wrapper around ginga.util.loader.load_data()

    Parameters
    ----------
    filespec : str
        The path of the file to load (must reference a single file).

    idx : str, int or tuple; optional, defaults to None
        The index of the image to open within the file.

    show_error : bool, optional, defaults to True
        If `True`, then display an error in the GUI if the file
        loading process fails.

    Returns
    -------
    data_obj : data object named by filespec

    Raises
    ------
    ControlError
        If the underlying loader fails for any reason.
    """
    save_prihdr = self.settings.get('inherit_primary_header', False)
    try:
        data_obj = loader.load_data(filespec, logger=self.logger,
                                    idx=idx,
                                    save_primary_header=save_prihdr)

    except Exception as e:
        errmsg = "Failed to load file '%s': %s" % (
            filespec, str(e))
        self.logger.error(errmsg)
        try:
            # BUGFIX: don't shadow the builtin `type` or the outer
            # exception name `e` (matches open_file_cont's style)
            (_type, _value, tb) = sys.exc_info()
            tb_str = "\n".join(traceback.format_tb(tb))

        except Exception:
            tb_str = "Traceback information unavailable."

        if show_error:
            self.gui_do(self.show_error, errmsg + '\n' + tb_str)
        raise ControlError(errmsg)

    self.logger.debug("Successfully loaded file into object.")
    return data_obj
def load_file(self, filepath, chname=None, wait=True,
              create_channel=True, display_image=True,
              image_loader=None):
    """Load a file and display it.

    Parameters
    ----------
    filepath : str
        The path of the file to load (must reference a local file).

    chname : str, optional
        The name of the channel in which to display the image.

    wait : bool, optional
        If `True`, then wait for the file to be displayed before returning
        (synchronous behavior).

    create_channel : bool, optional
        Create channel.

    display_image : bool, optional
        If not `False`, then will load the image.

    image_loader : func, optional
        A special image loader, if provided.

    Returns
    -------
    image
        The image object that was loaded.
    """
    if not chname:
        channel = self.get_current_channel()
    else:
        if not self.has_channel(chname) and create_channel:
            self.gui_call(self.add_channel, chname)
        channel = self.get_channel(chname)
    chname = channel.name

    if image_loader is None:
        image_loader = self.load_image

    cache_dir = self.settings.get('download_folder', self.tmpdir)
    info = iohelper.get_fileinfo(filepath, cache_dir=cache_dir)

    # check that file is locally accessible
    if not info.ondisk:
        errmsg = "File must be locally loadable: %s" % (filepath)
        self.gui_do(self.show_error, errmsg)
        return

    filepath = info.filepath

    kwargs = {}
    # NOTE(review): `idx` stays None even when info.numhdu is set, so
    # name_image_from_path() below never sees the HDU index--confirm
    # whether `idx = info.numhdu` was intended here.
    idx = None
    if info.numhdu is not None:
        kwargs['idx'] = info.numhdu

    try:
        image = image_loader(filepath, **kwargs)

    except Exception as e:
        errmsg = "Failed to load '%s': %s" % (filepath, str(e))
        self.gui_do(self.show_error, errmsg)
        return

    future = Future.Future()
    future.freeze(image_loader, filepath, **kwargs)

    # Save a future for this image to reload it later if we
    # have to remove it from memory
    image.set(loader=image_loader, image_future=future)

    if image.get('path', None) is None:
        image.set(path=filepath)

    # Assign a name to the image if the loader did not.
    name = image.get('name', None)
    if name is None:
        name = iohelper.name_image_from_path(filepath, idx=idx)
        image.set(name=name)

    if display_image:
        # Display image.  If the wait parameter is False then don't wait
        # for the image to load into the viewer
        if wait:
            self.gui_call(self.add_image, name, image, chname=chname)
        else:
            self.gui_do(self.add_image, name, image, chname=chname)
    else:
        self.gui_do(self.bulk_add_image, name, image, chname)

    # Return the image
    return image
def add_download(self, info, future):
    """
    Hand off a download to the Downloads plugin, if it is present.

    Parameters
    ----------
    info : `~ginga.misc.Bunch.Bunch`
        A bunch of information about the URI as returned by
        `ginga.util.iohelper.get_fileinfo()`

    future : `~ginga.misc.Future.Future`
        A future that represents the future computation to be performed
        after downloading the file.  Resolving the future will trigger
        the computation.
    """
    if self.gpmon.has_plugin('Downloads'):
        obj = self.gpmon.get_plugin('Downloads')
        # hand off to the plugin in the GUI thread
        self.gui_do(obj.add_download, info, future)
    else:
        self.show_error("Please activate the 'Downloads' plugin to"
                        " enable download functionality")
def open_uri_cont(self, filespec, loader_cont_fn):
    """Download a URI (if necessary) and do some action on it.

    If the file is already present (e.g. a file:// URI) then this
    merely confirms that and invokes the continuation.

    Parameters
    ----------
    filespec : str
        The path of the file to load (can be a non-local URI)

    loader_cont_fn : func (str) -> None
        A continuation consisting of a function of one argument
        that does something with the file once it is downloaded
        The parameter is the local filepath after download, plus
        any "index" understood by the loader.
    """
    info = iohelper.get_fileinfo(filespec)

    # download file if necessary
    if ((not info.ondisk) and (info.url is not None) and
            (not info.url.startswith('file:'))):
        # create up a future to do the download and set up a
        # callback to handle it when finished
        def _download_cb(future):
            filepath = future.get_value(block=False)
            self.logger.debug("downloaded: %s" % (filepath))
            # continue the load in the GUI thread
            self.gui_do(loader_cont_fn, filepath + info.idx)

        future = Future.Future()
        future.add_callback('resolved', _download_cb)
        self.add_download(info, future)
        return

    # invoke the continuation
    loader_cont_fn(info.filepath + info.idx)
def open_file_cont(self, pathspec, loader_cont_fn):
    """Open a file and do some action on it.

    Must be called from a non-GUI thread.  If the file type cannot be
    determined unambiguously, the user is asked to choose an opener
    via a GUI dialog; dialogs are serialized via gui_dialog_list.

    Parameters
    ----------
    pathspec : str
        The path of the file to load (can be a URI, but must reference
        a local file).

    loader_cont_fn : func (data_obj) -> None
        A continuation consisting of a function of one argument
        that does something with the data_obj created by the loader
    """
    self.assert_nongui_thread()

    info = iohelper.get_fileinfo(pathspec)
    filepath = info.filepath

    if not os.path.exists(filepath):
        errmsg = "File does not appear to exist: '%s'" % (filepath)
        self.gui_do(self.show_error, errmsg)
        return

    warnmsg = ""
    try:
        typ, subtyp = iohelper.guess_filetype(filepath)

    except Exception as e:
        warnmsg = "Couldn't determine file type of '{0:}': " \
                  "{1:}".format(filepath, str(e))
        self.logger.warning(warnmsg)
        typ = None

    # opens the file with a chosen opener class and feeds the loaded
    # item(s) to the continuation
    def _open_file(opener_class):
        # kwd args to pass to opener
        kwargs = dict()
        save_prihdr = self.settings.get('inherit_primary_header',
                                        False)
        kwargs['save_primary_header'] = save_prihdr

        # open the file and load the items named by the index
        opener = opener_class(self.logger)
        try:
            with opener.open_file(filepath) as io_f:
                io_f.load_idx_cont(info.idx, loader_cont_fn, **kwargs)

        except Exception as e:
            errmsg = "Error opening '%s': %s" % (filepath, str(e))
            try:
                (_type, value, tb) = sys.exc_info()
                tb_str = "\n".join(traceback.format_tb(tb))

            except Exception as e:
                tb_str = "Traceback information unavailable."

            self.gui_do(self.show_error, errmsg + '\n' + tb_str)

    # decides whether an opener can be chosen automatically or the
    # user must be prompted
    def _check_open(errmsg):
        if typ is None:
            errmsg = ("Error determining file type: {0:}\n"
                      "\nPlease choose an opener or cancel, for file:\n"
                      "{1:}".format(errmsg, filepath))
            openers = loader.get_all_openers()
            self.gui_do(self.gui_choose_file_opener, errmsg, openers,
                        _open_file, None, filepath)
        else:
            mimetype = "{}/{}".format(typ, subtyp)
            openers = loader.get_openers(mimetype)

            num_openers = len(openers)
            if num_openers == 1:
                # unambiguous: open directly, no dialog needed
                opener_class = openers[0].opener
                self.nongui_do(_open_file, opener_class)
                self.__next_dialog()

            elif num_openers == 0:
                errmsg = ("No registered opener for: '{0:}'\n"
                          "\nPlease choose an opener or cancel, for file:\n"
                          "{1:}".format(mimetype, filepath))
                openers = loader.get_all_openers()
                self.gui_do(self.gui_choose_file_opener, errmsg, openers,
                            _open_file, mimetype, filepath)

            else:
                errmsg = ("Multiple registered openers for: '{0:}'\n"
                          "\nPlease choose an opener or cancel, for file:\n"
                          "{1:}".format(mimetype, filepath))
                self.gui_do(self.gui_choose_file_opener, errmsg, openers,
                            _open_file, '*', filepath)

    future = Future.Future()
    future.freeze(_check_open, warnmsg)
    with self.gui_dialog_lock:
        self.gui_dialog_list.append(future)
        # only kick off processing if no dialog is already pending
        if len(self.gui_dialog_list) == 1:
            self.nongui_do_future(future)
def open_uris(self, uris, chname=None, bulk_add=False):
    """Open a set of URIs.

    Parameters
    ----------
    uris : list of str
        The URIs of the files to load

    chname: str, optional (defaults to channel with focus)
        The name of the channel in which to load the items

    bulk_add : bool, optional (defaults to False)
        If True, then all the data items are loaded into the
        channel without disturbing the current item there.
        If False, then the first item loaded will be displayed
        and the rest of the items will be loaded as bulk.
    """
    if len(uris) == 0:
        return

    if chname is None:
        channel = self.get_channel_info()
        if channel is None:
            # No active channel to load these into
            return
        chname = channel.name

    channel = self.get_channel_on_demand(chname)

    # continuations: "bulk" variants do not disturb the current item
    def show_dataobj_bulk(data_obj):
        self.gui_do(channel.add_image, data_obj, bulk_add=True)

    def load_file_bulk(filepath):
        self.nongui_do(self.open_file_cont, filepath, show_dataobj_bulk)

    def show_dataobj(data_obj):
        self.gui_do(channel.add_image, data_obj, bulk_add=False)

    def load_file(filepath):
        self.nongui_do(self.open_file_cont, filepath, show_dataobj)

    # determine whether first file is loaded as a bulk load
    if bulk_add:
        self.open_uri_cont(uris[0], load_file_bulk)
    else:
        self.open_uri_cont(uris[0], load_file)
    self.update_pending()

    for uri in uris[1:]:
        # rest of files are all loaded using bulk load
        self.open_uri_cont(uri, load_file_bulk)
        self.update_pending()
def add_preload(self, chname, image_info):
    """Queue an image for background preloading into channel `chname`
    and kick off a scan of the queue.
    """
    bnch = Bunch.Bunch(chname=chname, info=image_info)
    with self.preload_lock:
        self.preload_list.append(bnch)
    self.nongui_do(self.preload_scan)
def preload_scan(self):
    """Drain the preload queue, loading each entry in a worker thread."""
    # preload any pending files
    # TODO: do we need any throttling of loading here?
    with self.preload_lock:
        while len(self.preload_list) > 0:
            # NOTE: pop() drains the (bounded, maxlen=4) deque in
            # LIFO order--most recently queued image loads first
            bnch = self.preload_list.pop()
            self.nongui_do(self.preload_file, bnch.chname,
                           bnch.info.name, bnch.info.path,
                           image_future=bnch.info.image_future)
def preload_file(self, chname, imname, path, image_future=None):
    """Load image `imname` into channel `chname` in the background,
    unless it is already resident in the channel's data source.
    """
    # sanity check to see if the file is already in memory
    self.logger.debug("preload: checking %s in %s" % (imname, chname))
    channel = self.get_channel(chname)

    if imname not in channel.datasrc:
        # not there--load image in a non-gui thread, then have the
        # gui add it to the channel silently
        self.logger.info("preloading image %s" % (path))
        if image_future is None:
            # TODO: need index info?
            image = self.load_image(path)
        else:
            # a frozen future knows how to reload this image
            image = image_future.thaw()

        self.gui_do(self.add_image, imname, image,
                    chname=chname, silent=True)
    self.logger.debug("end preload")
def zoom_in(self):
    """Zoom the view in one zoom step.
    """
    viewer = self.getfocus_viewer()
    zoom_fn = getattr(viewer, 'zoom_in', None)
    if zoom_fn is not None:
        zoom_fn()
    return True
def zoom_out(self):
    """Zoom the view out one zoom step.
    """
    viewer = self.getfocus_viewer()
    zoom_fn = getattr(viewer, 'zoom_out', None)
    if zoom_fn is not None:
        zoom_fn()
    return True
def zoom_1_to_1(self):
    """Zoom the view to a 1 to 1 pixel ratio (100 %).
    """
    viewer = self.getfocus_viewer()
    scale_fn = getattr(viewer, 'scale_to', None)
    if scale_fn is not None:
        scale_fn(1.0, 1.0)
    return True
def zoom_fit(self):
    """Zoom the view to fit the image entirely in the window.
    """
    viewer = self.getfocus_viewer()
    fit_fn = getattr(viewer, 'zoom_fit', None)
    if fit_fn is not None:
        fit_fn()
    return True
def auto_levels(self):
    """Perform an auto cut levels on the image.
    """
    viewer = self.getfocus_viewer()
    levels_fn = getattr(viewer, 'auto_levels', None)
    if levels_fn is not None:
        levels_fn()
def prev_img_ws(self, ws, loop=True):
    """Go to the previous image in the focused channel in the workspace.
    """
    channel = self.get_active_channel_ws(ws)
    if channel is None:
        return

    channel.prev_image()
    return True
def next_img_ws(self, ws, loop=True):
    """Go to the next image in the focused channel in the workspace.
    """
    channel = self.get_active_channel_ws(ws)
    if channel is None:
        return

    channel.next_image()
    return True
def prev_img(self, loop=True):
    """Go to the previous image in the channel.
    """
    channel = self.get_current_channel()
    if channel is None:
        self.show_error("Please create a channel.", raisetab=True)
        return

    channel.prev_image()
    return True
def next_img(self, loop=True):
    """Go to the next image in the channel.
    """
    channel = self.get_current_channel()
    if channel is None:
        self.show_error("Please create a channel.", raisetab=True)
        return

    channel.next_image()
    return True
def get_current_workspace(self):
    """Return the workspace of the current channel, or None if there
    is no current channel.
    """
    channel = self.get_channel_info()
    if channel is None:
        return None
    ws = self.ds.get_ws(channel.workspace)
    return ws
def get_active_channel_ws(self, ws):
    """Return the channel currently selected in workspace `ws`, or
    None if the workspace is empty or its selected tab is not a
    channel.
    """
    children = list(ws.nb.get_children())
    if len(children) == 0:
        return None

    # Not exactly the most robust or straightforward way to find the
    # active channel in this workspace...
    idx = ws.nb.get_index()
    child = ws.nb.index_to_widget(idx)
    chname = child.extdata.tab_title
    if self.has_channel(chname):
        return self.get_channel(chname)
    return None
def prev_channel_ws(self, ws):
    """Switch to the previous channel tab in workspace `ws`."""
    children = list(ws.nb.get_children())
    if len(children) == 0:
        self.show_error("No channels in this workspace.",
                        raisetab=True)
        return
    ws.to_previous()
    channel = self.get_active_channel_ws(ws)
    if (channel is not None) and self.has_channel(channel.name):
        self.change_channel(channel.name, raisew=True)
def next_channel_ws(self, ws):
    """Switch to the next channel tab in workspace `ws`."""
    children = list(ws.nb.get_children())
    if len(children) == 0:
        self.show_error("No channels in this workspace.",
                        raisetab=True)
        return
    ws.to_next()
    channel = self.get_active_channel_ws(ws)
    if (channel is not None) and self.has_channel(channel.name):
        self.change_channel(channel.name, raisew=True)
def prev_channel(self):
    """Switch to the previous channel in the current workspace."""
    ws = self.get_current_workspace()
    if ws is None:
        self.show_error("Please select or create a workspace",
                        raisetab=True)
        return
    self.prev_channel_ws(ws)
def next_channel(self):
    """Switch to the next channel in the current workspace."""
    ws = self.get_current_workspace()
    if ws is None:
        self.show_error("Please select or create a workspace",
                        raisetab=True)
        return
    self.next_channel_ws(ws)
def add_channel_auto_ws(self, ws):
    """Add a new channel to workspace `ws`, taking the name from the
    workspace toolbar text entry or generating one from the channel
    prefix if it is empty.
    """
    if ws.toolbar is not None:
        chname = ws.extdata.w_chname.get_text().strip()
    else:
        chname = ''
    if len(chname) == 0:
        # make up a channel name
        chpfx = self.settings.get('channel_prefix', "Image")
        chpfx = ws.extdata.get('chpfx', chpfx)
        chname = chpfx

    if self.has_channel(chname):
        chname = self.make_channel_name(chname)

    try:
        self.get_channel(chname)
        # <-- channel name already in use
        self.show_error(
            "Channel name '%s' cannot be used, sorry." % (chname),
            raisetab=True)
        return

    except KeyError:
        # expected: the name is free
        pass

    return self.add_channel(chname, workspace=ws.name)
def add_channel_auto(self):
    """Add an automatically-named channel to the current workspace."""
    ws = self.get_current_workspace()
    if ws is None:
        self.show_error("Please select or create a workspace",
                        raisetab=True)
        return

    return self.add_channel_auto_ws(ws)
def remove_channel_auto(self):
    """Delete the currently active channel, if any."""
    channel = self.get_channel_info()
    if channel is None:
        return
    self.delete_channel(channel.name)
def configure_workspace(self, wstype):
    """Change the current workspace to layout type `wstype`."""
    ws = self.get_current_workspace()
    ws.configure_wstype(wstype)
def cycle_workspace_type(self):
    """Cycle the current workspace to its next layout type."""
    ws = self.get_current_workspace()
    ws.cycle_wstype()
def add_workspace(self, wsname, wstype, inSpace=None):
    """Create a new workspace `wsname` of layout type `wstype` inside
    container `inSpace` (default: the main workspace), or in its own
    top-level window when inSpace == 'top level'.

    Raises ValueError if the tab name is already in use.
    """
    if inSpace is None:
        inSpace = self.main_wsname
    if wsname in self.ds.get_tabnames(None):
        raise ValueError("Tab name already in use: '%s'" % (wsname))

    ws = self.ds.make_ws(name=wsname, group=1, wstype=wstype,
                         use_toolbar=True)
    if inSpace != 'top level':
        self.ds.add_tab(inSpace, ws.widget, 1, ws.name)
    else:
        #width, height = 700, 800
        #self.ds.create_toplevel_ws(width, height, group=1)
        top_w = self.ds.add_toplevel(ws, ws.name)
        ws.extdata.top_w = top_w
    return ws
# CHANNEL MANAGEMENT
def add_image(self, imname, image, chname=None, silent=False):
    """Add `image` (named `imname`) to channel `chname` (default: the
    current channel), creating the channel on demand.

    Raises ValueError if no channel name is given and there is no
    current channel.
    """
    if chname is None:
        channel = self.get_current_channel()
        if channel is None:
            raise ValueError("Need to provide a channel name to add "
                             "the image")
        chname = channel.name

    # add image to named channel
    channel = self.get_channel_on_demand(chname)
    channel.add_image(image, silent=silent)
def advertise_image(self, chname, image):
    """Announce `image` (already in channel `chname`) to interested
    parties via the 'add-image' callback.
    """
    channel = self.get_channel(chname)
    info = channel.get_image_info(image.get('name'))

    self.make_gui_callback('add-image', chname, image, info)
def update_image_info(self, image, info):
    """Propagate updated metadata `info` for `image` to every channel."""
    for chname in self.get_channel_names():
        channel = self.get_channel(chname)
        channel.update_image_info(image, info)
def bulk_add_image(self, imname, image, chname):
    """Add `image` to channel `chname` without disturbing the
    currently displayed item.
    """
    channel = self.get_channel_on_demand(chname)
    channel.add_image(image, bulk_add=True)
def get_image(self, chname, imname):
    """Return the loaded image `imname` from channel `chname`, or None."""
    channel = self.get_channel(chname)
    if channel is None:
        return None
    return channel.get_loaded_image(imname)
def getfocus_viewer(self):
    """Return the viewer of the currently active channel, or None if
    there is no active channel.
    """
    channel = self.get_current_channel()
    return None if channel is None else channel.viewer
def get_viewer(self, chname):
    """Return the viewer of channel `chname`, or None if the lookup
    yields no channel.
    """
    channel = self.get_channel(chname)
    return None if channel is None else channel.viewer
def switch_name(self, chname, imname, path=None,
                image_future=None):
    """Switch channel `chname` to display image `imname`, creating
    the channel if needed; errors are reported, not raised.
    """
    try:
        # create channel if it doesn't exist already
        channel = self.get_channel_on_demand(chname)
        channel.switch_name(imname)

        self.change_channel(channel.name)
    except Exception as e:
        self.show_error("Couldn't switch to image '%s': %s" % (
            str(imname), str(e)), raisetab=True)
def redo_plugins(self, image, channel):
    """Notify all active global and local plugins of new data in
    `channel`, calling redo() (or blank(), if `image` is None) on
    each; plugin failures are logged, not propagated.
    """
    if image is not None:
        imname = image.get('name', None)
        if (imname is not None) and (imname not in channel):
            # image may have been removed--
            # skip updates to this channel's plugins
            return

    # New data in channel

    # update active global plugins
    opmon = self.gpmon
    for key in opmon.get_active():
        obj = opmon.get_plugin(key)
        try:
            if image is None:
                self.gui_do(obj.blank, channel)
            else:
                self.gui_do(obj.redo, channel, image)

        except Exception as e:
            self.logger.error(
                "Failed to continue operation: %s" % (str(e)))
            # TODO: log traceback?

    # update active local plugins
    opmon = channel.opmon
    for key in opmon.get_active():
        obj = opmon.get_plugin(key)
        try:
            if image is None:
                self.gui_do(obj.blank)
            else:
                self.gui_do(obj.redo)

        except Exception as e:
            self.logger.error(
                "Failed to continue operation: %s" % (str(e)))
            # TODO: log traceback?
def close_plugins(self, channel):
    """Close all plugins associated with the channel."""
    opmon = channel.opmon
    for key in opmon.get_active():
        obj = opmon.get_plugin(key)
        try:
            # close synchronously in the GUI thread
            self.gui_call(obj.close)

        except Exception as e:
            self.logger.error(
                "Failed to continue operation: %s" % (str(e)))
            # TODO: log traceback?
def channel_image_updated(self, channel, image):
    """React to a new/updated image in `channel`: hook future
    'modified' notifications and fan the update out to plugins.
    """
    with self.lock:
        self.logger.debug("Update image start")
        start_time = time.time()

        # add cb so that if image is modified internally
        # our plugins get updated
        if image is not None:
            image.add_callback('modified', self.redo_plugins, channel)

        self.logger.debug("executing redo() in plugins...")
        self.redo_plugins(image, channel)

        split_time1 = time.time()
        self.logger.info("Channel image update: %.4f sec" % (
            split_time1 - start_time))
def change_channel(self, chname, image=None, raisew=True):
    """Make `chname` the current channel, optionally raising its tab
    (`raisew`) and switching it to display `image`.

    Returns True.
    """
    self.logger.debug("change channel: %s" % (chname))
    name = chname.lower()
    if self.cur_channel is None:
        oldchname = None
    else:
        oldchname = self.cur_channel.name.lower()

    channel = self.get_channel(name)

    if name != oldchname:
        with self.lock:
            self.cur_channel = channel

    if name != oldchname:
        # raise tab
        if raisew:
            #self.ds.raise_tab(channel.workspace)
            self.ds.raise_tab(name)

        if oldchname is not None:
            try:
                self.ds.highlight_tab(oldchname, False)
            except Exception:
                # old channel may not exist!
                pass
        self.ds.highlight_tab(name, True)

        ## # Update title bar
        title = channel.name
        ## if image is not None:
        ##     name = image.get('name', 'Noname')
        ##     title += ": %s" % (name)
        self.set_titlebar(title)

    if image is not None:
        try:
            channel.switch_image(image)
        except Exception as e:
            self.show_error("Error viewing data object: {}".format(e),
                            raisetab=True)

    self.make_gui_callback('channel-change', channel)
    self.update_pending()
    return True
def has_channel(self, chname):
    """Return True if a channel named `chname` exists
    (matched case-insensitively).
    """
    with self.lock:
        return chname.lower() in self.channel
def get_channel(self, chname):
    """Look up a channel by name (case-insensitive).  A `chname` of
    None returns the currently active channel.

    Raises KeyError if the named channel does not exist.
    """
    with self.lock:
        if chname is None:
            return self.cur_channel
        return self.channel[chname.lower()]
def get_current_channel(self):
    """Return the currently active channel, or None."""
    with self.lock:
        return self.cur_channel
def get_channel_info(self, chname=None):
    # TO BE DEPRECATED--please use get_channel()
    return self.get_channel(chname)
def get_channel_on_demand(self, chname):
    """Return channel `chname`, creating it (in the GUI thread) if it
    does not yet exist.
    """
    if self.has_channel(chname):
        return self.get_channel(chname)

    return self.gui_call(self.add_channel, chname)
def get_channel_name(self, viewer):
    """Return the name of the channel that owns `viewer`, or None if
    no channel contains it.
    """
    with self.lock:
        for channel in self.channel.values():
            if viewer in channel.viewers:
                return channel.name
    return None
def make_channel_name(self, pfx):
    """Return an unused channel name of the form `pfx` + <number>."""
    for i in range(10000):
        candidate = pfx + str(i)
        if not self.has_channel(candidate):
            return candidate
    # pathological fallback: all 10000 numbered names are taken,
    # so use a timestamp suffix instead
    return pfx + str(time.time())
def add_channel(self, chname, workspace=None,
                num_images=None, settings=None,
                settings_template=None,
                settings_share=None, share_keylist=None):
    """Create a new Ginga channel.

    Parameters
    ----------
    chname : str
        The name of the channel to create.

    workspace : str or None
        The name of the workspace in which to create the channel

    num_images : int or None
        The cache size for the number of images to keep in memory

    settings : `~ginga.misc.Settings.SettingGroup` or `None`
        Viewer preferences. If not given, one will be created.

    settings_template : `~ginga.misc.Settings.SettingGroup` or `None`
        Viewer preferences template

    settings_share : `~ginga.misc.Settings.SettingGroup` or `None`
        Viewer preferences instance to share with newly created settings

    share_keylist : list of str
        List of names of settings that should be shared

    Returns
    -------
    channel : `~ginga.misc.Bunch.Bunch`
        The channel info bunch.
    """
    with self.lock:
        # idempotent: an existing channel is simply returned
        if self.has_channel(chname):
            return self.get_channel(chname)

        if chname in self.ds.get_tabnames(None):
            raise ValueError("Tab name already in use: '%s'" % (chname))

        name = chname
        if settings is None:
            settings = self.prefs.create_category('channel_' + name)
            try:
                settings.load(onError='raise')

            except Exception as e:
                self.logger.info("no saved preferences found for channel "
                                 "'%s', using default: %s" % (name, str(e)))

                # copy template settings to new channel
                if settings_template is not None:
                    osettings = settings_template
                    osettings.copy_settings(settings)
                else:
                    try:
                        # use channel_Image as a template if one was not
                        # provided
                        osettings = self.prefs.get_settings('channel_Image')
                        self.logger.debug("Copying settings from 'Image' to "
                                          "'%s'" % (name))
                        osettings.copy_settings(settings)
                    except KeyError:
                        pass

        if (share_keylist is not None) and (settings_share is not None):
            # caller wants us to share settings with another viewer
            settings_share.share_settings(settings, keylist=share_keylist)

        # Make sure these preferences are at least defined
        if num_images is None:
            num_images = settings.get('numImages',
                                      self.settings.get('numImages', 1))
        settings.set_defaults(switchnew=True, numImages=num_images,
                              raisenew=True, genthumb=True,
                              focus_indicator=False,
                              preload_images=False, sort_order='loadtime')

        self.logger.debug("Adding channel '%s'" % (chname))
        channel = Channel(chname, self, datasrc=None,
                          settings=settings)

        bnch = self.add_viewer(chname, settings,
                               workspace=workspace)
        # for debugging
        bnch.image_viewer.set_name('channel:%s' % (chname))

        opmon = self.get_plugin_manager(self.logger, self,
                                        self.ds, self.mm)

        channel.widget = bnch.widget
        channel.container = bnch.container
        channel.workspace = bnch.workspace
        channel.connect_viewer(bnch.image_viewer)
        channel.viewer = bnch.image_viewer
        # older name, should eventually be deprecated
        channel.fitsimage = bnch.image_viewer

        channel.opmon = opmon

        # channels are keyed lower-case for case-insensitive lookup
        name = chname.lower()
        self.channel[name] = channel

        # Update the channels control
        self.channel_names.append(chname)
        self.channel_names.sort()

        # the first channel created becomes the current one
        if len(self.channel_names) == 1:
            self.cur_channel = channel

        # Prepare local plugins for this channel
        for spec in self.get_plugins():
            opname = spec.get('klass', spec.get('module'))
            if spec.get('ptype', 'global') == 'local':
                opmon.load_plugin(opname, spec, chinfo=channel)

        self.make_gui_callback('add-channel', channel)
        return channel
    def delete_channel(self, chname):
        """Delete a given channel from viewer.

        Closes the channel's local plugins, removes its tab, settings
        and bookkeeping, then selects a replacement channel (or none).

        Parameters
        ----------
        chname : str
            Name of the channel to delete (looked up case-insensitively).
        """
        # channels are keyed by lower-cased name
        name = chname.lower()
        if len(self.channel_names) < 1:
            self.logger.error('Delete channel={0} failed. '
                              'No channels left.'.format(chname))
            return
        with self.lock:
            channel = self.channel[name]
            # Close local plugins open on this channel
            self.close_plugins(channel)
            try:
                idx = self.channel_names.index(chname)
            except ValueError:
                idx = 0
            # Update the channels control
            self.channel_names.remove(channel.name)
            self.channel_names.sort()
            # remove GUI tab, registry entry and saved preferences
            self.ds.remove_tab(chname)
            del self.channel[name]
            self.prefs.remove_settings('channel_' + chname)
            # pick new channel
            num_channels = len(self.channel_names)
            if num_channels > 0:
                if idx >= num_channels:
                    idx = num_channels - 1
                self.change_channel(self.channel_names[idx])
            else:
                self.cur_channel = None
        self.make_gui_callback('delete-channel', channel)
def get_channel_names(self):
with self.lock:
return self.channel_names
def scale2text(self, scalefactor):
if scalefactor >= 1.0:
text = '%.2fx' % (scalefactor)
else:
text = '1/%.2fx' % (1.0 / scalefactor)
return text
    def banner(self, raiseTab=False):
        """Load the Ginga splash image into a 'Ginga' channel.

        Parameters
        ----------
        raiseTab : bool
            If True, switch to the banner channel after loading.
        """
        banner_file = os.path.join(self.iconpath, 'ginga-splash.ppm')
        chname = 'Ginga'
        # creates the channel if it does not exist yet
        channel = self.get_channel_on_demand(chname)
        viewer = channel.viewer
        # pin the view so the splash shows at natural size and levels
        viewer.enable_autocuts('off')
        viewer.enable_autozoom('off')
        viewer.enable_autocenter('on')
        viewer.cut_levels(0, 255)
        viewer.scale_to(1, 1)
        # load synchronously so we can annotate the header below
        image = self.load_file(banner_file, chname=chname, wait=True)
        # Insert Ginga version info
        header = image.get_header()
        header['VERSION'] = __version__
        if raiseTab:
            self.change_channel(chname)
def remove_image_by_name(self, chname, imname, impath=None):
channel = self.get_channel(chname)
viewer = channel.viewer
self.logger.info("removing image %s" % (imname))
# If this is the current image in the viewer, clear the viewer
image = viewer.get_image()
if image is not None:
curname = image.get('name', 'NONAME')
if curname == imname:
viewer.clear()
channel.remove_image(imname)
def move_image_by_name(self, from_chname, imname, to_chname, impath=None):
channel_from = self.get_channel(from_chname)
channel_to = self.get_channel(to_chname)
channel_from.move_image_to(imname, channel_to)
def remove_current_image(self):
channel = self.get_current_channel()
viewer = channel.viewer
image = viewer.get_image()
if image is None:
return
imname = image.get('name', 'NONAME')
impath = image.get('path', None)
self.remove_image_by_name(channel.name, imname, impath=impath)
    def follow_focus(self, tf):
        """Set whether the active channel follows viewer focus (bool)."""
        self.settings['channel_follows_focus'] = tf
def show_status(self, text):
"""Write a message to the status bar.
Parameters
----------
text : str
The message.
"""
self.statusMsg("%s", text)
def error(self, text):
self.logger.error(text)
self.status_msg("%s", text)
# TODO: turn bar red
def logit(self, text):
try:
obj = self.gpmon.get_plugin('Log')
self.gui_do(obj.log, text)
except Exception:
pass
def set_loglevel(self, level):
handlers = self.logger.handlers
for hdlr in handlers:
hdlr.setLevel(level)
    def play_soundfile(self, filepath, format=None, priority=20):
        """Play a sound file.  No-op here; toolkit subclasses override."""
        self.logger.debug("Subclass could override this to play sound file "
                          "'%s'" % (filepath))
    def get_color_maps(self):
        """Get the list of named color maps.

        Returns
        -------
        names : list
            A list of all named colormaps installed.
        """
        # delegate to the cmap module's registry
        return cmap.get_names()
    def get_intensity_maps(self):
        """Get the list of named intensity maps.

        Returns
        -------
        names : list
            A list of all named intensity maps installed.
        """
        # delegate to the imap module's registry
        return imap.get_names()
    def set_layout(self, layout, layout_file=None, save_layout=False,
                   main_wsname=None):
        """Record the desktop layout spec used later by build_toplevel().

        `layout_file` optionally overrides the layout with a saved one;
        `save_layout` controls writing the layout back out at quit time.
        """
        self.layout = layout
        self.layout_file = layout_file
        self.save_layout = save_layout
        if main_wsname is not None:
            self.main_wsname = main_wsname
    def get_screen_dimensions(self):
        """Return the screen size as a (width, height) tuple in pixels."""
        return (self.screen_wd, self.screen_ht)
    def build_toplevel(self, ignore_saved_layout=False):
        """Construct the application's top-level GUI from the layout.

        Builds the desktop, wires close callbacks, initializes existing
        workspaces, and adds menu bar and status bar if the layout
        defines holders for them.

        Parameters
        ----------
        ignore_saved_layout : bool
            If True, do not load the saved layout file; use the
            programmatic layout only.
        """
        lo_file = self.layout_file
        if ignore_saved_layout:
            lo_file = None
        self.font = self.get_font('fixed', 12)
        self.font11 = self.get_font('fixed', 11)
        self.font14 = self.get_font('fixed', 14)
        self.font18 = self.get_font('fixed', 18)
        self.w.tooltips = None
        self.ds = Desktop.Desktop(self)
        self.ds.build_desktop(self.layout, lo_file=lo_file,
                              widget_dict=self.w)
        if self.main_wsname is None:
            ws = self.ds.get_default_ws()
            if ws is not None:
                self.main_wsname = ws.name
            else:
                # legacy value for layouts that don't define a default
                # workspace
                self.main_wsname = 'channels'
        self._lastwsname = self.main_wsname
        # TEMP: FIX ME!
        self.gpmon.ds = self.ds
        for win in self.ds.toplevels:
            # add delete/destroy callbacks
            win.add_callback('close', self.quit)
            win.set_title("Ginga")
            root = win
        # NOTE(review): `root` ends up as the *last* toplevel; it would be
        # unbound if the desktop built no toplevels -- confirm that cannot
        # happen with a valid layout
        self.ds.add_callback('all-closed', self.quit)
        self.w.root = root
        self.w.fscreen = None
        # get informed about window closures in existing workspaces
        for wsname in self.ds.get_wsnames():
            ws = self.ds.get_ws(wsname)
            self.init_workspace(ws)
        if 'menu' in self.w:
            menuholder = self.w['menu']
            self.w.menubar = self.add_menus(menuholder)
        self.add_dialogs()
        if 'status' in self.w:
            statusholder = self.w['status']
            self.add_statusbar(statusholder)
        self.w.root.show()
    def get_plugin_manager(self, logger, fitsview, ds, mm):
        """Factory for a PluginManager; subclasses may override."""
        return PluginManager.PluginManager(logger, fitsview, ds, mm)
def _name_mangle(self, name, pfx=''):
newname = []
for c in name.lower():
if not (c.isalpha() or c.isdigit() or (c == '_')):
newname.append('_')
else:
newname.append(c)
return pfx + ''.join(newname)
    def add_menus(self, holder):
        """Build the global menu bar and populate the standard menus.

        Parameters
        ----------
        holder : widget
            Container to receive the menu bar (skipped on Mac OS X; see
            note below).

        Returns the menubar widget.
        """
        menubar = Widgets.Menubar()
        self.menubar = menubar
        # NOTE: Special hack for Mac OS X. From the Qt documentation:
        # "If you want all windows in a Mac application to share one
        # menu bar, you must create a menu bar that does not have a
        # parent."
        macos_ver = platform.mac_ver()[0]
        if len(macos_ver) > 0:
            pass
        else:
            holder.add_widget(menubar, stretch=1)
        # create a File pulldown menu, and add it to the menu bar
        filemenu = menubar.add_name("File")
        item = filemenu.add_name("Load Image")
        item.add_callback('activated', lambda *args: self.gui_load_file())
        item = filemenu.add_name("Remove Image")
        item.add_callback("activated",
                          lambda *args: self.remove_current_image())
        filemenu.add_separator()
        item = filemenu.add_name("Quit")
        item.add_callback('activated', lambda *args: self.window_close())
        # create a Channel pulldown menu, and add it to the menu bar
        chmenu = menubar.add_name("Channel")
        item = chmenu.add_name("Add Channel")
        item.add_callback('activated', lambda *args: self.gui_add_channel())
        item = chmenu.add_name("Add Channels")
        item.add_callback('activated', lambda *args: self.gui_add_channels())
        item = chmenu.add_name("Delete Channel")
        item.add_callback('activated', lambda *args: self.gui_delete_channel())
        # create a Window pulldown menu, and add it to the menu bar
        wsmenu = menubar.add_name("Workspace")
        item = wsmenu.add_name("Add Workspace")
        item.add_callback('activated', lambda *args: self.gui_add_ws())
        # # create a Option pulldown menu, and add it to the menu bar
        # optionmenu = menubar.add_name("Option")
        # create a Plugins pulldown menu, and add it to the menu bar
        plugmenu = menubar.add_name("Plugins")
        # saved so add_plugin_menu() can hang plugin entries off of it
        self.w.menu_plug = plugmenu
        # create a Help pulldown menu, and add it to the menu bar
        helpmenu = menubar.add_name("Help")
        item = helpmenu.add_name("About")
        item.add_callback('activated',
                          lambda *args: self.banner(raiseTab=True))
        item = helpmenu.add_name("Documentation")
        item.add_callback('activated', lambda *args: self.help())
        return menubar
def add_menu(self, name):
"""Add a menu with name `name` to the global menu bar.
Returns a menu widget.
"""
if self.menubar is None:
raise ValueError("No menu bar configured")
return self.menubar.add_name(name)
def get_menu(self, name):
"""Get the menu with name `name` from the global menu bar.
Returns a menu widget.
"""
if self.menubar is None:
raise ValueError("No menu bar configured")
return self.menubar.get_menu(name)
    def add_dialogs(self):
        """Create shared dialogs (file selector) if the toolkit backend
        provides them.
        """
        # not every backend implements FileSelection, hence the check
        if hasattr(GwHelp, 'FileSelection'):
            self.filesel = GwHelp.FileSelection(self.w.root.get_widget(),
                                                all_at_once=True)
def add_plugin_menu(self, name, spec):
# NOTE: self.w.menu_plug is a ginga.Widgets wrapper
if 'menu_plug' not in self.w:
return
category = spec.get('category', None)
categories = None
if category is not None:
categories = category.split('.')
menuname = spec.get('menu', spec.get('tab', name))
menu = self.w.menu_plug
if categories is not None:
for catname in categories:
try:
menu = menu.get_menu(catname)
except KeyError:
menu = menu.add_menu(catname)
item = menu.add_name(menuname)
item.add_callback('activated',
lambda *args: self.start_plugin(name, spec))
    def add_statusbar(self, holder):
        """Create the status bar widget and add it to `holder`."""
        self.w.status = Widgets.StatusBar()
        holder.add_widget(self.w.status, stretch=1)
    def fullscreen(self):
        """Make the main window full-screen."""
        self.w.root.fullscreen()
    def normalsize(self):
        """Return the main window from full-screen to normal size."""
        self.w.root.unfullscreen()
    def maximize(self):
        """Maximize the main window."""
        self.w.root.maximize()
def toggle_fullscreen(self):
if not self.w.root.is_fullscreen():
self.w.root.fullscreen()
else:
self.w.root.unfullscreen()
def build_fullscreen(self):
w = self.w.fscreen
self.w.fscreen = None
if w is not None:
w.delete()
return
# Get image from current focused channel
channel = self.get_current_channel()
viewer = channel.fitsimage
settings = viewer.get_settings()
rgbmap = viewer.get_rgbmap()
root = Widgets.TopLevel()
vbox = Widgets.VBox()
vbox.set_border_width(0)
vbox.set_spacing(0)
root.set_widget(vbox)
fi = self.build_viewpane(settings, rgbmap=rgbmap)
iw = Viewers.GingaViewerWidget(viewer=fi)
vbox.add_widget(iw, stretch=1)
# Get image from current focused channel
image = viewer.get_image()
if image is None:
return
fi.set_image(image)
# Copy attributes of the frame
viewer.copy_attributes(fi, ['rgbmap']) # 'transforms', 'cutlevels'
root.fullscreen()
self.w.fscreen = root
    def make_viewer(self, vinfo, channel):
        """Make a viewer whose salient info is in `vinfo` and add it to
        `channel`.
        """
        stk_w = channel.widget
        # instantiate the viewer class using the channel's settings
        viewer = vinfo.vclass(logger=self.logger,
                              settings=channel.settings)
        stk_w.add_widget(viewer.get_widget(), title=vinfo.name)
        # let the GUI respond to this widget addition
        self.update_pending()
        # let the channel object do any necessary initialization
        channel.connect_viewer(viewer)
        # finally, let the viewer do any viewer-side initialization
        viewer.initialize_channel(self, channel)
####################################################
# THESE METHODS ARE CALLED FROM OTHER MODULES & OBJECTS
####################################################
def set_titlebar(self, text):
self.w.root.set_title("Ginga: %s" % text)
    def build_viewpane(self, settings, rgbmap=None, size=(1, 1)):
        """Construct and configure a channel image viewer.

        Parameters
        ----------
        settings : SettingGroup
            Channel settings for the viewer.
        rgbmap : RGBMapper or None
            Optional color map to share with the new viewer.
        size : tuple of int
            Desired (width, height) of the viewer.

        Returns the configured viewer.
        """
        # instantiate bindings loaded with users preferences
        bclass = Viewers.ImageViewCanvas.bindingsClass
        bindprefs = self.prefs.create_category('bindings')
        bd = bclass(self.logger, settings=bindprefs)
        # render via OpenGL if the general settings request it
        wtype = 'widget'
        if self.settings.get('use_opengl', False):
            wtype = 'opengl'
        fi = Viewers.ImageViewCanvas(logger=self.logger,
                                     rgbmap=rgbmap,
                                     settings=settings,
                                     render=wtype,
                                     bindings=bd)
        fi.set_desired_size(size[0], size[1])
        # private (non-drawing) canvas for the viewer
        canvas = DrawingCanvas()
        canvas.enable_draw(False)
        fi.set_canvas(canvas)
        # check general settings for default value of enter_focus
        enter_focus = settings.get('enter_focus', None)
        if enter_focus is None:
            enter_focus = self.settings.get('enter_focus', True)
        fi.set_enter_focus(enter_focus)
        # check general settings for default value of focus indicator
        focus_ind = settings.get('show_focus_indicator', None)
        if focus_ind is None:
            focus_ind = self.settings.get('show_focus_indicator', False)
        fi.show_focus_indicator(focus_ind)
        fi.use_image_profile = True
        # wire up viewer events to shell-level handlers
        fi.add_callback('cursor-changed', self.motion_cb)
        fi.add_callback('cursor-down', self.force_focus_cb)
        fi.add_callback('key-down-none', self.keypress)
        fi.add_callback('drag-drop', self.dragdrop)
        fi.ui_set_active(True, viewer=fi)
        bd = fi.get_bindings()
        bd.enable_all(True)
        fi.set_bg(0.2, 0.2, 0.2)
        return fi
    def add_viewer(self, name, settings, workspace=None):
        """Build an image viewer named `name` and add it to a workspace.

        Returns a Bunch with `image_viewer`, `widget` (stack), `container`
        (vbox) and `workspace` entries.
        """
        vbox = Widgets.VBox()
        vbox.set_border_width(1)
        vbox.set_spacing(0)
        if not workspace:
            workspace = self.main_wsname
        w = self.ds.get_nb(workspace)
        size = (700, 700)
        # MDI child windows start smaller than tabbed panes
        if isinstance(w, Widgets.MDIWidget) and w.true_mdi:
            size = (300, 300)
        # build image viewer & widget
        fi = self.build_viewpane(settings, size=size)
        # add scrollbar support
        scr_val = settings.setdefault('scrollbars', None)
        scr_set = settings.get_setting('scrollbars')
        if scr_val is None:
            # general settings as backup value if not overridden in channel
            scr_val = self.settings.get('scrollbars', 'off')
        si = Viewers.ScrolledView(fi)
        si.scroll_bars(horizontal=scr_val, vertical=scr_val)
        # react to later changes of the 'scrollbars' setting
        scr_set.add_callback('set', self._toggle_scrollbars, si)
        iw = Widgets.wrap(si)
        stk_w = Widgets.StackWidget()
        stk_w.add_widget(iw, title='image')
        fi.add_callback('focus', self.focus_cb, name)
        vbox.add_widget(stk_w, stretch=1)
        fi.set_name(name)
        # Add the viewer to the specified workspace
        self.ds.add_tab(workspace, vbox, 1, name)
        self.update_pending()
        bnch = Bunch.Bunch(image_viewer=fi,
                           widget=stk_w, container=vbox,
                           workspace=workspace)
        return bnch
    def _toggle_scrollbars(self, setting, value, widget):
        """Settings callback: apply a 'scrollbars' setting change."""
        widget.scroll_bars(horizontal=value, vertical=value)
    def gui_add_channel(self, chname=None):
        """Pop up the 'Add Channel' dialog.

        Parameters
        ----------
        chname : str or None
            Suggested channel name; auto-generated from the channel
            prefix if not given.
        """
        chpfx = self.settings.get('channel_prefix', "Image")
        ws = self.get_current_workspace()
        if ws is not None:
            # workspace can override the channel name prefix
            chpfx = ws.extdata.get('chpfx', chpfx)
        if not chname:
            chname = self.make_channel_name(chpfx)
        captions = (('New channel name:', 'label', 'channel_name', 'entry'),
                    ('In workspace:', 'label', 'workspace', 'combobox'),
                    )
        w, b = Widgets.build_info(captions, orientation='vertical')
        # populate values
        b.channel_name.set_text(chname)
        names = self.ds.get_wsnames()
        try:
            # preselect the workspace used for the last add
            idx = names.index(self._lastwsname)
        except Exception:
            idx = 0
        for name in names:
            b.workspace.append_text(name)
        b.workspace.set_index(idx)
        # build dialog
        dialog = Widgets.Dialog(title="Add Channel",
                                flags=0,
                                buttons=[['Cancel', 0], ['Ok', 1]],
                                parent=self.w.root)
        dialog.add_callback('activated',
                            lambda w, rsp: self.add_channel_cb(w, rsp, b, names))  # noqa
        box = dialog.get_content_area()
        box.add_widget(w, stretch=0)
        self.ds.show_dialog(dialog)
    def gui_add_channels(self):
        """Pop up the 'Add Channels' dialog for bulk channel creation."""
        chpfx = self.settings.get('channel_prefix', "Image")
        ws = self.get_current_workspace()
        if ws is not None:
            # workspace can override the channel name prefix
            chpfx = ws.extdata.get('chpfx', chpfx)
        captions = (('Prefix:', 'label', 'Prefix', 'entry'),
                    ('Number:', 'label', 'Number', 'spinbutton'),
                    ('In workspace:', 'label', 'workspace', 'combobox'),
                    )
        w, b = Widgets.build_info(captions)
        b.prefix.set_text(chpfx)
        # allow creating 1-12 channels at a time
        b.number.set_limits(1, 12, incr_value=1)
        b.number.set_value(1)
        names = self.ds.get_wsnames()
        try:
            idx = names.index(self.main_wsname)
        except Exception:
            idx = 0
        for name in names:
            b.workspace.append_text(name)
        b.workspace.set_index(idx)
        dialog = Widgets.Dialog(title="Add Channels",
                                flags=0,
                                buttons=[['Cancel', 0], ['Ok', 1]],
                                parent=self.w.root)
        dialog.add_callback('activated',
                            lambda w, rsp: self.add_channels_cb(w, rsp, b, names))  # noqa
        box = dialog.get_content_area()
        box.add_widget(w, stretch=0)
        self.ds.show_dialog(dialog)
    def gui_delete_channel(self, chname=None):
        """Pop up a confirmation dialog to delete a channel.

        Parameters
        ----------
        chname : str or None
            Channel to delete; if None, the current channel is used.
        """
        if chname is None:
            # NOTE(review): get_channel(None) presumably resolves to the
            # current channel -- confirm against get_channel()
            channel = self.get_channel(chname)
            if (len(self.get_channel_names()) == 0) or (channel is None):
                self.show_error("There are no more channels to delete.",
                                raisetab=True)
                return
            chname = channel.name
        lbl = Widgets.Label("Really delete channel '%s' ?" % (chname))
        dialog = Widgets.Dialog(title="Delete Channel",
                                flags=0,
                                buttons=[['Cancel', 0], ['Ok', 1]],
                                parent=self.w.root)
        dialog.add_callback('activated',
                            lambda w, rsp: self.delete_channel_cb(w, rsp, chname))  # noqa
        box = dialog.get_content_area()
        box.add_widget(lbl, stretch=0)
        self.ds.show_dialog(dialog)
    def gui_delete_window(self, tabname):
        """Pop up a confirmation dialog to delete a (non-channel) window."""
        lbl = Widgets.Label("Really delete window '%s' ?" % (tabname))
        dialog = Widgets.Dialog(title="Delete Window",
                                flags=0,
                                buttons=[['Cancel', 0], ['Ok', 1]],
                                parent=self.w.root)
        dialog.add_callback('activated',
                            lambda w, rsp: self.delete_tab_cb(w, rsp, tabname))
        box = dialog.get_content_area()
        box.add_widget(lbl, stretch=0)
        self.ds.show_dialog(dialog)
    def gui_delete_channel_ws(self, ws):
        """Delete the currently selected page of workspace `ws`, routing
        to the channel or window confirmation dialog as appropriate.
        """
        num_children = ws.num_pages()
        if num_children == 0:
            self.show_error("No channels in this workspace to delete.",
                            raisetab=True)
            return
        # find the currently selected page and its tab title
        idx = ws.nb.get_index()
        child = ws.nb.index_to_widget(idx)
        chname = child.extdata.tab_title
        if self.has_channel(chname):
            self.gui_delete_channel(chname)
        else:
            # page is not a channel; treat as a plain window
            self.gui_delete_window(chname)
    def gui_add_ws(self):
        """Pop up the 'Add Workspace' dialog (name, type, parent,
        optional initial channels and shared settings).
        """
        chpfx = self.settings.get('channel_prefix', "Image")
        ws = self.get_current_workspace()
        if ws is not None:
            chpfx = ws.extdata.get('chpfx', chpfx)
        captions = (('Workspace name:', 'label', 'Workspace name', 'entry'),
                    ('Workspace type:', 'label', 'Workspace type', 'combobox'),
                    ('In workspace:', 'label', 'workspace', 'combobox'),
                    ('Channel prefix:', 'label', 'Channel prefix', 'entry'),
                    ('Number of channels:', 'label', 'num_channels',
                     'spinbutton'),
                    ('Share settings:', 'label', 'Share settings', 'entry'),
                    )
        w, b = Widgets.build_info(captions)
        # generate a unique default workspace name
        self.wscount += 1
        wsname = "ws%d" % (self.wscount)
        b.workspace_name.set_text(wsname)
        #b.share_settings.set_length(60)
        cbox = b.workspace_type
        cbox.append_text("Tabs")
        cbox.append_text("Grid")
        cbox.append_text("MDI")
        cbox.append_text("Stack")
        cbox.set_index(0)
        cbox = b.workspace
        names = self.ds.get_wsnames()
        # 'top level' creates the workspace in its own window
        names.insert(0, 'top level')
        try:
            idx = names.index(self.main_wsname)
        except Exception:
            idx = 0
        for name in names:
            cbox.append_text(name)
        cbox.set_index(idx)
        b.channel_prefix.set_text(chpfx)
        spnbtn = b.num_channels
        spnbtn.set_limits(0, 36, incr_value=1)
        spnbtn.set_value(0)
        dialog = Widgets.Dialog(title="Add Workspace",
                                flags=0,
                                buttons=[['Cancel', 0], ['Ok', 1]],
                                parent=self.w.root)
        dialog.add_callback('activated',
                            lambda w, rsp: self.add_ws_cb(w, rsp, b, names))
        box = dialog.get_content_area()
        box.add_widget(w, stretch=1)
        self.ds.show_dialog(dialog)
    def gui_load_file(self, initialdir=None):
        """Pop up the file selection dialog; loads via load_file_cb()."""
        #self.start_operation('FBrowser')
        self.filesel.popup("Load File", self.load_file_cb,
                           initialdir=initialdir)
    def gui_choose_file_opener(self, msg, openers, open_cb, mimetype,
                               filepath):
        """Pop up a dialog letting the user pick among several file openers.

        Parameters
        ----------
        msg : str
            Prompt text shown above the table.
        openers : iterable of Bunch
            Candidate openers (must have a `name` attribute; `note` shown).
        open_cb : callable
            Called with the chosen opener (see choose_opener_cb).
        mimetype : str or None
            If not None, offer a 'remember choice' checkbox.
        filepath : str
            Path being opened (not used directly here).
        """
        wgts = Bunch.Bunch()
        wgts.table = Widgets.TreeView(auto_expand=True,
                                      use_alt_row_color=True)
        columns = [('Name', 'name'),
                   ('Note', 'note'),
                   ]
        wgts.table.setup_table(columns, 1, 'name')
        tree_dict = OrderedDict()
        openers = list(openers)
        for bnch in openers:
            tree_dict[bnch.name] = bnch
        wgts.table.set_tree(tree_dict)
        # highlight first choice
        path = [openers[0].name]
        wgts.table.select_path(path)
        dialog = Widgets.Dialog(title="Choose File Opener",
                                flags=0,
                                modal=False,
                                buttons=[['Cancel', 0], ['Ok', 1]],
                                parent=self.w.root)
        dialog.add_callback('activated',
                            lambda w, rsp: self.choose_opener_cb(w, rsp, wgts,
                                                                 openers,
                                                                 open_cb,
                                                                 mimetype))
        box = dialog.get_content_area()
        box.set_border_width(4)
        box.add_widget(Widgets.Label(msg), stretch=0)
        box.add_widget(wgts.table, stretch=1)
        if mimetype is not None:
            hbox = Widgets.HBox()
            wgts.choice = Widgets.CheckBox("Remember choice for session")
            hbox.add_widget(wgts.choice)
            box.add_widget(hbox, stretch=0)
        else:
            wgts.choice = None
        self.ds.show_dialog(dialog)
    def gui_choose_viewer(self, msg, viewers, open_cb, dataobj):
        """Pop up a dialog letting the user pick a viewer for `dataobj`.

        Shows a table of viewer names with a description pane that
        updates as the selection changes; on Ok, `open_cb` is invoked
        (see choose_viewer_cb).
        """
        wgts = Bunch.Bunch()
        wgts.table = Widgets.TreeView(auto_expand=True,
                                      use_alt_row_color=True)
        columns = [('Name', 'name'),
                   #('Note', 'note'),
                   ]
        wgts.table.setup_table(columns, 1, 'name')
        tree_dict = OrderedDict()
        openers = list(viewers)
        for bnch in viewers:
            tree_dict[bnch.name] = bnch
        # set up widget to show viewer description when they click on it
        wgts.descr = Widgets.TextArea(wrap=True, editable=False)
        def _select_viewer_cb(w, dct):
            # show the docstring of the selected viewer class
            vclass = list(dct.values())[0].vclass
            text = inspect.getdoc(vclass)
            if text is None:
                text = "(No description available)"
            wgts.descr.set_text(text)
        wgts.table.add_callback('selected', _select_viewer_cb)
        wgts.table.set_tree(tree_dict)
        # highlight first choice
        path = [openers[0].name]
        wgts.table.select_path(path)
        dialog = Widgets.Dialog(title="Choose viewer",
                                flags=0,
                                modal=False,
                                buttons=[['Cancel', 0], ['Ok', 1]],
                                parent=self.w.root)
        dialog.add_callback('activated',
                            lambda w, rsp: self.choose_viewer_cb(w, rsp, wgts,
                                                                 viewers,
                                                                 open_cb,
                                                                 dataobj))
        box = dialog.get_content_area()
        box.set_border_width(4)
        box.add_widget(Widgets.Label(msg), stretch=0)
        box.add_widget(wgts.table, stretch=0)
        box.add_widget(wgts.descr, stretch=1)
        ## if mimetype is not None:
        ##     hbox = Widgets.HBox()
        ##     wgts.choice = Widgets.CheckBox("Remember choice for session")
        ##     hbox.add_widget(wgts.choice)
        ##     box.add_widget(hbox, stretch=0)
        ## else:
        ##     wgts.choice = None
        self.ds.show_dialog(dialog)
        dialog.resize(600, 600)
def status_msg(self, format, *args):
if not format:
s = ''
else:
s = format % args
if 'status' in self.w:
self.w.status.set_message(s)
    def set_pos(self, x, y):
        """Move the main window to screen position (x, y)."""
        self.w.root.move(x, y)
    def set_size(self, wd, ht):
        """Resize the main window to `wd` x `ht` pixels."""
        self.w.root.resize(wd, ht)
def set_geometry(self, geometry):
# translation of X window geometry specification WxH+X+Y
coords = geometry.replace('+', ' +')
coords = coords.replace('-', ' -')
coords = coords.split()
if 'x' in coords[0]:
# spec includes dimensions
dim = coords[0]
coords = coords[1:]
else:
# spec is position only
dim = None
if dim is not None:
# user specified dimensions
dim = list(map(int, dim.split('x')))
self.set_size(*dim)
if len(coords) > 0:
# user specified position
coords = list(map(int, coords))
self.set_pos(*coords)
    def collapse_pane(self, side):
        """
        Toggle collapsing the left or right panes.

        Parameters
        ----------
        side : str
            'left' or 'right'.  The named pane is minimized if open
            (its size remembered in self._lsize/_rsize) or restored if
            already minimized (< 10 px wide).
        """
        # TODO: this is too tied to one configuration, need to figure
        # out how to generalize this
        hsplit = self.w['hpnl']
        sizes = hsplit.get_sizes()
        lsize, msize, rsize = sizes
        # remember initial sizes the first time through
        if self._lsize is None:
            self._lsize, self._rsize = lsize, rsize
        self.logger.debug("left=%d mid=%d right=%d" % (
            lsize, msize, rsize))
        if side == 'right':
            if rsize < 10:
                # restore pane
                rsize = self._rsize
                msize -= rsize
            else:
                # minimize pane
                self._rsize = rsize
                msize += rsize
                rsize = 0
        elif side == 'left':
            if lsize < 10:
                # restore pane
                lsize = self._lsize
                msize -= lsize
            else:
                # minimize pane
                self._lsize = lsize
                msize += lsize
                lsize = 0
        hsplit.set_sizes([lsize, msize, rsize])
    def get_font(self, font_family, point_size):
        """Return a toolkit font object for `font_family` at `point_size`."""
        return GwHelp.get_font(font_family, point_size)
def get_icon(self, icondir, filename):
iconpath = os.path.join(icondir, filename)
icon = GwHelp.get_icon(iconpath)
return icon
####################################################
# CALLBACKS
####################################################
    def window_close(self, *args):
        """Quit the application.
        """
        # window 'close' callbacks route here; delegate to quit()
        self.quit()
    def quit(self, *args):
        """Quit the application.

        Stops all channel-local and global plugins, optionally writes
        the current layout to disk, then deletes all top-level windows.
        """
        self.logger.info("Attempting to shut down the application...")
        self.stop()
        # stop plugins in every channel
        for chname in self.get_channel_names():
            channel = self.get_channel(chname)
            opmon = channel.opmon
            opmon.stop_all_plugins()
        # stop all global plugins
        self.gpmon.stop_all_plugins()
        # write out our current layout
        if self.layout_file is not None and self.save_layout:
            self.error_wrap(self.ds.write_layout_conf, self.layout_file)
        # drop the root reference before tearing down toplevels
        self.w.root = None
        while len(self.ds.toplevels) > 0:
            w = self.ds.toplevels.pop()
            w.delete()
    def add_channel_cb(self, w, rsp, b, names):
        """Dialog callback for 'Add Channel'.

        `rsp` is 1 for Ok, 0 for Cancel; `b` holds the dialog widgets
        and `names` the workspace names shown in the combobox.
        """
        chname = str(b.channel_name.get_text())
        idx = b.workspace.get_index()
        # get_index() can be -1 if nothing is selected
        if idx < 0:
            idx = 0
        wsname = names[idx]
        self.ds.remove_dialog(w)
        # save name for next add
        self._lastwsname = wsname
        if rsp != 1:
            return
        if self.has_channel(chname):
            self.show_error("Channel name already in use: '%s'" % (chname))
            return True
        self.error_wrap(self.add_channel, chname, workspace=wsname)
        return True
def add_channels_cb(self, w, rsp, b, names):
chpfx = b.prefix.get_text()
idx = b.workspace.get_index()
wsname = names[idx]
num = int(b.number.get_value())
self.ds.remove_dialog(w)
if (rsp != 1) or (num <= 0):
return
for i in range(num):
chname = self.make_channel_name(chpfx)
self.error_wrap(self.add_channel, chname, workspace=wsname)
return True
def delete_channel_cb(self, w, rsp, chname):
self.ds.remove_dialog(w)
if rsp != 1:
return
self.delete_channel(chname)
return True
def delete_tab_cb(self, w, rsp, tabname):
self.ds.remove_dialog(w)
if rsp != 1:
return
self.ds.remove_tab(tabname)
return True
    def __next_dialog(self):
        """Pop the just-finished dialog off the queue and, if any more
        are pending, start the next one (dialogs are serialized through
        self.gui_dialog_list under self.gui_dialog_lock).
        """
        with self.gui_dialog_lock:
            # this should be the just completed call for a dialog
            # that gets popped off
            self.gui_dialog_list.pop(0)
            if len(self.gui_dialog_list) > 0:
                # if there are any other dialogs waiting, start
                # the next one
                future = self.gui_dialog_list[0]
                self.nongui_do_future(future)
    def choose_opener_cb(self, w, rsp, wgts, openers, open_cb, mimetype):
        """Dialog callback for 'Choose File Opener'.

        On Ok with exactly one selection, optionally remembers the
        choice for the session, then invokes `open_cb` with the chosen
        opener on a non-GUI thread and advances the dialog queue.
        """
        sel_dct = wgts.table.get_selected()
        if rsp != 1:
            # cancel
            self.ds.remove_dialog(w)
            self.__next_dialog()
            return
        bnchs = list(sel_dct.values())
        if len(bnchs) != 1:
            # user didn't select an opener
            self.show_error("Need to select one opener!", raisetab=True)
            return
        bnch = bnchs[0]
        self.ds.remove_dialog(w)
        if wgts.choice is not None and wgts.choice.get_state():
            # user wants us to remember their choice
            # NOTE(review): `loader` is presumably a module-level import
            # of the opener registry -- confirm at the top of the file
            if mimetype != '*':
                # loader is not registered for this mimetype, so go ahead
                # and do it
                loader.add_opener(mimetype, bnch.opener,
                                  priority=bnch.priority, note=bnch.note)
            else:
                # multiple loaders for the same mimetype--
                # remember setting by prioritizing choice
                bnch.priority = -1
        self.nongui_do(open_cb, bnch.opener)
        self.__next_dialog()
        return True
def choose_viewer_cb(self, w, rsp, wgts, viewers, open_cb, dataobj):
sel_dct = wgts.table.get_selected()
if rsp != 1:
# cancel
self.ds.remove_dialog(w)
return
bnchs = list(sel_dct.values())
if len(bnchs) != 1:
# user didn't select an opener
self.show_error("Need to select one viewer!", raisetab=True)
return
bnch = bnchs[0]
self.ds.remove_dialog(w)
open_cb(bnch, dataobj)
return True
    def init_workspace(self, ws):
        """Wire up callbacks and toolbar controls for workspace `ws`.

        Registers page/close callbacks and, if the workspace has a
        toolbar, adds prev/next-tab buttons, a channel-name entry and
        add/delete channel buttons (prev/next/delete start disabled and
        are toggled by the page_added/removed callbacks).
        """
        # add close handlers
        ws.add_callback('ws-close', self.workspace_closed_cb)
        # not every workspace type supports all page events
        if ws.has_callback('page-close'):
            ws.add_callback('page-close', self.page_close_cb)
        if ws.has_callback('page-switch'):
            ws.add_callback('page-switch', self.page_switch_cb)
        if ws.has_callback('page-added'):
            ws.add_callback('page-added', self.page_added_cb)
        if ws.has_callback('page-removed'):
            ws.add_callback('page-removed', self.page_removed_cb)
        if ws.toolbar is not None:
            tb = ws.toolbar
            tb.add_separator()
            # add toolbar buttons for navigating between tabs
            iconpath = os.path.join(self.iconpath, "prev_48.png")
            btn = tb.add_action(None, iconpath=iconpath, iconsize=(24, 24))
            btn.set_tooltip("Focus previous tab in this workspace")
            btn.add_callback('activated', lambda w: self.prev_channel_ws(ws))
            ws.extdata.w_prev_tab = btn
            btn.set_enabled(False)
            iconpath = os.path.join(self.iconpath, "next_48.png")
            btn = tb.add_action(None, iconpath=iconpath, iconsize=(24, 24))
            btn.set_tooltip("Focus next tab in this workspace")
            btn.add_callback('activated', lambda w: self.next_channel_ws(ws))
            ws.extdata.w_next_tab = btn
            btn.set_enabled(False)
            tb.add_separator()
            entry = Widgets.TextEntry()
            entry.set_length(8)
            chpfx = self.settings.get('channel_prefix', "Image")
            entry.set_text(chpfx)
            entry.set_tooltip("Name or prefix for a new channel")
            ws.extdata.w_chname = entry
            btn = tb.add_widget(entry)
            # add toolbar buttons adding and deleting channels
            iconpath = os.path.join(self.iconpath, "inbox_plus_48.png")
            btn = tb.add_action(None, iconpath=iconpath, iconsize=(24, 23))
            btn.set_tooltip("Add a channel to this workspace")
            btn.add_callback('activated',
                             lambda w: self.add_channel_auto_ws(ws))
            ws.extdata.w_new_channel = btn
            iconpath = os.path.join(self.iconpath, "inbox_minus_48.png")
            btn = tb.add_action(None, iconpath=iconpath, iconsize=(24, 23))
            btn.set_tooltip("Delete current channel from this workspace")
            btn.add_callback('activated',
                             lambda w: self.gui_delete_channel_ws(ws))
            btn.set_enabled(False)
            ws.extdata.w_del_channel = btn
    def add_ws_cb(self, w, rsp, b, names):
        """Dialog callback for 'Add Workspace'.

        Validates the workspace name, creates the workspace and its
        initial channels (sharing settings per the dialog entries).
        Exceptions are logged rather than propagated, since this runs
        from a GUI callback.
        """
        try:
            wsname = str(b.workspace_name.get_text())
            wstype = b.workspace_type.get_text().lower()
            if rsp != 1:
                self.ds.remove_dialog(w)
                return
            try:
                # name already in use?
                nb = self.ds.get_nb(wsname)  # noqa
                self.show_error(
                    "Workspace name '%s' cannot be used, sorry." % (wsname),
                    raisetab=True)
                self.ds.remove_dialog(w)
                return
            except KeyError:
                pass
            in_space = b.workspace.get_text()
            chpfx = b.channel_prefix.get_text().strip()
            num = int(b.num_channels.get_value())
            share_list = b.share_settings.get_text().split()
            self.ds.remove_dialog(w)
            # NOTE(review): error_wrap() may return None on failure, in
            # which case the attribute access below raises and is caught
            # by the outer except -- confirm intended
            ws = self.error_wrap(self.add_workspace, wsname, wstype,
                                 inSpace=in_space)
            ws.extdata.chpfx = chpfx
            self.init_workspace(ws)
            if num <= 0:
                return
            # Create a settings template to copy settings from
            name = "channel_template_%f" % (time.time())
            settings = self.prefs.create_category(name)
            try:
                settings_template = self.prefs.get_settings('channel_Image')
                settings_template.copy_settings(settings)
            except KeyError:
                settings_template = None
            for i in range(num):
                chname = self.make_channel_name(chpfx)
                self.add_channel(chname, workspace=wsname,
                                 settings_template=settings_template,
                                 settings_share=settings,
                                 share_keylist=share_list)
        except Exception as e:
            self.logger.error("Exception building workspace: %s" % (str(e)))
        return True
    def load_file_cb(self, paths):
        """File-selection callback: open the chosen paths as URIs."""
        # NOTE: this dialog callback is handled a little differently
        # from some of the other pop-ups.  It only gets called if a
        # file was selected and "Open" clicked.  This is due to the
        # use of FileSelection rather than Dialog widget
        self.open_uris(paths)
def _get_channel_by_container(self, child):
for chname in self.get_channel_names():
channel = self.get_channel(chname)
if channel.container == child:
return channel
return None
    def page_switch_cb(self, ws, child):
        """Callback when the visible page of a workspace changes;
        switches the active channel to match, if the page is a channel.
        """
        self.logger.debug("page switched to %s" % (str(child)))
        # Find the channel that contains this widget
        channel = self._get_channel_by_container(child)
        self.logger.debug("channel: %s" % (str(channel)))
        if channel is not None:
            viewer = channel.viewer
            # only switch if this isn't already the focused viewer
            if viewer != self.getfocus_viewer():
                chname = channel.name
                self.logger.debug("Active channel switch to '%s'" % (
                    chname))
                self.change_channel(chname, raisew=False)
        return True
    def workspace_closed_cb(self, ws):
        """Callback when the user tries to close workspace `ws`.

        Refuses if the workspace still has pages; otherwise pops up a
        confirmation dialog (see delete_workspace_cb).
        """
        self.logger.debug("workspace requests close")
        num_children = ws.num_pages()
        if num_children > 0:
            self.show_error(
                "Please close all windows in this workspace first!",
                raisetab=True)
            return
        # TODO: this will prompt the user if we should close the workspace
        lbl = Widgets.Label("Really delete workspace '%s' ?" % (ws.name))
        dialog = Widgets.Dialog(title="Delete Workspace",
                                flags=0,
                                buttons=[['Cancel', 0], ['Ok', 1]],
                                parent=self.w.root)
        dialog.add_callback('activated',
                            lambda w, rsp: self.delete_workspace_cb(w, rsp,
                                                                    ws))
        box = dialog.get_content_area()
        box.add_widget(lbl, stretch=0)
        self.ds.show_dialog(dialog)
def delete_workspace_cb(self, w, rsp, ws):
self.ds.remove_dialog(w)
if rsp == 0:
return
top_w = ws.extdata.get('top_w', None)
if top_w is None:
self.ds.remove_tab(ws.name)
else:
# this is a top-level window
self.ds.remove_toplevel(top_w)
# inform desktop we are no longer tracking this
self.ds.delete_ws(ws.name)
return True
def page_added_cb(self, ws, child):
self.logger.debug("page added in %s: '%s'" % (ws.name, str(child)))
num_pages = ws.num_pages()
if ws.toolbar is not None:
if num_pages > 1:
ws.extdata.w_prev_tab.set_enabled(True)
ws.extdata.w_next_tab.set_enabled(True)
ws.extdata.w_del_channel.set_enabled(True)
def page_removed_cb(self, ws, child):
self.logger.debug("page removed in %s: '%s'" % (ws.name, str(child)))
num_pages = ws.num_pages()
if num_pages <= 1:
if ws.toolbar is not None:
ws.extdata.w_prev_tab.set_enabled(False)
ws.extdata.w_next_tab.set_enabled(False)
if num_pages <= 0:
ws.extdata.w_del_channel.set_enabled(False)
def page_close_cb(self, ws, child):
# user is attempting to close the page
self.logger.debug("page closed in %s: '%s'" % (ws.name, str(child)))
channel = self._get_channel_by_container(child)
if channel is not None:
self.gui_delete_channel(channel.name)
    def showxy(self, viewer, data_x, data_y):
        """Called by the mouse-tracking callback to handle reporting of
        cursor position to various plugins that subscribe to the
        'field-info' callback.

        Updates are throttled to one per `self.cursor_interval` seconds;
        positions arriving faster than that are coalesced into a single
        deferred update via `self._cursor_task` (see _cursor_timer_cb).
        """
        # This is an optimization to get around slow coordinate
        # transformation by astropy and possibly other WCS packages,
        # which causes delay for other mouse tracking events, e.g.
        # the zoom plugin.
        # We only update the under cursor information every period
        # defined by (self.cursor_interval) sec.
        #
        # If the refresh interval has expired then update the info;
        # otherwise (re)set the timer until the end of the interval.
        cur_time = time.time()
        elapsed = cur_time - self._cursor_last_update
        if elapsed > self.cursor_interval:
            # cancel timer
            self._cursor_task.clear()
            self.gui_do_oneshot('field-info', self._showxy,
                                viewer, data_x, data_y)
        else:
            # store needed data into the timer data area
            self._cursor_task.data.setvals(viewer=viewer,
                                           data_x=data_x, data_y=data_y)
            # calculate delta until end of refresh interval
            period = self.cursor_interval - elapsed
            # set timer conditionally (only if it hasn't yet been set)
            self._cursor_task.cond_set(period)
        return True
def _cursor_timer_cb(self, timer):
"""Callback when the cursor timer expires.
"""
data = timer.data
self.gui_do_oneshot('field-info', self._showxy,
data.viewer, data.data_x, data.data_y)
    def _showxy(self, viewer, data_x, data_y):
        """Update the info from the last position recorded under the cursor.

        Gathers pixel/WCS info at (data_x, data_y) from the viewer's
        image and fires the 'field-info' callback for its channel.
        """
        self._cursor_last_update = time.time()
        try:
            image = viewer.get_vip()
            if image.ndim < 2:
                # nothing sensible to report for < 2-dim data
                return
            settings = viewer.get_settings()
            info = image.info_xy(data_x, data_y, settings)

            # Are we reporting in data or FITS coordinates?
            off = self.settings.get('pixel_coords_offset', 0.0)
            info.x += off
            info.y += off
            if 'image_x' in info:
                info.image_x += off
            if 'image_y' in info:
                info.image_y += off

        except Exception as e:
            self.logger.warning(
                "Can't get info under the cursor: %s" % (str(e)), exc_info=True)
            return

        # TODO: can this be made more efficient?
        chname = self.get_channel_name(viewer)
        channel = self.get_channel(chname)

        self.make_callback('field-info', channel, info)
        self.update_pending()
        return True
def motion_cb(self, viewer, button, data_x, data_y):
"""Motion event in the channel viewer window. Show the pointing
information under the cursor.
"""
self.showxy(viewer, data_x, data_y)
return True
def keypress(self, viewer, event, data_x, data_y):
"""Key press event in a channel window."""
keyname = event.key
chname = self.get_channel_name(viewer)
self.logger.debug("key press (%s) in channel %s" % (
keyname, chname))
# TODO: keyboard accelerators to raise tabs need to be integrated into
# the desktop object
if keyname == 'Z':
self.ds.raise_tab('Zoom')
## elif keyname == 'T':
## self.ds.raise_tab('Thumbs')
elif keyname == 'I':
self.ds.raise_tab('Info')
elif keyname == 'H':
self.ds.raise_tab('Header')
elif keyname == 'C':
self.ds.raise_tab('Contents')
elif keyname == 'D':
self.ds.raise_tab('Dialogs')
elif keyname == 'F':
self.build_fullscreen()
elif keyname == 'f':
self.toggle_fullscreen()
elif keyname == 'm':
self.maximize()
elif keyname == '<':
self.collapse_pane('left')
elif keyname == '>':
self.collapse_pane('right')
elif keyname == 'n':
self.next_channel()
elif keyname == 'J':
self.cycle_workspace_type()
elif keyname == 'k':
self.add_channel_auto()
elif keyname == 'K':
self.remove_channel_auto()
elif keyname == 'f1':
self.show_channel_names()
## elif keyname == 'escape':
## self.reset_viewer()
elif keyname in ('up',):
self.prev_img()
elif keyname in ('down',):
self.next_img()
elif keyname in ('left',):
self.prev_channel()
elif keyname in ('right',):
self.next_channel()
return True
def dragdrop(self, chviewer, uris):
"""Called when a drop operation is performed on a channel viewer.
We are called back with a URL and we attempt to (down)load it if it
names a file.
"""
# find out our channel
chname = self.get_channel_name(chviewer)
self.open_uris(uris, chname=chname)
return True
def force_focus_cb(self, viewer, event, data_x, data_y):
chname = self.get_channel_name(viewer)
channel = self.get_channel(chname)
v = channel.viewer
if hasattr(v, 'take_focus'):
v.take_focus()
if not self.settings.get('channel_follows_focus', False):
self.change_channel(chname, raisew=True)
return True
def focus_cb(self, viewer, tf, name):
"""Called when ``viewer`` gets ``(tf==True)`` or loses
``(tf==False)`` the focus.
"""
if not self.settings.get('channel_follows_focus', False):
return True
self.logger.debug("focus %s=%s" % (name, tf))
if tf:
if viewer != self.getfocus_viewer():
self.change_channel(name, raisew=False)
return True
def show_channel_names(self):
"""Show each channel's name in its image viewer.
Useful in 'grid' or 'stack' workspace type to identify which window
is which.
"""
for name in self.get_channel_names():
channel = self.get_channel(name)
channel.fitsimage.onscreen_message(name, delay=2.5)
########################################################
### NON-PEP8 PREDECESSORS: TO BE DEPRECATED
def name_image_from_path(self, path, idx=None):
self.logger.warning("This function has moved to the"
" 'ginga.util.iohelper' module,"
" and will be deprecated soon.")
return iohelper.name_image_from_path(path, idx=idx)
def get_fileinfo(self, filespec, dldir=None):
self.logger.warning("This function has moved to the"
" 'ginga.util.iohelper' module,"
" and will be deprecated soon.")
return iohelper.get_fileinfo(filespec, cache_dir=dldir)
getDrawClass = get_draw_class
getDrawClasses = get_draw_classes
get_channelName = get_channel_name
get_channelInfo = get_channel_info
get_channelNames = get_channel_names
followFocus = follow_focus
showStatus = show_status
getfocus_fitsimage = getfocus_viewer
get_fitsimage = get_viewer
get_ServerBank = get_server_bank
getFont = get_font
getPluginManager = get_plugin_manager
statusMsg = status_msg
class GuiLogHandler(logging.Handler):
    """A logging handler that routes formatted log records to a GUI pane.

    The shell object `fv` must provide a ``logit(text)`` method.
    """

    def __init__(self, fv, level=logging.NOTSET):
        self.fv = fv
        super(GuiLogHandler, self).__init__(level=level)

    def emit(self, record):
        """Format `record` and hand the resulting text to the GUI."""
        self.fv.logit(self.format(record))
def _rmtmpdir(tmpdir):
    """Recursively delete `tmpdir` if it exists; silently do nothing
    otherwise."""
    if not os.path.isdir(tmpdir):
        return
    shutil.rmtree(tmpdir)
|
import calendar
import collections
import contextlib
import datetime
import errno
import functools
import itertools
import json
import operator
import os
import random
import re
import shutil
import time
import unicodedata
import urllib
import urlparse
import string
import subprocess
import scandir
import django.core.mail
from django.conf import settings
from django.core.cache import cache
from django.core.files.storage import (
FileSystemStorage, default_storage as storage)
from django.core.paginator import (
EmptyPage, InvalidPage, Paginator as DjangoPaginator)
from django.core.validators import ValidationError, validate_slug
from django.forms.fields import Field
from django.http import HttpResponse
from django.template import engines, loader
from django.utils import translation
from django.utils.encoding import force_bytes, force_text
from django.utils.http import _urlparse as django_urlparse, quote_etag
import bleach
import html5lib
import jinja2
import pytz
import basket
from babel import Locale
from django_statsd.clients import statsd
from easy_thumbnails import processors
from html5lib.serializer import HTMLSerializer
from PIL import Image
from rest_framework.utils.encoders import JSONEncoder
from olympia.amo import ADDON_ICON_SIZES, search
from olympia.amo.pagination import ESPaginator
from olympia.amo.urlresolvers import linkify_with_outgoing, reverse
from olympia.translations.models import Translation
from olympia.users.models import UserNotification
from olympia.users.utils import UnsubscribeCode
from olympia.lib import unicodehelper
from . import logger_log as log
def render(request, template, ctx=None, status=None, content_type=None):
    """Render `template` with context `ctx` and wrap the result in an
    HttpResponse."""
    content = loader.render_to_string(template, ctx, request=request)
    return HttpResponse(content, status=status, content_type=content_type)
def from_string(string):
    """Compile a template object from a raw string using the jinja2 engine."""
    return engines['jinja2'].from_string(string)
def days_ago(n):
    """Return a naive datetime `n` days before now."""
    return datetime.datetime.now() - datetime.timedelta(days=n)
def urlparams(url_, hash=None, **query):
    """
    Add a fragment and/or query parameters to a URL.

    New query params will be appended to existing parameters, except duplicate
    names, which will be replaced.  Passing ``None`` as a query value removes
    that parameter.  Returns the rebuilt URL as a string.
    """
    url = django_urlparse(force_text(url_))
    # an explicit `hash` overrides the fragment already in the URL
    fragment = hash if hash is not None else url.fragment

    # Use dict(parse_qsl) so we don't get lists of values.
    q = url.query
    query_dict = dict(urlparse.parse_qsl(force_bytes(q))) if q else {}
    query_dict.update(
        (k, force_bytes(v) if v is not None else v) for k, v in query.items())

    # None values mean "drop this parameter"; urlencode() below is the
    # Unicode-safe wrapper defined in this module.
    query_string = urlencode(
        [(k, urllib.unquote(v)) for k, v in query_dict.items()
         if v is not None])
    new = urlparse.ParseResult(url.scheme, url.netloc, url.path, url.params,
                               query_string, fragment)
    return new.geturl()
def partial(func, *args, **kw):
    """functools.partial that also copies `func`'s metadata onto the
    wrapper, as a decorator would."""
    wrapped = functools.partial(func, *args, **kw)
    return functools.update_wrapper(wrapped, func)
def isotime(t):
    """Format a datetime as ISO 8601 in UTC; None for non-datetime input."""
    if not hasattr(t, 'tzinfo'):
        # not a datetime-like object
        return
    localized = _append_tz(t)
    return localized.astimezone(pytz.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
def epoch(t):
    """Convert a datetime to integer seconds since the Unix epoch;
    None for non-datetime input."""
    if not hasattr(t, 'tzinfo'):
        return
    return int(time.mktime(_append_tz(t).timetuple()))
def _append_tz(t):
    """Localize naive datetime `t` to the timezone configured in
    settings.TIME_ZONE."""
    tz = pytz.timezone(settings.TIME_ZONE)
    return tz.localize(t)
def sorted_groupby(seq, key):
    """
    Given a sequence, we sort it and group it by a key.

    key should be a string (used with attrgetter) or a function.
    Returns an iterator of (key_value, group_iterator) pairs, like
    itertools.groupby.
    """
    if not callable(key):
        # idiom fix: callable() instead of hasattr(key, '__call__');
        # a string key means "group by this attribute"
        key = operator.attrgetter(key)
    return itertools.groupby(sorted(seq, key=key), key=key)
def paginate(request, queryset, per_page=20, count=None):
    """Return one page of `queryset`, driven by request.GET['page'].

    If you pass ``count``, that value will be used instead of calling
    ``.count()`` on the queryset. This can be good if the queryset would
    produce an expensive count query.
    """
    if isinstance(queryset, search.ES):
        paginator = ESPaginator(
            queryset, per_page, use_elasticsearch_dsl=False)
    else:
        paginator = DjangoPaginator(queryset, per_page)

    if count is not None:
        paginator.count = count

    # Requested page number; garbage input falls back to page 1.
    try:
        page_no = int(request.GET.get('page', 1))
    except ValueError:
        page_no = 1

    # Out-of-range pages also fall back to the first page.
    try:
        paginated = paginator.page(page_no)
    except (EmptyPage, InvalidPage):
        paginated = paginator.page(1)
    paginated.url = u'%s?%s' % (request.path, request.GET.urlencode())
    return paginated
def decode_json(json_string):
    """json.loads() that transparently strips a BOM from the input first."""
    return json.loads(unicodehelper.decode(json_string))
def send_mail(subject, message, from_email=None, recipient_list=None,
              use_deny_list=True, perm_setting=None, manage_url=None,
              headers=None, cc=None, real_email=False, html_message=None,
              attachments=None, max_retries=3, reply_to=None):
    """
    A wrapper around django.core.mail.EmailMessage.

    Adds deny checking and error logging.

    Filters `recipient_list` against the user's notification preferences
    (`perm_setting`) and the settings.EMAIL_DENY_LIST, then queues the
    actual sending through the `send_email` celery task.  When
    `perm_setting` is given, each recipient gets an individual message
    with an unsubscribe footer.  Returns True when there was nothing to
    send, otherwise the task-dispatch result.
    """
    # imported here to avoid circular imports at module load time
    from olympia.amo.templatetags.jinja_helpers import absolutify
    from olympia.amo.tasks import send_email
    from olympia.users import notifications

    if not recipient_list:
        return True

    if isinstance(recipient_list, basestring):
        raise ValueError('recipient_list should be a list, not a string.')

    # Check against user notification settings
    if perm_setting:
        if isinstance(perm_setting, str):
            perm_setting = notifications.NOTIFICATIONS_BY_SHORT[perm_setting]
        perms = dict(UserNotification.objects
                     .filter(user__email__in=recipient_list,
                             notification_id=perm_setting.id)
                     .values_list('user__email', 'enabled'))

        # users without an explicit preference fall back to the default
        d = perm_setting.default_checked
        recipient_list = [e for e in recipient_list
                          if e and perms.setdefault(e, d)]

    # Prune denied emails.
    if use_deny_list:
        white_list = []
        for email in recipient_list:
            if email and email.lower() in settings.EMAIL_DENY_LIST:
                log.debug('Blacklisted email removed from list: %s' % email)
            else:
                white_list.append(email)
    else:
        white_list = recipient_list

    if not from_email:
        from_email = settings.DEFAULT_FROM_EMAIL

    if cc:
        # If not basestring, assume it is already a list.
        if isinstance(cc, basestring):
            cc = [cc]

    if not headers:
        headers = {}

    # Avoid auto-replies per rfc 3834 and the Microsoft variant
    headers['X-Auto-Response-Suppress'] = 'RN, NRN, OOF, AutoReply'
    headers['Auto-Submitted'] = 'auto-generated'

    def send(recipients, message, **options):
        # Dispatch one message through the send_email task; `options`
        # override the defaults captured from the outer call.
        kwargs = {
            'attachments': attachments,
            'cc': cc,
            'from_email': from_email,
            'headers': headers,
            'html_message': html_message,
            'max_retries': max_retries,
            'real_email': real_email,
            'reply_to': reply_to,
        }
        kwargs.update(options)
        # Email subject *must not* contain newlines
        args = (list(recipients), ' '.join(subject.splitlines()), message)
        return send_email.delay(*args, **kwargs)

    if white_list:
        if perm_setting:
            html_template = loader.get_template('amo/emails/unsubscribe.html')
            text_template = loader.get_template('amo/emails/unsubscribe.ltxt')
            if not manage_url:
                manage_url = urlparams(absolutify(
                    reverse('users.edit', add_prefix=False)),
                    'acct-notify')
            for recipient in white_list:
                # Add unsubscribe link to footer.
                token, hash = UnsubscribeCode.create(recipient)
                unsubscribe_url = absolutify(
                    reverse('users.unsubscribe',
                            args=[token, hash, perm_setting.short],
                            add_prefix=False))

                context = {
                    'message': message,
                    'manage_url': manage_url,
                    'unsubscribe_url': unsubscribe_url,
                    'perm_setting': perm_setting.label,
                    'SITE_URL': settings.SITE_URL,
                    'mandatory': perm_setting.mandatory,
                }
                # Render this template in the default locale until
                # bug 635840 is fixed.
                with translation.override(settings.LANGUAGE_CODE):
                    message_with_unsubscribe = text_template.render(context)

                if html_message:
                    context['message'] = html_message
                    with translation.override(settings.LANGUAGE_CODE):
                        html_with_unsubscribe = html_template.render(context)
                    result = send([recipient], message_with_unsubscribe,
                                  html_message=html_with_unsubscribe,
                                  attachments=attachments)
                else:
                    result = send([recipient], message_with_unsubscribe,
                                  attachments=attachments)
        else:
            result = send(recipient_list, message=message,
                          html_message=html_message, attachments=attachments)
    else:
        result = True

    return result
@contextlib.contextmanager
def no_jinja_autoescape():
    """Context manager that temporarily disables Jinja2 autoescape.

    The previous autoescape setting is restored even when the body
    raises (the original version skipped restoration on exceptions,
    leaving autoescape disabled for all subsequent renders).
    """
    env = engines['jinja2'].env
    autoescape_orig = env.autoescape
    env.autoescape = False
    try:
        yield
    finally:
        env.autoescape = autoescape_orig
def send_mail_jinja(subject, template, context, *args, **kwargs):
    """Sends mail using a Jinja template with autoescaping turned off.

    Jinja is especially useful for sending email since it has whitespace
    control.
    """
    with no_jinja_autoescape():
        tpl = loader.get_template(template)
    return send_mail(subject, tpl.render(context), *args, **kwargs)
def send_html_mail_jinja(subject, html_template, text_template, context,
                         *args, **kwargs):
    """Sends HTML mail using a Jinja template with autoescaping turned off."""
    # Get a jinja environment so we can override autoescaping for text emails.
    with no_jinja_autoescape():
        html_tpl = loader.get_template(html_template)
        text_tpl = loader.get_template(text_template)
    return send_mail(subject, text_tpl.render(context),
                     html_message=html_tpl.render(context), *args,
                     **kwargs)
def sync_user_with_basket(user):
    """Syncronize a user with basket.

    Returns the user data in case of a successful sync.
    Returns `None` when basket doesn't know the email (not subscribed
    yet); re-raises any other basket error.
    """
    try:
        data = basket.lookup_user(user.email)
        user.update(basket_token=data['token'])
        return data
    except Exception as exc:
        # These error codes simply mean "basket has never seen this
        # email"; anything else is a real failure.
        recoverable = (
            basket.errors.BASKET_INVALID_EMAIL,
            basket.errors.BASKET_UNKNOWN_EMAIL)
        if getattr(exc, 'code', None) in recoverable:
            return None
        raise
def fetch_subscribed_newsletters(user_profile):
    """Return the list of newsletters `user_profile` is subscribed to,
    syncing the profile's basket token on the way."""
    data = sync_user_with_basket(user_profile)
    if data is None:
        # unknown to basket == subscribed to nothing
        return []
    if not user_profile.basket_token:
        user_profile.update(basket_token=data['token'])
    return data['newsletters']
def subscribe_newsletter(user_profile, basket_id, request=None):
    """Subscribe `user_profile` to the newsletter `basket_id`.

    Returns True when basket reports success.
    """
    source_url = request.build_absolute_uri() if request else None
    response = basket.subscribe(
        user_profile.email,
        basket_id,
        sync='Y',
        source_url=source_url,
        optin='Y')
    return response['status'] == 'ok'
def unsubscribe_newsletter(user_profile, basket_id):
    """Unsubscribe `user_profile` from the newsletter `basket_id`.

    Returns True on success, False when no basket token can be obtained
    (the user is unknown to basket, which is effectively "not subscribed
    to any newsletters").
    """
    # The token is normally set by `fetch_subscribed_newsletters`, but
    # re-sync defensively rather than erroring out.
    if not user_profile.basket_token:
        sync_user_with_basket(user_profile)

    if not user_profile.basket_token:
        return False

    response = basket.unsubscribe(
        user_profile.basket_token, user_profile.email, basket_id)
    return response['status'] == 'ok'
def chunked(seq, n):
    """
    Yield successive n-sized chunks from seq.

    >>> for group in chunked(range(8), 3):
    ...     print group
    [0, 1, 2]
    [3, 4, 5]
    [6, 7]
    """
    it = iter(seq)
    while True:
        chunk = list(itertools.islice(it, n))
        if not chunk:
            return
        yield chunk
def urlencode(items):
    """A Unicode-safe URLencoder.

    `items` is a sequence of (key, value) pairs.
    """
    # urllib.urlencode (Python 2) raises UnicodeEncodeError on non-ASCII
    # text values; retry with every value force-encoded to bytes.
    try:
        return urllib.urlencode(items)
    except UnicodeEncodeError:
        return urllib.urlencode([(k, force_bytes(v)) for k, v in items])
def randslice(qs, limit, exclude=None):
    """
    Get a random slice of items from ``qs`` of size ``limit``.

    There will be two queries. One to find out how many elements are in ``qs``
    and another to get a slice. The count is so we don't go out of bounds.
    If exclude is given, we make sure that pk doesn't show up in the slice.

    This replaces qs.order_by('?')[:limit].
    """
    cnt = qs.count()
    # Get one extra in case we find the element that should be excluded.
    if exclude is not None:
        limit += 1
    # random starting offset; 0 when the queryset is smaller than the slice
    rand = 0 if limit > cnt else random.randint(0, cnt - limit)
    slice_ = list(qs[rand:rand + limit])
    if exclude is not None:
        # drop the excluded pk and trim back to the requested size
        slice_ = [o for o in slice_ if o.pk != exclude][:limit - 1]
    return slice_
SLUG_OK = '-_~'
def slugify(s, ok=SLUG_OK, lower=True, spaces=False, delimiter='-'):
    """Make a slug from `s`: keep letters, numbers and the chars in `ok`.

    Unicode space separators become `delimiter` (unless `spaces` is
    True, in which case they are kept as plain spaces); the result is
    lowercased unless `lower` is False.
    """
    # L and N signify letter/number.
    # http://www.unicode.org/reports/tr44/tr44-4.html#GC_Values_Table
    rv = []
    for c in force_text(s):
        cat = unicodedata.category(c)[0]
        if cat in 'LN' or c in ok:
            rv.append(c)
        if cat == 'Z':  # space
            rv.append(' ')
    new = ''.join(rv).strip()
    if not spaces:
        # Bug fix: use a raw string for the regex.  '[-\s]+' relies on
        # '\s' not being a recognized string escape, which is a
        # DeprecationWarning (and later SyntaxWarning/error) in modern
        # Python.
        new = re.sub(r'[-\s]+', delimiter, new)
    return new.lower() if lower else new
def normalize_string(value, strip_puncutation=False):
    """Normalizes a unicode string.

    * decomposes unicode characters
    * strips whitespaces, newlines and tabs
    * optionally removes puncutation
    """
    value = unicodedata.normalize('NFD', force_text(value))
    # NOTE(review): this produces a bytestring and relies on the Python 2
    # str API below (translate(None, deletechars)).
    value = value.encode('utf-8', 'ignore')

    if strip_puncutation:
        value = value.translate(None, string.punctuation)
    # collapse all runs of whitespace to single spaces
    return force_text(' '.join(value.split()))
def slug_validator(s, ok=SLUG_OK, lower=True, spaces=False, delimiter='-',
                   message=validate_slug.message, code=validate_slug.code):
    """
    Raise a ValidationError unless `s` is already a valid slug.

    Regexes don't work here because they won't check alnums in the right
    locale.
    """
    if not s:
        raise ValidationError(message, code=code)
    # valid iff slugification leaves the value untouched
    if slugify(s, ok, lower, spaces, delimiter) != s:
        raise ValidationError(message, code=code)
def raise_required():
    """Raise the standard 'this field is required' validation error."""
    raise ValidationError(Field.default_error_messages['required'])
def clean_nl(string):
    """
    This will clean up newlines so that nl2br can properly be called on the
    cleaned text.

    Falsy input is returned unchanged.
    """
    html_blocks = ['{http://www.w3.org/1999/xhtml}blockquote',
                   '{http://www.w3.org/1999/xhtml}ol',
                   '{http://www.w3.org/1999/xhtml}li',
                   '{http://www.w3.org/1999/xhtml}ul']

    if not string:
        return string

    def parse_html(tree):
        # In etree, a tag may have:
        # - some text content (piece of text before its first child)
        # - a tail (piece of text just after the tag, and before a sibling)
        # - children
        # Eg: "<div>text <b>children's text</b> children's tail</div> tail".

        # Strip new lines directly inside block level elements: first new lines
        # from the text, and:
        # - last new lines from the tail of the last child if there's children
        #   (done in the children loop below).
        # - or last new lines from the text itself.
        if tree.tag in html_blocks:
            if tree.text:
                tree.text = tree.text.lstrip('\n')
                if not len(tree):  # No children.
                    tree.text = tree.text.rstrip('\n')

            # Remove the first new line after a block level element.
            if tree.tail and tree.tail.startswith('\n'):
                tree.tail = tree.tail[1:]

        for child in tree:  # Recurse down the tree.
            if tree.tag in html_blocks:
                # Strip new lines directly inside block level elements: remove
                # the last new lines from the children's tails.
                if child.tail:
                    child.tail = child.tail.rstrip('\n')
            parse_html(child)
        return tree

    parse = parse_html(html5lib.parseFragment(string))

    # Serialize the parsed tree back to html.
    walker = html5lib.treewalkers.getTreeWalker('etree')
    stream = walker(parse)
    serializer = HTMLSerializer(quote_attr_values='always',
                                omit_optional_tags=False)
    return serializer.render(stream)
def image_size(filename):
    """
    Return an image size tuple (width, height), as returned by PIL.
    """
    with Image.open(filename) as img:
        return img.size
def pngcrush_image(src, **kw):
    """
    Optimizes a PNG image by running it through Pngcrush.

    Returns True on success, False on any failure (which is logged).
    """
    log.info('Optimizing image: %s' % src)
    try:
        # When -ow is used, the output file name (second argument after
        # options) is used as a temporary filename (that must reside on the
        # same filesystem as the original) to save the optimized file before
        # overwriting the original. By default it's "pngout.png" but we want
        # that to be unique in order to avoid clashes with multiple tasks
        # processing different images in parallel.
        tmp_path = '%s.crush.png' % os.path.splitext(src)[0]
        # -brute is not recommended, and in general does not improve things a
        # lot. -reduce is on by default for pngcrush above 1.8.0, but we're
        # still on an older version (1.7.85 at the time of writing this
        # comment, because that's what comes with Debian stretch that is used
        # for our docker container).
        cmd = [settings.PNGCRUSH_BIN, '-q', '-reduce', '-ow', src, tmp_path]
        proc = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()

        if proc.returncode == 0:
            log.info('Image optimization completed for: %s' % src)
            return True

        log.error('Error optimizing image: %s; %s' % (src, stderr.strip()))
        return False
    except Exception as e:
        log.error('Error optimizing image: %s; %s' % (src, e))
        return False
def resize_image(source, destination, size=None):
    """Resize the image at `source` and write it as PNG to `destination`.

    Returns a tuple of new (width, height) and original (width, height).

    When dealing with local files it's up to you to ensure that all
    directories exist leading up to the destination filename.
    """
    if source == destination:
        raise Exception(
            "source and destination can't be the same: %s" % source)

    with storage.open(source, 'rb') as fp:
        img = Image.open(fp)
        img = img.convert('RGBA')
        original_size = img.size
        if size:
            img = processors.scale_and_crop(img, size)
    with storage.open(destination, 'wb') as fp:
        # Save to PNG without the ICC profile: it can mess up pngcrush
        # badly (mozilla/addons/issues/697).
        img.save(fp, 'png', icc_profile=None)
    pngcrush_image(destination)
    return (img.size, original_size)
def remove_icons(destination):
    """Delete every sized icon file derived from the `destination` stem."""
    for size in ADDON_ICON_SIZES:
        path = '%s-%s.png' % (destination, size)
        if storage.exists(path):
            storage.delete(path)
class ImageCheck(object):
    """Validity and animation checks around a PIL-readable file object."""

    def __init__(self, image):
        # `image` is a file-like object, not a PIL Image.
        self._img = image

    def is_image(self):
        """Return True if the underlying file parses and verifies as an
        image (sets `self.img` to the opened PIL Image as a side effect)."""
        try:
            self._img.seek(0)
            self.img = Image.open(self._img)
            # PIL doesn't tell us what errors it will raise at this point,
            # just "suitable ones", so let's catch them all.
            self.img.verify()
            return True
        except Exception:
            log.error('Error decoding image', exc_info=True)
            return False

    def is_animated(self, size=100000):
        """Return True for an animated PNG (acTL chunk before the first
        IDAT) or a multi-frame GIF.  The file is read in `size`-byte
        chunks."""
        if not self.is_image():
            return False

        if self.img.format == 'PNG':
            self._img.seek(0)
            data = ''
            while True:
                chunk = self._img.read(size)
                if not chunk:
                    break
                data += chunk
            # APNG is animated iff acTL appears before the first IDAT.
            acTL, IDAT = data.find('acTL'), data.find('IDAT')
            if acTL > -1 and acTL < IDAT:
                return True
            return False
        elif self.img.format == 'GIF':
            # The image has been verified, and thus the file closed, we need to
            # reopen. Check the "verify" method of the Image object:
            # http://pillow.readthedocs.io/en/latest/reference/Image.html
            self._img.seek(0)
            img = Image.open(self._img)
            # See the PIL docs for how this works:
            # http://www.pythonware.com/library/pil/handbook/introduction.htm
            try:
                img.seek(1)
            except EOFError:
                return False
            return True
class MenuItem():
    """Refinement item with nestable children for use in menus."""

    # Class-level defaults, kept for backward compatibility with any code
    # that reads these attributes off the class itself.
    url, text, selected, children = ('', '', False, [])

    def __init__(self):
        # Bug fix: `children` was only a class-level list shared by every
        # MenuItem instance, so appending to one menu's children mutated
        # all of them.  Give each instance its own list.
        self.children = []
def to_language(locale):
    """Like django's to_language, but en_US comes out as en-US."""
    # A locale looks like en_US or fr.
    if '_' in locale:
        # normalize through django first (en_US -> en-us), then fix the
        # region casing via the '-' branch on the recursive call
        return to_language(translation.trans_real.to_language(locale))
    if '-' in locale:
        # Django returns en-us but we want to see en-US.
        lang, _, region = locale.partition('-')
        return lang.lower() + '-' + region.upper()
    return translation.trans_real.to_language(locale)
def get_locale_from_lang(lang):
    """Pass in a language (u'en-US') get back a Locale object courtesy of
    Babel. Use this to figure out currencies, bidi, names, etc."""
    # Special-case the fake debug languages, and 'cak' (absent from
    # http://cldr.unicode.org/ and therefore unsupported by Babel --
    # faking the class leads to a rabbit hole of missing locale data for
    # decimal formatting, plural rules etc.): treat them all as English.
    if not lang or lang in ('cak', 'dbg', 'dbr', 'dbl'):
        lang = 'en'
    return Locale.parse(translation.to_locale(lang))
class HttpResponseSendFile(HttpResponse):
    """HttpResponse that serves the file at `path`.

    When settings.XSENDFILE is enabled, the response carries the
    X-Sendfile header and an empty body so the web server performs the
    actual transfer; otherwise the file is streamed in 4k chunks (or via
    the WSGI file wrapper when available).
    """

    def __init__(self, request, path, content=None, status=None,
                 content_type='application/octet-stream', etag=None):
        self.request = request
        self.path = path
        super(HttpResponseSendFile, self).__init__('', status=status,
                                                   content_type=content_type)
        header_path = self.path
        if isinstance(header_path, unicode):
            # header values must be bytes (Python 2)
            header_path = header_path.encode('utf8')
        if settings.XSENDFILE:
            self[settings.XSENDFILE_HEADER] = header_path
        if etag:
            self['ETag'] = quote_etag(etag)

    def __iter__(self):
        """Yield the response body (empty when X-Sendfile does the work)."""
        if settings.XSENDFILE:
            return iter([])

        chunk = 4096
        fp = open(self.path, 'rb')
        if 'wsgi.file_wrapper' in self.request.META:
            # let the WSGI server stream the open file efficiently
            return self.request.META['wsgi.file_wrapper'](fp, chunk)
        else:
            self['Content-Length'] = os.path.getsize(self.path)

            def wrapper():
                while 1:
                    data = fp.read(chunk)
                    if not data:
                        break
                    yield data
            return wrapper()
def cache_ns_key(namespace, increment=False):
    """
    Returns a key with namespace value appended. If increment is True, the
    namespace will be incremented effectively invalidating the cache.

    Memcache doesn't have namespaces, but we can simulate them by storing a
    "%(key)s_namespace" value. Invalidating the namespace simply requires
    editing that key. Your application will no longer request the old keys,
    and they will eventually fall off the end of the LRU and be reclaimed.
    """
    ns_key = 'ns:%s' % namespace
    if increment:
        try:
            ns_val = cache.incr(ns_key)
        except ValueError:
            # The namespace counter expired or was never set; re-seed it
            # from the current time so returned keys are still fresh.
            log.info('Cache increment failed for key: %s. Resetting.' % ns_key)
            ns_val = epoch(datetime.datetime.now())
            cache.set(ns_key, ns_val, None)
    else:
        ns_val = cache.get(ns_key)
        if ns_val is None:
            ns_val = epoch(datetime.datetime.now())
            cache.set(ns_key, ns_val, None)
    # NOTE: historical format -- the namespace *value* comes first.
    return '%s:%s' % (ns_val, ns_key)
def get_email_backend(real_email=False):
    """Get a connection to an email backend.

    If neither `real_email` nor settings.SEND_REAL_EMAIL is set, a
    debugging backend is returned instead of the default one.
    """
    use_real = real_email or settings.SEND_REAL_EMAIL
    backend = None if use_real else 'olympia.amo.mail.DevEmailBackend'
    return django.core.mail.get_connection(backend)
def escape_all(value):
    """Escape html in JSON value, including nested items.

    Strings are HTML-escaped and then linkified; lists and dicts are
    processed recursively (in place); Translation objects are only
    escaped.
    """
    if isinstance(value, basestring):
        value = jinja2.escape(force_text(value))
        value = linkify_with_outgoing(value)
        return value
    elif isinstance(value, list):
        for i, lv in enumerate(value):
            value[i] = escape_all(lv)
    elif isinstance(value, dict):
        for k, lv in value.iteritems():
            value[k] = escape_all(lv)
    elif isinstance(value, Translation):
        value = jinja2.escape(force_text(value))
    return value
class LocalFileStorage(FileSystemStorage):
    """Local storage to an unregulated absolute file path.

    Unregulated means that, unlike the default file storage, you can write to
    any path on the system if you have access.

    Unlike Django's default FileSystemStorage, this class behaves more like a
    "cloud" storage system. Specifically, you never have to write defensive
    code that prepares for leading directory paths to exist.
    """

    def __init__(self, base_url=None):
        super(LocalFileStorage, self).__init__(base_url=base_url)

    def delete(self, name):
        """Delete a file or empty directory path.

        Unlike the default file system storage this will also delete an empty
        directory path. This behavior is more in line with other storage
        systems like S3.
        """
        full_path = self.path(name)
        if os.path.isdir(full_path):
            os.rmdir(full_path)
        else:
            return super(LocalFileStorage, self).delete(name)

    def _open(self, name, mode='rb'):
        # Create intermediate directories on writes so callers never have
        # to pre-create the tree themselves.
        if mode.startswith('w'):
            parent = os.path.dirname(self.path(name))
            try:
                # Try/except to prevent race condition raising "File exists".
                os.makedirs(parent)
            except OSError as e:
                if e.errno == errno.EEXIST and os.path.isdir(parent):
                    pass
                else:
                    raise
        return super(LocalFileStorage, self)._open(name, mode=mode)

    def path(self, name):
        """Actual file system path to name without any safety checks."""
        return os.path.normpath(os.path.join(self.location, force_bytes(name)))
def translations_for_field(field):
    """Return all the translations for a given field.

    This returns a dict of locale:localized_string, not Translation
    objects.
    """
    if field is None:
        return {}
    qs = Translation.objects.filter(id=getattr(field, 'id'),
                                    localized_string__isnull=False)
    return dict(qs.values_list('locale', 'localized_string'))
def attach_trans_dict(model, objs):
    """Put all translations into a translations dict on each obj in `objs`.

    The dict maps translation id -> list of (locale, unicode_string)
    tuples, stored as ``obj.translations``.
    """
    # Get the ids of all the translations we need to fetch.
    fields = model._meta.translated_fields
    ids = [getattr(obj, f.attname) for f in fields
           for obj in objs if getattr(obj, f.attname, None) is not None]

    # Get translations in a dict, ids will be the keys. It's important to
    # consume the result of sorted_groupby, which is an iterator.
    qs = Translation.objects.filter(id__in=ids, localized_string__isnull=False)
    all_translations = dict((k, list(v)) for k, v in
                            sorted_groupby(qs, lambda trans: trans.id))

    def get_locale_and_string(translation, new_class):
        """Convert the translation to new_class (making PurifiedTranslations
        and LinkifiedTranslations work) and return locale / string tuple."""
        converted_translation = new_class()
        converted_translation.__dict__ = translation.__dict__
        return (converted_translation.locale.lower(),
                unicode(converted_translation))

    # Build and attach translations for each field on each object.
    for obj in objs:
        obj.translations = collections.defaultdict(list)
        for field in fields:
            t_id = getattr(obj, field.attname, None)
            field_translations = all_translations.get(t_id, None)
            if not t_id or field_translations is None:
                continue
            obj.translations[t_id] = [
                get_locale_and_string(t, field.remote_field.model)
                for t in field_translations]
def rm_local_tmp_dir(path):
    """Remove a local temp directory.

    This is just a wrapper around shutil.rmtree(). Use it to indicate you are
    certain that your executing code is operating on a local temp dir, not a
    directory managed by the Django Storage API.
    """
    # guard against deleting anything outside the designated tmp tree
    assert path.startswith(settings.TMP_PATH)
    return shutil.rmtree(path)
def timer(*func, **kwargs):
    """
    Outputs statsd timings for the decorated method, ignored if not
    in test suite. It will give us a name that's based on the module name.

    It will work without params. Or with the params:
    key: a key to override the calculated one
    test_only: only time while in test suite (default is True)
    """
    # Usable both bare (``@timer``) and parameterized
    # (``@timer(key=..., test_only=...)``): when used bare, ``func``
    # captures the decorated callable as a 1-tuple.
    key = kwargs.get('key', None)
    test_only = kwargs.get('test_only', True)

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            # Outside the test suite (when test_only is set), call straight
            # through with no statsd overhead.
            if test_only and not settings.IN_TEST_SUITE:
                return func(*args, **kw)
            else:
                # Timing key defaults to "module.function" unless overridden.
                name = (key if key else
                        '%s.%s' % (func.__module__, func.__name__))
                with statsd.timer('timer.%s' % name):
                    return func(*args, **kw)
        return wrapper

    if func:
        # Bare ``@timer`` usage: decorate immediately.
        return decorator(func[0])
    return decorator
def find_language(locale):
    """Return a supported locale code, or None when nothing matches."""
    if not locale:
        return None

    # Exact match against the supported languages.
    if locale in settings.AMO_LANGUAGES:
        return locale

    # Some locales have a supported shorter equivalent.
    short = settings.SHORTER_LANGUAGES.get(locale)
    if short:
        return short

    # Normalize things like en_US into the language form and retry.
    normalized = to_language(locale)
    return normalized if normalized in settings.AMO_LANGUAGES else None
def has_links(html):
    """Return True if links (text or markup) are found in the given html."""

    class _LinkSeen(Exception):
        """Raised by the linkify callback the moment a link is found."""
        pass

    def _abort_on_link(attrs, new):
        # bleach.linkify calls this for every link it detects; bailing out
        # immediately avoids building the full linkified result.
        raise _LinkSeen

    try:
        bleach.linkify(html, callbacks=[_abort_on_link])
    except _LinkSeen:
        return True
    return False
def walkfiles(folder, suffix=''):
    """Lazily yield paths of files under `folder` (recursive), keeping only
    those whose name ends with `suffix`."""
    for dirpath, _dirnames, filenames in scandir.walk(folder):
        for filename in filenames:
            if filename.endswith(suffix):
                yield os.path.join(dirpath, filename)
def utc_millesecs_from_epoch(for_datetime=None):
    """
    Returns milliseconds from the Unix epoch in UTC.

    If `for_datetime` is None, the current datetime will be used.
    """
    if not for_datetime:
        # Bug fix: use utcnow(), not now(). now() returns naive *local*
        # time, and utctimetuple() performs no conversion on a naive
        # datetime, so the old default produced a local-time-based epoch
        # value instead of UTC.
        for_datetime = datetime.datetime.utcnow()
    # timegm interprets the struct_time as UTC; scale seconds -> millis.
    return calendar.timegm(for_datetime.utctimetuple()) * 1000
class AMOJSONEncoder(JSONEncoder):
    """JSON encoder that additionally serializes Translation objects,
    coercing them to their text value."""

    def default(self, obj):
        # Fallback hook JSONEncoder calls for types it cannot serialize
        # natively.
        if isinstance(obj, Translation):
            return force_text(obj)
        return super(AMOJSONEncoder, self).default(obj)
|
import os
from setuptools import setup, find_packages
__version__ = None
# Execute _version.py to populate __version__. Using a context manager
# fixes the file-handle leak in the previous bare exec(open(...).read()).
with open('opentaxii/_version.py') as version_file:
    exec(version_file.read())
def here(*path):
    """Join `path` components onto the directory containing this file."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, *path)
def get_file_contents(filename):
    """Read and return the UTF-8 text of `filename`, resolved relative to
    this file's directory.

    NOTE(review): ``open(..., encoding=...)`` is Python-3-only, while the
    classifiers below still list 2.7 — confirm which interpreters must run
    this setup script.
    """
    with open(here(filename), encoding='utf8') as handle:
        return handle.read()
# Runtime dependencies are maintained in requirements.txt; split() turns
# the file into a flat list of requirement specifiers.
install_requires = get_file_contents('requirements.txt').split()

setup(
    name='opentaxii',
    description='TAXII server implementation in Python from EclecticIQ',
    long_description=get_file_contents('README.md'),
    long_description_content_type='text/markdown',
    url='https://github.com/EclecticIQ/OpenTAXII',
    author='EclecticIQ',
    author_email='opentaxii@eclecticiq.com',
    # Populated by executing opentaxii/_version.py above.
    version=__version__,
    license='BSD License',
    packages=find_packages(exclude=['tests']),
    include_package_data=True,
    # Ship the default YAML configuration files alongside the package code.
    package_data={
        'opentaxii': ['*.yml']
    },
    # Command-line scripts installed into the environment's bin/ directory.
    entry_points={
        'console_scripts': [
            'opentaxii-run-dev = opentaxii.cli.run:run_in_dev_mode',
            'opentaxii-create-account = opentaxii.cli.auth:create_account',
            'opentaxii-update-account = opentaxii.cli.auth:update_account',
            'opentaxii-sync-data = opentaxii.cli.persistence:sync_data_configuration',
            'opentaxii-delete-blocks = opentaxii.cli.persistence:delete_content_blocks',
        ]
    },
    install_requires=install_requires,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Internet',
        'Topic :: Security',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
|
from slicc.ast.ExprAST import ExprAST
class MethodCallExprAST(ExprAST):
    """Base AST node for a SLICC method call; subclasses supply the C++
    call prefix (member vs. class-scoped calls) via generate_prefix()."""

    def __init__(self, slicc, proc_name, expr_ast_vec):
        super(MethodCallExprAST, self).__init__(slicc)
        self.proc_name = proc_name
        self.expr_ast_vec = expr_ast_vec

    def generate(self, code):
        """Emit the call into `code` and return the method's return type."""
        # Generate the parameter expressions into a scratch buffer purely
        # to learn their types.
        tmp = self.slicc.codeFormatter()
        paramTypes = []
        for expr_ast in self.expr_ast_vec:
            return_type = expr_ast.generate(tmp)
            paramTypes.append(return_type)

        obj_type, methodId, prefix = self.generate_prefix(paramTypes)

        # generate code
        params = []
        for expr_ast in self.expr_ast_vec:
            return_type, tcode = expr_ast.inline(True)
            params.append(str(tcode))
        fix = code.nofix()
        # The trailing ')' closes the '(' that generate_prefix opened.
        code("$prefix${{self.proc_name}}(${{', '.join(params)}}))")
        code.fix(fix)

        # Verify that this is a method of the object
        if methodId not in obj_type.methods:
            self.error("Invalid method call: Type '%s' does not have a method '%s'",
                       obj_type, methodId)

        if len(self.expr_ast_vec) != \
               len(obj_type.methods[methodId].param_types):
            # Wrong number of parameters.
            # Bug fix: the message previously referenced the undefined local
            # name `proc_name` (NameError) and carried no placeholders for
            # the expected/actual counts; use self.proc_name and %d slots.
            self.error("Wrong number of parameters for function name: '%s', " +
                       "expected: %d, actual: %d", self.proc_name,
                       len(obj_type.methods[methodId].param_types),
                       len(self.expr_ast_vec))

        for actual_type, expected_type in \
                zip(paramTypes, obj_type.methods[methodId].param_types):
            if actual_type != expected_type and \
                   str(actual_type["interface"]) != str(expected_type):
                self.error("Type mismatch: expected: %s actual: %s",
                           expected_type, actual_type)

        # Return the return type of the method
        return obj_type.methods[methodId].return_type

    def findResources(self, resources):
        # Method calls consume no controller resources.
        pass
class MemberMethodCallExprAST(MethodCallExprAST):
    """Method call on an object expression, e.g. ``obj.method(args)``."""

    def __init__(self, slicc, obj_expr_ast, proc_name, expr_ast_vec):
        s = super(MemberMethodCallExprAST, self)
        s.__init__(slicc, proc_name, expr_ast_vec)
        # AST of the object the method is invoked on.
        self.obj_expr_ast = obj_expr_ast

    def __repr__(self):
        return "[MethodCallExpr: %r%r %r]" % (self.proc_name,
                                              self.obj_expr_ast,
                                              self.expr_ast_vec)

    def generate_prefix(self, paramTypes):
        """Return (object type, resolved method id, C++ call prefix) for a
        member method call, falling back to abstract/interface parameter
        types when the exact overload is not found."""
        code = self.slicc.codeFormatter()

        # member method call
        obj_type = self.obj_expr_ast.generate(code)
        methodId = obj_type.methodId(self.proc_name, paramTypes)

        prefix = ""
        implements_interface = False

        if methodId not in obj_type.methods:
            #
            # The initial method check has failed, but before generating an
            # error we must check whether any of the paramTypes implement
            # an interface. If so, we must check if the method ids using
            # the inherited types exist.
            #
            # This code is a temporary fix and only checks for the methodId
            # where all paramTypes are converted to their inherited type. The
            # right way to do this is to replace slicc's simple string
            # comparison for determining the correct overloaded method, with a
            # more robust param by param check.
            #
            implemented_paramTypes = []
            for paramType in paramTypes:
                implemented_paramType = paramType
                if paramType.isInterface:
                    implements_interface = True
                    implemented_paramType.abstract_ident = paramType["interface"]
                else:
                    implemented_paramType.abstract_ident = paramType.c_ident

                implemented_paramTypes.append(implemented_paramType)

            if implements_interface:
                implementedMethodId = obj_type.methodIdAbstract(self.proc_name,
                                                                implemented_paramTypes)
            else:
                implementedMethodId = ""

            if implementedMethodId not in obj_type.methods:
                self.error("Invalid method call: " \
                           "Type '%s' does not have a method '%s' nor '%s'",
                           obj_type, methodId, implementedMethodId)
            else:
                #
                # Replace the methodId with the implementedMethodId found in
                # the method list.
                #
                methodId = implementedMethodId

        return_type = obj_type.methods[methodId].return_type
        if return_type.isInterface:
            # Interface return values are cast to their concrete type.
            prefix = "static_cast<%s &>" % return_type.c_ident
        prefix = "%s((%s)." % (prefix, code)

        return obj_type, methodId, prefix
class ClassMethodCallExprAST(MethodCallExprAST):
    """Class-scoped (static) method call, e.g. ``Type::method(args)``."""

    def __init__(self, slicc, type_ast, proc_name, expr_ast_vec):
        super(ClassMethodCallExprAST, self).__init__(slicc, proc_name,
                                                     expr_ast_vec)
        self.type_ast = type_ast

    def __repr__(self):
        return "[MethodCallExpr: %r %r]" % (self.proc_name, self.expr_ast_vec)

    def generate_prefix(self, paramTypes):
        """Return (type, method id, 'Type::' call prefix)."""
        callee_type = self.type_ast.type
        resolved_id = callee_type.methodId(self.proc_name, paramTypes)
        return callee_type, resolved_id, "(%s::" % self.type_ast
# Public API of this module (same names, conventional formatting).
__all__ = [
    "MemberMethodCallExprAST",
    "ClassMethodCallExprAST",
]
|
"""
The tightest known upper bound on two-way secret key agreement rate.
"""
from __future__ import division
from .base_skar_optimizers import BaseTwoPartIntrinsicMutualInformation
from ... import Distribution
# Names exported via ``from ... import *`` (order preserved).
__all__ = [
    'two_part_intrinsic_total_correlation',
    'two_part_intrinsic_dual_total_correlation',
    'two_part_intrinsic_CAEKL_mutual_information',
]
class TwoPartIntrinsicTotalCorrelation(BaseTwoPartIntrinsicMutualInformation):
    """
    Optimizer computing the two part intrinsic total correlation.
    """
    name = 'total correlation'

    def measure(self, rvs, crvs):
        """
        Compute the conditional total correlation.

        Parameters
        ----------
        rvs : iterable of iterables
            The random variables.
        crvs : iterable
            The variables to condition on.

        Returns
        -------
        tc : func
            The total correlation.
        """
        tc = self._total_correlation(rvs, crvs)
        return tc


two_part_intrinsic_total_correlation = TwoPartIntrinsicTotalCorrelation.functional()
class TwoPartIntrinsicDualTotalCorrelation(BaseTwoPartIntrinsicMutualInformation):
    """
    Optimizer computing the two part intrinsic dual total correlation.
    """
    name = 'dual total correlation'

    def measure(self, rvs, crvs):
        """
        Compute the conditional dual total correlation (also known as the
        binding information).

        Parameters
        ----------
        rvs : iterable of iterables
            The random variables.
        crvs : iterable
            The variables to condition on.

        Returns
        -------
        dtc : float
            The dual total correlation.
        """
        dtc = self._dual_total_correlation(rvs, crvs)
        return dtc


two_part_intrinsic_dual_total_correlation = TwoPartIntrinsicDualTotalCorrelation.functional()
class TwoPartIntrinsicCAEKLMutualInformation(BaseTwoPartIntrinsicMutualInformation):
    """
    Optimizer computing the two part intrinsic CAEKL mutual information.
    """
    name = 'CAEKL mutual information'

    def measure(self, rvs, crvs):
        """
        Compute the conditional CAEKL mutual information.

        Parameters
        ----------
        rvs : iterable of iterables
            The random variables.
        crvs : iterable
            The variables to condition on.

        Returns
        -------
        caekl : float
            The CAEKL mutual information.
        """
        caekl = self._caekl_mutual_information(rvs, crvs)
        return caekl


two_part_intrinsic_CAEKL_mutual_information = TwoPartIntrinsicCAEKLMutualInformation.functional()
def two_part_intrinsic_mutual_information_constructor(func):  # pragma: no cover
    """
    Given a measure of shared information, construct an optimizer which computes
    its ``two part intrinsic'' form.

    Parameters
    ----------
    func : func
        A function which computes the information shared by a set of variables.
        It must accept the arguments `rvs' and `crvs'.

    Returns
    -------
    TPIMI : BaseTwoPartIntrinsicMutualInformation
        An two part intrinsic mutual information optimizer using `func` as the
        measure of multivariate mutual information.

    Notes
    -----
    Due to the casting to a Distribution for processing, optimizers constructed
    using this function will be significantly slower than if the objective were
    written directly using the joint probability ndarray.
    """
    class TwoPartIntrinsicMutualInformation(BaseTwoPartIntrinsicMutualInformation):
        name = func.__name__

        def measure(self, rvs, crvs):  # pragma: no cover
            """
            Dummy method.
            """
            pass

        def objective(self, x):
            pmf = self.construct_joint(x)
            d = Distribution.from_ndarray(pmf)
            # Shared information among the individual rvs given the J part.
            mi = func(d, rvs=[[rv] for rv in self._rvs], crvs=self._j)
            cmi1 = self._conditional_mutual_information(self._u, self._j, self._v)(pmf)
            # Bug fix: this second term was previously assigned to `cmi1` as
            # well, leaving `cmi2` undefined and making the return line raise
            # a NameError.
            cmi2 = self._conditional_mutual_information(self._u, self._crvs, self._v)(pmf)
            return mi + cmi1 - cmi2

    TwoPartIntrinsicMutualInformation.__doc__ = \
        """
        Compute the two part intrinsic {name}.
        """.format(name=func.__name__)

    docstring = \
        """
        Compute the {name}.

        Parameters
        ----------
        x : np.ndarray
            An optimization vector.

        Returns
        -------
        obj : float
            The {name}-based objective function.
        """.format(name=func.__name__)
    try:
        # python 2
        TwoPartIntrinsicMutualInformation.objective.__func__.__doc__ = docstring
    except AttributeError:
        # python 3
        TwoPartIntrinsicMutualInformation.objective.__doc__ = docstring

    return TwoPartIntrinsicMutualInformation
|
from __future__ import division, print_function, unicode_literals, \
absolute_import
import os
import abc
import io
import subprocess
import itertools
import six
import numpy as np
from monty.tempfile import ScratchDir
from pymatgen import Element
from pymatgen.io.lammps.data import LammpsData
from veidt.potential.abstract import Potential
def _sort_elements(symbols):
    """Return the element symbols sorted in pymatgen Element order.

    PEP 8 fix: a named ``def`` replaces the previously assigned lambda;
    behavior is unchanged.
    """
    return [e.symbol for e in sorted(Element(e) for e in symbols)]
def _pretty_input(lines):
    """Align LAMMPS input commands into a column-formatted string.

    The first token of each command is left-justified to the width of the
    longest keyword plus four spaces; comment lines and blank lines are
    passed through unchanged. Returns the joined, newline-separated text.

    PEP 8 fix: the previously assigned ``prettify`` lambda is now an inner
    ``def``; behavior is unchanged.
    """
    clean_lines = [l.strip('\n') for l in lines]
    commands = [l for l in clean_lines if len(l.strip()) > 0]
    keys = [c.split()[0] for c in commands
            if not c.split()[0].startswith('#')]
    # Column width: longest command keyword plus 4 spaces of padding.
    width = max([len(k) for k in keys]) + 4

    def prettify(l):
        # Blank and comment lines pass through untouched.
        if len(l.split()) == 0 or l.strip().startswith('#'):
            return l
        return l.split()[0].ljust(width) + ' '.join(l.split()[1:])

    return '\n'.join(prettify(l) for l in clean_lines)
def _read_dump(file_name, dtype='float_'):
    """Parse a LAMMPS dump file into an ndarray, skipping the nine
    header lines."""
    with open(file_name) as handle:
        body = handle.readlines()[9:]
    return np.loadtxt(io.StringIO(''.join(body)), dtype=dtype)
class LMPStaticCalculator(six.with_metaclass(abc.ABCMeta, object)):
    """
    Abstract class to perform static structure property calculation
    using LAMMPS.
    """
    # Name of the LAMMPS executable invoked via subprocess.
    LMP_EXE = 'lmp_serial'
    _COMMON_CMDS = ['units metal',
                    'atom_style charge',
                    'box tilt large',
                    'read_data data.static',
                    'run 0']

    @abc.abstractmethod
    def _setup(self):
        """
        Setup a calculation, writing input files, etc.
        """
        return

    @abc.abstractmethod
    def _sanity_check(self, structure):
        """
        Check if the structure is valid for this calculation.
        """
        return

    @abc.abstractmethod
    def _parse(self):
        """
        Parse results from dump files.
        """
        return

    def calculate(self, structures):
        """
        Perform the calculation on a series of structures.

        Args:
            structures [Structure]: Input structures in a list.

        Returns:
            List of computed data corresponding to each structure,
            varies with different subclasses.

        Raises:
            RuntimeError: If the LAMMPS subprocess exits with a non-zero
                return code.
        """
        for s in structures:
            assert self._sanity_check(s) is True, \
                'Incompatible structure found'
        ff_elements = None
        if hasattr(self, 'element_profile'):
            ff_elements = self.element_profile.keys()
        with ScratchDir('.'):
            input_file = self._setup()
            data = []
            for s in structures:
                ld = LammpsData.from_structure(s, ff_elements)
                ld.write_file('data.static')
                p = subprocess.Popen([self.LMP_EXE, '-in', input_file],
                                     stdout=subprocess.PIPE)
                stdout = p.communicate()[0]
                rc = p.returncode
                if rc != 0:
                    error_msg = 'LAMMPS exited with return code %d' % rc
                    msg = stdout.decode("utf-8").split('\n')[:-1]
                    # Surface the LAMMPS 'ERROR' lines when present;
                    # otherwise fall back to the last line of output.
                    try:
                        error_line = [i for i, m in enumerate(msg)
                                      if m.startswith('ERROR')][0]
                        error_msg += ', '.join(msg[error_line:])
                    except IndexError:
                        # Bug fix: the previous bare ``except:`` could mask
                        # unrelated errors; only the [0] index above is
                        # expected to fail (no 'ERROR' line found).
                        error_msg += msg[-1]
                    raise RuntimeError(error_msg)
                results = self._parse()
                data.append(results)
        return data
class EnergyForceStress(LMPStaticCalculator):
    """
    Calculate energy, forces and virial stress of structures.
    """

    def __init__(self, ff_settings):
        """
        Args:
            ff_settings (list/Potential): Force field settings for the
                LAMMPS calculation. When given a Potential object, its
                write_param method is applied to obtain the settings.
        """
        self.ff_settings = ff_settings

    def _setup(self):
        # Load the bundled energy/force/stress input template.
        template_dir = os.path.join(os.path.dirname(__file__),
                                    'templates', 'efs')
        with open(os.path.join(template_dir, 'in.efs'), 'r') as f:
            input_template = f.read()

        if isinstance(self.ff_settings, Potential):
            ff_settings = self.ff_settings.write_param()
        else:
            ff_settings = self.ff_settings

        # Write the rendered input file into the scratch directory.
        input_file = 'in.efs'
        with open(input_file, 'w') as f:
            f.write(input_template.format(ff_settings='\n'.join(ff_settings)))
        return input_file

    def _sanity_check(self, structure):
        # Any structure is acceptable for this calculation.
        return True

    def _parse(self):
        # Collect the scalar energy, per-atom forces and stress tensor
        # written by the LAMMPS run.
        energy = float(np.loadtxt('energy.txt'))
        force = _read_dump('force.dump')
        stress = np.loadtxt('stress.txt')
        return energy, force, stress
class SpectralNeighborAnalysis(LMPStaticCalculator):
    """
    Calculator for bispectrum components to characterize the local
    neighborhood of each atom in a general way.

    Usage:
        [(b, db, e)] = sna.calculate([Structure])
        b: 2d NumPy array with shape (N, n_bs) containing bispectrum
            coefficients, where N is the No. of atoms in structure and
            n_bs is the No. of bispectrum components.
        db: 2d NumPy array with shape (N, 3 * n_bs * n_elements)
            containing the first order derivatives of bispectrum
            coefficients with respect to atomic coordinates,
            where n_elements is the No. of elements in element_profile.
        e: 2d NumPy array with shape (N, 1) containing the element of
            each atom.
    """
    # Placeholder pair style plus compute/dump commands; the trailing
    # spaces on the 'compute' lines are where _setup() appends arguments.
    _CMDS = ['pair_style lj/cut 10',
             'pair_coeff * * 1 1',
             'compute sna all sna/atom ',
             'compute snad all snad/atom ',
             'compute snav all snav/atom ',
             'dump 1 all custom 1 dump.element element',
             'dump 2 all custom 1 dump.sna c_sna[*]',
             'dump 3 all custom 1 dump.snad c_snad[*]',
             'dump 4 all custom 1 dump.snav c_snav[*]']

    def __init__(self, rcutfac, twojmax, element_profile, rfac0=0.99363,
                 rmin0=0, diagonalstyle=3, quadratic=False):
        """
        For more details on the parameters, please refer to the
        official documentation of LAMMPS.

        Notes:
            Despite this calculator uses compute sna(d)/atom command
            (http://lammps.sandia.gov/doc/compute_sna_atom.html), the
            parameter definition is in consistent with pair_style snap
            document (http://lammps.sandia.gov/doc/pair_snap.html),
            where *rcutfac* is the cutoff in distance rather than some
            scale factor.

        Args:
            rcutfac (float): Global cutoff distance.
            twojmax (int): Band limit for bispectrum components.
            element_profile (dict): Parameters (cutoff factor 'r' and
                weight 'w') related to each element, e.g.,
                {'Na': {'r': 0.3, 'w': 0.9},
                 'Cl': {'r': 0.7, 'w': 3.0}}
            rfac0 (float): Parameter in distance to angle conversion.
                Set between (0, 1), default to 0.99363.
            rmin0 (float): Parameter in distance to angle conversion.
                Default to 0.
            diagonalstyle (int): Parameter defining which bispectrum
                components are generated. Choose among 0, 1, 2 and 3,
                default to 3.
            quadratic (bool): Whether including quadratic terms.
                Default to False.
        """
        self.rcutfac = rcutfac
        self.twojmax = twojmax
        self.element_profile = element_profile
        self.rfac0 = rfac0
        self.rmin0 = rmin0
        assert diagonalstyle in range(4), 'Invalid diagonalstype, ' \
                                          'choose among 0, 1, 2 and 3'
        self.diagonalstyle = diagonalstyle
        self.quadratic = quadratic

    @staticmethod
    def get_bs_subscripts(twojmax, diagonal):
        """
        Method to list the subscripts 2j1, 2j2, 2j of bispectrum
        components.

        Args:
            twojmax (int): Band limit for bispectrum components.
            diagonal (int): Parameter defining which bispectrum
                components are generated. Choose among 0, 1, 2 and 3.

        Returns:
            List of all subscripts [2j1, 2j2, 2j].
        """
        subs = itertools.product(range(twojmax + 1), repeat=3)
        # Idiom fix: plain boolean lambdas replace the former
        # ``lambda x: True if cond else False`` forms (same predicates).
        filters = [lambda x: x[0] >= x[1]]
        if diagonal == 2:
            filters.append(lambda x: x[0] == x[1] == x[2])
        else:
            if diagonal == 1:
                filters.append(lambda x: x[0] == x[1])
            elif diagonal == 3:
                filters.append(lambda x: x[2] >= x[0])
            elif diagonal == 0:
                pass
        # Constraint linking 2j to 2j1 and 2j2 (parity + band limit).
        j_filter = lambda x: \
            x[2] in range(x[0] - x[1], min(twojmax, x[0] + x[1]) + 1, 2)
        filters.append(j_filter)
        for f in filters:
            subs = filter(f, subs)
        return list(subs)

    @property
    def n_bs(self):
        """
        Returns No. of bispectrum components to be calculated.
        """
        return len(self.get_bs_subscripts(self.twojmax, self.diagonalstyle))

    def _setup(self):
        # Assemble the per-element cutoffs and weights in Element order.
        compute_args = '{} {} {} '.format(1, self.rfac0, self.twojmax)
        el_in_seq = _sort_elements(self.element_profile.keys())
        cutoffs = [self.element_profile[e]['r'] * self.rcutfac
                   for e in el_in_seq]
        weights = [self.element_profile[e]['w'] for e in el_in_seq]
        compute_args += ' '.join([str(p) for p in cutoffs + weights])
        qflag = 1 if self.quadratic else 0
        compute_args += ' diagonal {} rmin0 {} quadraticflag {}'.\
            format(self.diagonalstyle, self.rmin0, qflag)
        # Append the assembled arguments to every 'compute' command.
        add_args = lambda l: l + compute_args if l.startswith('compute') \
            else l
        CMDS = list(map(add_args, self._CMDS))
        CMDS[2] += ' bzeroflag 0'
        CMDS[3] += ' bzeroflag 0'
        CMDS[4] += ' bzeroflag 0'
        dump_modify = 'dump_modify 1 element '
        dump_modify += ' '.join(str(e) for e in el_in_seq)
        CMDS.append(dump_modify)
        # Splice the SNA commands just before the final 'run 0' command.
        ALL_CMDS = self._COMMON_CMDS[:]
        ALL_CMDS[-1:-1] = CMDS
        input_file = 'in.sna'
        with open('in.sna', 'w') as f:
            f.write(_pretty_input(ALL_CMDS).format(self.twojmax, self.rfac0))
        return input_file

    def _sanity_check(self, structure):
        # Every element in the structure must appear in element_profile.
        struc_elements = set(structure.symbol_set)
        sna_elements = self.element_profile.keys()
        return struc_elements.issubset(sna_elements)

    def _parse(self):
        element = np.atleast_1d(_read_dump('dump.element', 'unicode'))
        b = np.atleast_2d(_read_dump('dump.sna'))
        db = np.atleast_2d(_read_dump('dump.snad'))
        vb = np.atleast_2d(_read_dump('dump.snav'))
        return b, db, vb, element
class ElasticConstant(LMPStaticCalculator):
    """
    Elastic constant calculator.
    """
    # Commands/files used to checkpoint the equilibrated cell, depending
    # on whether the potential is built into LAMMPS or supplied externally.
    _RESTART_CONFIG = {'internal': {'write_command': 'write_restart',
                                    'read_command': 'read_restart',
                                    'restart_file': 'restart.equil'},
                       'external': {'write_command': 'write_data',
                                    'read_command': 'read_data',
                                    'restart_file': 'data.static'}}

    def __init__(self, ff_settings, potential_type='external',
                 deformation_size=1e-6, jiggle=1e-5, lattice='bcc', alat=5.0,
                 maxiter=400, maxeval=1000):
        """
        Args:
            ff_settings (list/Potential): Configure the force field settings for LAMMPS
                calculation, if given a Potential object, should apply
                Potential.write_param method to get the force field setting.
            potential_type (str): 'internal' indicates the internal potential
                installed in lammps, 'external' indicates the external potential
                outside of lammps.
            deformation_size (float): Finite deformation size. Usually range from
                1e-2 to 1e-8, to confirm the results not depend on it.
            jiggle (float): The amount of random jiggle for atoms to
                prevent atoms from staying on saddle points.
            lattice (str): The lattice type of structure. e.g. bcc or diamond.
            alat (float): The lattice constant of specific lattice and specie.
            maxiter (float): The maximum number of iteration. Default to 400.
            maxeval (float): The maximum number of evaluation. Default to 1000.
        """
        self.ff_settings = ff_settings
        self.write_command = self._RESTART_CONFIG[potential_type]['write_command']
        self.read_command = self._RESTART_CONFIG[potential_type]['read_command']
        self.restart_file = self._RESTART_CONFIG[potential_type]['restart_file']
        self.deformation_size = deformation_size
        self.jiggle = jiggle
        self.lattice = lattice
        self.alat = alat
        self.maxiter = maxiter
        self.maxeval = maxeval

    def _setup(self):
        # Render the four elastic-calculation templates (driver plus the
        # init/potential/displace modules it includes).
        template_dir = os.path.join(os.path.dirname(__file__), 'templates', 'elastic')

        with open(os.path.join(template_dir, 'in.elastic'), 'r') as f:
            input_template = f.read()
        with open(os.path.join(template_dir, 'init.template'), 'r') as f:
            init_template = f.read()
        with open(os.path.join(template_dir, 'potential.template'), 'r') as f:
            potential_template = f.read()
        with open(os.path.join(template_dir, 'displace.template'), 'r') as f:
            displace_template = f.read()

        input_file = 'in.elastic'

        if isinstance(self.ff_settings, Potential):
            ff_settings = self.ff_settings.write_param()
        else:
            ff_settings = self.ff_settings

        with open(input_file, 'w') as f:
            f.write(input_template.format(write_restart=self.write_command,
                                          restart_file=self.restart_file))
        with open('init.mod', 'w') as f:
            f.write(init_template.format(deformation_size=self.deformation_size,
                                         jiggle=self.jiggle, maxiter=self.maxiter,
                                         maxeval=self.maxeval, lattice=self.lattice,
                                         alat=self.alat))
        with open('potential.mod', 'w') as f:
            f.write(potential_template.format(ff_settings='\n'.join(ff_settings)))
        with open('displace.mod', 'w') as f:
            f.write(displace_template.format(read_restart=self.read_command,
                                             restart_file=self.restart_file))
        return input_file

    def calculate(self):
        """
        Run the elastic constant calculation and return the parsed results.

        Raises:
            RuntimeError: If the LAMMPS subprocess exits with a non-zero
                return code.
        """
        with ScratchDir('.'):
            input_file = self._setup()
            p = subprocess.Popen([self.LMP_EXE, '-in', input_file],
                                 stdout=subprocess.PIPE)
            stdout = p.communicate()[0]
            rc = p.returncode
            if rc != 0:
                error_msg = 'LAMMPS exited with return code %d' % rc
                msg = stdout.decode("utf-8").split('\n')[:-1]
                # Surface the LAMMPS 'ERROR' lines when present; otherwise
                # fall back to the last line of output.
                try:
                    error_line = [i for i, m in enumerate(msg)
                                  if m.startswith('ERROR')][0]
                    error_msg += ', '.join(msg[error_line:])
                except IndexError:
                    # Bug fix: was a bare ``except:`` that could hide
                    # unrelated errors; only the [0] index can fail here.
                    error_msg += msg[-1]
                raise RuntimeError(error_msg)
            result = self._parse()
        return result

    def _sanity_check(self, structure):
        """
        Check if the structure is valid for this calculation.
        """
        return True

    def _parse(self):
        """
        Parse results from dump files.
        """
        C11, C12, C44, bulkmodulus = np.loadtxt('elastic.txt')
        return C11, C12, C44, bulkmodulus
class LatticeConstant(LMPStaticCalculator):
    """
    Lattice Constant Relaxation Calculator.
    """

    def __init__(self, ff_settings):
        """
        Args:
            ff_settings (list/Potential): Force field settings for the
                LAMMPS calculation. When given a Potential object, its
                write_param method is applied to obtain the settings.
        """
        self.ff_settings = ff_settings

    def _setup(self):
        # Load the bundled lattice-relaxation input template.
        template_path = os.path.join(os.path.dirname(__file__),
                                     'templates', 'latt', 'in.latt')
        with open(template_path, 'r') as f:
            input_template = f.read()

        if isinstance(self.ff_settings, Potential):
            ff_settings = self.ff_settings.write_param()
        else:
            ff_settings = self.ff_settings

        # Render the force-field section and write the input file.
        input_file = 'in.latt'
        with open(input_file, 'w') as f:
            f.write(input_template.format(ff_settings='\n'.join(ff_settings)))
        return input_file

    def _sanity_check(self, structure):
        """
        Check if the structure is valid for this calculation.
        """
        return True

    def _parse(self):
        """
        Parse the relaxed a, b, c lattice parameters from lattice.txt.
        """
        a, b, c = np.loadtxt('lattice.txt')
        return a, b, c
class TimeBenchmarker(LMPStaticCalculator):
    """
    Time benchmark calculator.
    """

    def __init__(self, ff_settings):
        """
        Args:
            ff_settings (list/Potential): Force field settings for the
                LAMMPS calculation. When given a Potential object, its
                write_param method is applied to obtain the settings.
        """
        self.ff_settings = ff_settings

    def _setup(self):
        # Load the bundled timing-benchmark input template.
        template_path = os.path.join(os.path.dirname(__file__),
                                     'templates', 'time_benchmark',
                                     'in.time')
        with open(template_path, 'r') as f:
            input_template = f.read()

        if isinstance(self.ff_settings, Potential):
            ff_settings = self.ff_settings.write_param()
        else:
            ff_settings = self.ff_settings

        # Render the force-field section and write the input file.
        input_file = 'in.time'
        with open(input_file, 'w') as f:
            f.write(input_template.format(ff_settings='\n'.join(ff_settings)))
        return input_file

    def _sanity_check(self, structure):
        """
        Check if the structure is valid for this calculation.
        """
        return True

    def _parse(self):
        """
        Parse results from dump file.
        """
        # Timing is reported via LAMMPS logs; nothing to parse here.
        return 0
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django_fsm
class Migration(migrations.Migration):
    """Initial schema for the animals app: Animal, Breed, Breeder,
    MilkProduction, PregnancyCheck and Service models, plus the
    self-referential Animal relations (sire/dam) added afterwards.

    Auto-generated migration — do not hand-edit the operations.

    NOTE(review): ``max_length`` on the IntegerField weight/gestation
    columns looks like a copy-paste from CharField and is presumably
    ignored by Django — confirm intent.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Animal',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
                ('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
                ('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
                ('state', django_fsm.FSMField(default=b'open', max_length=50)),
                ('animal_id', models.CharField(max_length=30, blank=True)),
                ('alt_id', models.CharField(max_length=30, blank=True)),
                ('electronic_id', models.CharField(max_length=30, blank=True)),
                ('ear_tag', models.CharField(max_length=30)),
                ('name', models.CharField(max_length=30)),
                ('color', models.CharField(blank=True, max_length=20, choices=[(b'yellow', 'Yellow')])),
                ('gender', models.CharField(max_length=20, choices=[(b'bull', 'Bull'), (b'cow', 'Cow')])),
                ('birth_date', models.DateField(null=True, blank=True)),
                ('birth_weight', models.IntegerField(max_length=4, null=True, blank=True)),
                ('weaning_date', models.DateField(null=True, blank=True)),
                ('weaning_weight', models.IntegerField(max_length=4, null=True, blank=True)),
                ('yearling_date', models.DateField(null=True, blank=True)),
                ('yearling_weight', models.IntegerField(max_length=4, null=True, blank=True)),
                ('code', models.CharField(max_length=10, blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Breed',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
                ('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
                ('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
                ('name', models.CharField(max_length=30)),
                ('gestation_period', models.IntegerField(max_length=4)),
                ('created_by', models.ForeignKey(related_name='animals_breed_creations', to=settings.AUTH_USER_MODEL, help_text=b'The user which originally created this item')),
                ('modified_by', models.ForeignKey(related_name='animals_breed_modifications', to=settings.AUTH_USER_MODEL, help_text=b'The user which last modified this item')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Breeder',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
                ('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
                ('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
                ('name', models.CharField(max_length=30)),
                ('created_by', models.ForeignKey(related_name='animals_breeder_creations', to=settings.AUTH_USER_MODEL, help_text=b'The user which originally created this item')),
                ('modified_by', models.ForeignKey(related_name='animals_breeder_modifications', to=settings.AUTH_USER_MODEL, help_text=b'The user which last modified this item')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='MilkProduction',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
                ('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
                ('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
                ('time', models.CharField(max_length=10, choices=[(b'morning', 'Morning'), (b'evening', 'Evening')])),
                ('amount', models.DecimalField(max_digits=5, decimal_places=2)),
                ('butterfat_ratio', models.DecimalField(max_digits=5, decimal_places=3)),
                ('animal', models.ForeignKey(related_name='milkproduction', to='animals.Animal')),
                ('created_by', models.ForeignKey(related_name='animals_milkproduction_creations', to=settings.AUTH_USER_MODEL, help_text=b'The user which originally created this item')),
                ('modified_by', models.ForeignKey(related_name='animals_milkproduction_modifications', to=settings.AUTH_USER_MODEL, help_text=b'The user which last modified this item')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='PregnancyCheck',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
                ('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
                ('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
                ('result', models.CharField(max_length=20, choices=[(b'pregnant', 'Pregnant'), (b'open', 'Open')])),
                ('check_method', models.CharField(max_length=20, choices=[(b'palpation', 'Palpation'), (b'ultrasound', 'Ultrasound'), (b'observation', 'Observation'), (b'blood', 'Blood')])),
                ('date', models.DateField()),
                ('animal', models.ForeignKey(related_name='pregnancy_checks', to='animals.Animal')),
                ('created_by', models.ForeignKey(related_name='animals_pregnancycheck_creations', to=settings.AUTH_USER_MODEL, help_text=b'The user which originally created this item')),
                ('modified_by', models.ForeignKey(related_name='animals_pregnancycheck_modifications', to=settings.AUTH_USER_MODEL, help_text=b'The user which last modified this item')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Service',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_active', models.BooleanField(default=True, help_text=b'Whether this item is active, use this instead of deleting')),
                ('created_on', models.DateTimeField(help_text=b'When this item was originally created', auto_now_add=True)),
                ('modified_on', models.DateTimeField(help_text=b'When this item was last modified', auto_now=True)),
                ('method', models.CharField(default=b'artificial_insemination', max_length=30, choices=[(b'artificial_insemination', 'Artificial Insemination'), (b'natural_service', 'Natural Service')])),
                ('date', models.DateField()),
                ('notes', models.CharField(max_length=200, blank=True)),
                ('animal', models.ForeignKey(related_name='animal_services', to='animals.Animal')),
                ('created_by', models.ForeignKey(related_name='animals_service_creations', to=settings.AUTH_USER_MODEL, help_text=b'The user which originally created this item')),
                ('modified_by', models.ForeignKey(related_name='animals_service_modifications', to=settings.AUTH_USER_MODEL, help_text=b'The user which last modified this item')),
                ('sire', models.ForeignKey(related_name='sire_services', to='animals.Animal')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # Relations added after model creation (FKs to models defined above).
        migrations.AddField(
            model_name='pregnancycheck',
            name='service',
            field=models.ForeignKey(related_name='pregnancy_checks', blank=True, to='animals.Service', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='animal',
            name='breed',
            field=models.ForeignKey(blank=True, to='animals.Breed', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='animal',
            name='breeder',
            field=models.ForeignKey(related_name='sires', blank=True, to='animals.Breeder', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='animal',
            name='created_by',
            field=models.ForeignKey(related_name='animals_animal_creations', to=settings.AUTH_USER_MODEL, help_text=b'The user which originally created this item'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='animal',
            name='dam',
            field=models.ForeignKey(related_name='dam_offsprings', blank=True, to='animals.Animal', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='animal',
            name='modified_by',
            field=models.ForeignKey(related_name='animals_animal_modifications', to=settings.AUTH_USER_MODEL, help_text=b'The user which last modified this item'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='animal',
            name='sire',
            field=models.ForeignKey(related_name='sire_offsprings', blank=True, to='animals.Animal', null=True),
            preserve_default=True,
        ),
    ]
|
import htmls
from django import test
from model_bakery import baker
from devilry.devilry_cradmin import devilry_listbuilder
class TestItemValue(test.TestCase):
    """Tests for the permission-group-user ItemValue renderable."""

    def _render_itemvalue(self, **user_kwargs):
        # Build a PermissionGroupUser with the given user fields and parse
        # the rendered ItemValue HTML into an htmls selector.
        group_user = baker.make('devilry_account.PermissionGroupUser', **user_kwargs)
        rendered = devilry_listbuilder.permissiongroupuser.ItemValue(value=group_user).render()
        return htmls.S(rendered)

    def test_title_without_fullname(self):
        # With no fullname, the shortname is used as the title.
        selector = self._render_itemvalue(user__shortname='test@example.com',
                                          user__fullname='')
        self.assertEqual(
            'test@example.com',
            selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_title_with_fullname(self):
        selector = self._render_itemvalue(user__fullname='Test User',
                                          user__shortname='test@example.com')
        self.assertEqual(
            'Test User',
            selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized)

    def test_description_without_fullname(self):
        # No fullname means the description element is omitted entirely.
        selector = self._render_itemvalue(user__shortname='test@example.com',
                                          user__fullname='')
        self.assertFalse(
            selector.exists('.cradmin-legacy-listbuilder-itemvalue-titledescription-description'))

    def test_description_with_fullname(self):
        # When the fullname is the title, the shortname moves to the description.
        selector = self._render_itemvalue(user__fullname='Test User',
                                          user__shortname='test@example.com')
        self.assertEqual(
            'test@example.com',
            selector.one('.cradmin-legacy-listbuilder-itemvalue-titledescription-description').alltext_normalized)
|
__author__ = 'sandlbn'
from django.conf.urls import url
# NOTE(review): implicit relative import (Python 2 / old Django style); on
# Python 3 this would need to be "from .views import ..." -- confirm target.
from views import CalendarJsonListView, CalendarView
# URL routes for the calendar app.
urlpatterns = [
    # JSON feed of calendar events, consumed by the calendar widget.
    url(
        r'^json/$',
        CalendarJsonListView.as_view(),
        name='calendar_json'
    ),
    # Main calendar page.
    url(
        r'^$',
        CalendarView.as_view(),
        name='calendar'
    ),
]
|
"""Tests of the pnacl driver.
This tests that @file (response files) are parsed as a command shell
would parse them (stripping quotes when necessary, etc.)
"""
import driver_tools
import os
import tempfile
import unittest
class TestExpandResponseFile(unittest.TestCase):
    """Exercise driver_tools' @response-file detection and expansion."""

    def setUp(self):
        self.tempfiles = []

    def getTemp(self):
        # Set delete=False, so that we can close the files and
        # re-open them. Windows sometimes does not allow you to
        # re-open an already opened temp file.
        tmp = tempfile.NamedTemporaryFile(delete=False)
        self.tempfiles.append(tmp)
        return tmp

    def tearDown(self):
        for tmp in self.tempfiles:
            if not tmp.closed:
                tmp.close()
            os.remove(tmp.name)

    def test_ShouldExpandCommandFile(self):
        """Test that response files are detected in commandline parser. """
        # NOTE: This is currently not just syntactic. We currently require
        # that the file exist so we use a tempfile.
        tmp = self.getTemp()
        should_expand = driver_tools.ShouldExpandCommandFile
        self.assertTrue(should_expand('@' + tmp.name))
        self.assertTrue(should_expand('@' + os.path.abspath(tmp.name)))
        # Test that no space can be between @ and file.
        self.assertFalse(should_expand('@ ' + tmp.name))
        self.assertFalse(should_expand(tmp.name))
        # Testing that it's not just syntactic (file must exist).
        self.assertFalse(should_expand('@some_truly_non_existent_file'))

    def CheckResponseFileWithQuotedFile(self, file_to_quote):
        """Test using a response file with quotes around the filename.

        We make sure that the quotes are stripped so that we do not
        attempt to open a file with quotes in its name.
        """
        tmp = self.getTemp()
        tmp.write('-E "%s" -I.. -o out.o\n' % file_to_quote)
        # Close to flush and ensure file is reopenable on windows.
        tmp.close()
        pre_argv = ['early_arg.c', '@' + tmp.name, 'later_arg.c']
        expanded = driver_tools.DoExpandCommandFile(pre_argv, 1)
        self.assertEqual(
            ['early_arg.c',
             '-E', file_to_quote, '-I..', '-o', 'out.o',
             'later_arg.c'],
            expanded)

    def test_FileWithQuotesWinBackSlash(self):
        self.CheckResponseFileWithQuotedFile('C:\\tmp\\myfile.c')

    def test_FileWithQuotesWinFwdSlash(self):
        self.CheckResponseFileWithQuotedFile('C:/tmp/myfile.c')

    def test_FileWithQuotesPosix(self):
        self.CheckResponseFileWithQuotedFile('/tmp/myfile.c')

    def test_FileWithQuotesWithSpace(self):
        self.CheckResponseFileWithQuotedFile('file with space.c')

    def test_EmptyFile(self):
        # An empty response file contributes no arguments at all.
        tmp = self.getTemp()
        tmp.close()
        expanded = driver_tools.DoExpandCommandFile(
            ['early_arg.c', '@' + tmp.name, '-S'], 1)
        self.assertEqual(['early_arg.c', '-S'], expanded)

    def test_MultiLineNoContinuationChar(self):
        # Response files can span multiple lines and do not
        # require a line continuation char like '\' or '^'.
        tmp = self.getTemp()
        tmp.write('f.c\n -I.. \n -c\n-o f.o ')
        tmp.close()
        expanded = driver_tools.DoExpandCommandFile(['@' + tmp.name], 0)
        self.assertEqual(['f.c', '-I..', '-c', '-o', 'f.o'], expanded)
# TODO(jvoung): Test commandlines with multiple response files
# and recursive response files. This requires refactoring
# the argument parsing to make it more testable (have a
# version that does not modify the global env).
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
from __future__ import print_function
from collections import namedtuple
from itertools import islice
import types
import os
import re
import argparse
# Command-line interface: scan a package for functions whose docstring
# "Parameters" section disagrees with the actual signature.
parser = argparse.ArgumentParser(description='Program description.')
parser.add_argument('-p', '--path', metavar='PATH', type=str, required=False,
                    default=None,
                    # Fixed typo: "wills be" -> "will be".
                    help='full path relative to which paths will be reported',
                    action='store')
parser.add_argument('-m', '--module', metavar='MODULE', type=str, required=True,
                    help='name of package to import and examine', action='store')
parser.add_argument('-G', '--github_repo', metavar='REPO', type=str, required=False,
                    help='github project where the code lives, e.g. "pandas-dev/pandas"',
                    default=None, action='store')
args = parser.parse_args()
# Result record for one inspected function: where it lives, which signature
# names are undocumented, and which documented names are not in the signature.
Entry = namedtuple("Entry",
                   "func path lnum undoc_names missing_args nsig_names ndoc_names")
def entry_gen(root_ns, module_name):
    """Yield every documented function/method reachable from *root_ns*.

    Walks submodules whose names start with *module_name*, skipping
    anything already visited.
    """
    queue = [root_ns]
    seen = set()
    while queue:
        ns = queue.pop()
        for attr_name in dir(ns):
            cand = getattr(ns, attr_name)
            if isinstance(cand, types.ModuleType):
                # Only descend into unvisited submodules of the target package.
                if (cand.__name__ not in seen
                        and cand.__name__.startswith(module_name)):
                    seen.add(cand.__name__)
                    queue.insert(0, cand)
            elif (isinstance(cand, (types.MethodType, types.FunctionType))
                    and cand not in seen and cand.__doc__):
                seen.add(cand)
                yield cand
def cmp_docstring_sig(f):
    """Compare a function's signature with the names in its docstring.

    Returns an ``Entry`` with the signature names missing from the
    docstring (``undoc_names``) and the documented names missing from the
    signature (``missing_args``).
    """
    def build_loc(f):
        # Report the file path relative to args.path (strip the leading slash).
        path = f.__code__.co_filename.split(args.path, 1)[-1][1:]
        return dict(path=path, lnum=f.__code__.co_firstlineno)
    import inspect
    # inspect.getargspec was removed in Python 3.11; prefer getfullargspec.
    argspec_fn = getattr(inspect, "getfullargspec", None) or inspect.getargspec
    sig_names = set(argspec_fn(f).args)
    doc = f.__doc__.lower()
    # Keep only the text between the "Parameters" and "Returns" sections.
    # Raw strings: "\s" in a plain literal is an invalid escape (warns on 3.12+).
    doc = re.split(r"^\s*parameters\s*", doc, maxsplit=1, flags=re.M)[-1]
    doc = re.split(r"^\s*returns*", doc, maxsplit=1, flags=re.M)[0]
    # A documented parameter looks like an indented "name :" line.
    doc_names = {x.split(":")[0].strip() for x in doc.split("\n")
                 if re.match(r"\s+[\w_]+\s*:", x)}
    # Ignore names that are conventionally left undocumented.
    sig_names.discard("self")
    doc_names.discard("kwds")
    doc_names.discard("kwargs")
    doc_names.discard("args")
    loc = build_loc(f)  # hoisted: was computed twice per call
    return Entry(func=f, path=loc['path'], lnum=loc['lnum'],
                 undoc_names=sig_names.difference(doc_names),
                 missing_args=doc_names.difference(sig_names),
                 nsig_names=len(sig_names),
                 ndoc_names=len(doc_names))
def format_id(i):
    """Identity formatter for an item index (hook for future formatting)."""
    return i
def format_item_as_github_task_list(i, item, repo):
    """Render one entry as a GitHub task-list line with a source link."""
    link = "https://github.com/{repo}/blob/master/{file}#L{lnum}".format(
        repo=repo, file=item.path, lnum=item.lnum)
    line = ("- [ ] {id}) [{file}:{lnum} ({func_name}())]({link}) - "
            "__Missing__[{nmissing}/{total_args}]: {undoc_names}").format(
        id=i,
        file=item.path,
        lnum=item.lnum,
        func_name=item.func.__name__,
        link=link,
        nmissing=len(item.undoc_names),
        total_args=item.nsig_names,
        undoc_names=list(item.undoc_names))
    if item.missing_args:
        # Documented names that do not appear in the signature.
        line += " __Extra__(?): {missing_args}".format(
            missing_args=list(item.missing_args))
    return line
def format_item_as_plain(i, item):
    """Render one entry as a plain-text report line (the index is unused)."""
    line = "+{lnum} {path} {func_name}(): Missing[{nmissing}/{total_args}]={undoc_names}".format(
        path=item.path,
        lnum=item.lnum,
        func_name=item.func.__name__,
        nmissing=len(item.undoc_names),
        total_args=item.nsig_names,
        undoc_names=list(item.undoc_names))
    if item.missing_args:
        # Documented names that do not appear in the signature.
        line += " Extra(?)={missing_args}".format(missing_args=list(item.missing_args))
    return line
def main():
    """Import the target module, scan it, and print the report."""
    module = __import__(args.module)
    if not args.path:
        args.path = os.path.dirname(module.__file__)
    entries = [cmp_docstring_sig(func)
               for func in entry_gen(module, module.__name__)]
    # only include if there are missing arguments in the docstring (fewer false positives)
    # and there are at least some documented arguments
    entries = [e for e in entries
               if e.undoc_names and len(e.undoc_names) != e.nsig_names]
    entries.sort(key=lambda e: e.path)
    if args.github_repo:
        for i, item in enumerate(entries, 1):
            print(format_item_as_github_task_list(i, item, args.github_repo))
    else:
        for i, item in enumerate(entries, 1):
            print(format_item_as_plain(i, item))
if __name__ == "__main__":
    import sys
    # main() returns None, so the process exits with status code 0.
    sys.exit(main())
|
'''
Find the contiguous subarray within an array (containing at least one number) which has the largest product.
For example, given the array [2,3,-2,4],
the contiguous subarray [2,3] has the largest product = 6.
'''
class Solution:
    # @param num, a list of integers
    # @return an integer
    def maxProduct(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Track both the largest and the smallest product of a subarray
        ending at the current element: a negative element can turn the
        smallest product into the new largest.
        """
        if not nums:
            return 0
        best = hi = lo = nums[0]
        for x in nums[1:]:
            # The extremes ending at x are among: x alone, or x extending
            # the previous max/min product (sign may flip either way).
            candidates = (x, x * hi, x * lo)
            hi, lo = max(candidates), min(candidates)
            best = max(best, hi)
        return best
if __name__ == '__main__':
    solution = Solution()
    # Use the print() function form: the bare "print expr" statement is a
    # SyntaxError on Python 3, while print(expr) works on both 2 and 3.
    print(solution.maxProduct([2,3,-2,4]))
|
# Shared log file that accumulates the output of every command run below.
logfile = 'tmp_output.log' # store all output of all operating system commands
f = open(logfile, 'w'); f.close() # touch logfile so it can be appended
import subprocess, sys
def system(cmd):
    """Run system command cmd.

    Echoes the command, captures its combined stdout/stderr, appends the
    output to the global ``logfile``, and aborts the whole script with
    exit code 1 if the command fails.
    """
    print cmd
    try:
        output = subprocess.check_output(cmd, shell=True,
                                         stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print 'Command\n  %s\nfailed.' % cmd
        print 'Return code:', e.returncode
        print e.output
        sys.exit(1)
    print output
    # Append the captured output to the shared log file.
    f = open(logfile, 'a'); f.write(output); f.close()
system('sudo apt-get update --fix-missing')
# Abort unless the system Python is 2.7, and create a srclib/ directory
# for packages that must be built from source.
cmd = """
pyversion=`python -c 'import sys; print sys.version[:3]'`
if [ $pyversion != '2.7' ]; then echo "Python v${pyversion} cannot be used with DocOnce"; exit 1; fi
if [ ! -d srclib ]; then mkdir srclib; fi
"""
system(cmd)
# Version control systems and basic Python tooling.
system('sudo apt-get -y install mercurial')
system('sudo apt-get -y install git')
system('sudo apt-get -y install subversion')
system('sudo apt-get -y install python-pip')
system('sudo apt-get -y install idle')
system('sudo apt-get -y install python-dev')
system('sudo apt-get -y install python-pdftools')
# Python packages used by DocOnce and its output formats (IPython
# notebook support, templating, preprocessing, Sphinx and its themes).
system('sudo pip install ipython --upgrade')
system('sudo pip install tornado --upgrade')
system('sudo pip install pyzmq --upgrade')
system('sudo pip install traitlets --upgrade')
system('sudo pip install pickleshare --upgrade')
system('sudo pip install jsonschema')
system('sudo pip install future')
system('sudo pip install mako --upgrade')
system('sudo pip install -e git+https://github.com/hplgit/preprocess#egg=preprocess --upgrade')
system('sudo pip install python-Levenshtein')
system('sudo pip install lxml')
system('sudo pip install -e hg+https://bitbucket.org/logg/publish#egg=publish --upgrade')
system('sudo pip install sphinx')
system('sudo pip install alabaster --upgrade')
system('sudo pip install sphinx_rtd_theme --upgrade')
system('sudo pip install -e hg+https://bitbucket.org/ecollins/cloud_sptheme#egg=cloud_sptheme --upgrade')
system('sudo pip install -e git+https://github.com/ryan-roemer/sphinx-bootstrap-theme#egg=sphinx-bootstrap-theme --upgrade')
system('sudo pip install -e hg+https://bitbucket.org/miiton/sphinxjp.themes.solarized#egg=sphinxjp.themes.solarized --upgrade')
system('sudo pip install -e git+https://github.com/shkumagai/sphinxjp.themes.impressjs#egg=sphinxjp.themes.impressjs --upgrade')
system('sudo pip install -e git+https://github.com/kriskda/sphinx-sagecell#egg=sphinx-sagecell --upgrade')
system('sudo pip install sphinxcontrib-paverutils')
system('sudo pip install paver')
system('sudo pip install cogapp')
system('sudo pip install -e git+https://bitbucket.org/hplbit/pygments-ipython-console#egg=pygments-ipython-console')
system('sudo pip install -e git+https://github.com/hplgit/pygments-doconce#egg=pygments-doconce')
system('sudo pip install beautifulsoup4')
system('sudo pip install html5lib')
# Build and install ptex2tex from source (LaTeX code-block environments),
# including its extra style files.
cmd = """
cd srclib
svn checkout http://ptex2tex.googlecode.com/svn/trunk/ ptex2tex
cd ptex2tex
sudo python setup.py install
cd latex
sh cp2texmf.sh
cd ../../..
"""
system(cmd)
# LaTeX toolchain.
system('sudo apt-get -y install texinfo')
system('sudo apt-get -y install texlive')
system('sudo apt-get -y install texlive-extra-utils')
system('sudo apt-get -y install texlive-latex-extra')
system('sudo apt-get -y install texlive-latex-recommended')
system('sudo apt-get -y install texlive-math-extra')
system('sudo apt-get -y install texlive-font-utils')
system('sudo apt-get -y install texlive-humanities')
system('sudo apt-get -y install latexdiff')
system('sudo apt-get -y install auctex')
# Image and video tools used for figures and movies in documents.
system('sudo apt-get -y install imagemagick')
system('sudo apt-get -y install netpbm')
system('sudo apt-get -y install mjpegtools')
system('sudo apt-get -y install pdftk')
system('sudo apt-get -y install giftrans')
system('sudo apt-get -y install gv')
system('sudo apt-get -y install evince')
system('sudo apt-get -y install smpeg-plaympeg')
system('sudo apt-get -y install mplayer')
system('sudo apt-get -y install totem')
system('sudo apt-get -y install libav-tools')
# Spelling, document conversion, and diff/merge utilities.
system('sudo apt-get -y install ispell')
system('sudo apt-get -y install pandoc')
system('sudo apt-get -y install libreoffice')
system('sudo apt-get -y install unoconv')
system('sudo apt-get -y install libreoffice-dmaths')
system('sudo apt-get -y install curl')
system('sudo apt-get -y install a2ps')
system('sudo apt-get -y install wdiff')
system('sudo apt-get -y install meld')
system('sudo apt-get -y install diffpdf')
system('sudo apt-get -y install kdiff3')
system('sudo apt-get -y install diffuse')
print 'Everything is successfully installed!'
|
"""Command-line helper for setuptools and PyPI."""
import os
import os.path as op
import re
import click
import requests
__version__ = '0.1.0'
VERSION_REGEX = re.compile(r"__version__ = '(\d+)\.(\d+)\.(\d+)'")
def lib_name():
    """Return the library name (the basename of the current directory)."""
    return os.path.basename(os.getcwd())
def lib_file_path():
    """Return the path to the file that presumably defines `__version__`."""
    name = lib_name()
    # Prefer the package layout (name/__init__.py); fall back to a
    # single-module layout (name.py).
    package_init = op.join(name, '__init__.py')
    return package_init if op.exists(package_init) else name + '.py'
def parse_version(contents):
    """Parse the (major, minor, build) version of the library.

    Returns a tuple of three ints. Raises ValueError if no
    ``__version__ = 'x.y.z'`` assignment is found in *contents*.
    """
    m = re.search(VERSION_REGEX, contents)
    if m is None:
        # Fail loudly instead of raising an opaque AttributeError below.
        raise ValueError("could not find __version__ = 'x.y.z' in contents")
    # Return a tuple rather than a lazy map object: under Python 3 a map
    # is a single-use iterator and cannot be indexed or re-iterated.
    return tuple(int(g) for g in m.groups())
def _pipversion():
    """Return the version string of this library as published on PyPI."""
    name = lib_name()
    page = requests.get('https://pypi.python.org/pypi/%s' % name).text
    # The project page contains "name x.y.z" for the latest release.
    match = re.search(r'%s (\d+\.\d+\.\d+)' % name, page)
    return match.group(1)
def _bump(increment=1):
    """Add *increment* to the build number in the version file.

    Returns the new (major, minor, build) tuple.
    """
    path = lib_file_path()
    with open(path, 'r') as f:
        contents = f.read()
    major, minor, build = parse_version(contents)
    # Increment the build number.
    build += increment
    new_version = "__version__ = '%d.%d.%d'" % (major, minor, build)
    contents = re.sub(VERSION_REGEX, new_version, contents)
    # Compute the new contents *before* opening the file for writing:
    # the original opened with 'w' (truncating) first, so any error while
    # substituting would have left the version file empty.
    with open(path, 'w') as f:
        f.write(contents)
    return (major, minor, build)
# Root command group: every subcommand below attaches via @cli.command().
@click.group()
@click.version_option(version=__version__)
@click.help_option('-h', '--help')
def cli():
    """Command-line helper for setuptools and PyPI"""
    pass
@cli.command()
def bump():
    """Bump the build number in the version.

    `__version__ = 'x.y.z'` => `__version__ = 'x.y.(z+1)'`.
    """
    new_version = _bump(+1)
    click.echo("Bumped version to %s." % str(new_version))
    # Show the resulting diff and offer to commit it.
    os.system('git diff')
    if click.confirm('Commit `Bump version`?'):
        os.system('git commit -am "Bump version"')
@cli.command()
def unbump():
    """Like bump, but in the other direction."""
    new_version = _bump(-1)
    click.echo("Unbumped version to %s." % str(new_version))
@cli.command()
def version():
    """Display the library version (local and published)."""
    with open(lib_file_path(), 'r') as f:
        major, minor, build = parse_version(f.read())
    click.echo("%s, version %d.%d.%d (%s on PyPI)" % (lib_name(),
                                                      major, minor, build,
                                                      _pipversion(),
                                                      ))
@cli.command()
def register():
    """Register the new project."""
    # NOTE(review): `setup.py register` is deprecated on PyPI in favor of
    # twine -- confirm the target index still supports it.
    os.system('python setup.py register')
@cli.command()
def build():
    """Make builds."""
    # Produces both an sdist and a wheel under dist/.
    os.system('python setup.py sdist bdist_wheel')
@cli.command()
def clear():
    """Delete the build and dist subdirectories."""
    # Not portable to Windows: relies on the `rm` shell command.
    os.system('rm -rf dist build')
    click.echo("Deleted build/ and dist/.")
@cli.command()
@click.pass_context
def release(ctx):
    """Upload the build."""
    # Rebuild from scratch, then upload every artifact in dist/ with twine.
    ctx.invoke(clear)
    ctx.invoke(build)
    os.system('twine upload dist/*')
# Fixed: the decorator was written as bare `@cli.command` (no parentheses),
# inconsistent with every other command here and unsupported on click < 8,
# where it registers the function object instead of creating a Command.
@cli.command()
def kickstart():
    """Kickstart a new project.

    Generate template files for a new Python package.
    """
    # TODO: generate setup.py, package directory, and test skeleton.
    pass
if __name__ == '__main__':
    # Dispatch to the click command group.
    cli()
|
from PySide import QtGui
from jukedj import models
from jukeboxmaya.menu import MenuManager
from jukeboxcore.djadapter import FILETYPES
from jukeboxcore.action import ActionUnit, ActionCollection
from jukeboxcore.release import ReleaseActions
from jukeboxcore.gui.widgets.releasewin import ReleaseWin
from jukeboxmaya.plugins import JB_MayaStandaloneGuiPlugin, MayaPluginManager
from jukeboxmaya.mayapylauncher import mayapy_launcher
from jukeboxmaya.commands import open_scene, save_scene, import_all_references, update_scenenode
from jukeboxmaya.gui.main import maya_main_window
class OptionWidget(QtGui.QWidget):
    """A option widget for the release window.

    The user can specify if he wants to import all references.
    """

    def __init__(self, parent=None, f=0):
        """Initialize the widget and build its UI.

        :param parent: the parent widget
        :type parent: :class:`QtGui.QWidget` | None
        :param f: window flags
        :type f: int
        :raises: None
        """
        super(OptionWidget, self).__init__(parent, f)
        self.setup_ui()

    def setup_ui(self, ):
        """Create all ui elements and layouts

        :returns: None
        :rtype: None
        :raises: None
        """
        layout = QtGui.QVBoxLayout(self)
        checkbox = QtGui.QCheckBox("Import references")
        layout.addWidget(checkbox)
        self.main_vbox = layout
        self.import_all_references_cb = checkbox

    def import_references(self, ):
        """Return whether the user specified, that he wants to import references

        :returns: True, if references should be imported
        :rtype: bool
        :raises: None
        """
        return self.import_all_references_cb.isChecked()
class SceneReleaseActions(ReleaseActions):
    """Release actions for releasing a scene

    Uses the :class:`OptionWidget` for user options.
    """

    def __init__(self, ):
        """Initialize the release actions with an :class:`OptionWidget`.

        :raises: None
        """
        super(SceneReleaseActions, self).__init__()
        self._option_widget = OptionWidget()

    def get_checks(self, ):
        """Get the sanity check actions for a release depending on the selected options

        :returns: the check actions (none for scene releases)
        :rtype: :class:`jukeboxcore.action.ActionCollection`
        :raises: None
        """
        # No sanity checks are performed for scene releases.
        return ActionCollection([])

    def get_cleanups(self, ):
        """Get the cleanup actions for a release depending on the selected options

        :returns: the cleanup actions
        :rtype: :class:`jukeboxcore.action.ActionCollection`
        :raises: None
        """
        # Open first; everything else depends on a successfully opened scene.
        open_unit = ActionUnit(name="Open",
                               description="Open the maya scene.",
                               actionfunc=open_scene)
        cleanups = [open_unit]
        if self._option_widget.import_references():
            cleanups.append(ActionUnit(name="Import references",
                                       description="Import all references in the scene.",
                                       actionfunc=import_all_references,
                                       depsuccess=[open_unit]))
        update_unit = ActionUnit(name="Update Scene Node",
                                 description="Change the id from the jbscene node from work to releasefile.",
                                 actionfunc=update_scenenode,
                                 depsuccess=[open_unit])
        cleanups.append(update_unit)
        # Only save once the scene node has been rewritten.
        cleanups.append(ActionUnit(name="Save",
                                   description="Save the scene.",
                                   actionfunc=save_scene,
                                   depsuccess=[update_unit]))
        return ActionCollection(cleanups)

    def option_widget(self, ):
        """Return the option widget of this instance

        :returns: the option widget
        :rtype: :class:`OptionWidget`
        :raises: None
        """
        return self._option_widget
class MayaSceneRelease(JB_MayaStandaloneGuiPlugin):
    """A plugin that can release a maya scene

    This can be used as a standalone plugin.
    """
    required = ('MayaGenesis',)
    author = "David Zuber"
    copyright = "2014"
    version = "0.1"
    description = "Release Maya scenes"
    def init(self, ):
        """Initialize the plugin. Do nothing.

        This function gets called when the plugin is loaded by the plugin manager.

        :returns: None
        :rtype: None
        :raises: None
        """
        pass
    def uninit(self, ):
        """Uninitialize the plugin. Do nothing

        This function gets called when the plugin is unloaded by the plugin manager.

        :returns: None
        :rtype: None
        :raises: None
        """
        pass
    def init_ui(self, ):
        """Create the menu Release under Jukebox menu.

        :returns: None
        :rtype: None
        :raises: None
        """
        self.mm = MenuManager.get()
        p = self.mm.menus['Jukebox']
        # Clicking the menu entry launches the release window externally.
        self.menu = self.mm.create_menu("Release", p, command=self.run_external)
    def uninit_ui(self, ):
        """Delete the Release menu

        :returns: None
        :rtype: None
        :raises: None
        """
        self.mm.delete_menu(self.menu)
    def run_external(self, *args, **kwargs):
        """Run the Releasewin in another process

        :returns: None
        :rtype: None
        :raises: None
        """
        # Launch a standalone mayapy process running this plugin; do not block.
        mayapy_launcher(["launch", "MayaSceneRelease"], wait=False)
    def run(self, ):
        """Show the release window for maya main scenes.

        Preselects the last opened work file (from the MayaGenesis config),
        if it exists and is a work file.

        :returns: None
        :rtype: None
        :raises: None
        """
        ra = SceneReleaseActions()
        mayawin = maya_main_window()
        self.rw = ReleaseWin(FILETYPES["mayamainscene"], parent=mayawin)
        self.rw.set_release_actions(ra)
        pm = MayaPluginManager.get()
        genesis = pm.get_plugin("MayaGenesis")
        c = genesis.get_config()
        try:
            # 'lastfile' stores the pk of the scene last opened via MayaGenesis.
            f = models.TaskFile.objects.get(pk=c['lastfile'])
        except models.TaskFile.DoesNotExist:
            pass
        else:
            if f.releasetype == 'work':
                self.rw.browser.set_selection(f)
        self.rw.show()
|
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class SimpleTagTests(SimpleTestCase):
    """Tests for simple tags registered under custom/renamed names."""
    libraries = {'custom': 'template_tests.templatetags.custom'}

    @setup({'simpletag-renamed01': '{% load custom %}{% minusone 7 %}'})
    def test_simpletag_renamed01(self):
        # Tag registered under its function name.
        self.assertEqual(self.engine.render_to_string('simpletag-renamed01'), '6')

    @setup({'simpletag-renamed02': '{% load custom %}{% minustwo 7 %}'})
    def test_simpletag_renamed02(self):
        # Tag registered under an explicit name.
        self.assertEqual(self.engine.render_to_string('simpletag-renamed02'), '5')

    @setup({'simpletag-renamed03': '{% load custom %}{% minustwo_overridden_name 7 %}'})
    def test_simpletag_renamed03(self):
        # The original function name is not registered when overridden.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('simpletag-renamed03')
|
from __future__ import print_function
import os
import sqlite3
from django.core.management.base import BaseCommand, CommandError
from django.db import connection, transaction
from trees.models import FVSAggregate
def get_gyb_rows(db_path, fields, arraysize=1000):
    '''
    Fetches GYB data from the sqlite db

    yields rows as a python dict

    uses fetchmany to keep memory usage down
    '''
    sql = "SELECT {} FROM trees_fvsaggregate;".format(
        ', '.join(['"{}"'.format(x) for x in fields]))

    conn = sqlite3.connect(db_path)
    conn.row_factory = sqlite3.Row
    try:
        cursor = conn.cursor()
        cursor.execute(sql)

        i = 0
        while True:
            results = cursor.fetchmany(arraysize)
            if not results:
                break
            for result in results:
                i += 1
                res = dict(result)

                # Special case, adjust semantics of offset
                # Offset from GYB is integer years
                # Offset in FP is integer in set(0,1,2,3,4) where offset=1 is 5 years, etc
                # Floor division: plain "/" yields a float under Python 3
                # (e.g. 1.0 or 2.4) where an integer offset is expected.
                res['offset'] = int(res['offset']) // 5
                yield res

        print("inserted {} rows...".format(i))
    finally:
        # Always release the sqlite connection, even if the consumer
        # abandons the generator early.
        conn.close()
class Command(BaseCommand):
    # Bulk-copies rows from a GYB sqlite database into the postgres
    # trees_fvsaggregate table, mapping column names between the schemas.
    help = 'Imports GYB database into the fvsaggregate table'
    # NOTE(review): positional `args` and args-based handle() are the
    # pre-Django-1.8 command API -- confirm target Django version.
    args = '[db_path]'
    def handle(self, *args, **options):
        """Validate the sqlite source, then stream its rows into postgres."""
        try:
            db_path = args[0]
            assert os.path.exists(db_path)
        except (AssertionError, IndexError):
            raise CommandError("Specify path for gyb sqlite database (data.db)")
        # confirm that database contains a viable trees_fvsaggregate table
        gybconn = sqlite3.connect(db_path)
        gybcursor = gybconn.cursor()
        gybcursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
        tables = [x[0] for x in gybcursor.fetchall()]
        if 'trees_fvsaggregate' not in tables:
            raise CommandError("trees_fvsaggregate table not found in {}".format(db_path))
        # Confirm that gyb's schema is sufficient to provide all FP schema fields
        gybcursor.execute("PRAGMA table_info(trees_fvsaggregate);")
        # PRAGMA table_info rows: (cid, name, type, ...); index 1 is the name.
        gyb_fieldnames = [x[1] for x in gybcursor.fetchall()]
        pgcursor = connection.cursor()
        try:
            # LIMIT 0 fetches no rows but populates cursor.description.
            pgcursor.execute("SELECT * FROM trees_fvsaggregate LIMIT 0;")
            # don't track id, autosequenced
            pg_fields = [desc for desc in pgcursor.description if desc[0] != 'id']
        finally:
            pgcursor.close()
        pg_fieldnames = [x[0] for x in pg_fields]
        # postgres schema is only allowed to deviate from gyb as follows...
        # special cases described in match_case function below
        assert set(pg_fieldnames) - set([x.lower() for x in gyb_fieldnames]) == set(['pp_btl', 'lp_btl'])
        pg_insert_cols = ", ".join(['"{}"'.format(f) for f in pg_fieldnames])
        def match_case(pgname):
            """return case-sensitive key name from sqlite given a postgres field name"""
            for gybfield in gyb_fieldnames:
                if pgname == gybfield.lower():
                    return gybfield
            # not present, special cases
            # If forestplanner schema calls for pp_btl or lp_btl
            # substitute PINEBTL
            if pgname == 'pp_btl':
                return "PINEBTL"
            if pgname == 'lp_btl':
                return "PINEBTL"
            raise Exception("Can't find {} in sqlite fields".format(pgname))
        # Named-parameter placeholders keyed by the sqlite column names.
        gyb_values_template = ", ".join(
            ["%({})s".format(match_case(x[0])) for x in pg_fields]
        )
        query_template = """INSERT INTO trees_fvsaggregate ({})
                          VALUES ({});""".format(pg_insert_cols, gyb_values_template)
        pgcursor = connection.cursor()
        try:
            # NOTE(review): transaction.commit_on_success was removed in
            # Django 1.8; newer Django requires transaction.atomic() here.
            with transaction.commit_on_success():
                pgcursor.executemany(query_template, get_gyb_rows(db_path, gyb_fieldnames))
        finally:
            pgcursor.close()
        print("Recaching valid_condids.")
        FVSAggregate.recache()
|
"""Admin extensions for django-reversion."""
from __future__ import unicode_literals
from contextlib import contextmanager
from django.db import models, transaction, connection
from django.conf.urls import url
from django.contrib import admin
from django.contrib.admin import options
from django.contrib.admin.utils import unquote, quote
try:
from django.contrib.contenttypes.admin import GenericInlineModelAdmin
from django.contrib.contenttypes.fields import GenericRelation
except ImportError: # Django < 1.9 pragma: no cover
from django.contrib.contenttypes.generic import GenericInlineModelAdmin, GenericRelation
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied, ImproperlyConfigured
from django.shortcuts import get_object_or_404, render
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from django.utils.formats import localize
from reversion.models import Version
from reversion.revisions import default_revision_manager
class RollBackRevisionView(Exception):
    """Internal control-flow exception.

    Raised to abort the surrounding database transaction after a revision
    form view has rendered, undoing the trial revert.
    """
    pass
class VersionAdmin(admin.ModelAdmin):
    """Abstract admin class for handling version controlled models."""
    object_history_template = "reversion/object_history.html"
    change_list_template = "reversion/change_list.html"
    # Template overrides; when None, a per-app/per-model candidate list is
    # computed instead (see _get_template_list).
    revision_form_template = None
    recover_list_template = None
    recover_form_template = None
    # The revision manager instance used to manage revisions.
    revision_manager = default_revision_manager
    # The serialization format to use when registering models with reversion.
    reversion_format = "json"
    # Whether to ignore duplicate revision data.
    ignore_duplicate_revisions = False
    # If True, then the default ordering of object_history and recover lists will be reversed.
    history_latest_first = False
# Revision helpers.
    @property
    def revision_context_manager(self):
        """The revision context manager for this VersionAdmin."""
        # Reaches into a private attribute: the revision manager exposes no
        # public accessor for its context manager.
        return self.revision_manager._revision_context_manager
def _get_template_list(self, template_name):
opts = self.model._meta
return (
"reversion/%s/%s/%s" % (opts.app_label, opts.object_name.lower(), template_name),
"reversion/%s/%s" % (opts.app_label, template_name),
"reversion/%s" % template_name,
)
def _order_version_queryset(self, queryset):
"""Applies the correct ordering to the given version queryset."""
if self.history_latest_first:
return queryset.order_by("-pk")
return queryset.order_by("pk")
@contextmanager
def _create_revision(self, request):
with transaction.atomic(), self.revision_context_manager.create_revision():
self.revision_context_manager.set_user(request.user)
self.revision_context_manager.set_ignore_duplicates(self.ignore_duplicate_revisions)
yield
# Messages.
def log_addition(self, request, object, change_message=None):
change_message = change_message or _("Initial version.")
self.revision_context_manager.set_comment(change_message)
try:
super(VersionAdmin, self).log_addition(request, object, change_message)
except TypeError: # Django < 1.9 pragma: no cover
super(VersionAdmin, self).log_addition(request, object)
    def log_change(self, request, object, message):
        """Log an object change, mirroring the message as the revision comment."""
        self.revision_context_manager.set_comment(message)
        super(VersionAdmin, self).log_change(request, object, message)
# Auto-registration.
def _autoregister(self, model, follow=None):
"""Registers a model with reversion, if required."""
if not self.revision_manager.is_registered(model):
follow = follow or []
# Use model_meta.concrete_model to catch proxy models
for parent_cls, field in model._meta.concrete_model._meta.parents.items():
follow.append(field.name)
self._autoregister(parent_cls)
self.revision_manager.register(model, follow=follow, format=self.reversion_format)
    def _introspect_inline_admin(self, inline):
        """Introspects the given inline admin.

        Returns a tuple of (inline_model, follow_field, fk_name); any element
        may be None when it cannot be determined.
        """
        inline_model = None
        follow_field = None
        fk_name = None
        if issubclass(inline, GenericInlineModelAdmin):
            inline_model = inline.model
            ct_field = inline.ct_field
            fk_name = inline.ct_fk_field
            # Find the GenericRelation on self.model that points back at the
            # inline model through the same ct/fk field pair.
            # NOTE(review): _meta.virtual_fields and field.rel.to were removed
            # in Django 2.0 -- confirm the supported Django range.
            for field in self.model._meta.virtual_fields:
                if isinstance(field, GenericRelation) and field.rel.to == inline_model and field.object_id_field_name == fk_name and field.content_type_field_name == ct_field:
                    follow_field = field.name
                    break
        elif issubclass(inline, options.InlineModelAdmin):
            inline_model = inline.model
            fk_name = inline.fk_name
            if not fk_name:
                # No explicit fk_name: use the first FK/O2O pointing at
                # self.model (or one of its parents).
                for field in inline_model._meta.fields:
                    if isinstance(field, (models.ForeignKey, models.OneToOneField)) and issubclass(self.model, field.rel.to):
                        fk_name = field.name
                        break
            if fk_name and not inline_model._meta.get_field(fk_name).rel.is_hidden():
                field = inline_model._meta.get_field(fk_name)
                try:
                    # >=django1.9
                    remote_field = field.remote_field
                except AttributeError:
                    remote_field = field.related
                # Follow the reverse accessor (e.g. "book_set") from the parent.
                accessor = remote_field.get_accessor_name()
                follow_field = accessor
        return inline_model, follow_field, fk_name
def __init__(self, *args, **kwargs):
"""Initializes the VersionAdmin"""
super(VersionAdmin, self).__init__(*args, **kwargs)
# Check that database transactions are supported.
if not connection.features.uses_savepoints: # pragma: no cover
raise ImproperlyConfigured("Cannot use VersionAdmin with a database that does not support savepoints.")
# Automatically register models if required.
if not self.revision_manager.is_registered(self.model):
inline_fields = []
for inline in self.inlines:
inline_model, follow_field, _ = self._introspect_inline_admin(inline)
if inline_model:
self._autoregister(inline_model)
if follow_field:
inline_fields.append(follow_field)
self._autoregister(self.model, inline_fields)
def get_urls(self):
"""Returns the additional urls used by the Reversion admin."""
urls = super(VersionAdmin, self).get_urls()
admin_site = self.admin_site
opts = self.model._meta
info = opts.app_label, opts.model_name,
reversion_urls = [
url("^recover/$", admin_site.admin_view(self.recoverlist_view), name='%s_%s_recoverlist' % info),
url("^recover/([^/]+)/$", admin_site.admin_view(self.recover_view), name='%s_%s_recover' % info),
url("^([^/]+)/history/([^/]+)/$", admin_site.admin_view(self.revision_view), name='%s_%s_revision' % info),]
return reversion_urls + urls
# Views.
def add_view(self, request, form_url='', extra_context=None):
with self._create_revision(request):
return super(VersionAdmin, self).add_view(request, form_url, extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
with self._create_revision(request):
return super(VersionAdmin, self).change_view(request, object_id, form_url, extra_context)
    def revisionform_view(self, request, version, template_name, extra_context=None):
        """Render a changeform pre-populated from *version*, inside a throwaway transaction.

        The revision is temporarily reverted so the stock changeform sees the
        historical data.  On a successful POST (a 302 redirect) the changes are
        kept; otherwise the rendered response is captured and the whole
        transaction - including the temporary revert - is rolled back by
        raising RollBackRevisionView.
        """
        try:
            with transaction.atomic():
                # Revert the database to the state stored in the revision.
                version.revision.revert(delete=True)
                # Run the normal changeform view against the reverted data.
                with self._create_revision(request):
                    response = self.changeform_view(request, version.object_id, request.path, extra_context)
                    # Decide whether to keep the changes.
                    if request.method == "POST" and response.status_code == 302:
                        # Successful save: record why this new revision was created.
                        self.revision_context_manager.set_comment(_("Reverted to previous version, saved on %(datetime)s") % {"datetime": localize(version.revision.date_created)})
                    else:
                        response.template_name = template_name # Set the template name to the correct template.
                        response.render() # Eagerly render the response, so it's using the latest version of the database.
                        raise RollBackRevisionView # Raise an exception to undo the transaction and the revision.
        except RollBackRevisionView:
            # Expected control flow: the transaction (and the temporary revert)
            # has been rolled back; the pre-rendered response is still usable.
            pass
        return response
def recover_view(self, request, version_id, extra_context=None):
"""Displays a form that can recover a deleted model."""
# The revisionform view will check for change permission (via changeform_view),
# but we also need to check for add permissions here.
if not self.has_add_permission(request): # pragma: no cover
raise PermissionDenied
# Render the recover view.
version = get_object_or_404(Version, pk=version_id)
context = {
"title": _("Recover %(name)s") % {"name": version.object_repr},
}
context.update(extra_context or {})
return self.revisionform_view(request, version, self.recover_form_template or self._get_template_list("recover_form.html"), context)
def revision_view(self, request, object_id, version_id, extra_context=None):
"""Displays the contents of the given revision."""
object_id = unquote(object_id) # Underscores in primary key get quoted to "_5F"
version = get_object_or_404(Version, pk=version_id, object_id=object_id)
context = {
"title": _("Revert %(name)s") % {"name": version.object_repr},
}
context.update(extra_context or {})
return self.revisionform_view(request, version, self.revision_form_template or self._get_template_list("revision_form.html"), context)
def changelist_view(self, request, extra_context=None):
"""Renders the change view."""
with self._create_revision(request):
context = {
"has_change_permission": self.has_change_permission(request),
}
context.update(extra_context or {})
return super(VersionAdmin, self).changelist_view(request, context)
def recoverlist_view(self, request, extra_context=None):
"""Displays a deleted model to allow recovery."""
# Check if user has change or add permissions for model
if not self.has_change_permission(request) or not self.has_add_permission(request): # pragma: no cover
raise PermissionDenied
model = self.model
opts = model._meta
deleted = self._order_version_queryset(self.revision_manager.get_deleted(self.model))
# Get the site context.
try:
each_context = self.admin_site.each_context(request)
except TypeError: # Django <= 1.7 pragma: no cover
each_context = self.admin_site.each_context()
# Get the rest of the context.
context = dict(
each_context,
opts = opts,
app_label = opts.app_label,
module_name = capfirst(opts.verbose_name),
title = _("Recover deleted %(name)s") % {"name": force_text(opts.verbose_name_plural)},
deleted = deleted,
)
context.update(extra_context or {})
return render(request, self.recover_list_template or self._get_template_list("recover_list.html"), context)
def history_view(self, request, object_id, extra_context=None):
"""Renders the history view."""
# Check if user has change permissions for model
if not self.has_change_permission(request): # pragma: no cover
raise PermissionDenied
object_id = unquote(object_id) # Underscores in primary key get quoted to "_5F"
opts = self.model._meta
action_list = [
{
"revision": version.revision,
"url": reverse("%s:%s_%s_revision" % (self.admin_site.name, opts.app_label, opts.model_name), args=(quote(version.object_id), version.id)),
}
for version
in self._order_version_queryset(self.revision_manager.get_for_object_reference(
self.model,
object_id,
).select_related("revision__user"))
]
# Compile the context.
context = {"action_list": action_list}
context.update(extra_context or {})
return super(VersionAdmin, self).history_view(request, object_id, context)
|
import unittest
from simplemonitor import util
from simplemonitor.Alerters import fortysixelks
class Test46Elks(unittest.TestCase):
    """Tests for the 46elks SMS alerter's configuration handling."""

    def test_46elks(self):
        # A sender value of "ab" must be rejected with AlerterConfigurationError.
        config_options = {
            "username": "a",
            "password": "b",
            "target": "c",
            "sender": "ab",
        }
        with self.assertRaises(util.AlerterConfigurationError):
            fortysixelks.FortySixElksAlerter(config_options=config_options)
        # A 12-digit numeric sender is accepted but truncated to 11 characters.
        config_options["sender"] = "123456789012"
        alerter = fortysixelks.FortySixElksAlerter(config_options=config_options)
        self.assertEqual(alerter.sender, "12345678901")
|
from pathlib import Path
from niworkflows.reports.core import Report as _Report
class Report(_Report):
    """niworkflows Report variant that keeps the package name out of its paths."""

    def _load_config(self, config):
        """Parse the YAML *config* file and index the report sections.

        Unlike the upstream implementation, the package name is NOT appended
        to ``self.root``/``self.out_dir`` here; callers are expected to pass
        ``reportlets_dir`` and ``out_dir`` with the package already in the path.
        """
        import yaml

        settings = yaml.safe_load(config.read_text())
        if not self.packagename:
            self.packagename = settings.get("package", None)
        if self.subject_id is not None:
            self.root = self.root / "sub-{}".format(self.subject_id)
        # The template may be given relative to the config file's directory.
        if "template_path" in settings:
            self.template_path = config.parent / settings["template_path"]
        self.index(settings["sections"])
def run_reports(
    out_dir,
    subject_label,
    run_uuid,
    config=None,
    reportlets_dir=None,
    packagename=None,
):
    """
    Run the reports.
    .. testsetup::
    >>> cwd = os.getcwd()
    >>> os.chdir(tmpdir)
    >>> from pkg_resources import resource_filename
    >>> from shutil import copytree
    >>> test_data_path = resource_filename('fmriprep', 'data/tests/work')
    >>> testdir = Path(tmpdir)
    >>> data_dir = copytree(test_data_path, str(testdir / 'work'))
    >>> (testdir / 'fmriprep').mkdir(parents=True, exist_ok=True)
    .. doctest::
    >>> run_reports(testdir / 'out', '01', 'madeoutuuid', packagename='fmriprep',
    ...             reportlets_dir=testdir / 'work' / 'reportlets' / 'fmriprep')
    0
    .. testcleanup::
    >>> os.chdir(cwd)
    """
    # Build the subject report and return its error count (0 on success).
    report = Report(
        out_dir,
        run_uuid,
        config=config,
        subject_id=subject_label,
        packagename=packagename,
        reportlets_dir=reportlets_dir,
    )
    return report.generate_report()
def generate_reports(
    subject_list, output_dir, run_uuid, config=None, work_dir=None, packagename=None
):
    """Execute run_reports on a list of subjects and return the total error count."""
    reportlets_dir = None if work_dir is None else Path(work_dir) / "reportlets"
    report_errors = []
    for subject_label in subject_list:
        report_errors.append(
            run_reports(
                output_dir,
                subject_label,
                run_uuid,
                config=config,
                packagename=packagename,
                reportlets_dir=reportlets_dir,
            )
        )
    errno = sum(report_errors)
    if errno:
        import logging

        logger = logging.getLogger("cli")
        # Only mention the subjects that actually failed.
        failed = ", ".join(
            "%s (%d)" % (subject, err)
            for subject, err in zip(subject_list, report_errors)
            if err
        )
        logger.error(
            "Preprocessing did not finish successfully. Errors occurred while processing "
            "data from participants: %s. Check the HTML reports for details.",
            failed,
        )
    return errno
|
"""
Test functions for multivariate normal distributions.
"""
from __future__ import division, print_function, absolute_import
import pickle
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_almost_equal, assert_equal,
assert_array_less, assert_raises, assert_)
from .test_continuous_basic import check_distribution_rvs
import numpy
import numpy as np
import scipy.linalg
from scipy.stats._multivariate import _PSD, _lnB
from scipy.stats import multivariate_normal
from scipy.stats import matrix_normal
from scipy.stats import special_ortho_group, ortho_group
from scipy.stats import random_correlation
from scipy.stats import unitary_group
from scipy.stats import dirichlet, beta
from scipy.stats import wishart, multinomial, invwishart, chi2, invgamma
from scipy.stats import norm, uniform
from scipy.stats import ks_2samp, kstest
from scipy.stats import binom
from scipy.integrate import romb
from .common_tests import check_random_state_property
class TestMultivariateNormal(object):
    """Tests for scipy.stats.multivariate_normal and its internal _PSD helper.

    Many tests seed np.random and then draw in a fixed order, so the exact
    sequence of random calls is part of each test's behavior.
    """

    def test_input_shape(self):
        """Quantiles whose length disagrees with mean/cov must raise ValueError."""
        mu = np.arange(3)
        cov = np.identity(2)
        assert_raises(ValueError, multivariate_normal.pdf, (0, 1), mu, cov)
        assert_raises(ValueError, multivariate_normal.pdf, (0, 1, 2), mu, cov)
        assert_raises(ValueError, multivariate_normal.cdf, (0, 1), mu, cov)
        assert_raises(ValueError, multivariate_normal.cdf, (0, 1, 2), mu, cov)
    def test_scalar_values(self):
        np.random.seed(1234)
        # When evaluated on scalar data, the pdf should return a scalar
        x, mean, cov = 1.5, 1.7, 2.5
        pdf = multivariate_normal.pdf(x, mean, cov)
        assert_equal(pdf.ndim, 0)
        # When evaluated on a single vector, the pdf should return a scalar
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))  # Diagonal values for cov. matrix
        pdf = multivariate_normal.pdf(x, mean, cov)
        assert_equal(pdf.ndim, 0)
        # When evaluated on scalar data, the cdf should return a scalar
        x, mean, cov = 1.5, 1.7, 2.5
        cdf = multivariate_normal.cdf(x, mean, cov)
        assert_equal(cdf.ndim, 0)
        # When evaluated on a single vector, the cdf should return a scalar
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))  # Diagonal values for cov. matrix
        cdf = multivariate_normal.cdf(x, mean, cov)
        assert_equal(cdf.ndim, 0)
    def test_logpdf(self):
        # Check that the log of the pdf is in fact the logpdf
        np.random.seed(1234)
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))
        d1 = multivariate_normal.logpdf(x, mean, cov)
        d2 = multivariate_normal.pdf(x, mean, cov)
        assert_allclose(d1, np.log(d2))
    def test_logpdf_default_values(self):
        # Check that the log of the pdf is in fact the logpdf
        # with default parameters Mean=None and cov = 1
        np.random.seed(1234)
        x = np.random.randn(5)
        d1 = multivariate_normal.logpdf(x)
        d2 = multivariate_normal.pdf(x)
        # check whether default values are being used
        d3 = multivariate_normal.logpdf(x, None, 1)
        d4 = multivariate_normal.pdf(x, None, 1)
        assert_allclose(d1, np.log(d2))
        assert_allclose(d3, np.log(d4))
    def test_logcdf(self):
        # Check that the log of the cdf is in fact the logcdf
        np.random.seed(1234)
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))
        d1 = multivariate_normal.logcdf(x, mean, cov)
        d2 = multivariate_normal.cdf(x, mean, cov)
        assert_allclose(d1, np.log(d2))
    def test_logcdf_default_values(self):
        # Check that the log of the cdf is in fact the logcdf
        # with default parameters Mean=None and cov = 1
        np.random.seed(1234)
        x = np.random.randn(5)
        d1 = multivariate_normal.logcdf(x)
        d2 = multivariate_normal.cdf(x)
        # check whether default values are being used
        d3 = multivariate_normal.logcdf(x, None, 1)
        d4 = multivariate_normal.cdf(x, None, 1)
        assert_allclose(d1, np.log(d2))
        assert_allclose(d3, np.log(d4))
    def test_rank(self):
        # Check that the rank is detected correctly.
        np.random.seed(1234)
        n = 4
        mean = np.random.randn(n)
        for expected_rank in range(1, n + 1):
            s = np.random.randn(n, expected_rank)
            cov = np.dot(s, s.T)
            distn = multivariate_normal(mean, cov, allow_singular=True)
            assert_equal(distn.cov_info.rank, expected_rank)
    def test_degenerate_distributions(self):
        """pdf/logpdf must agree across embedded, padded and rotated singular covs."""
        def _sample_orthonormal_matrix(n):
            M = np.random.randn(n, n)
            u, s, v = scipy.linalg.svd(M)
            return u
        for n in range(1, 5):
            x = np.random.randn(n)
            for k in range(1, n + 1):
                # Sample a small covariance matrix.
                s = np.random.randn(k, k)
                cov_kk = np.dot(s, s.T)
                # Embed the small covariance matrix into a larger low rank matrix.
                cov_nn = np.zeros((n, n))
                cov_nn[:k, :k] = cov_kk
                # Define a rotation of the larger low rank matrix.
                u = _sample_orthonormal_matrix(n)
                cov_rr = np.dot(u, np.dot(cov_nn, u.T))
                y = np.dot(u, x)
                # Check some identities.
                distn_kk = multivariate_normal(np.zeros(k), cov_kk,
                                               allow_singular=True)
                distn_nn = multivariate_normal(np.zeros(n), cov_nn,
                                               allow_singular=True)
                distn_rr = multivariate_normal(np.zeros(n), cov_rr,
                                               allow_singular=True)
                assert_equal(distn_kk.cov_info.rank, k)
                assert_equal(distn_nn.cov_info.rank, k)
                assert_equal(distn_rr.cov_info.rank, k)
                pdf_kk = distn_kk.pdf(x[:k])
                pdf_nn = distn_nn.pdf(x)
                pdf_rr = distn_rr.pdf(y)
                assert_allclose(pdf_kk, pdf_nn)
                assert_allclose(pdf_kk, pdf_rr)
                logpdf_kk = distn_kk.logpdf(x[:k])
                logpdf_nn = distn_nn.logpdf(x)
                logpdf_rr = distn_rr.logpdf(y)
                assert_allclose(logpdf_kk, logpdf_nn)
                assert_allclose(logpdf_kk, logpdf_rr)
    def test_large_pseudo_determinant(self):
        # Check that large pseudo-determinants are handled appropriately.
        # Construct a singular diagonal covariance matrix
        # whose pseudo determinant overflows double precision.
        large_total_log = 1000.0
        npos = 100
        nzero = 2
        large_entry = np.exp(large_total_log / npos)
        n = npos + nzero
        cov = np.zeros((n, n), dtype=float)
        np.fill_diagonal(cov, large_entry)
        cov[-nzero:, -nzero:] = 0
        # Check some determinants.
        assert_equal(scipy.linalg.det(cov), 0)
        assert_equal(scipy.linalg.det(cov[:npos, :npos]), np.inf)
        assert_allclose(np.linalg.slogdet(cov[:npos, :npos]),
                        (1, large_total_log))
        # Check the pseudo-determinant.
        psd = _PSD(cov)
        assert_allclose(psd.log_pdet, large_total_log)
    def test_broadcasting(self):
        """Batched evaluation must match point-by-point evaluation."""
        np.random.seed(1234)
        n = 4
        # Construct a random covariance matrix.
        data = np.random.randn(n, n)
        cov = np.dot(data, data.T)
        mean = np.random.randn(n)
        # Construct an ndarray which can be interpreted as
        # a 2x3 array whose elements are random data vectors.
        X = np.random.randn(2, 3, n)
        # Check that multiple data points can be evaluated at once.
        for i in range(2):
            for j in range(3):
                actual = multivariate_normal.pdf(X[i, j], mean, cov)
                desired = multivariate_normal.pdf(X, mean, cov)[i, j]
                assert_allclose(actual, desired)
                # Repeat for cdf
                actual = multivariate_normal.cdf(X[i, j], mean, cov)
                desired = multivariate_normal.cdf(X, mean, cov)[i, j]
                assert_allclose(actual, desired, atol=1e-5)
    def test_normal_1D(self):
        # The probability density function for a 1D normal variable should
        # agree with the standard normal distribution in scipy.stats.distributions
        x = np.linspace(0, 2, 10)
        mean, cov = 1.2, 0.9
        scale = cov**0.5
        d1 = norm.pdf(x, mean, scale)
        d2 = multivariate_normal.pdf(x, mean, cov)
        assert_allclose(d1, d2)
        # The same should hold for the cumulative distribution function
        d1 = norm.cdf(x, mean, scale)
        d2 = multivariate_normal.cdf(x, mean, cov)
        assert_allclose(d1, d2)
    def test_marginalization(self):
        # Integrating out one of the variables of a 2D Gaussian should
        # yield a 1D Gaussian
        mean = np.array([2.5, 3.5])
        cov = np.array([[.5, 0.2], [0.2, .6]])
        n = 2 ** 8 + 1  # Number of samples
        delta = 6 / (n - 1)  # Grid spacing
        v = np.linspace(0, 6, n)
        xv, yv = np.meshgrid(v, v)
        pos = np.empty((n, n, 2))
        pos[:, :, 0] = xv
        pos[:, :, 1] = yv
        pdf = multivariate_normal.pdf(pos, mean, cov)
        # Marginalize over x and y axis
        margin_x = romb(pdf, delta, axis=0)
        margin_y = romb(pdf, delta, axis=1)
        # Compare with standard normal distribution
        gauss_x = norm.pdf(v, loc=mean[0], scale=cov[0, 0] ** 0.5)
        gauss_y = norm.pdf(v, loc=mean[1], scale=cov[1, 1] ** 0.5)
        assert_allclose(margin_x, gauss_x, rtol=1e-2, atol=1e-2)
        assert_allclose(margin_y, gauss_y, rtol=1e-2, atol=1e-2)
    def test_frozen(self):
        # The frozen distribution should agree with the regular one
        np.random.seed(1234)
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))
        norm_frozen = multivariate_normal(mean, cov)
        assert_allclose(norm_frozen.pdf(x), multivariate_normal.pdf(x, mean, cov))
        assert_allclose(norm_frozen.logpdf(x),
                        multivariate_normal.logpdf(x, mean, cov))
        assert_allclose(norm_frozen.cdf(x), multivariate_normal.cdf(x, mean, cov))
        assert_allclose(norm_frozen.logcdf(x),
                        multivariate_normal.logcdf(x, mean, cov))
    def test_pseudodet_pinv(self):
        # Make sure that pseudo-inverse and pseudo-det agree on cutoff
        # Assemble random covariance matrix with large and small eigenvalues
        np.random.seed(1234)
        n = 7
        x = np.random.randn(n, n)
        cov = np.dot(x, x.T)
        s, u = scipy.linalg.eigh(cov)
        s = 0.5 * np.ones(n)
        s[0] = 1.0
        s[-1] = 1e-7
        cov = np.dot(u, np.dot(np.diag(s), u.T))
        # Set cond so that the lowest eigenvalue is below the cutoff
        cond = 1e-5
        psd = _PSD(cov, cond=cond)
        psd_pinv = _PSD(psd.pinv, cond=cond)
        # Check that the log pseudo-determinant agrees with the sum
        # of the logs of all but the smallest eigenvalue
        assert_allclose(psd.log_pdet, np.sum(np.log(s[:-1])))
        # Check that the pseudo-determinant of the pseudo-inverse
        # agrees with 1 / pseudo-determinant
        assert_allclose(-psd.log_pdet, psd_pinv.log_pdet)
    def test_exception_nonsquare_cov(self):
        """A non-square covariance must raise ValueError."""
        cov = [[1, 2, 3], [4, 5, 6]]
        assert_raises(ValueError, _PSD, cov)
    def test_exception_nonfinite_cov(self):
        """NaN or inf entries in the covariance must raise ValueError."""
        cov_nan = [[1, 0], [0, np.nan]]
        assert_raises(ValueError, _PSD, cov_nan)
        cov_inf = [[1, 0], [0, np.inf]]
        assert_raises(ValueError, _PSD, cov_inf)
    def test_exception_non_psd_cov(self):
        """A covariance with a negative eigenvalue must raise ValueError."""
        cov = [[1, 0], [0, -1]]
        assert_raises(ValueError, _PSD, cov)
    def test_exception_singular_cov(self):
        """A singular covariance without allow_singular must raise LinAlgError."""
        np.random.seed(1234)
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.ones((5, 5))
        e = np.linalg.LinAlgError
        assert_raises(e, multivariate_normal, mean, cov)
        assert_raises(e, multivariate_normal.pdf, x, mean, cov)
        assert_raises(e, multivariate_normal.logpdf, x, mean, cov)
        assert_raises(e, multivariate_normal.cdf, x, mean, cov)
        assert_raises(e, multivariate_normal.logcdf, x, mean, cov)
    def test_R_values(self):
        # Compare the multivariate pdf with some values precomputed
        # in R version 3.0.1 (2013-05-16) on Mac OS X 10.6.
        # The values below were generated by the following R-script:
        # > library(mnormt)
        # > x <- seq(0, 2, length=5)
        # > y <- 3*x - 2
        # > z <- x + cos(y)
        # > mu <- c(1, 3, 2)
        # > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
        # > r_pdf <- dmnorm(cbind(x,y,z), mu, Sigma)
        r_pdf = np.array([0.0002214706, 0.0013819953, 0.0049138692,
                          0.0103803050, 0.0140250800])
        x = np.linspace(0, 2, 5)
        y = 3 * x - 2
        z = x + np.cos(y)
        r = np.array([x, y, z]).T
        mean = np.array([1, 3, 2], 'd')
        cov = np.array([[1, 2, 0], [2, 5, .5], [0, .5, 3]], 'd')
        pdf = multivariate_normal.pdf(r, mean, cov)
        assert_allclose(pdf, r_pdf, atol=1e-10)
        # Compare the multivariate cdf with some values precomputed
        # in R version 3.3.2 (2016-10-31) on Debian GNU/Linux.
        # The values below were generated by the following R-script:
        # > library(mnormt)
        # > x <- seq(0, 2, length=5)
        # > y <- 3*x - 2
        # > z <- x + cos(y)
        # > mu <- c(1, 3, 2)
        # > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
        # > r_cdf <- pmnorm(cbind(x,y,z), mu, Sigma)
        r_cdf = np.array([0.0017866215, 0.0267142892, 0.0857098761,
                          0.1063242573, 0.2501068509])
        cdf = multivariate_normal.cdf(r, mean, cov)
        assert_allclose(cdf, r_cdf, atol=1e-5)
    def test_multivariate_normal_rvs_zero_covariance(self):
        """With a zero covariance (allow_singular), rvs must return the mean."""
        mean = np.zeros(2)
        covariance = np.zeros((2, 2))
        model = multivariate_normal(mean, covariance, allow_singular=True)
        sample = model.rvs()
        assert_equal(sample, [0, 0])
    def test_rvs_shape(self):
        # Check that rvs parses the mean and covariance correctly, and returns
        # an array of the right shape
        N = 300
        d = 4
        sample = multivariate_normal.rvs(mean=np.zeros(d), cov=1, size=N)
        assert_equal(sample.shape, (N, d))
        sample = multivariate_normal.rvs(mean=None,
                                         cov=np.array([[2, .1], [.1, 1]]),
                                         size=N)
        assert_equal(sample.shape, (N, 2))
        u = multivariate_normal(mean=0, cov=1)
        sample = u.rvs(N)
        assert_equal(sample.shape, (N, ))
    def test_large_sample(self):
        # Generate large sample and compare sample mean and sample covariance
        # with mean and covariance matrix.
        np.random.seed(2846)
        n = 3
        mean = np.random.randn(n)
        M = np.random.randn(n, n)
        cov = np.dot(M, M.T)
        size = 5000
        sample = multivariate_normal.rvs(mean, cov, size)
        assert_allclose(numpy.cov(sample.T), cov, rtol=1e-1)
        assert_allclose(sample.mean(0), mean, rtol=1e-1)
    def test_entropy(self):
        """Entropy must match the closed-form eigenvalue expression."""
        np.random.seed(2846)
        n = 3
        mean = np.random.randn(n)
        M = np.random.randn(n, n)
        cov = np.dot(M, M.T)
        rv = multivariate_normal(mean, cov)
        # Check that frozen distribution agrees with entropy function
        assert_almost_equal(rv.entropy(), multivariate_normal.entropy(mean, cov))
        # Compare entropy with manually computed expression involving
        # the sum of the logs of the eigenvalues of the covariance matrix
        eigs = np.linalg.eig(cov)[0]
        desired = 1 / 2 * (n * (np.log(2 * np.pi) + 1) + np.sum(np.log(eigs)))
        assert_almost_equal(desired, rv.entropy())
    def test_lnB(self):
        """Sanity-check the multivariate beta helper on a known value."""
        alpha = np.array([1, 1, 1])
        desired = .5  # e^lnB = 1/2 for [1, 1, 1]
        assert_almost_equal(np.exp(_lnB(alpha)), desired)
class TestMatrixNormal(object):
    """Tests for scipy.stats.matrix_normal (mean M, row cov U, column cov V)."""

    def test_bad_input(self):
        # Check that bad inputs raise errors
        num_rows = 4
        num_cols = 3
        M = 0.3 * np.ones((num_rows,num_cols))
        U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows))
        V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols))
        # Incorrect dimensions
        assert_raises(ValueError, matrix_normal, np.zeros((5,4,3)))
        assert_raises(ValueError, matrix_normal, M, np.zeros(10), V)
        assert_raises(ValueError, matrix_normal, M, U, np.zeros(10))
        assert_raises(ValueError, matrix_normal, M, U, U)
        assert_raises(ValueError, matrix_normal, M, V, V)
        assert_raises(ValueError, matrix_normal, M.T, U, V)
        # Singular covariance
        e = np.linalg.LinAlgError
        assert_raises(e, matrix_normal, M, U, np.ones((num_cols, num_cols)))
        assert_raises(e, matrix_normal, M, np.ones((num_rows, num_rows)), V)
    def test_default_inputs(self):
        # Check that default argument handling works
        num_rows = 4
        num_cols = 3
        M = 0.3 * np.ones((num_rows,num_cols))
        U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows))
        V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols))
        Z = np.zeros((num_rows, num_cols))
        Zr = np.zeros((num_rows, 1))
        Zc = np.zeros((1, num_cols))
        Ir = np.identity(num_rows)
        Ic = np.identity(num_cols)
        I1 = np.identity(1)
        assert_equal(matrix_normal.rvs(mean=M, rowcov=U, colcov=V).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(mean=M).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(rowcov=U).shape,
                     (num_rows, 1))
        assert_equal(matrix_normal.rvs(colcov=V).shape,
                     (1, num_cols))
        assert_equal(matrix_normal.rvs(mean=M, colcov=V).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(mean=M, rowcov=U).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal.rvs(rowcov=U, colcov=V).shape,
                     (num_rows, num_cols))
        assert_equal(matrix_normal(mean=M).rowcov, Ir)
        assert_equal(matrix_normal(mean=M).colcov, Ic)
        assert_equal(matrix_normal(rowcov=U).mean, Zr)
        assert_equal(matrix_normal(rowcov=U).colcov, I1)
        assert_equal(matrix_normal(colcov=V).mean, Zc)
        assert_equal(matrix_normal(colcov=V).rowcov, I1)
        assert_equal(matrix_normal(mean=M, rowcov=U).colcov, Ic)
        assert_equal(matrix_normal(mean=M, colcov=V).rowcov, Ir)
        assert_equal(matrix_normal(rowcov=U, colcov=V).mean, Z)
    def test_covariance_expansion(self):
        # Check that covariance can be specified with scalar or vector
        num_rows = 4
        num_cols = 3
        M = 0.3 * np.ones((num_rows,num_cols))
        Uv = 0.2*np.ones(num_rows)
        Us = 0.2
        Vv = 0.1*np.ones(num_cols)
        Vs = 0.1
        Ir = np.identity(num_rows)
        Ic = np.identity(num_cols)
        assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).rowcov,
                     0.2*Ir)
        assert_equal(matrix_normal(mean=M, rowcov=Uv, colcov=Vv).colcov,
                     0.1*Ic)
        assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).rowcov,
                     0.2*Ir)
        assert_equal(matrix_normal(mean=M, rowcov=Us, colcov=Vs).colcov,
                     0.1*Ic)
    def test_frozen_matrix_normal(self):
        """Frozen and non-frozen calls must agree for rvs, pdf and logpdf."""
        for i in range(1,5):
            for j in range(1,5):
                M = 0.3 * np.ones((i,j))
                U = 0.5 * np.identity(i) + 0.5 * np.ones((i,i))
                V = 0.7 * np.identity(j) + 0.3 * np.ones((j,j))
                frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
                rvs1 = frozen.rvs(random_state=1234)
                rvs2 = matrix_normal.rvs(mean=M, rowcov=U, colcov=V,
                                         random_state=1234)
                assert_equal(rvs1, rvs2)
                X = frozen.rvs(random_state=1234)
                pdf1 = frozen.pdf(X)
                pdf2 = matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
                assert_equal(pdf1, pdf2)
                logpdf1 = frozen.logpdf(X)
                logpdf2 = matrix_normal.logpdf(X, mean=M, rowcov=U, colcov=V)
                assert_equal(logpdf1, logpdf2)
    def test_matches_multivariate(self):
        # Check that the pdfs match those obtained by vectorising and
        # treating as a multivariate normal.
        for i in range(1,5):
            for j in range(1,5):
                M = 0.3 * np.ones((i,j))
                U = 0.5 * np.identity(i) + 0.5 * np.ones((i,i))
                V = 0.7 * np.identity(j) + 0.3 * np.ones((j,j))
                frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
                X = frozen.rvs(random_state=1234)
                pdf1 = frozen.pdf(X)
                logpdf1 = frozen.logpdf(X)
                vecX = X.T.flatten()
                vecM = M.T.flatten()
                cov = np.kron(V,U)
                pdf2 = multivariate_normal.pdf(vecX, mean=vecM, cov=cov)
                logpdf2 = multivariate_normal.logpdf(vecX, mean=vecM, cov=cov)
                assert_allclose(pdf1, pdf2, rtol=1E-10)
                assert_allclose(logpdf1, logpdf2, rtol=1E-10)
    def test_array_input(self):
        # Check array of inputs has the same output as the separate entries.
        num_rows = 4
        num_cols = 3
        M = 0.3 * np.ones((num_rows,num_cols))
        U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows))
        V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols))
        N = 10
        frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
        X1 = frozen.rvs(size=N, random_state=1234)
        X2 = frozen.rvs(size=N, random_state=4321)
        X = np.concatenate((X1[np.newaxis,:,:,:],X2[np.newaxis,:,:,:]), axis=0)
        assert_equal(X.shape, (2, N, num_rows, num_cols))
        array_logpdf = frozen.logpdf(X)
        assert_equal(array_logpdf.shape, (2, N))
        for i in range(2):
            for j in range(N):
                separate_logpdf = matrix_normal.logpdf(X[i,j], mean=M,
                                                       rowcov=U, colcov=V)
                assert_allclose(separate_logpdf, array_logpdf[i,j], 1E-10)
    def test_moments(self):
        # Check that the sample moments match the parameters
        num_rows = 4
        num_cols = 3
        M = 0.3 * np.ones((num_rows,num_cols))
        U = 0.5 * np.identity(num_rows) + 0.5 * np.ones((num_rows, num_rows))
        V = 0.7 * np.identity(num_cols) + 0.3 * np.ones((num_cols, num_cols))
        N = 1000
        frozen = matrix_normal(mean=M, rowcov=U, colcov=V)
        X = frozen.rvs(size=N, random_state=1234)
        sample_mean = np.mean(X,axis=0)
        assert_allclose(sample_mean, M, atol=0.1)
        sample_colcov = np.cov(X.reshape(N*num_rows,num_cols).T)
        assert_allclose(sample_colcov, V, atol=0.1)
        sample_rowcov = np.cov(np.swapaxes(X,1,2).reshape(
                                        N*num_cols,num_rows).T)
        assert_allclose(sample_rowcov, U, atol=0.1)
class TestDirichlet(object):
    """Tests for scipy.stats.dirichlet: input validation, frozen distributions,
    and agreement with the beta distribution in the 2D case."""

    def test_frozen_dirichlet(self):
        """Frozen and non-frozen calls must agree for var/mean/entropy/pdf/logpdf."""
        np.random.seed(2846)
        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)
        d = dirichlet(alpha)
        assert_equal(d.var(), dirichlet.var(alpha))
        assert_equal(d.mean(), dirichlet.mean(alpha))
        assert_equal(d.entropy(), dirichlet.entropy(alpha))
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, n)
            x /= np.sum(x)
            assert_equal(d.pdf(x[:-1]), dirichlet.pdf(x[:-1], alpha))
            assert_equal(d.logpdf(x[:-1]), dirichlet.logpdf(x[:-1], alpha))
    def test_numpy_rvs_shape_compatibility(self):
        """Samples from np.random.dirichlet must be accepted transposed."""
        np.random.seed(2846)
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.random.dirichlet(alpha, size=7)
        assert_equal(x.shape, (7, 3))
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
        dirichlet.pdf(x.T, alpha)
        dirichlet.pdf(x.T[:-1], alpha)
        dirichlet.logpdf(x.T, alpha)
        dirichlet.logpdf(x.T[:-1], alpha)
    def test_alpha_with_zeros(self):
        """Zero entries in alpha must raise ValueError."""
        np.random.seed(2846)
        alpha = [1.0, 0.0, 3.0]
        x = np.random.dirichlet(alpha, size=7).T
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_alpha_with_negative_entries(self):
        """Negative entries in alpha must raise ValueError."""
        np.random.seed(2846)
        alpha = [1.0, -2.0, 3.0]
        x = np.random.dirichlet(alpha, size=7).T
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_with_zeros(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.array([0.1, 0.0, 0.2, 0.7])
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_with_negative_entries(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.array([0.1, -0.1, 0.3, 0.7])
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_with_too_large_entries(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.array([0.1, 1.1, 0.3, 0.7])
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_too_deep_c(self):
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.ones((2, 7, 7)) / 14
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_alpha_too_deep(self):
        alpha = np.array([[1.0, 2.0], [3.0, 4.0]])
        x = np.ones((2, 2, 7)) / 4
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_alpha_correct_depth(self):
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.ones((3, 7)) / 3
        dirichlet.pdf(x, alpha)
        dirichlet.logpdf(x, alpha)
    def test_non_simplex_data(self):
        """Data that does not sum to 1 must raise ValueError."""
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.ones((3, 7)) / 2
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_vector_too_short(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.ones((2, 7)) / 2
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_vector_too_long(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.ones((5, 7)) / 5
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_mean_and_var(self):
        alpha = np.array([1., 0.8, 0.2])
        d = dirichlet(alpha)
        expected_var = [1. / 12., 0.08, 0.03]
        expected_mean = [0.5, 0.4, 0.1]
        assert_array_almost_equal(d.var(), expected_var)
        assert_array_almost_equal(d.mean(), expected_mean)
    def test_scalar_values(self):
        alpha = np.array([0.2])
        d = dirichlet(alpha)
        # For alpha of length 1, mean and var should be scalar instead of array
        assert_equal(d.mean().ndim, 0)
        assert_equal(d.var().ndim, 0)
        assert_equal(d.pdf([1.]).ndim, 0)
        assert_equal(d.logpdf([1.]).ndim, 0)
    def test_K_and_K_minus_1_calls_equal(self):
        # Test that calls with K and K-1 entries yield the same results.
        np.random.seed(2846)
        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)
        d = dirichlet(alpha)
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, n)
            x /= np.sum(x)
            assert_almost_equal(d.pdf(x[:-1]), d.pdf(x))
    def test_multiple_entry_calls(self):
        # Test that calls with multiple x vectors as matrix work
        np.random.seed(2846)
        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)
        d = dirichlet(alpha)
        num_tests = 10
        num_multiple = 5
        xm = None
        for i in range(num_tests):
            for m in range(num_multiple):
                x = np.random.uniform(10e-10, 100, n)
                x /= np.sum(x)
                if xm is not None:
                    xm = np.vstack((xm, x))
                else:
                    xm = x
            rm = d.pdf(xm.T)
            rs = None
            for xs in xm:
                r = d.pdf(xs)
                if rs is not None:
                    rs = np.append(rs, r)
                else:
                    rs = r
            assert_array_almost_equal(rm, rs)
    def test_2D_dirichlet_is_beta(self):
        """A 2-component Dirichlet must coincide with the beta distribution."""
        np.random.seed(2846)
        alpha = np.random.uniform(10e-10, 100, 2)
        d = dirichlet(alpha)
        b = beta(alpha[0], alpha[1])
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, 2)
            x /= np.sum(x)
            assert_almost_equal(b.pdf(x), d.pdf([x]))
        assert_almost_equal(b.mean(), d.mean()[0])
        assert_almost_equal(b.var(), d.var()[0])
def test_multivariate_normal_dimensions_mismatch():
    """Regression test for GH #3493.

    A mean of length M with an (N, N) covariance matrix, M != N, must raise
    a ValueError whose message starts with "Dimension mismatch".
    """
    mu = np.array([0.0, 0.0])
    sigma = np.array([[1.0]])
    assert_raises(ValueError, multivariate_normal, mu, sigma)
    # Checking the whole message word for word would be fragile, so only the
    # leading part of it is verified.
    leading = "Dimension mismatch"
    try:
        multivariate_normal(mu, sigma)
    except ValueError as e:
        assert_equal(str(e)[:len(leading)], leading)
class TestWishart(object):
    """Tests for scipy.stats.wishart.

    Covers flexible scale/quantile argument shapes, agreement between the
    frozen and non-frozen interfaces, and the reduction of the 1-D Wishart
    to (scaled) chi-squared distributions.
    """
    def test_scale_dimensions(self):
        # Test that we can call the Wishart with various scale dimensions
        # Test case: dim=1, scale=1
        true_scale = np.array(1, ndmin=2)
        scales = [
            1, # scalar
            [1], # iterable
            np.array(1), # 0-dim
            np.r_[1], # 1-dim
            np.array(1, ndmin=2) # 2-dim
        ]
        for scale in scales:
            w = wishart(1, scale)
            assert_equal(w.scale, true_scale)
            assert_equal(w.scale.shape, true_scale.shape)
        # Test case: dim=2, scale=[[1,0]
        # [0,2]
        true_scale = np.array([[1,0],
                               [0,2]])
        scales = [
            [1,2], # iterable
            np.r_[1,2], # 1-dim
            np.array([[1,0], # 2-dim
                      [0,2]])
        ]
        for scale in scales:
            w = wishart(2, scale)
            assert_equal(w.scale, true_scale)
            assert_equal(w.scale.shape, true_scale.shape)
        # We cannot call with a df < dim
        assert_raises(ValueError, wishart, 1, np.eye(2))
        # We cannot call with a 3-dimension array
        scale = np.array(1, ndmin=3)
        assert_raises(ValueError, wishart, 1, scale)
    def test_quantile_dimensions(self):
        # Test that we can call the Wishart rvs with various quantile dimensions
        # If dim == 1, consider x.shape = [1,1,1]
        X = [
            1, # scalar
            [1], # iterable
            np.array(1), # 0-dim
            np.r_[1], # 1-dim
            np.array(1, ndmin=2), # 2-dim
            np.array([1], ndmin=3) # 3-dim
        ]
        w = wishart(1,1)
        density = w.pdf(np.array(1, ndmin=3))
        for x in X:
            assert_equal(w.pdf(x), density)
        # If dim == 1, consider x.shape = [1,1,*]
        X = [
            [1,2,3], # iterable
            np.r_[1,2,3], # 1-dim
            np.array([1,2,3], ndmin=3) # 3-dim
        ]
        w = wishart(1,1)
        density = w.pdf(np.array([1,2,3], ndmin=3))
        for x in X:
            assert_equal(w.pdf(x), density)
        # If dim == 2, consider x.shape = [2,2,1]
        # where x[:,:,*] = np.eye(1)*2
        X = [
            2, # scalar
            [2,2], # iterable
            np.array(2), # 0-dim
            np.r_[2,2], # 1-dim
            np.array([[2,0],
                      [0,2]]), # 2-dim
            np.array([[2,0],
                      [0,2]])[:,:,np.newaxis] # 3-dim
        ]
        w = wishart(2,np.eye(2))
        density = w.pdf(np.array([[2,0],
                                  [0,2]])[:,:,np.newaxis])
        for x in X:
            assert_equal(w.pdf(x), density)
    def test_frozen(self):
        # Test that the frozen and non-frozen Wishart gives the same answers
        # Construct an arbitrary positive definite scale matrix
        dim = 4
        scale = np.diag(np.arange(dim)+1)
        scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
        scale = np.dot(scale.T, scale)
        # Construct a collection of positive definite matrices to test the PDF
        X = []
        for i in range(5):
            x = np.diag(np.arange(dim)+(i+1)**2)
            x[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
            x = np.dot(x.T, x)
            X.append(x)
        # Stacked along the last axis, as the pdf machinery expects.
        X = np.array(X).T
        # Construct a 1D and 2D set of parameters
        parameters = [
            (10, 1, np.linspace(0.1, 10, 5)), # 1D case
            (10, scale, X)
        ]
        for (df, scale, x) in parameters:
            w = wishart(df, scale)
            assert_equal(w.var(), wishart.var(df, scale))
            assert_equal(w.mean(), wishart.mean(df, scale))
            assert_equal(w.mode(), wishart.mode(df, scale))
            assert_equal(w.entropy(), wishart.entropy(df, scale))
            assert_equal(w.pdf(x), wishart.pdf(x, df, scale))
    def test_1D_is_chisquared(self):
        # The 1-dimensional Wishart with an identity scale matrix is just a
        # chi-squared distribution.
        # Test variance, mean, entropy, pdf
        # Kolgomorov-Smirnov test for rvs
        np.random.seed(482974)
        sn = 500
        dim = 1
        scale = np.eye(dim)
        df_range = np.arange(1, 10, 2, dtype=float)
        X = np.linspace(0.1,10,num=10)
        for df in df_range:
            w = wishart(df, scale)
            c = chi2(df)
            # Statistics
            assert_allclose(w.var(), c.var())
            assert_allclose(w.mean(), c.mean())
            assert_allclose(w.entropy(), c.entropy())
            # PDF
            assert_allclose(w.pdf(X), c.pdf(X))
            # rvs
            rvs = w.rvs(size=sn)
            args = (df,)
            alpha = 0.01
            check_distribution_rvs('chi2', args, alpha, rvs)
    def test_is_scaled_chisquared(self):
        # The 2-dimensional Wishart with an arbitrary scale matrix can be
        # transformed to a scaled chi-squared distribution.
        # For :math:`S \sim W_p(V,n)` and :math:`\lambda \in \mathbb{R}^p` we have
        # :math:`\lambda' S \lambda \sim \lambda' V \lambda \times \chi^2(n)`
        np.random.seed(482974)
        sn = 500
        df = 10
        dim = 4
        # Construct an arbitrary positive definite matrix
        scale = np.diag(np.arange(4)+1)
        scale[np.tril_indices(4, k=-1)] = np.arange(6)
        scale = np.dot(scale.T, scale)
        # Use :math:`\lambda = [1, \dots, 1]'`
        lamda = np.ones((dim,1))
        sigma_lamda = lamda.T.dot(scale).dot(lamda).squeeze()
        w = wishart(df, sigma_lamda)
        c = chi2(df, scale=sigma_lamda)
        # Statistics
        assert_allclose(w.var(), c.var())
        assert_allclose(w.mean(), c.mean())
        assert_allclose(w.entropy(), c.entropy())
        # PDF
        X = np.linspace(0.1,10,num=10)
        assert_allclose(w.pdf(X), c.pdf(X))
        # rvs
        rvs = w.rvs(size=sn)
        args = (df,0,sigma_lamda)
        alpha = 0.01
        check_distribution_rvs('chi2', args, alpha, rvs)
class TestMultinomial(object):
    """Tests for scipy.stats.multinomial.

    Covers pmf/logpmf values (including against R and the binomial special
    case), broadcasting, moments, and the frozen interface.
    """
    def test_logpmf(self):
        vals1 = multinomial.logpmf((3,4), 7, (0.3, 0.7))
        assert_allclose(vals1, -1.483270127243324, rtol=1e-8)
        # Impossible counts (n=0 with nonzero x) and invalid probability
        # vectors produce nan.  Use np.nan rather than the np.NAN alias,
        # which was removed in NumPy 2.0.
        vals2 = multinomial.logpmf([3, 4], 0, [.3, .7])
        assert_allclose(vals2, np.nan, rtol=1e-8)
        vals3 = multinomial.logpmf([3, 4], 0, [-2, 3])
        assert_allclose(vals3, np.nan, rtol=1e-8)
    def test_reduces_binomial(self):
        # test that the multinomial pmf reduces to the binomial pmf in the 2d
        # case
        val1 = multinomial.logpmf((3, 4), 7, (0.3, 0.7))
        val2 = binom.logpmf(3, 7, 0.3)
        assert_allclose(val1, val2, rtol=1e-8)
        val1 = multinomial.pmf((6, 8), 14, (0.1, 0.9))
        val2 = binom.pmf(6, 14, 0.1)
        assert_allclose(val1, val2, rtol=1e-8)
    def test_R(self):
        # test against the values produced by this R code
        # (https://stat.ethz.ch/R-manual/R-devel/library/stats/html/Multinom.html)
        # X <- t(as.matrix(expand.grid(0:3, 0:3))); X <- X[, colSums(X) <= 3]
        # X <- rbind(X, 3:3 - colSums(X)); dimnames(X) <- list(letters[1:3], NULL)
        # X
        # apply(X, 2, function(x) dmultinom(x, prob = c(1,2,5)))
        n, p = 3, [1./8, 2./8, 5./8]
        r_vals = {(0, 0, 3): 0.244140625, (1, 0, 2): 0.146484375,
                  (2, 0, 1): 0.029296875, (3, 0, 0): 0.001953125,
                  (0, 1, 2): 0.292968750, (1, 1, 1): 0.117187500,
                  (2, 1, 0): 0.011718750, (0, 2, 1): 0.117187500,
                  (1, 2, 0): 0.023437500, (0, 3, 0): 0.015625000}
        for x in r_vals:
            assert_allclose(multinomial.pmf(x, n, p), r_vals[x], atol=1e-14)
    def test_rvs_np(self):
        # test that .rvs agrees w/numpy
        sc_rvs = multinomial.rvs(3, [1/4.]*3, size=7, random_state=123)
        rndm = np.random.RandomState(123)
        np_rvs = rndm.multinomial(3, [1/4.]*3, size=7)
        assert_equal(sc_rvs, np_rvs)
    def test_pmf(self):
        vals0 = multinomial.pmf((5,), 5, (1,))
        assert_allclose(vals0, 1, rtol=1e-8)
        vals1 = multinomial.pmf((3,4), 7, (.3, .7))
        assert_allclose(vals1, .22689449999999994, rtol=1e-8)
        vals2 = multinomial.pmf([[[3,5],[0,8]], [[-1, 9], [1, 1]]], 8,
                                (.1, .9))
        assert_allclose(vals2, [[.03306744, .43046721], [0, 0]], rtol=1e-8)
        x = np.empty((0,2), dtype=np.float64)
        vals3 = multinomial.pmf(x, 4, (.3, .7))
        assert_equal(vals3, np.empty([], dtype=np.float64))
        # Counts that do not sum to n have zero probability.
        vals4 = multinomial.pmf([1,2], 4, (.3, .7))
        assert_allclose(vals4, 0, rtol=1e-8)
    def test_pmf_broadcasting(self):
        vals0 = multinomial.pmf([1, 2], 3, [[.1, .9], [.2, .8]])
        assert_allclose(vals0, [.243, .384], rtol=1e-8)
        vals1 = multinomial.pmf([1, 2], [3, 4], [.1, .9])
        assert_allclose(vals1, [.243, 0], rtol=1e-8)
        vals2 = multinomial.pmf([[[1, 2], [1, 1]]], 3, [.1, .9])
        assert_allclose(vals2, [[.243, 0]], rtol=1e-8)
        vals3 = multinomial.pmf([1, 2], [[[3], [4]]], [.1, .9])
        assert_allclose(vals3, [[[.243], [0]]], rtol=1e-8)
        vals4 = multinomial.pmf([[1, 2], [1,1]], [[[[3]]]], [.1, .9])
        assert_allclose(vals4, [[[[.243, 0]]]], rtol=1e-8)
    def test_cov(self):
        # Covariance: n*p_i*(1-p_i) on the diagonal, -n*p_i*p_j off it.
        cov1 = multinomial.cov(5, (.2, .3, .5))
        cov2 = [[5*.2*.8, -5*.2*.3, -5*.2*.5],
                [-5*.3*.2, 5*.3*.7, -5*.3*.5],
                [-5*.5*.2, -5*.5*.3, 5*.5*.5]]
        assert_allclose(cov1, cov2, rtol=1e-8)
    def test_cov_broadcasting(self):
        cov1 = multinomial.cov(5, [[.1, .9], [.2, .8]])
        cov2 = [[[.45, -.45],[-.45, .45]], [[.8, -.8], [-.8, .8]]]
        assert_allclose(cov1, cov2, rtol=1e-8)
        cov3 = multinomial.cov([4, 5], [.1, .9])
        cov4 = [[[.36, -.36], [-.36, .36]], [[.45, -.45], [-.45, .45]]]
        assert_allclose(cov3, cov4, rtol=1e-8)
        cov5 = multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
        cov6 = [[[4*.3*.7, -4*.3*.7], [-4*.3*.7, 4*.3*.7]],
                [[5*.4*.6, -5*.4*.6], [-5*.4*.6, 5*.4*.6]]]
        assert_allclose(cov5, cov6, rtol=1e-8)
    def test_entropy(self):
        # this is equivalent to a binomial distribution with n=2, so the
        # entropy .77899774929 is easily computed "by hand"
        ent0 = multinomial.entropy(2, [.2, .8])
        assert_allclose(ent0, binom.entropy(2, .2), rtol=1e-8)
    def test_entropy_broadcasting(self):
        ent0 = multinomial.entropy([2, 3], [.2, .3])
        assert_allclose(ent0, [binom.entropy(2, .2), binom.entropy(3, .2)],
                        rtol=1e-8)
        ent1 = multinomial.entropy([7, 8], [[.3, .7], [.4, .6]])
        assert_allclose(ent1, [binom.entropy(7, .3), binom.entropy(8, .4)],
                        rtol=1e-8)
        ent2 = multinomial.entropy([[7], [8]], [[.3, .7], [.4, .6]])
        assert_allclose(ent2,
                        [[binom.entropy(7, .3), binom.entropy(7, .4)],
                         [binom.entropy(8, .3), binom.entropy(8, .4)]],
                        rtol=1e-8)
    def test_mean(self):
        mean1 = multinomial.mean(5, [.2, .8])
        assert_allclose(mean1, [5*.2, 5*.8], rtol=1e-8)
    def test_mean_broadcasting(self):
        mean1 = multinomial.mean([5, 6], [.2, .8])
        assert_allclose(mean1, [[5*.2, 5*.8], [6*.2, 6*.8]], rtol=1e-8)
    def test_frozen(self):
        # The frozen distribution should agree with the regular one
        np.random.seed(1234)
        n = 12
        pvals = (.1, .2, .3, .4)
        x = [[0,0,0,12],[0,0,1,11],[0,1,1,10],[1,1,1,9],[1,1,2,8]]
        x = np.asarray(x, dtype=np.float64)
        mn_frozen = multinomial(n, pvals)
        assert_allclose(mn_frozen.pmf(x), multinomial.pmf(x, n, pvals))
        assert_allclose(mn_frozen.logpmf(x), multinomial.logpmf(x, n, pvals))
        assert_allclose(mn_frozen.entropy(), multinomial.entropy(n, pvals))
class TestInvwishart(object):
def test_frozen(self):
# Test that the frozen and non-frozen inverse Wishart gives the same
# answers
# Construct an arbitrary positive definite scale matrix
dim = 4
scale = np.diag(np.arange(dim)+1)
scale[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
scale = np.dot(scale.T, scale)
# Construct a collection of positive definite matrices to test the PDF
X = []
for i in range(5):
x = np.diag(np.arange(dim)+(i+1)**2)
x[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
x = np.dot(x.T, x)
X.append(x)
X = np.array(X).T
# Construct a 1D and 2D set of parameters
parameters = [
(10, 1, np.linspace(0.1, 10, 5)), # 1D case
(10, scale, X)
]
for (df, scale, x) in parameters:
iw = invwishart(df, scale)
assert_equal(iw.var(), invwishart.var(df, scale))
assert_equal(iw.mean(), invwishart.mean(df, scale))
assert_equal(iw.mode(), invwishart.mode(df, scale))
assert_allclose(iw.pdf(x), invwishart.pdf(x, df, scale))
def test_1D_is_invgamma(self):
# The 1-dimensional inverse Wishart with an identity scale matrix is
# just an inverse gamma distribution.
# Test variance, mean, pdf
# Kolgomorov-Smirnov test for rvs
np.random.seed(482974)
sn = 500
dim = 1
scale = np.eye(dim)
df_range = np.arange(5, 20, 2, dtype=float)
X = np.linspace(0.1,10,num=10)
for df in df_range:
iw = invwishart(df, scale)
ig = invgamma(df/2, scale=1./2)
# Statistics
assert_allclose(iw.var(), ig.var())
assert_allclose(iw.mean(), ig.mean())
# PDF
assert_allclose(iw.pdf(X), ig.pdf(X))
# rvs
rvs = iw.rvs(size=sn)
args = (df/2, 0, 1./2)
alpha = 0.01
check_distribution_rvs('invgamma', args, alpha, rvs)
def test_wishart_invwishart_2D_rvs(self):
dim = 3
df = 10
# Construct a simple non-diagonal positive definite matrix
scale = np.eye(dim)
scale[0,1] = 0.5
scale[1,0] = 0.5
# Construct frozen Wishart and inverse Wishart random variables
w = wishart(df, scale)
iw = invwishart(df, scale)
# Get the generated random variables from a known seed
np.random.seed(248042)
w_rvs = wishart.rvs(df, scale)
np.random.seed(248042)
frozen_w_rvs = w.rvs()
np.random.seed(248042)
iw_rvs = invwishart.rvs(df, scale)
np.random.seed(248042)
frozen_iw_rvs = iw.rvs()
# Manually calculate what it should be, based on the Bartlett (1933)
# decomposition of a Wishart into D A A' D', where D is the Cholesky
# factorization of the scale matrix and A is the lower triangular matrix
# with the square root of chi^2 variates on the diagonal and N(0,1)
# variates in the lower triangle.
np.random.seed(248042)
covariances = np.random.normal(size=3)
variances = np.r_[
np.random.chisquare(df),
np.random.chisquare(df-1),
np.random.chisquare(df-2),
]**0.5
# Construct the lower-triangular A matrix
A = np.diag(variances)
A[np.tril_indices(dim, k=-1)] = covariances
# Wishart random variate
D = np.linalg.cholesky(scale)
DA = D.dot(A)
manual_w_rvs = np.dot(DA, DA.T)
# inverse Wishart random variate
# Supposing that the inverse wishart has scale matrix `scale`, then the
# random variate is the inverse of a random variate drawn from a Wishart
# distribution with scale matrix `inv_scale = np.linalg.inv(scale)`
iD = np.linalg.cholesky(np.linalg.inv(scale))
iDA = iD.dot(A)
manual_iw_rvs = np.linalg.inv(np.dot(iDA, iDA.T))
# Test for equality
assert_allclose(w_rvs, manual_w_rvs)
assert_allclose(frozen_w_rvs, manual_w_rvs)
assert_allclose(iw_rvs, manual_iw_rvs)
assert_allclose(frozen_iw_rvs, manual_iw_rvs)
class TestSpecialOrthoGroup(object):
    """Tests for scipy.stats.special_ortho_group.

    Checks reproducibility, input validation, the defining SO(N) properties
    (orthogonality, det == +1), and Haar (rotation-invariant) distribution.
    """
    def test_reproducibility(self):
        np.random.seed(514)
        x = special_ortho_group.rvs(3)
        expected = np.array([[0.99394515, -0.04527879, 0.10011432],
                             [-0.04821555, 0.63900322, 0.76769144],
                             [-0.09873351, -0.76787024, 0.63295101]])
        assert_array_almost_equal(x, expected)
        random_state = np.random.RandomState(seed=514)
        x = special_ortho_group.rvs(3, random_state=random_state)
        assert_array_almost_equal(x, expected)
    def test_invalid_dim(self):
        assert_raises(ValueError, special_ortho_group.rvs, None)
        assert_raises(ValueError, special_ortho_group.rvs, (2, 2))
        assert_raises(ValueError, special_ortho_group.rvs, 1)
        assert_raises(ValueError, special_ortho_group.rvs, 2.5)
    def test_frozen_matrix(self):
        dim = 7
        frozen = special_ortho_group(dim)
        rvs1 = frozen.rvs(random_state=1234)
        rvs2 = special_ortho_group.rvs(dim, random_state=1234)
        assert_equal(rvs1, rvs2)
    def test_det_and_ortho(self):
        xs = [special_ortho_group.rvs(dim)
              for dim in range(2,12)
              for i in range(3)]
        # Test that determinants are always +1
        dets = [np.linalg.det(x) for x in xs]
        assert_allclose(dets, [1.]*30, rtol=1e-13)
        # Test that these are orthogonal matrices
        for x in xs:
            assert_array_almost_equal(np.dot(x, x.T),
                                      np.eye(x.shape[0]))
    def test_haar(self):
        # Test that the distribution is constant under rotation
        # Every column should have the same distribution
        # Additionally, the distribution should be invariant under another rotation
        # Generate samples
        dim = 5
        samples = 1000 # Not too many, or the test takes too long
        ks_prob = 0.39 # ...so don't expect much precision
        np.random.seed(514)
        xs = special_ortho_group.rvs(dim, size=samples)
        # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
        # effectively picking off entries in the matrices of xs.
        # These projections should all have the same distribution,
        # establishing rotational invariance. We use the two-sided
        # KS test to confirm this.
        # We could instead test that angles between random vectors
        # are uniformly distributed, but the below is sufficient.
        # It is not feasible to consider all pairs, so pick a few.
        els = ((0,0), (0,2), (1,4), (2,3))
        #proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els}
        proj = dict(((er, ec), sorted([x[er][ec] for x in xs])) for er, ec in els)
        pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
        ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
        assert_array_less([ks_prob]*len(pairs), ks_tests)
class TestOrthoGroup(object):
    """Tests for scipy.stats.ortho_group.

    Checks reproducibility, input validation, the defining O(N) properties
    (orthogonality, |det| == 1), and Haar (rotation-invariant) distribution.
    """
    def test_reproducibility(self):
        np.random.seed(514)
        x = ortho_group.rvs(3)
        x2 = ortho_group.rvs(3, random_state=514)
        # Note this matrix has det -1, distinguishing O(N) from SO(N)
        expected = np.array([[0.993945, -0.045279, 0.100114],
                             [-0.048216, -0.998469, 0.02711],
                             [-0.098734, 0.031773, 0.994607]])
        assert_array_almost_equal(x, expected)
        assert_array_almost_equal(x2, expected)
        assert_almost_equal(np.linalg.det(x), -1)
    def test_invalid_dim(self):
        assert_raises(ValueError, ortho_group.rvs, None)
        assert_raises(ValueError, ortho_group.rvs, (2, 2))
        assert_raises(ValueError, ortho_group.rvs, 1)
        assert_raises(ValueError, ortho_group.rvs, 2.5)
    def test_det_and_ortho(self):
        xs = [ortho_group.rvs(dim)
              for dim in range(2,12)
              for i in range(3)]
        # O(N) determinants are +/-1, so test |det| == 1 (hence np.fabs).
        dets = [np.fabs(np.linalg.det(x)) for x in xs]
        assert_allclose(dets, [1.]*30, rtol=1e-13)
        # Test that these are orthogonal matrices
        for x in xs:
            assert_array_almost_equal(np.dot(x, x.T),
                                      np.eye(x.shape[0]))
    def test_haar(self):
        # Test that the distribution is constant under rotation
        # Every column should have the same distribution
        # Additionally, the distribution should be invariant under another rotation
        # Generate samples
        dim = 5
        samples = 1000 # Not too many, or the test takes too long
        ks_prob = 0.39 # ...so don't expect much precision
        np.random.seed(518) # Note that the test is sensitive to seed too
        xs = ortho_group.rvs(dim, size=samples)
        # Dot a few rows (0, 1, 2) with unit vectors (0, 2, 4, 3),
        # effectively picking off entries in the matrices of xs.
        # These projections should all have the same distribution,
        # establishing rotational invariance. We use the two-sided
        # KS test to confirm this.
        # We could instead test that angles between random vectors
        # are uniformly distributed, but the below is sufficient.
        # It is not feasible to consider all pairs, so pick a few.
        els = ((0,0), (0,2), (1,4), (2,3))
        #proj = {(er, ec): [x[er][ec] for x in xs] for er, ec in els}
        proj = dict(((er, ec), sorted([x[er][ec] for x in xs])) for er, ec in els)
        pairs = [(e0, e1) for e0 in els for e1 in els if e0 > e1]
        ks_tests = [ks_2samp(proj[p0], proj[p1])[1] for (p0, p1) in pairs]
        assert_array_less([ks_prob]*len(pairs), ks_tests)
class TestRandomCorrelation(object):
def test_reproducibility(self):
np.random.seed(514)
eigs = (.5, .8, 1.2, 1.5)
x = random_correlation.rvs((.5, .8, 1.2, 1.5))
x2 = random_correlation.rvs((.5, .8, 1.2, 1.5), random_state=514)
expected = np.array([[1., -0.20387311, 0.18366501, -0.04953711],
[-0.20387311, 1., -0.24351129, 0.06703474],
[0.18366501, -0.24351129, 1., 0.38530195],
[-0.04953711, 0.06703474, 0.38530195, 1.]])
assert_array_almost_equal(x, expected)
assert_array_almost_equal(x2, expected)
def test_invalid_eigs(self):
assert_raises(ValueError, random_correlation.rvs, None)
assert_raises(ValueError, random_correlation.rvs, 'test')
assert_raises(ValueError, random_correlation.rvs, 2.5)
assert_raises(ValueError, random_correlation.rvs, [2.5])
assert_raises(ValueError, random_correlation.rvs, [[1,2],[3,4]])
assert_raises(ValueError, random_correlation.rvs, [2.5, -.5])
assert_raises(ValueError, random_correlation.rvs, [1, 2, .1])
def test_definition(self):
# Test the defintion of a correlation matrix in several dimensions:
#
# 1. Det is product of eigenvalues (and positive by construction
# in examples)
# 2. 1's on diagonal
# 3. Matrix is symmetric
def norm(i, e):
return i*e/sum(e)
np.random.seed(123)
eigs = [norm(i, np.random.uniform(size=i)) for i in range(2, 6)]
eigs.append([4,0,0,0])
ones = [[1.]*len(e) for e in eigs]
xs = [random_correlation.rvs(e) for e in eigs]
# Test that determinants are products of eigenvalues
# These are positive by construction
# Could also test that the eigenvalues themselves are correct,
# but this seems sufficient.
dets = [np.fabs(np.linalg.det(x)) for x in xs]
dets_known = [np.prod(e) for e in eigs]
assert_allclose(dets, dets_known, rtol=1e-13, atol=1e-13)
# Test for 1's on the diagonal
diags = [np.diag(x) for x in xs]
for a, b in zip(diags, ones):
assert_allclose(a, b, rtol=1e-13)
# Correlation matrices are symmetric
for x in xs:
assert_allclose(x, x.T, rtol=1e-13)
def test_to_corr(self):
# Check some corner cases in to_corr
# ajj == 1
m = np.array([[0.1, 0], [0, 1]], dtype=float)
m = random_correlation._to_corr(m)
assert_allclose(m, np.array([[1, 0], [0, 0.1]]))
# Floating point overflow; fails to compute the correct
# rotation, but should still produce some valid rotation
# rather than infs/nans
with np.errstate(over='ignore'):
g = np.array([[0, 1], [-1, 0]])
m0 = np.array([[1e300, 0], [0, np.nextafter(1, 0)]], dtype=float)
m = random_correlation._to_corr(m0.copy())
assert_allclose(m, g.T.dot(m0).dot(g))
m0 = np.array([[0.9, 1e300], [1e300, 1.1]], dtype=float)
m = random_correlation._to_corr(m0.copy())
assert_allclose(m, g.T.dot(m0).dot(g))
# Zero discriminant; should set the first diag entry to 1
m0 = np.array([[2, 1], [1, 2]], dtype=float)
m = random_correlation._to_corr(m0.copy())
assert_allclose(m[0,0], 1)
# Slightly negative discriminant; should be approx correct still
m0 = np.array([[2 + 1e-7, 1], [1, 2]], dtype=float)
m = random_correlation._to_corr(m0.copy())
assert_allclose(m[0,0], 1)
class TestUnitaryGroup(object):
    """Tests for scipy.stats.unitary_group.

    Checks reproducibility, input validation, unitarity of samples, and the
    Haar property via the eigenvalue-angle distribution.
    """
    def test_reproducibility(self):
        np.random.seed(514)
        x = unitary_group.rvs(3)
        x2 = unitary_group.rvs(3, random_state=514)
        expected = np.array([[0.308771+0.360312j, 0.044021+0.622082j, 0.160327+0.600173j],
                             [0.732757+0.297107j, 0.076692-0.4614j, -0.394349+0.022613j],
                             [-0.148844+0.357037j, -0.284602-0.557949j, 0.607051+0.299257j]])
        assert_array_almost_equal(x, expected)
        assert_array_almost_equal(x2, expected)
    def test_invalid_dim(self):
        assert_raises(ValueError, unitary_group.rvs, None)
        assert_raises(ValueError, unitary_group.rvs, (2, 2))
        assert_raises(ValueError, unitary_group.rvs, 1)
        assert_raises(ValueError, unitary_group.rvs, 2.5)
    def test_unitarity(self):
        xs = [unitary_group.rvs(dim)
              for dim in range(2,12)
              for i in range(3)]
        # Test that these are unitary matrices
        for x in xs:
            assert_allclose(np.dot(x, x.conj().T), np.eye(x.shape[0]), atol=1e-15)
    def test_haar(self):
        # Test that the eigenvalues, which lie on the unit circle in
        # the complex plane, are uncorrelated.
        # Generate samples
        dim = 5
        samples = 1000 # Not too many, or the test takes too long
        np.random.seed(514) # Note that the test is sensitive to seed too
        xs = unitary_group.rvs(dim, size=samples)
        # The angles "x" of the eigenvalues should be uniformly distributed
        # Overall this seems to be a necessary but weak test of the distribution.
        # Use a list comprehension: np.vstack requires a sequence of arrays;
        # passing a generator is unsupported in modern NumPy.
        eigs = np.vstack([scipy.linalg.eigvals(x) for x in xs])
        x = np.arctan2(eigs.imag, eigs.real)
        res = kstest(x.ravel(), uniform(-np.pi, 2*np.pi).cdf)
        assert_(res.pvalue > 0.05)
def check_pickling(distfn, args):
    """Verify that a distribution survives a pickle round trip.

    The random_state must travel with the pickle: variates drawn from the
    original and from the unpickled copy, starting from the same internal
    state, have to match exactly.  The caller's random_state is restored
    at the end.
    """
    saved_state = distfn.random_state
    distfn.random_state = 1234
    distfn.rvs(*args, size=8)  # advance the stream past its seed point
    pickled = pickle.dumps(distfn)
    r_original = distfn.rvs(*args, size=8)
    r_clone = pickle.loads(pickled).rvs(*args, size=8)
    assert_equal(r_original, r_clone)
    distfn.random_state = saved_state
def test_random_state_property():
    """Every multivariate distribution honors and pickles its random_state."""
    scale = np.eye(3)
    scale[0, 1] = scale[1, 0] = 0.5
    cases = (
        (multivariate_normal, ()),
        (dirichlet, (np.array([1.]),)),
        (wishart, (10, scale)),
        (invwishart, (10, scale)),
        (multinomial, (5, [0.5, 0.4, 0.1])),
        (ortho_group, (2,)),
        (special_ortho_group, (2,)),
    )
    for distfn, args in cases:
        check_random_state_property(distfn, args)
        check_pickling(distfn, args)
|
import re
from django.conf import settings
from django.utils import translation
from haystack import connections
from haystack.backends import BaseEngine, BaseSearchBackend, BaseSearchQuery
from haystack.constants import DEFAULT_ALIAS
from haystack.utils.loading import load_backend
class MultilingualSearchBackend(BaseSearchBackend):
    """Haystack backend that fans index updates out to every language alias."""
    def update(self, index, iterable, commit=True):
        """Reindex ``iterable`` once per language in settings.LANGUAGES.

        Each language is activated before delegating to its backend so
        translated content is rendered in that language; the originally
        active language is restored afterwards.
        """
        # NOTE(review): translation.get_language() can return None when no
        # translation is active, in which case the slice would raise --
        # confirm callers always run with an active language.
        initial_language = translation.get_language()[:2]
        seen_aliases = []
        for language_code, _ in settings.LANGUAGES:
            alias = '%s-%s' % (self.connection_alias, language_code)
            if alias in seen_aliases:
                # Several languages may share one backend alias; push the
                # documents through each alias only once.
                continue
            seen_aliases.append(alias)
            translation.activate(language_code)
            backend = connections[alias].get_backend()
            backend.parent_class.update(backend, index, iterable, commit)
        translation.activate(initial_language)
    def clear(self, **kwargs):
        """Deliberately a no-op on this aggregating backend."""
        return
class MultilingualSearchEngine(BaseEngine):
    """Engine whose backend broadcasts updates across language connections."""
    backend = MultilingualSearchBackend
    def get_query(self):
        """Return a query bound to the currently active language's connection."""
        language_code = translation.get_language()[:2]
        alias = '%s-%s' % (self.using, language_code)
        return connections[alias].get_query()
class LanguageSearchBackend(BaseSearchBackend):
    """Mixin for a single-language backend that disables direct writes."""
    def update(self, *args, **kwargs):
        """No-op: updates go through the multilingual backend instead."""
        # Handle all updates through the main Multilingual object.
        return
class LanguageSearchQuery(BaseSearchQuery):
    """Placeholder query class for per-language connections (no overrides)."""
    pass
class LanguageSearchEngine(BaseEngine):
    """Engine for a single-language connection.

    Wraps the engine named by the connection's BASE_ENGINE setting and
    derives a backend class from it that disables direct updates (see
    LanguageSearchBackend.update).
    """
    def __init__(self, **kwargs):
        conn_config = settings.HAYSTACK_CONNECTIONS[kwargs['using']]
        base_engine = load_backend(conn_config['BASE_ENGINE'])(**kwargs)
        # Build a backend class on the fly.  LanguageSearchBackend comes
        # first in the MRO so its no-op update() wins; 'parent_class'
        # keeps a handle on the wrapped backend class for delegation.
        backend_bases = (LanguageSearchBackend, base_engine.backend)
        backend_class = type('LanguageSearchBackend', backend_bases,
                             {'parent_class': base_engine.backend})
        self.backend = backend_class
        self.query = base_engine.query
        super(LanguageSearchEngine, self).__init__(**kwargs)
|
"""Provides an interface to communicate with the device via the adb command.
Assumes adb binary is currently on system path.
Usage:
python android_commands.py wait-for-pm
"""
import collections
import datetime
import logging
import optparse
import os
import pexpect
import re
import subprocess
import sys
import tempfile
import time
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..',
'..', 'third_party', 'android', 'testrunner'))
import adb_interface
import cmd_helper
import errors # is under ../../third_party/android/testrunner/errors.py
from run_tests_helper import IsRunningAsBuildbot
# Matches one line of pexpect output: the text between a '\n' and the '\r'.
PEXPECT_LINE_RE = re.compile('\n([^\r]*)\r')
# Improbable byte sequence -- presumably used as a sentinel shell prompt so
# command boundaries can be detected (confirm against the shell helpers).
SHELL_PROMPT = '~+~PQ\x17RS~+~'
# sysfs/procfs paths used by performance-related helpers.
SCALING_GOVERNOR = '/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor'
DROP_CACHES = '/proc/sys/vm/drop_caches'
# Device-side property file, and the Dalvik assertion property.
LOCAL_PROPERTIES_PATH = '/data/local.prop'
JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions'
# Logcat lines indicating that boot has completed (either message suffices).
BOOT_COMPLETE_RE = re.compile(
    re.escape('android.intent.action.MEDIA_MOUNTED path: /mnt/sdcard')
    + '|' + re.escape('PowerManagerService: bootCompleted'))
# Android key event codes (see android.view.KeyEvent).
KEYCODE_DPAD_RIGHT = 22
KEYCODE_ENTER = 66
KEYCODE_MENU = 82
KEYCODE_BACK = 4
def GetEmulators():
  """Returns the serials of all emulators listed by `adb devices`.

  No status filtering is applied, so offline emulators are included.
  Both emulator entries in the output below would be returned:
    * daemon not running. starting it now on port 5037 *
    * daemon started successfully *
    List of devices attached
    027c10494100b4d7 device
    emulator-5554 offline
    emulator-5558 device
  """
  emulator_re = re.compile('^emulator-[0-9]+', re.MULTILINE)
  adb_output = cmd_helper.GetCmdOutput(['adb', 'devices'])
  return emulator_re.findall(adb_output)
def GetAttachedDevices():
  """Returns the serials of attached, online android devices.

  If ANDROID_SERIAL names a preferred device, its serial is moved to the
  front of the returned list.
  Example `adb devices` output:
    * daemon not running. starting it now on port 5037 *
    * daemon started successfully *
    List of devices attached
    027c10494100b4d7 device
    emulator-5554 offline
  """
  device_re = re.compile('^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)
  serials = device_re.findall(cmd_helper.GetCmdOutput(['adb', 'devices']))
  preferred = os.environ.get("ANDROID_SERIAL")
  if preferred in serials:
    serials.remove(preferred)
    serials.insert(0, preferred)
  return serials
def _GetHostFileInfo(file_name):
  """Returns a (size, modified-UTC-time) tuple for |file_name|.

  The device reports modification times only to minute precision, so the
  seconds and microseconds are stripped from the host timestamp to make
  the two comparable.
  """
  utc_time = datetime.datetime.utcfromtimestamp(os.path.getmtime(file_name))
  time_delta = datetime.timedelta(seconds=utc_time.second,
                                  microseconds=utc_time.microsecond)
  return os.path.getsize(file_name), utc_time - time_delta
def ListHostPathContents(path):
  """Lists files in all subdirectories of |path|.

  Files and directories whose names start with '.' are skipped.
  Args:
    path: The path to list.
  Returns:
    A dict of {"name": (size, lastmod), ...} with names relative to |path|.
  """
  if os.path.isfile(path):
    return {os.path.basename(path): _GetHostFileInfo(path)}
  ret = {}
  for root, dirs, files in os.walk(path):
    # Prune hidden directories in place so os.walk does not descend into
    # them.  A slice assignment is used because removing items while
    # iterating the same list skips elements (e.g. the second of two
    # adjacent hidden directories would previously survive the prune).
    dirs[:] = [d for d in dirs if not d.startswith('.')]
    for f in files:
      if f.startswith('.'):
        continue
      full_file_name = os.path.join(root, f)
      file_name = os.path.relpath(full_file_name, path)
      ret[file_name] = _GetHostFileInfo(full_file_name)
  return ret
def _GetFilesFromRecursiveLsOutput(path, ls_output, re_file, utc_offset=None):
"""Gets a list of files from `ls` command output.
Python's os.walk isn't used because it doesn't work over adb shell.
Args:
path: The path to list.
ls_output: A list of lines returned by an `ls -lR` command.
re_file: A compiled regular expression which parses a line into named groups
consisting of at minimum "filename", "date", "time", "size" and
optionally "timezone".
utc_offset: A 5-character string of the form +HHMM or -HHMM, where HH is a
2-digit string giving the number of UTC offset hours, and MM is a
2-digit string giving the number of UTC offset minutes. If the input
utc_offset is None, will try to look for the value of "timezone" if it
is specified in re_file.
Returns:
A dict of {"name": (size, lastmod), ...} where:
name: The file name relative to |path|'s directory.
size: The file size in bytes (0 for directories).
lastmod: The file last modification date in UTC.
"""
re_directory = re.compile('^%s/(?P<dir>[^:]+):$' % re.escape(path))
path_dir = os.path.dirname(path)
current_dir = ''
files = {}
for line in ls_output:
directory_match = re_directory.match(line)
if directory_match:
current_dir = directory_match.group('dir')
continue
file_match = re_file.match(line)
if file_match:
filename = os.path.join(current_dir, file_match.group('filename'))
if filename.startswith(path_dir):
filename = filename[len(path_dir)+1:]
lastmod = datetime.datetime.strptime(
file_match.group('date') + ' ' + file_match.group('time')[:5],
'%Y-%m-%d %H:%M')
if not utc_offset and 'timezone' in re_file.groupindex:
utc_offset = file_match.group('timezone')
if isinstance(utc_offset, str) and len(utc_offset) == 5:
utc_delta = datetime.timedelta(hours=int(utc_offset[1:3]),
minutes=int(utc_offset[3:5]))
if utc_offset[0:1] == '-':
utc_delta = -utc_delta;
lastmod -= utc_delta
files[filename] = (int(file_match.group('size')), lastmod)
return files
def GetLogTimestamp(log_line):
  """Parses the leading logcat timestamp of |log_line|.

  Returns a datetime (the year defaults to 1900), or None if the line does
  not begin with an 'MM-DD HH:MM:SS.mmm' timestamp.
  """
  timestamp_text = log_line[:18]  # 'MM-DD HH:MM:SS.mmm' is 18 characters.
  try:
    return datetime.datetime.strptime(timestamp_text, '%m-%d %H:%M:%S.%f')
  except (ValueError, IndexError):
    logging.critical('Error reading timestamp from ' + log_line)
    return None
class AndroidCommands(object):
"""Helper class for communicating with Android device via adb.
Args:
device: If given, adb commands are only send to the device of this ID.
Otherwise commands are sent to all attached devices.
wait_for_pm: If true, issues an adb wait-for-device command.
"""
  def __init__(self, device=None, wait_for_pm=False):
    """Creates the adb interface and optionally targets a single device.

    Args:
      device: Serial of the device to target; commands go to all attached
          devices if None.
      wait_for_pm: If True, blocks until the package manager is available.
    """
    self._adb = adb_interface.AdbInterface()
    if device:
      self._adb.SetTargetSerial(device)
    if wait_for_pm:
      self.WaitForDevicePm()
    self._logcat = None  # Set when logcat monitoring starts (StartMonitoringLogcat).
    self._original_governor = None  # Saved CPU scaling governor, if changed.
    self._pushed_files = []  # Paths pushed to the device, for later cleanup.
  def Adb(self):
    """Returns our AdbInterface to avoid us wrapping all its methods.

    Callers can issue raw adb commands directly on the returned object.
    """
    return self._adb
def WaitForDevicePm(self):
"""Blocks until the device's package manager is available.
To workaround http://b/5201039, we restart the shell and retry if the
package manager isn't back after 120 seconds.
Raises:
errors.WaitForResponseTimedOutError after max retries reached.
"""
last_err = None
retries = 3
while retries:
try:
self._adb.WaitForDevicePm()
return # Success
except errors.WaitForResponseTimedOutError as e:
last_err = e
logging.warning('Restarting and retrying after timeout: %s' % str(e))
retries -= 1
self.RestartShell()
raise last_err # Only reached after max retries, re-raise the last error.
  def SynchronizeDateTime(self):
    """Synchronize date/time between host and device."""
    # Sends the host's epoch seconds as '%f'.  NOTE(review): assumes the
    # device's `date` accepts a raw seconds value -- confirm against the
    # device's toolbox implementation.
    self._adb.SendShellCommand('date -u %f' % time.time())
  def RestartShell(self):
    """Restarts the shell on the device. Does not block for it to return."""
    # 'stop' then 'start' restarts the device-side services (presumably the
    # Android runtime, not the adb daemon itself -- confirm).
    self.RunShellCommand('stop')
    self.RunShellCommand('start')
def Reboot(self, full_reboot=True):
"""Reboots the device and waits for the package manager to return.
Args:
full_reboot: Whether to fully reboot the device or just restart the shell.
"""
# TODO(torne): hive can't reboot the device either way without breaking the
# connection; work out if we can handle this better
if os.environ.get('USING_HIVE'):
logging.warning('Ignoring reboot request as we are on hive')
return
if full_reboot:
self._adb.SendCommand('reboot')
else:
self.RestartShell()
self.WaitForDevicePm()
self.StartMonitoringLogcat(timeout=120)
self.WaitForLogMatch(BOOT_COMPLETE_RE)
self.UnlockDevice()
def Uninstall(self, package):
"""Uninstalls the specified package from the device.
Args:
package: Name of the package to remove.
"""
uninstall_command = 'uninstall %s' % package
logging.info('>>> $' + uninstall_command)
self._adb.SendCommand(uninstall_command, timeout_time=60)
def Install(self, package_file_path):
"""Installs the specified package to the device.
Args:
package_file_path: Path to .apk file to install.
"""
assert os.path.isfile(package_file_path)
install_command = 'install %s' % package_file_path
logging.info('>>> $' + install_command)
self._adb.SendCommand(install_command, timeout_time=2*60)
# It is tempting to turn this function into a generator, however this is not
# possible without using a private (local) adb_shell instance (to ensure no
# other command interleaves usage of it), which would defeat the main aim of
# being able to reuse the adb shell instance across commands.
def RunShellCommand(self, command, timeout_time=20, log_result=True):
"""Send a command to the adb shell and return the result.
Args:
command: String containing the shell command to send. Must not include
the single quotes as we use them to escape the whole command.
timeout_time: Number of seconds to wait for command to respond before
retrying, used by AdbInterface.SendShellCommand.
log_result: Boolean to indicate whether we should log the result of the
shell command.
Returns:
list containing the lines of output received from running the command
"""
logging.info('>>> $' + command)
if "'" in command: logging.warning(command + " contains ' quotes")
result = self._adb.SendShellCommand("'%s'" % command,
timeout_time).splitlines()
if log_result:
logging.info('\n>>> '.join(result))
return result
def KillAll(self, process):
"""Android version of killall, connected via adb.
Args:
process: name of the process to kill off
Returns:
the number of processess killed
"""
pids = self.ExtractPid(process)
if pids:
self.RunShellCommand('kill ' + ' '.join(pids))
return len(pids)
def StartActivity(self, package, activity,
action='android.intent.action.VIEW', data=None,
extras=None, trace_file_name=None):
"""Starts |package|'s activity on the device.
Args:
package: Name of package to start (e.g. 'com.android.chrome').
activity: Name of activity (e.g. '.Main' or 'com.android.chrome.Main').
data: Data string to pass to activity (e.g. 'http://www.example.com/').
extras: Dict of extras to pass to activity.
trace_file_name: If used, turns on and saves the trace to this file name.
"""
cmd = 'am start -a %s -n %s/%s' % (action, package, activity)
if data:
cmd += ' -d "%s"' % data
if extras:
cmd += ' -e'
for key in extras:
cmd += ' %s %s' % (key, extras[key])
if trace_file_name:
cmd += ' -S -P ' + trace_file_name
self.RunShellCommand(cmd)
def EnableAdbRoot(self):
"""Enable root on the device."""
self._adb.EnableAdbRoot()
def CloseApplication(self, package):
"""Attempt to close down the application, using increasing violence.
Args:
package: Name of the process to kill off, e.g. com.android.chrome
"""
self.RunShellCommand('am force-stop ' + package)
def ClearApplicationState(self, package):
"""Closes and clears all state for the given |package|."""
self.CloseApplication(package)
self.RunShellCommand('rm -r /data/data/%s/cache/*' % package)
self.RunShellCommand('rm -r /data/data/%s/files/*' % package)
self.RunShellCommand('rm -r /data/data/%s/shared_prefs/*' % package)
def SendKeyEvent(self, keycode):
"""Sends keycode to the device.
Args:
keycode: Numeric keycode to send (see "enum" at top of file).
"""
self.RunShellCommand('input keyevent %d' % keycode)
def PushIfNeeded(self, local_path, device_path):
"""Pushes |local_path| to |device_path|.
Works for files and directories. This method skips copying any paths in
|test_data_paths| that already exist on the device with the same timestamp
and size.
All pushed files can be removed by calling RemovePushedFiles().
"""
assert os.path.exists(local_path)
self._pushed_files.append(device_path)
# If the path contents are the same, there's nothing to do.
local_contents = ListHostPathContents(local_path)
device_contents = self.ListPathContents(device_path)
# Only compare the size and timestamp if only copying a file because
# the filename on device can be renamed.
if os.path.isfile(local_path):
assert len(local_contents) == 1
is_equal = local_contents.values() == device_contents.values()
else:
is_equal = local_contents == device_contents
if is_equal:
logging.info('%s is up-to-date. Skipping file push.' % device_path)
return
# They don't match, so remove everything first and then create it.
if os.path.isdir(local_path):
self.RunShellCommand('rm -r %s' % device_path, timeout_time=2*60)
self.RunShellCommand('mkdir -p %s' % device_path)
# NOTE: We can't use adb_interface.Push() because it hardcodes a timeout of
# 60 seconds which isn't sufficient for a lot of users of this method.
push_command = 'push %s %s' % (local_path, device_path)
logging.info('>>> $' + push_command)
output = self._adb.SendCommand(push_command, timeout_time=30*60)
# Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)"
# Errors look like this: "failed to copy ... "
if not re.search('^[0-9]', output):
logging.critical('PUSH FAILED: ' + output)
def GetFileContents(self, filename):
"""Gets contents from the file specified by |filename|."""
return self.RunShellCommand('if [ -f "' + filename + '" ]; then cat "' +
filename + '"; fi')
def SetFileContents(self, filename, contents):
"""Writes |contents| to the file specified by |filename|."""
with tempfile.NamedTemporaryFile() as f:
f.write(contents)
f.flush()
self._adb.Push(f.name, filename)
def RemovePushedFiles(self):
"""Removes all files pushed with PushIfNeeded() from the device."""
for p in self._pushed_files:
self.RunShellCommand('rm -r %s' % p, timeout_time=2*60)
def ListPathContents(self, path):
"""Lists files in all subdirectories of |path|.
Args:
path: The path to list.
Returns:
A dict of {"name": (size, lastmod), ...}.
"""
# Example output:
# /foo/bar:
# -rw-r----- 1 user group 102 2011-05-12 12:29:54.131623387 +0100 baz.txt
re_file = re.compile('^-(?P<perms>[^\s]+)\s+'
'(?P<user>[^\s]+)\s+'
'(?P<group>[^\s]+)\s+'
'(?P<size>[^\s]+)\s+'
'(?P<date>[^\s]+)\s+'
'(?P<time>[^\s]+)\s+'
'(?P<filename>[^\s]+)$')
return _GetFilesFromRecursiveLsOutput(
path, self.RunShellCommand('ls -lR %s' % path), re_file,
self.RunShellCommand('date +%z')[0])
def SetupPerformanceTest(self):
"""Sets up performance tests."""
# Disable CPU scaling to reduce noise in tests
if not self._original_governor:
self._original_governor = self.RunShellCommand('cat ' + SCALING_GOVERNOR)
self.RunShellCommand('echo performance > ' + SCALING_GOVERNOR)
self.DropRamCaches()
def TearDownPerformanceTest(self):
"""Tears down performance tests."""
if self._original_governor:
self.RunShellCommand('echo %s > %s' % (self._original_governor[0],
SCALING_GOVERNOR))
self._original_governor = None
def SetJavaAssertsEnabled(self, enable):
"""Sets or removes the device java assertions property.
Args:
enable: If True the property will be set.
Returns:
True if the file was modified (reboot is required for it to take effect).
"""
# First ensure the desired property is persisted.
temp_props_file = tempfile.NamedTemporaryFile()
properties = ''
if self._adb.Pull(LOCAL_PROPERTIES_PATH, temp_props_file.name):
properties = file(temp_props_file.name).read()
re_search = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*all\s*$', re.MULTILINE)
if enable != bool(re.search(re_search, properties)):
re_replace = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*\w+\s*$', re.MULTILINE)
properties = re.sub(re_replace, '', properties)
if enable:
properties += '\n%s=all\n' % JAVA_ASSERT_PROPERTY
file(temp_props_file.name, 'w').write(properties)
self._adb.Push(temp_props_file.name, LOCAL_PROPERTIES_PATH)
# Next, check the current runtime value is what we need, and
# if not, set it and report that a reboot is required.
was_set = 'all' in self.RunShellCommand('getprop ' + JAVA_ASSERT_PROPERTY)
if was_set == enable:
return False
self.RunShellCommand('setprop %s "%s"' % (JAVA_ASSERT_PROPERTY,
enable and 'all' or ''))
return True
def DropRamCaches(self):
"""Drops the filesystem ram caches for performance testing."""
self.RunShellCommand('echo 3 > ' + DROP_CACHES)
def StartMonitoringLogcat(self, clear=True, timeout=10, logfile=None,
filters=[]):
"""Starts monitoring the output of logcat, for use with WaitForLogMatch.
Args:
clear: If True the existing logcat output will be cleared, to avoiding
matching historical output lurking in the log.
timeout: How long WaitForLogMatch will wait for the given match
filters: A list of logcat filters to be used.
"""
if clear:
self.RunShellCommand('logcat -c')
args = ['logcat', '-v', 'threadtime']
if filters:
args.extend(filters)
else:
args.append('*:v')
# Spawn logcat and syncronize with it.
for _ in range(4):
self._logcat = pexpect.spawn('adb', args, timeout=timeout,
logfile=logfile)
self.RunShellCommand('log startup_sync')
if self._logcat.expect(['startup_sync', pexpect.EOF,
pexpect.TIMEOUT]) == 0:
break
self._logcat.close(force=True)
else:
logging.critical('Error reading from logcat: ' + str(self._logcat.match))
sys.exit(1)
def GetMonitoredLogCat(self):
"""Returns an "adb logcat" command as created by pexpected.spawn."""
if not self._logcat:
self.StartMonitoringLogcat(clear=False)
return self._logcat
def WaitForLogMatch(self, search_re):
"""Blocks until a line containing |line_re| is logged or a timeout occurs.
Args:
search_re: The compiled re to search each line for.
Returns:
The re match object.
"""
if not self._logcat:
self.StartMonitoringLogcat(clear=False)
logging.info('<<< Waiting for logcat:' + str(search_re.pattern))
t0 = time.time()
try:
while True:
# Note this will block for upto the timeout _per log line_, so we need
# to calculate the overall timeout remaining since t0.
time_remaining = t0 + self._logcat.timeout - time.time()
if time_remaining < 0: raise pexpect.TIMEOUT(self._logcat)
self._logcat.expect(PEXPECT_LINE_RE, timeout=time_remaining)
line = self._logcat.match.group(1)
search_match = search_re.search(line)
if search_match:
return search_match
logging.info('<<< Skipped Logcat Line:' + str(line))
except pexpect.TIMEOUT:
raise pexpect.TIMEOUT(
'Timeout (%ds) exceeded waiting for pattern "%s" (tip: use -vv '
'to debug)' %
(self._logcat.timeout, search_re.pattern))
def StartRecordingLogcat(self, clear=True, filters=['*:v']):
"""Starts recording logcat output to eventually be saved as a string.
This call should come before some series of tests are run, with either
StopRecordingLogcat or SearchLogcatRecord following the tests.
Args:
clear: True if existing log output should be cleared.
filters: A list of logcat filters to be used.
"""
if clear:
self._adb.SendCommand('logcat -c')
logcat_command = 'adb logcat -v threadtime %s' % ' '.join(filters)
self.logcat_process = subprocess.Popen(logcat_command, shell=True,
stdout=subprocess.PIPE)
def StopRecordingLogcat(self):
"""Stops an existing logcat recording subprocess and returns output.
Returns:
The logcat output as a string or an empty string if logcat was not
being recorded at the time.
"""
if not self.logcat_process:
return ''
# Cannot evaluate directly as 0 is a possible value.
# Better to read the self.logcat_process.stdout before killing it,
# Otherwise the communicate may return incomplete output due to pipe break.
if self.logcat_process.poll() == None:
self.logcat_process.kill()
(output, _) = self.logcat_process.communicate()
self.logcat_process = None
return output
def SearchLogcatRecord(self, record, message, thread_id=None, proc_id=None,
log_level=None, component=None):
"""Searches the specified logcat output and returns results.
This method searches through the logcat output specified by record for a
certain message, narrowing results by matching them against any other
specified criteria. It returns all matching lines as described below.
Args:
record: A string generated by Start/StopRecordingLogcat to search.
message: An output string to search for.
thread_id: The thread id that is the origin of the message.
proc_id: The process that is the origin of the message.
log_level: The log level of the message.
component: The name of the component that would create the message.
Returns:
A list of dictionaries represeting matching entries, each containing keys
thread_id, proc_id, log_level, component, and message.
"""
if thread_id:
thread_id = str(thread_id)
if proc_id:
proc_id = str(proc_id)
results = []
reg = re.compile('(\d+)\s+(\d+)\s+([A-Z])\s+([A-Za-z]+)\s*:(.*)$',
re.MULTILINE)
log_list = reg.findall(record)
for (tid, pid, log_lev, comp, msg) in log_list:
if ((not thread_id or thread_id == tid) and
(not proc_id or proc_id == pid) and
(not log_level or log_level == log_lev) and
(not component or component == comp) and msg.find(message) > -1):
match = dict({'thread_id': tid, 'proc_id': pid,
'log_level': log_lev, 'component': comp,
'message': msg})
results.append(match)
return results
def ExtractPid(self, process_name):
"""Extracts Process Ids for a given process name from Android Shell.
Args:
process_name: name of the process on the device.
Returns:
List of all the process ids (as strings) that match the given name.
"""
pids = []
for line in self.RunShellCommand('ps'):
data = line.split()
try:
if process_name in data[-1]: # name is in the last column
pids.append(data[1]) # PID is in the second column
except IndexError:
pass
return pids
def GetIoStats(self):
"""Gets cumulative disk IO stats since boot (for all processes).
Returns:
Dict of {num_reads, num_writes, read_ms, write_ms} or None if there
was an error.
"""
# Field definitions.
# http://www.kernel.org/doc/Documentation/iostats.txt
device = 2
num_reads_issued_idx = 3
num_reads_merged_idx = 4
num_sectors_read_idx = 5
ms_spent_reading_idx = 6
num_writes_completed_idx = 7
num_writes_merged_idx = 8
num_sectors_written_idx = 9
ms_spent_writing_idx = 10
num_ios_in_progress_idx = 11
ms_spent_doing_io_idx = 12
ms_spent_doing_io_weighted_idx = 13
for line in self.RunShellCommand('cat /proc/diskstats'):
fields = line.split()
if fields[device] == 'mmcblk0':
return {
'num_reads': int(fields[num_reads_issued_idx]),
'num_writes': int(fields[num_writes_completed_idx]),
'read_ms': int(fields[ms_spent_reading_idx]),
'write_ms': int(fields[ms_spent_writing_idx]),
}
logging.warning('Could not find disk IO stats.')
return None
def GetMemoryUsage(self, package):
"""Returns the memory usage for all processes whose name contains |pacakge|.
Args:
name: A string holding process name to lookup pid list for.
Returns:
Dict of {metric:usage_kb}, summed over all pids associated with |name|.
The metric keys retruned are: Size, Rss, Pss, Shared_Clean, Shared_Dirty,
Private_Clean, Private_Dirty, Referenced, Swap, KernelPageSize,
MMUPageSize.
"""
usage_dict = collections.defaultdict(int)
pid_list = self.ExtractPid(package)
# We used to use the showmap command, but it is currently broken on
# stingray so it's easier to just parse /proc/<pid>/smaps directly.
memory_stat_re = re.compile('^(?P<key>\w+):\s+(?P<value>\d+) kB$')
for pid in pid_list:
for line in self.RunShellCommand('cat /proc/%s/smaps' % pid,
log_result=False):
match = re.match(memory_stat_re, line)
if match: usage_dict[match.group('key')] += int(match.group('value'))
if not usage_dict or not any(usage_dict.values()):
# Presumably the process died between ps and showmap.
logging.warning('Could not find memory usage for pid ' + str(pid))
return usage_dict
def UnlockDevice(self):
"""Unlocks the screen of the device."""
# Make sure a menu button event will actually unlock the screen.
if IsRunningAsBuildbot():
assert self.RunShellCommand('getprop ro.test_harness')[0].strip() == '1'
# The following keyevent unlocks the screen if locked.
self.SendKeyEvent(KEYCODE_MENU)
# If the screen wasn't locked the previous command will bring up the menu,
# which this will dismiss. Otherwise this shouldn't change anything.
self.SendKeyEvent(KEYCODE_BACK)
def main(argv):
  """Command-line entry point: optionally wait for the package manager and
  toggle the dalvik.vm.enableassertions property (rebooting if needed)."""
  option_parser = optparse.OptionParser()
  option_parser.add_option('-w', '--wait_for_pm', action='store_true',
                           default=False, dest='wait_for_pm',
                           help='Waits for Device Package Manager to become available')
  option_parser.add_option('--enable_asserts', dest='set_asserts',
                           action='store_true', default=None,
                           help='Sets the dalvik.vm.enableassertions property to "all"')
  option_parser.add_option('--disable_asserts', dest='set_asserts',
                           action='store_false', default=None,
                           help='Removes the dalvik.vm.enableassertions property')
  options, args = option_parser.parse_args(argv)

  commands = AndroidCommands(wait_for_pm=options.wait_for_pm)
  # Identity check: set_asserts is tri-state (None = leave alone, True/False
  # = force on/off), so 'is not None' is the correct test, not '!= None'.
  if options.set_asserts is not None:
    if commands.SetJavaAssertsEnabled(options.set_asserts):
      commands.Reboot(full_reboot=False)


if __name__ == '__main__':
  main(sys.argv)
|
from django.db import models
import re
from django.conf import settings
class StatusUpdate(models.Model):
    """A single status/outage update displayed on the status dashboard."""
    summary = models.CharField('Short Summary', max_length=255, blank=False)
    posted_by = models.CharField(max_length=255, blank=False)
    duration_minutes = models.IntegerField(null=True, blank=True)
    admin_assigned = models.CharField(max_length=255, blank=False)
    bugzilla_id = models.CharField(max_length=255, blank=True, null=True)
    description = models.TextField(blank=False)
    impact_of_work = models.TextField(blank=False)
    created_on = models.DateTimeField(auto_now_add=True)
    start_time = models.DateTimeField(null=False, blank=False)
    end_time = models.DateTimeField(null=True, blank=True)
    posted_on = models.DateTimeField(auto_now_add=True)
    severity = models.ForeignKey('Severity')
    status = models.ForeignKey('Status')
    frontpage = models.BooleanField('Display on Homepage', default=True)
    timezone = models.ForeignKey('TimeZone')
    site = models.ForeignKey('Site', null=True)
    from_bugzilla = models.BooleanField()
    is_private = models.BooleanField()

    # Fields consulted by the admin/search UI.
    search_fields = (
        'summary',
        'description',
        'bugzilla_id',
        'severity__name',
        'posted_by',
        'admin_assigned',
        'serviceoutage__service__name',
    )

    class Meta:
        verbose_name_plural = 'Status Updates'

    def expand_bug(self, input_val):
        """Expand 'Bug NNN' / 'bug NNN' references into bugzil.la links.

        Returns '' when input_val is not a string (e.g. None).
        """
        # BUG FIX: the old pattern '[B|b]ug' is a character class that also
        # matched a literal '|'; '[Bb]ug' is what was intended.
        regex = r'[Bb]ug (\d+)'
        try:
            matches = re.findall(regex, input_val)
        except TypeError:
            return ''
        for m in matches:
            input_val = re.sub(
                r'[Bb]ug %s' % m,
                "<a href='https://bugzil.la/%s'>Bug %s</a>" % (m, m),
                input_val)
        return input_val

    def expand(self, input_val):
        """Apply all text expansions (currently only bug-link expansion)."""
        return self.expand_bug(input_val)

    @property
    def impact_of_work_expanded(self):
        return self.expand(self.impact_of_work)

    @property
    def summary_expanded(self):
        return self.expand(self.summary)

    @property
    def description_expanded(self):
        return self.expand(self.description)

    def __unicode__(self):
        return self.summary

    @property
    def event_start_date(self):
        return self.start_time.strftime("%Y-%m-%d")

    @property
    def event_start_time(self):
        return self.start_time.strftime("%H:%M")

    @property
    def event_end_date(self):
        # NOTE(review): end_time is nullable; callers presumably only use this
        # when an end time is set — confirm before relying on it.
        return self.end_time.strftime("%Y-%m-%d")

    @property
    def event_end_time(self):
        return self.end_time.strftime("%H:%M")

    @property
    def services(self):
        """Comma-separated names of the services affected by this update."""
        return ", ".join([s.service.name for s in self.serviceoutage_set.all()])

    @property
    def bugzilla_links(self):
        """HTML anchors for each id in the comma-separated bugzilla_id field."""
        bugs = self.bugzilla_id.replace(' ', '').split(',')
        return ', '.join('<a href="%s%s">%s</a>' % (settings.BUGZILLA_URL, bug, bug)
                         for bug in bugs)

    @models.permalink
    def get_absolute_url(self):
        return ('article-detail', [self.id])

    def expand_minutes(self):
        """Render duration_minutes as e.g. '2 hours', '5 minutes' or
        '1 hour 30 minutes'."""
        input_minutes = self.duration_minutes
        # Floor division keeps hours an int on both Python 2 and 3
        # (plain '/' would produce a float under Python 3).
        hours = input_minutes // 60
        minutes = input_minutes % 60
        hour_string = 'hour' if hours == 1 else 'hours'
        minute_string = 'minute' if minutes == 1 else 'minutes'
        if hours > 0 and minutes == 0:
            return "%s %s" % (hours, hour_string)
        elif hours == 0 and minutes > 0:
            return "%s %s" % (minutes, minute_string)
        else:
            return "%s %s %s %s" % (hours, hour_string, minutes, minute_string)
class Severity(models.Model):
    """Severity level of a StatusUpdate; css_class drives its styling."""
    name = models.CharField(max_length=255, blank=False)
    css_class = models.CharField(max_length=255, blank=False)
    sort_order = models.IntegerField(blank=False, default=0)

    def __unicode__(self):
        return self.name

    class Meta:
        # BUG FIX: was misspelled 'Severeties' in the admin UI.
        verbose_name_plural = 'Severities'
class Status(models.Model):
    # Workflow state of a StatusUpdate (ordered in lists by sort_order).
    name = models.CharField(max_length=255, blank=False)
    sort_order = models.IntegerField(blank=False, default=0)
    def __unicode__(self):
        return self.name
    def __repr__(self):
        return self.name
    class Meta:
        verbose_name_plural = 'Statuses'
class ServiceOutage(models.Model):
    # Join table linking a StatusUpdate to each Service it affects.
    status_update = models.ForeignKey('StatusUpdate')
    service = models.ForeignKey('Service')
    def __unicode__(self):
        return "%s - %s" % (self.status_update, self.service)
    class Meta:
        verbose_name_plural = 'Service Outages'
class Service(models.Model):
    # A monitored service that outages can be recorded against.
    name = models.CharField(max_length=255, blank=False)
    def __unicode__(self):
        return self.name
class SourceEmailAddress(models.Model):
    # A from-address choice for outgoing notifications.
    name = models.CharField(max_length=255, blank=False)
    short_description = models.CharField(max_length=255, blank=False)
    def __unicode__(self):
        return self.name
class TimeZone(models.Model):
    # Timezone label referenced by StatusUpdate.timezone.
    name = models.CharField(max_length=255, blank=False)
    def __unicode__(self):
        return self.name
class Site(models.Model):
    # Physical/logical site a StatusUpdate may be scoped to.
    name = models.CharField(max_length=255, blank=False)
    def __unicode__(self):
        return self.name
class StatusUpdateComment(models.Model):
    # Free-form comment attached to a StatusUpdate.
    author = models.CharField(max_length=255, blank=False)
    comment = models.TextField(blank=False)
    created_on = models.DateTimeField(auto_now_add=True)
    statusupdate = models.ForeignKey('StatusUpdate')
    def __unicode__(self):
        return "%s - %s" % (self.author, self.created_on)
class DestinationEmailAddress(models.Model):
    # A recipient-address choice for outgoing notifications.
    name = models.CharField(max_length=255, blank=False)
    def __unicode__(self):
        return self.name
class OutageNotificationTemplate(models.Model):
    """Notification template whose <<name>> placeholders are filled in from
    attributes of a StatusUpdate instance."""
    # Class-level default kept for backward compatibility; __init__ always
    # shadows it with a per-instance dict so instances never share state.
    interpolated_variable_hash = {}
    name = models.CharField(max_length=255, blank=False)
    subject = models.CharField(max_length=255, blank=False)
    outage_notification_template = models.TextField(blank=False)

    def __init__(self, *args, **kwargs):
        # Fresh dict per instance (the class attribute would otherwise be a
        # shared mutable default).
        self.interpolated_variable_hash = {}
        super(OutageNotificationTemplate, self).__init__(*args, **kwargs)

    def extract_variable_to_interpolate(self, input_line):
        """Collect <<name>> placeholders from input_line.

        Returns a {'<<name>>': 'name'} dict, or None when the line contains
        no placeholders. Also accumulates into interpolated_variable_hash.
        """
        var_re = re.compile('<<([^>><<]*)>>')
        result = var_re.findall(input_line)
        if not result:
            return None
        rethash = {}
        for r in result:
            rethash['<<%s>>' % r] = r
            self.interpolated_variable_hash['<<%s>>' % r] = r
        return rethash

    def interpolate(self, status_update, template):
        """Replace every collected <<attr>> in template with
        str(status_update.attr); unknown attributes are left untouched."""
        self.status_update = status_update
        for line in template.split():
            self.extract_variable_to_interpolate(line)
        # BUG FIX: iterkeys() is Python-2-only; plain iteration works on both.
        for k in self.interpolated_variable_hash:
            try:
                template = template.replace(
                    k, str(getattr(status_update,
                                   self.interpolated_variable_hash[k])))
            except AttributeError:
                pass
        return template

    def interpolate_subject(self, status_update=None, template=None):
        """Interpolate the subject line (or an explicit template)."""
        if not template:
            template = self.subject
        return self.interpolate(status_update, template)

    def interpolate_template(self, status_update=None, template=None):
        """Interpolate the notification body (or an explicit template)."""
        self.status_update = status_update
        if not template:
            template = self.outage_notification_template
        return self.interpolate(status_update, template)

    def __unicode__(self):
        return self.name
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence, Union
from packaging.version import parse
from pandas import DataFrame, Series
if TYPE_CHECKING:
    import numpy as np

    if parse(np.__version__) < parse("1.22.0"):
        raise NotImplementedError(
            "NumPy 1.22.0 or later required for type checking"
        )

    from numpy.typing import (
        ArrayLike as ArrayLike,
        DTypeLike,
        NDArray,
        _FloatLike_co,
        _UIntLike_co,
    )

    _ExtendedFloatLike_co = Union[_FloatLike_co, _UIntLike_co]
    # BUG FIX: numpy.typing.NDArray accepts a single scalar-type parameter,
    # so NDArray[Any, np.dtype[...]] is rejected by type checkers.  The
    # two-parameter generic (shape, dtype) is np.ndarray itself.
    NumericArray = np.ndarray[Any, np.dtype[_ExtendedFloatLike_co]]
    Float64Array = np.ndarray[Any, np.dtype[np.double]]
    ArrayLike1D = Union[Sequence[Union[float, int]], NumericArray, Series]
    ArrayLike2D = Union[
        Sequence[Sequence[Union[float, int]]], NumericArray, DataFrame
    ]
else:
    # At runtime the aliases degrade to Any so no numpy import is required.
    ArrayLike = Any
    DTypeLike = Any
    Float64Array = Any
    NumericArray = Any
    ArrayLike1D = Any
    ArrayLike2D = Any
    NDArray = Any

__all__ = [
    "ArrayLike",
    "DTypeLike",
    "Float64Array",
    "ArrayLike1D",
    "ArrayLike2D",
    "NDArray",
    "NumericArray",
]
|
"""Testing facility for conkit.io.a2m"""
__author__ = "Felix Simkovic"
__date__ = "30 Jul 2018"
import os
import unittest
from conkit.io.a2m import A2mParser
from conkit.io._iotools import create_tmp_f
class TestA2mParser(unittest.TestCase):
    """Tests for conkit.io.a2m.A2mParser reading and writing."""

    def test_read_1(self):
        # Headerless A2M input: entries are auto-named seq_0, seq_1, ...
        msa = """GSMFTPKPPQDSAVI--GYCVKQGAVMKNWKRRY--LDENTIGYF
EVHK--ECKQSDIMMRD--FEIVTTSRTFYVQADSPEEMHSWIKA
EVHKVQECK--DIMMRDNLFEI--TSRTFWKRRY--LDENTIGYF
EVHKVQECK--DIMMRDNLFEI--TSRTF--RRY--LDENTIGYF
"""
        f_name = create_tmp_f(content=msa)
        parser = A2mParser()
        with open(f_name, 'r') as f_in:
            sequence_file = parser.read(f_in)
        for i, sequence_entry in enumerate(sequence_file):
            if i == 0:
                self.assertEqual('seq_0', sequence_entry.id)
                self.assertEqual('GSMFTPKPPQDSAVI--GYCVKQGAVMKNWKRRY--LDENTIGYF', sequence_entry.seq)
            elif i == 1:
                self.assertEqual('seq_1', sequence_entry.id)
                self.assertEqual('EVHK--ECKQSDIMMRD--FEIVTTSRTFYVQADSPEEMHSWIKA', sequence_entry.seq)
            elif i == 2:
                self.assertEqual('seq_2', sequence_entry.id)
                self.assertEqual('EVHKVQECK--DIMMRDNLFEI--TSRTFWKRRY--LDENTIGYF', sequence_entry.seq)
            elif i == 3:
                self.assertEqual('seq_3', sequence_entry.id)
                self.assertEqual('EVHKVQECK--DIMMRDNLFEI--TSRTF--RRY--LDENTIGYF', sequence_entry.seq)
        os.unlink(f_name)

    def test_read_2(self):
        # FASTA-style '>' headers are not valid A2M and must raise ValueError.
        msa = """>header1
GSMFTPKPPQDSAVI--GYCVKQGAVMKNWKRRY--LDENTIGYF
>header2
EVHK--ECKQSDIMMRD--FEIVTTSRTFYVQADSPEEMHSWIKA
>header3
EVHKVQECK--DIMMRDNLFEI--TSRTFWKRRY--LDENTIGYF
>header4
EVHKVQECK--DIMMRDNLFEI--TSRTF--RRY--LDENTIGYF
"""
        f_name = create_tmp_f(content=msa)
        parser = A2mParser()
        with open(f_name, 'r') as f_in:
            with self.assertRaises(ValueError):
                parser.read(f_in)
        os.unlink(f_name)

    def test_write_1(self):
        # Round-trip: what write() produces must equal what read() consumed.
        msa = [
            'GSMFTPKPPQDSAVI--GYCVKQGAVMKNWKRRY--LDENTIGYF',
            'EVHK--ECKQSDIMMRD--FEIVTTSRTFYVQADSPEEMHSWIKA',
            'EVHKVQECK--DIMMRDNLFEI--TSRTFWKRRY--LDENTIGYF',
            'EVHKVQECK--DIMMRDNLFEI--TSRTF--RRY--LDENTIGYF',
        ]
        f_name_in = create_tmp_f(content='\n'.join(msa))
        f_name_out = create_tmp_f()
        parser = A2mParser()
        with open(f_name_in, 'r') as f_in, open(f_name_out, 'w') as f_out:
            sequence_file = parser.read(f_in)
            parser.write(f_out, sequence_file)
        with open(f_name_out, 'r') as f_in:
            output = f_in.read().splitlines()
        self.assertEqual(msa, output)
        # BUG FIX: map() is lazy on Python 3, so the previous
        # map(os.unlink, [...]) never executed and leaked the temp files.
        for tmp_file in (f_name_in, f_name_out):
            os.unlink(tmp_file)


if __name__ == "__main__":
    unittest.main(verbosity=2)
|
import os
# Prefer the C-accelerated expat parser; fall back to the pure-Python
# xmlproc parser when expat is unavailable.  _haveExpat selects which
# parseFile implementation XMLParser exposes (see end of the class body).
try:
    from xml.parsers.expat import ParserCreate
except ImportError:
    _haveExpat = 0
    from xml.parsers.xmlproc.xmlproc import XMLProcessor
else:
    _haveExpat = 1
class XMLParser:
    """Builds a nested (name, attrs, children) tuple tree from an XML file.

    parseFile() accepts a path (str/bytes) or an open file object and returns
    the root (name, attrs, children) tuple; children is a list of tuples and
    character-data strings.  Backed by expat when available, xmlproc otherwise.
    """

    def __init__(self):
        self.root = []
        # Parse state: (children-list-being-filled, name[, attrs, parent-state]).
        self.current = (self.root, None)

    def getRoot(self):
        assert len(self.root) == 1
        return self.root[0]

    def startElementHandler(self, name, attrs):
        children = []
        self.current = (children, name, attrs, self.current)

    def endElementHandler(self, name):
        children, name, attrs, previous = self.current
        previous[0].append((name, attrs, children))
        self.current = previous

    def characterDataHandler(self, data):
        nodes = self.current[0]
        # Coalesce adjacent character-data chunks into one node.
        if nodes and type(nodes[-1]) == type(data):
            nodes[-1] = nodes[-1] + data
        else:
            nodes.append(data)

    def _expatParseFile(self, pathOrFile):
        parser = ParserCreate()
        parser.StartElementHandler = self.startElementHandler
        parser.EndElementHandler = self.endElementHandler
        parser.CharacterDataHandler = self.characterDataHandler
        if isinstance(pathOrFile, (bytes, str)):
            # BUG FIX: expat's ParseFile() reads bytes from the stream, so the
            # file must be opened in binary mode (text mode fails on Python 3).
            f = open(pathOrFile, 'rb')
            didOpen = 1
        else:
            didOpen = 0
            f = pathOrFile
        parser.ParseFile(f)
        if didOpen:
            f.close()
        return self.getRoot()

    def _xmlprocDataHandler(self, data, begin, end):
        self.characterDataHandler(data[begin:end])

    def _xmlprocParseFile(self, pathOrFile):
        proc = XMLProcessor()
        proc.app.handle_start_tag = self.startElementHandler
        proc.app.handle_end_tag = self.endElementHandler
        proc.app.handle_data = self._xmlprocDataHandler
        if isinstance(pathOrFile, (bytes, str)):
            f = open(pathOrFile)
            didOpen = 1
        else:
            didOpen = 0
            f = pathOrFile
        proc.parseStart()
        proc.read_from(f)
        proc.flush()
        proc.parseEnd()
        proc.deref()
        if didOpen:
            f.close()
        return self.getRoot()

    # Bind the available backend at class-creation time.
    if _haveExpat:
        parseFile = _expatParseFile
    else:
        parseFile = _xmlprocParseFile
def stripCharacterData(nodes, recursive=True):
    """Strip whitespace from character-data nodes in place.

    Whitespace-only strings are removed entirely; element tuples are
    descended into when recursive is true.
    """
    i = 0
    while i < len(nodes):
        node = nodes[i]
        if isinstance(node, tuple):
            # Element node: optionally clean its children, then move on.
            if recursive:
                stripCharacterData(node[2])
            i = i + 1
            continue
        stripped = node.strip()
        if stripped:
            nodes[i] = stripped
            i = i + 1
        else:
            del nodes[i]
def buildTree(pathOrFile, stripData=1):
    """Parse pathOrFile and return the root (name, attrs, children) tuple,
    optionally stripping whitespace-only character data."""
    tree = XMLParser().parseFile(pathOrFile)
    if stripData:
        stripCharacterData(tree[2])
    return tree
if __name__ == "__main__":
    from pprint import pprint
    import sys
    # CLI usage: xmltools.py <xml-file> [anything] — any extra argument
    # enables whitespace stripping.
    strip = bool(sys.argv[2:])
    tree = buildTree(sys.argv[1], strip)
    pprint(tree)
|
from django.contrib.messages import *
|
from django import http
from django.db.transaction import non_atomic_requests
from django.shortcuts import get_list_or_404, get_object_or_404, redirect
from django.utils.translation import ugettext
from django.utils.cache import patch_cache_control
from django.views.decorators.cache import cache_control
from django.views.decorators.vary import vary_on_headers
import caching.base as caching
import session_csrf
import waffle
from elasticsearch_dsl import Search
from rest_framework import exceptions
from rest_framework import serializers
from rest_framework.decorators import detail_route
from rest_framework.generics import GenericAPIView, ListAPIView
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.viewsets import GenericViewSet
import olympia.core.logger
from olympia import amo
from olympia.abuse.models import send_abuse_report
from olympia.access import acl
from olympia.amo import messages
from olympia.amo.forms import AbuseForm
from olympia.amo.models import manual_order
from olympia.amo.urlresolvers import get_outgoing_url, get_url_prefix, reverse
from olympia.amo.utils import randslice, render
from olympia.api.pagination import ESPageNumberPagination
from olympia.api.permissions import (
AllowAddonAuthor, AllowReadOnlyIfPublic, AllowRelatedObjectPermissions,
AllowReviewer, AllowReviewerUnlisted, AnyOf, GroupPermission)
from olympia.bandwagon.models import Collection
from olympia.constants.categories import CATEGORIES_BY_ID
from olympia.ratings.forms import RatingForm
from olympia.ratings.models import Rating, GroupedRating
from olympia.search.filters import (
AddonAppQueryParam, AddonCategoryQueryParam, AddonTypeQueryParam,
ReviewedContentFilter, SearchParameterFilter, SearchQueryFilter,
SortingFilter)
from olympia.translations.query import order_by_translation
from olympia.versions.models import Version
from .decorators import addon_view_factory
from .indexers import AddonIndexer
from .models import Addon, Persona, FrozenAddon, ReplacementAddon
from .serializers import (
AddonEulaPolicySerializer, AddonFeatureCompatibilitySerializer,
AddonSerializer, AddonSerializerWithUnlistedData,
ESAddonAutoCompleteSerializer, ESAddonSerializer, LanguageToolsSerializer,
ReplacementAddonSerializer, StaticCategorySerializer, VersionSerializer)
from .utils import get_creatured_ids, get_featured_ids
log = olympia.core.logger.getLogger('z.addons')
addon_view = addon_view_factory(qs=Addon.objects.valid)
addon_valid_disabled_pending_view = addon_view_factory(
qs=Addon.objects.valid_and_disabled_and_pending)
@addon_valid_disabled_pending_view
@non_atomic_requests
def addon_detail(request, addon):
    """Add-ons details page dispatcher.

    Routes to the persona or extension detail view depending on the
    add-on type, renders a 404-status "disabled" page for disabled
    add-ons, and permanently redirects to another app's URL prefix when
    the current app does not support this add-on type.
    """
    if addon.is_deleted or (addon.is_pending() and not addon.is_persona()):
        # Allow pending themes to be listed.
        raise http.Http404
    if addon.is_disabled:
        # Deliberately a 404 status so crawlers drop the page, but with a
        # human-readable template instead of a bare error.
        return render(request, 'addons/impala/disabled.html',
                      {'addon': addon}, status=404)
    # addon needs to have a version and be valid for this app.
    if addon.type in request.APP.types:
        if addon.type == amo.ADDON_PERSONA:
            return persona_detail(request, addon)
        else:
            if not addon.current_version:
                raise http.Http404
            return extension_detail(request, addon)
    else:
        # Redirect to an app that supports this type.
        try:
            # First app (in display order) whose types include this one.
            new_app = [a for a in amo.APP_USAGE if addon.type
                       in a.types][0]
        except IndexError:
            raise http.Http404
        else:
            # Rewrite the URL prefix to the other app, then redirect.
            prefixer = get_url_prefix()
            prefixer.app = new_app.short
            return http.HttpResponsePermanentRedirect(reverse(
                'addons.detail', args=[addon.slug]))
@vary_on_headers('X-Requested-With')
@non_atomic_requests
def extension_detail(request, addon):
    """Extensions details page.

    Renders the full detail page, or just the below-the-fold half
    ("details-more") for AJAX requests so the initial page load stays
    cheap.
    """
    # If current version is incompatible with this app, redirect.
    comp_apps = addon.compatible_apps
    if comp_apps and request.APP not in comp_apps:
        prefixer = get_url_prefix()
        # NOTE(review): dict.keys()[0] is Python 2 only; under Python 3
        # this would need list(comp_apps)[0] — confirm target runtime.
        prefixer.app = comp_apps.keys()[0].short
        return redirect('addons.detail', addon.slug, permanent=True)
    # Popular collections this addon is part of.
    collections = Collection.objects.listed().filter(
        addons=addon, application=request.APP.id)
    ctx = {
        'addon': addon,
        'src': request.GET.get('src', 'dp-btn-primary'),
        'version_src': request.GET.get('src', 'dp-btn-version'),
        'tags': addon.tags.not_denied(),
        'grouped_ratings': GroupedRating.get(addon.id),
        'review_form': RatingForm(),
        'reviews': Rating.without_replies.all().filter(
            addon=addon, is_latest=True).exclude(body=None),
        'get_replies': Rating.get_replies,
        'collections': collections.order_by('-subscribers')[:3],
        'abuse_form': AbuseForm(request=request),
    }
    # details.html just returns the top half of the page for speed. The bottom
    # does a lot more queries we don't want on the initial page load.
    if request.is_ajax():
        # Other add-ons/apps from the same author(s).
        ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6]
        return render(request, 'addons/impala/details-more.html', ctx)
    else:
        return render(request, 'addons/impala/details.html', ctx)
def _category_personas(qs, limit):
    """Return up to ``limit`` randomly-picked personas from ``qs``.

    The random slice is computed lazily and memoized under a cache key
    derived from the queryset, so repeated calls for the same category
    reuse one cached selection.
    """
    def pick_random():
        return randslice(qs, limit=limit)
    cache_key = 'cat-personas:' + qs.query_key()
    return caching.cached(pick_random, cache_key)
@non_atomic_requests
def persona_detail(request, addon):
    """Details page for Personas (lightweight themes)."""
    if not (addon.is_public() or addon.is_pending()):
        raise http.Http404
    persona = addon.persona
    # This persona's categories.
    categories = addon.categories.all()
    category_personas = None
    if categories.exists():
        # Show a random sample of other personas from the first category.
        qs = Addon.objects.public().filter(categories=categories[0])
        category_personas = _category_personas(qs, limit=6)
    data = {
        'addon': addon,
        'persona': persona,
        'categories': categories,
        'author_personas': persona.authors_other_addons()[:3],
        'category_personas': category_personas,
    }
    # Link to the first author's profile, tagged with the detail-page src,
    # or None if the add-on somehow has no authors.
    try:
        author = addon.authors.all()[0]
    except IndexError:
        author = None
    else:
        author = author.get_url_path(src='addon-detail')
    data['author_gallery'] = author
    dev_tags, user_tags = addon.tags_partitioned_by_developer
    data.update({
        'dev_tags': dev_tags,
        'user_tags': user_tags,
        'review_form': RatingForm(),
        'reviews': Rating.without_replies.all().filter(
            addon=addon, is_latest=True),
        'get_replies': Rating.get_replies,
        'search_cat': 'themes',
        'abuse_form': AbuseForm(request=request),
    })
    return render(request, 'addons/persona_detail.html', data)
class BaseFilter(object):
    """
    Filters help generate querysets for add-on listings.
    You have to define ``opts`` on the subclass as a sequence of (key, title)
    pairs. The key is used in GET parameters and the title can be used in the
    view.
    The chosen filter field is combined with the ``base`` queryset using
    the ``key`` found in request.GET. ``default`` should be a key in ``opts``
    that's used if nothing good is found in request.GET.
    """
    def __init__(self, request, base, key, default, model=Addon):
        # ``opts`` (and optional ``extras``) are sequences of
        # (option key, title) pairs declared by subclasses.
        self.opts_dict = dict(self.opts)
        self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {}
        self.request = request
        self.base_queryset = base
        self.key = key
        self.model = model
        # Resolve the requested option (or the default) and build the
        # corresponding queryset immediately.
        self.field, self.title = self.options(self.request, key, default)
        self.qs = self.filter(self.field)
    def options(self, request, key, default):
        """Get the (option, title) pair we want according to the request."""
        # Only accept GET values that are known options; anything else
        # silently falls back to the default.
        if key in request.GET and (request.GET[key] in self.opts_dict or
                                   request.GET[key] in self.extras_dict):
            opt = request.GET[key]
        else:
            opt = default
        if opt in self.opts_dict:
            title = self.opts_dict[opt]
        else:
            title = self.extras_dict[opt]
        return opt, title
    def all(self):
        """Get a full mapping of {option: queryset}."""
        return dict((field, self.filter(field)) for field in dict(self.opts))
    def filter(self, field):
        """Get the queryset for the given field."""
        # Dispatch to the matching filter_<field>() method below.
        return getattr(self, 'filter_{0}'.format(field))()
    def filter_featured(self):
        # Random featured ids; manual_order preserves that random order
        # in the SQL result.
        ids = self.model.featured_random(self.request.APP, self.request.LANG)
        return manual_order(self.base_queryset, ids, 'addons.id')
    def filter_free(self):
        # Addon managers need the app for top_free/top_paid; other models
        # (e.g. webapps historically) take no app argument.
        if self.model == Addon:
            return self.base_queryset.top_free(self.request.APP, listed=False)
        else:
            return self.base_queryset.top_free(listed=False)
    def filter_paid(self):
        if self.model == Addon:
            return self.base_queryset.top_paid(self.request.APP, listed=False)
        else:
            return self.base_queryset.top_paid(listed=False)
    def filter_popular(self):
        return self.base_queryset.order_by('-weekly_downloads')
    def filter_downloads(self):
        # Alias: "downloads" sorts the same way as "popular".
        return self.filter_popular()
    def filter_users(self):
        return self.base_queryset.order_by('-average_daily_users')
    def filter_created(self):
        return self.base_queryset.order_by('-created')
    def filter_updated(self):
        return self.base_queryset.order_by('-last_updated')
    def filter_rating(self):
        return self.base_queryset.order_by('-bayesian_rating')
    def filter_hotness(self):
        return self.base_queryset.order_by('-hotness')
    def filter_name(self):
        # Names are translated, so ordering happens via the translations
        # table rather than a plain order_by.
        return order_by_translation(self.base_queryset.all(), 'name')
class ESBaseFilter(BaseFilter):
    """BaseFilter that uses elasticsearch.

    Instead of one ``filter_<option>()`` method per option, every option
    maps directly to an elasticsearch sort expression; ``base_queryset``
    is expected to be an ES search object supporting ``order_by``.

    The redundant ``__init__`` override (which only delegated to super
    with an identical signature) has been removed; construction is
    inherited from BaseFilter unchanged.
    """

    def filter(self, field):
        """Return the base queryset ordered by the ES sort for ``field``.

        Raises:
            KeyError: if ``field`` has no elasticsearch sort mapping.
        """
        sorts = {'name': 'name_sort',
                 'created': '-created',
                 'updated': '-last_updated',
                 'popular': '-weekly_downloads',
                 'users': '-average_daily_users',
                 'rating': '-bayesian_rating'}
        return self.base_queryset.order_by(sorts[field])
@non_atomic_requests
def home(request):
    """Site homepage: featured/up-and-coming extensions, featured themes,
    most popular extensions, and featured collections."""
    # Add-ons.
    base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION)
    # This is lame for performance. Kill it with ES.
    frozen = list(FrozenAddon.objects.values_list('addon', flat=True))
    # We want to display 6 Featured Extensions, Up & Coming Extensions and
    # Featured Themes.
    featured = Addon.objects.featured(request.APP, request.LANG,
                                      amo.ADDON_EXTENSION)[:6]
    hotness = base.exclude(id__in=frozen).order_by('-hotness')[:6]
    personas = Addon.objects.featured(request.APP, request.LANG,
                                      amo.ADDON_PERSONA)[:6]
    # Most Popular extensions is a simple links list, we display slightly more.
    popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10]
    # We want a maximum of 6 Featured Collections as well (though we may get
    # fewer than that).
    collections = Collection.objects.filter(listed=True,
                                            application=request.APP.id,
                                            type=amo.COLLECTION_FEATURED)[:6]
    return render(request, 'addons/home.html',
                  {'popular': popular, 'featured': featured,
                   'hotness': hotness, 'personas': personas,
                   'src': 'homepage', 'collections': collections})
@non_atomic_requests
def homepage_promos(request):
    """Serve the legacy discovery-pane promo modules for the homepage."""
    # Imported lazily to avoid a circular import at module load time.
    from olympia.legacy_discovery.views import promos
    version = request.GET.get('version')
    platform = request.GET.get('platform')
    # With neither parameter there is nothing sensible to render.
    if not version and not platform:
        raise http.Http404
    return promos(request, 'home', version, platform)
@addon_view
@non_atomic_requests
def eula(request, addon, file_id=None):
    """Display an add-on's EULA for a specific file or, by default, for
    the current version.  Add-ons without a EULA redirect back to their
    detail page."""
    if not addon.eula:
        return http.HttpResponseRedirect(addon.get_url_path())
    version = (get_object_or_404(addon.versions, files__id=file_id)
               if file_id else addon.current_version)
    context = {'addon': addon, 'version': version}
    return render(request, 'addons/eula.html', context)
@addon_view
@non_atomic_requests
def privacy(request, addon):
    """Display an add-on's privacy policy, or bounce back to the detail
    page when the add-on has none."""
    if not addon.privacy_policy:
        return http.HttpResponseRedirect(addon.get_url_path())
    return render(request, 'addons/privacy.html', {'addon': addon})
@addon_view
@non_atomic_requests
def license(request, addon, version=None):
    """Display the license for a specific (listed) version number, or for
    the current version when no version number is given.

    404s when the resolved version is missing or has no license.
    """
    if version is not None:
        # Only listed versions with valid files qualify; the version
        # number is not unique so take the first match.
        qs = addon.versions.filter(channel=amo.RELEASE_CHANNEL_LISTED,
                                   files__status__in=amo.VALID_FILE_STATUSES)
        version = get_list_or_404(qs, version=version)[0]
    else:
        version = addon.current_version
    if not (version and version.license):
        raise http.Http404
    return render(request, 'addons/impala/license.html',
                  dict(addon=addon, version=version))
@non_atomic_requests
def license_redirect(request, version):
    """Permanently redirect a bare version pk to its canonical license URL."""
    version_obj = get_object_or_404(Version.objects, pk=version)
    return redirect(version_obj.license_url(), permanent=True)
@session_csrf.anonymous_csrf_exempt
@addon_view
@non_atomic_requests
def report_abuse(request, addon):
    """Show and process the full-page abuse-report form for an add-on.

    On a valid POST the report is sent and the user is redirected back to
    the add-on page with a success message; otherwise the form (with any
    validation errors) is re-rendered.
    """
    form = AbuseForm(request.POST or None, request=request)
    if request.method == "POST" and form.is_valid():
        send_abuse_report(request, addon, form.cleaned_data['text'])
        messages.success(request, ugettext('Abuse reported.'))
        return http.HttpResponseRedirect(addon.get_url_path())
    else:
        return render(request, 'addons/report_abuse_full.html',
                      {'addon': addon, 'abuse_form': form})
@cache_control(max_age=60 * 60 * 24)
@non_atomic_requests
def persona_redirect(request, persona_id):
    """Redirect a legacy getpersonas.com id to the add-on detail page.

    Returns a 404 for persona_id 0 (newer themes all carry 0, which maps
    to nothing) and for personas whose parent Addon row is missing.
    """
    # ``persona_id`` is captured from the URL and therefore arrives as a
    # string, so the previous ``persona_id == 0`` comparison could never
    # match.  Compare on the string form to handle both int and str.
    if str(persona_id) == '0':
        # Newer themes have persona_id == 0, doesn't mean anything.
        return http.HttpResponseNotFound()
    persona = get_object_or_404(Persona.objects, persona_id=persona_id)
    try:
        to = reverse('addons.detail', args=[persona.addon.slug])
    except Addon.DoesNotExist:
        # Would otherwise throw 500. Something funky happened during GP
        # migration which caused some Personas to be without Addons (problem
        # with cascading deletes?). Tell GoogleBot these are dead with a 404.
        return http.HttpResponseNotFound()
    return http.HttpResponsePermanentRedirect(to)
@non_atomic_requests
def icloud_bookmarks_redirect(request):
    """Divert the iCloud Bookmarks page to the blocklist entry while the
    waffle switch is active; otherwise render the normal detail page."""
    if not waffle.switch_is_active('icloud_bookmarks_redirect'):
        return addon_detail(request, 'icloud-bookmarks')
    return redirect('/blocked/i1214/', permanent=False)
# Fallback destination when find_replacement_addon() has no curated
# replacement on file, plus the ?src= value tagged onto replacement URLs.
DEFAULT_FIND_REPLACEMENT_PATH = '/collections/mozilla/featured-add-ons/'
FIND_REPLACEMENT_SRC = 'find-replacement'
def find_replacement_addon(request):
    """Redirect ?guid=... to a curated replacement add-on.

    Looks up a ReplacementAddon for the guid; external replacement URLs
    go through the outgoing-link bouncer, internal paths get a temporary
    redirect tagged with src=find-replacement.  Unknown guids fall back
    to the default featured-add-ons collection.
    """
    guid = request.GET.get('guid')
    if not guid:
        raise http.Http404
    try:
        replacement = ReplacementAddon.objects.get(guid=guid)
    except ReplacementAddon.DoesNotExist:
        destination = DEFAULT_FIND_REPLACEMENT_PATH
    else:
        destination = replacement.path
        if replacement.has_external_url():
            # It's an external URL:
            return redirect(get_outgoing_url(destination))
    leading = '' if destination.startswith('/') else '/'
    replace_url = '%s%s?src=%s' % (leading, destination, FIND_REPLACEMENT_SRC)
    return redirect(replace_url, permanent=False)
class AddonViewSet(RetrieveModelMixin, GenericViewSet):
    """Detail endpoint for add-ons, addressable by pk, slug or guid."""
    permission_classes = [
        AnyOf(AllowReadOnlyIfPublic, AllowAddonAuthor,
              AllowReviewer, AllowReviewerUnlisted),
    ]
    serializer_class = AddonSerializer
    serializer_class_with_unlisted_data = AddonSerializerWithUnlistedData
    lookup_value_regex = '[^/]+'  # Allow '.' for email-like guids.

    def get_queryset(self):
        """Return queryset to be used for the view. We implement our own that
        does not depend on self.queryset to avoid cache-machine caching the
        queryset too agressively (mozilla/addons-frontend#2497)."""
        # Special case: admins - and only admins - can see deleted add-ons.
        # This is handled outside a permission class because that condition
        # would pollute all other classes otherwise.
        if (self.request.user.is_authenticated() and
                acl.action_allowed(self.request,
                                   amo.permissions.ADDONS_VIEW_DELETED)):
            return Addon.unfiltered.all()
        # Permission classes disallow access to non-public/unlisted add-ons
        # unless logged in as a reviewer/addon owner/admin, so we don't have to
        # filter the base queryset here.
        return Addon.objects.all()

    def get_serializer_class(self):
        # Override serializer to use serializer_class_with_unlisted_data if
        # we are allowed to access unlisted data.
        # Default to None: ``instance`` is only set by get_object(), so a
        # bare getattr() here would raise AttributeError whenever the
        # serializer class is resolved before (or without) get_object()
        # running.  The truthiness check below already handles None.
        obj = getattr(self, 'instance', None)
        request = self.request
        if (acl.check_unlisted_addons_reviewer(request) or
                (obj and request.user.is_authenticated() and
                 obj.authors.filter(pk=request.user.pk).exists())):
            return self.serializer_class_with_unlisted_data
        return self.serializer_class

    def get_lookup_field(self, identifier):
        """Return which model field ('pk', 'guid' or 'slug') the URL
        identifier should be matched against."""
        lookup_field = 'pk'
        if identifier and not identifier.isdigit():
            # If the identifier contains anything other than a digit, it's
            # either a slug or a guid. guids need to contain either {} or @,
            # which are invalid in a slug.
            if amo.ADDON_GUID_PATTERN.match(identifier):
                lookup_field = 'guid'
            else:
                lookup_field = 'slug'
        return lookup_field

    def get_object(self):
        identifier = self.kwargs.get('pk')
        # Re-route the generic 'pk' URL kwarg to the detected lookup field
        # and remember the instance for get_serializer_class().
        self.lookup_field = self.get_lookup_field(identifier)
        self.kwargs[self.lookup_field] = identifier
        self.instance = super(AddonViewSet, self).get_object()
        return self.instance

    def check_object_permissions(self, request, obj):
        """
        Check if the request should be permitted for a given object.
        Raises an appropriate exception if the request is not permitted.
        Calls DRF implementation, but adds `is_disabled_by_developer` to the
        exception being thrown so that clients can tell the difference between
        a 401/403 returned because an add-on has been disabled by their
        developer or something else.
        """
        try:
            super(AddonViewSet, self).check_object_permissions(request, obj)
        except exceptions.APIException as exc:
            exc.detail = {
                'detail': exc.detail,
                'is_disabled_by_developer': obj.disabled_by_user,
                'is_disabled_by_mozilla': obj.status == amo.STATUS_DISABLED,
            }
            raise exc

    @detail_route()
    def feature_compatibility(self, request, pk=None):
        """Extra action: serialized feature-compatibility info for one add-on."""
        obj = self.get_object()
        serializer = AddonFeatureCompatibilitySerializer(
            obj.feature_compatibility,
            context=self.get_serializer_context())
        return Response(serializer.data)

    @detail_route()
    def eula_policy(self, request, pk=None):
        """Extra action: the add-on's EULA and privacy policy."""
        obj = self.get_object()
        serializer = AddonEulaPolicySerializer(
            obj, context=self.get_serializer_context())
        return Response(serializer.data)
class AddonChildMixin(object):
    """Mixin containing method to retrieve the parent add-on object."""
    def get_addon_object(self, permission_classes=None, lookup='addon_pk'):
        """Return the parent Addon object using the URL parameter passed
        to the view.
        `permission_classes` can be passed to change which permission
        classes the parent viewset will use when loading the Addon object;
        otherwise AddonViewSet.permission_classes will be used."""
        # Memoize: repeated calls within one request reuse the same object
        # (and only run the permission checks once).
        if hasattr(self, 'addon_object'):
            return self.addon_object
        if permission_classes is None:
            permission_classes = AddonViewSet.permission_classes
        # Instantiate the parent viewset directly so its get_object() does
        # the lookup-field resolution and permission checking for us.
        self.addon_object = AddonViewSet(
            request=self.request, permission_classes=permission_classes,
            kwargs={'pk': self.kwargs[lookup]}).get_object()
        return self.addon_object
class AddonVersionViewSet(AddonChildMixin, RetrieveModelMixin,
                          ListModelMixin, GenericViewSet):
    """List/retrieve versions belonging to a specific add-on."""
    # Permissions are always checked against the parent add-on in
    # get_addon_object() using AddonViewSet.permission_classes so we don't need
    # to set any here. Some extra permission classes are added dynamically
    # below in check_permissions() and check_object_permissions() depending on
    # what the client is requesting to see.
    permission_classes = []
    serializer_class = VersionSerializer
    def check_permissions(self, request):
        # ``filter`` widens what the client wants to see; each widening
        # requires stronger permissions, installed here before checking.
        requested = self.request.GET.get('filter')
        if self.action == 'list':
            if requested == 'all_with_deleted':
                # To see deleted versions, you need Addons:ViewDeleted.
                self.permission_classes = [
                    GroupPermission(amo.permissions.ADDONS_VIEW_DELETED)]
            elif requested == 'all_with_unlisted':
                # To see unlisted versions, you need to be add-on author or
                # unlisted reviewer.
                self.permission_classes = [AnyOf(
                    AllowReviewerUnlisted, AllowAddonAuthor)]
            elif requested == 'all_without_unlisted':
                # To see all listed versions (not just public ones) you need to
                # be add-on author or reviewer.
                self.permission_classes = [AnyOf(
                    AllowReviewer, AllowAddonAuthor)]
            # When listing, we can't use AllowRelatedObjectPermissions() with
            # check_permissions(), because AllowAddonAuthor needs an author to
            # do the actual permission check. To work around that, we call
            # super + check_object_permission() ourselves, passing down the
            # addon object directly.
            return super(AddonVersionViewSet, self).check_object_permissions(
                request, self.get_addon_object())
        super(AddonVersionViewSet, self).check_permissions(request)
    def check_object_permissions(self, request, obj):
        # If the instance is marked as deleted and the client is not allowed to
        # see deleted instances, we want to return a 404, behaving as if it
        # does not exist.
        if (obj.deleted and
                not GroupPermission(amo.permissions.ADDONS_VIEW_DELETED).
                has_object_permission(request, self, obj)):
            raise http.Http404
        if obj.channel == amo.RELEASE_CHANNEL_UNLISTED:
            # If the instance is unlisted, only allow unlisted reviewers and
            # authors..
            self.permission_classes = [
                AllowRelatedObjectPermissions(
                    'addon', [AnyOf(AllowReviewerUnlisted, AllowAddonAuthor)])
            ]
        elif not obj.is_public():
            # If the instance is disabled, only allow reviewers and authors.
            self.permission_classes = [
                AllowRelatedObjectPermissions(
                    'addon', [AnyOf(AllowReviewer, AllowAddonAuthor)])
            ]
        super(AddonVersionViewSet, self).check_object_permissions(request, obj)
    def get_queryset(self):
        """Return the right base queryset depending on the situation."""
        requested = self.request.GET.get('filter')
        valid_filters = (
            'all_with_deleted',
            'all_with_unlisted',
            'all_without_unlisted',
            'only_beta'
        )
        # ``filter`` only makes sense when listing; reject it elsewhere so
        # detail requests don't silently ignore it.
        if requested is not None:
            if self.action != 'list':
                raise serializers.ValidationError(
                    'The "filter" parameter is not valid in this context.')
            elif requested not in valid_filters:
                raise serializers.ValidationError(
                    'Invalid "filter" parameter specified.')
        # By default we restrict to valid, listed versions. Some filtering
        # options are available when listing, and in addition, when returning
        # a single instance, we don't filter at all.
        if requested == 'all_with_deleted' or self.action != 'list':
            queryset = Version.unfiltered.all()
        elif requested == 'all_with_unlisted':
            queryset = Version.objects.all()
        elif requested == 'all_without_unlisted':
            queryset = Version.objects.filter(
                channel=amo.RELEASE_CHANNEL_LISTED)
        elif requested == 'only_beta':
            queryset = Version.objects.filter(
                channel=amo.RELEASE_CHANNEL_LISTED,
                files__status=amo.STATUS_BETA).distinct()
        else:
            # By default, we rely on queryset filtering to hide
            # non-public/unlisted versions. get_queryset() might override this
            # if we are asked to see non-valid, deleted and/or unlisted
            # versions explicitly.
            queryset = Version.objects.filter(
                files__status=amo.STATUS_PUBLIC,
                channel=amo.RELEASE_CHANNEL_LISTED).distinct()
        # Filter with the add-on.
        return queryset.filter(addon=self.get_addon_object())
class AddonSearchView(ListAPIView):
    """Public add-on search endpoint backed by elasticsearch."""
    authentication_classes = []
    filter_backends = [
        ReviewedContentFilter, SearchQueryFilter, SearchParameterFilter,
        SortingFilter,
    ]
    pagination_class = ESPageNumberPagination
    permission_classes = []
    serializer_class = ESAddonSerializer
    def get_queryset(self):
        # Base ES search over the add-on index, stripping fields that are
        # indexed for filtering but never returned to clients.
        return Search(
            using=amo.search.get_es(),
            index=AddonIndexer.get_index_alias(),
            doc_type=AddonIndexer.get_doctype_name()).extra(
                _source={'excludes': AddonIndexer.hidden_fields})
    @classmethod
    def as_view(cls, **kwargs):
        # Wrap the view so it runs outside of the implicit transaction.
        view = super(AddonSearchView, cls).as_view(**kwargs)
        return non_atomic_requests(view)
class AddonAutoCompleteSearchView(AddonSearchView):
    """Autocomplete flavour of the search endpoint: unpaginated, returns
    at most 10 results with a minimal serialized payload."""
    pagination_class = None
    serializer_class = ESAddonAutoCompleteSerializer
    def get_queryset(self):
        # Minimal set of fields from ES that we need to build our results.
        # It's the opposite tactic used by the regular search endpoint, which
        # excludes a specific set of fields - because we know that autocomplete
        # only needs to return very few things.
        included_fields = (
            'icon_type',  # Needed for icon_url.
            'id',  # Needed for... id.
            'modified',  # Needed for icon_url.
            'name_translations',  # Needed for... name.
            'default_locale',  # Needed for translations to work.
            'persona',  # Needed for icon_url (sadly).
            'slug',  # Needed for url.
            'type',  # Needed to attach the Persona for icon_url (sadly).
        )
        return Search(
            using=amo.search.get_es(),
            index=AddonIndexer.get_index_alias(),
            doc_type=AddonIndexer.get_doctype_name()).extra(
                _source={'includes': included_fields})
    def list(self, request, *args, **kwargs):
        # Ignore pagination (slice directly) but do wrap the data in a
        # 'results' property to mimic what the search API does.
        queryset = self.filter_queryset(self.get_queryset())[:10]
        serializer = self.get_serializer(queryset, many=True)
        return Response({'results': serializer.data})
class AddonFeaturedView(GenericAPIView):
    """Return a random selection of featured add-ons, optionally scoped by
    app, type, category and language."""
    authentication_classes = []
    permission_classes = []
    serializer_class = AddonSerializer
    # We accept the 'page_size' parameter but we do not allow pagination for
    # this endpoint since the order is random.
    pagination_class = None
    def get(self, request, *args, **kwargs):
        queryset = self.filter_queryset(self.get_queryset())
        serializer = self.get_serializer(queryset, many=True)
        # Simulate pagination-like results, without actual pagination.
        return Response({'results': serializer.data})
    @classmethod
    def as_view(cls, **kwargs):
        # Run the view outside of the implicit transaction.
        view = super(AddonFeaturedView, cls).as_view(**kwargs)
        return non_atomic_requests(view)
    def get_queryset(self):
        return Addon.objects.valid()
    def filter_queryset(self, queryset):
        # We can pass the optional lang parameter to either get_creatured_ids()
        # or get_featured_ids() below to get locale-specific results in
        # addition to the generic ones.
        lang = self.request.GET.get('lang')
        if 'category' in self.request.GET:
            # If a category is passed then the app and type parameters are
            # mandatory because we need to find a category in the constants to
            # pass to get_creatured_ids(), and category slugs are not unique.
            # AddonCategoryQueryParam parses the request parameters for us to
            # determine the category.
            try:
                category = AddonCategoryQueryParam(self.request).get_value()
            except ValueError:
                raise exceptions.ParseError(
                    'Invalid app, category and/or type parameter(s).')
            ids = get_creatured_ids(category, lang)
        else:
            # If no category is passed, only the app parameter is mandatory,
            # because get_featured_ids() needs it to find the right collection
            # to pick addons from. It can optionally filter by type, so we
            # parse request for that as well.
            try:
                app = AddonAppQueryParam(
                    self.request).get_object_from_reverse_dict()
                type_ = None
                if 'type' in self.request.GET:
                    type_ = AddonTypeQueryParam(self.request).get_value()
            except ValueError:
                raise exceptions.ParseError(
                    'Invalid app, category and/or type parameter(s).')
            ids = get_featured_ids(app, lang=lang, type=type_)
        # ids is going to be a random list of ids, we just slice it to get
        # the number of add-ons that was requested. We do it before calling
        # manual_order(), since it'll use the ids as part of a id__in filter.
        try:
            page_size = int(
                self.request.GET.get('page_size', api_settings.PAGE_SIZE))
        except ValueError:
            raise exceptions.ParseError('Invalid page_size parameter')
        ids = ids[:page_size]
        return manual_order(queryset, ids, 'addons.id')
class StaticCategoryView(ListAPIView):
    """List all category constants, sorted by id.  The data is static, so
    responses carry a 6 hour client-side cache header."""
    authentication_classes = []
    pagination_class = None
    permission_classes = []
    serializer_class = StaticCategorySerializer
    def get_queryset(self):
        # Categories come from in-code constants, not the database.
        return sorted(CATEGORIES_BY_ID.values(), key=lambda x: x.id)
    @classmethod
    def as_view(cls, **kwargs):
        view = super(StaticCategoryView, cls).as_view(**kwargs)
        return non_atomic_requests(view)
    def finalize_response(self, request, response, *args, **kwargs):
        response = super(StaticCategoryView, self).finalize_response(
            request, response, *args, **kwargs)
        # Static data: let clients cache for 6 hours.
        patch_cache_control(response, max_age=60 * 60 * 6)
        return response
class LanguageToolsView(ListAPIView):
    """List public dictionaries and language packs for a given app."""
    authentication_classes = []
    pagination_class = None
    permission_classes = []
    serializer_class = LanguageToolsSerializer
    def get_queryset(self):
        # The app parameter is mandatory.
        try:
            application_id = AddonAppQueryParam(self.request).get_value()
        except ValueError:
            raise exceptions.ParseError('Invalid app parameter.')
        types = (amo.ADDON_DICT, amo.ADDON_LPAPP)
        # Only tools that declare a target locale are useful here.
        return Addon.objects.public().filter(
            appsupport__app=application_id, type__in=types,
            target_locale__isnull=False).exclude(target_locale='')
    def list(self, request, *args, **kwargs):
        # Ignore pagination (return everything) but do wrap the data in a
        # 'results' property to mimic what the default implementation of list()
        # does in DRF.
        queryset = self.filter_queryset(self.get_queryset())
        serializer = self.get_serializer(queryset, many=True)
        return Response({'results': serializer.data})
class ReplacementAddonView(ListAPIView):
    """Plain read-only listing of all curated replacement add-ons."""
    authentication_classes = []
    queryset = ReplacementAddon.objects.all()
    serializer_class = ReplacementAddonSerializer
|
from decimal import Decimal
import graphene
import pytest
from django_countries.fields import Country
from saleor.checkout import calculations
from saleor.graphql.payment.enums import OrderAction, PaymentChargeStatusEnum
from saleor.payment.interface import CreditCardInfo, CustomerSource, TokenConfig
from saleor.payment.models import ChargeStatus, Payment, TransactionKind
from saleor.payment.utils import fetch_customer_id, store_customer_id
from tests.api.utils import get_graphql_content
VOID_QUERY = """
mutation PaymentVoid($paymentId: ID!) {
paymentVoid(paymentId: $paymentId) {
payment {
id,
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_payment_void_success(
    staff_api_client, permission_manage_orders, payment_txn_preauth
):
    """Voiding a pre-authorized payment deactivates it and records a
    successful VOID transaction."""
    assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
    payment_id = graphene.Node.to_global_id("Payment", payment_txn_preauth.pk)
    variables = {"paymentId": payment_id}
    response = staff_api_client.post_graphql(
        VOID_QUERY, variables, permissions=[permission_manage_orders]
    )
    content = get_graphql_content(response)
    data = content["data"]["paymentVoid"]
    assert not data["errors"]
    payment_txn_preauth.refresh_from_db()
    assert payment_txn_preauth.is_active is False
    # The fixture already holds the AUTH transaction; the void adds one.
    assert payment_txn_preauth.transactions.count() == 2
    txn = payment_txn_preauth.transactions.last()
    assert txn.kind == TransactionKind.VOID
def test_payment_void_gateway_error(
    staff_api_client, permission_manage_orders, payment_txn_preauth, monkeypatch
):
    """A gateway failure during void surfaces an error, leaves the
    payment active, and records an unsuccessful VOID transaction."""
    assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
    payment_id = graphene.Node.to_global_id("Payment", payment_txn_preauth.pk)
    variables = {"paymentId": payment_id}
    # Force the dummy gateway to report failure.
    monkeypatch.setattr("saleor.payment.gateways.dummy.dummy_success", lambda: False)
    response = staff_api_client.post_graphql(
        VOID_QUERY, variables, permissions=[permission_manage_orders]
    )
    content = get_graphql_content(response)
    data = content["data"]["paymentVoid"]
    assert data["errors"]
    assert data["errors"][0]["field"] is None
    assert data["errors"][0]["message"] == "Unable to void the transaction."
    payment_txn_preauth.refresh_from_db()
    assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
    assert payment_txn_preauth.is_active is True
    # The failed attempt is still recorded as a (non-success) transaction.
    assert payment_txn_preauth.transactions.count() == 2
    txn = payment_txn_preauth.transactions.last()
    assert txn.kind == TransactionKind.VOID
    assert not txn.is_success
CREATE_QUERY = """
mutation CheckoutPaymentCreate($checkoutId: ID!, $input: PaymentInput!) {
checkoutPaymentCreate(checkoutId: $checkoutId, input: $input) {
payment {
transactions {
kind,
token
}
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_checkout_add_payment(
    user_api_client, checkout_with_item, graphql_address_data
):
    """Creating a payment for a checkout with an explicit amount stores an
    active, not-yet-charged Payment matching the checkout total."""
    checkout = checkout_with_item
    checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
    total = calculations.checkout_total(checkout)
    variables = {
        "checkoutId": checkout_id,
        "input": {
            "gateway": "Dummy",
            "token": "sample-token",
            "amount": total.gross.amount,
            "billingAddress": graphql_address_data,
        },
    }
    response = user_api_client.post_graphql(CREATE_QUERY, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutPaymentCreate"]
    assert not data["errors"]
    # Creating a payment does not process it, so no transactions yet.
    transactions = data["payment"]["transactions"]
    assert not transactions
    payment = Payment.objects.get()
    assert payment.checkout == checkout
    assert payment.is_active
    assert payment.token == "sample-token"
    assert payment.total == total.gross.amount
    assert payment.currency == total.gross.currency
    assert payment.charge_status == ChargeStatus.NOT_CHARGED
def test_checkout_add_payment_default_amount(
    user_api_client, checkout_with_item, graphql_address_data
):
    """When no amount is provided, the payment defaults to the checkout's
    gross total."""
    checkout = checkout_with_item
    checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
    total = calculations.checkout_total(checkout)
    variables = {
        "checkoutId": checkout_id,
        "input": {
            "gateway": "DUMMY",
            "token": "sample-token",
            "billingAddress": graphql_address_data,
        },
    }
    response = user_api_client.post_graphql(CREATE_QUERY, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutPaymentCreate"]
    assert not data["errors"]
    transactions = data["payment"]["transactions"]
    assert not transactions
    payment = Payment.objects.get()
    assert payment.checkout == checkout
    assert payment.is_active
    assert payment.token == "sample-token"
    # The amount was omitted from the input, yet matches the total.
    assert payment.total == total.gross.amount
    assert payment.currency == total.gross.currency
    assert payment.charge_status == ChargeStatus.NOT_CHARGED
def test_checkout_add_payment_bad_amount(
    user_api_client, checkout_with_item, graphql_address_data
):
    """An amount exceeding the checkout total is rejected with errors."""
    checkout = checkout_with_item
    checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
    variables = {
        "checkoutId": checkout_id,
        "input": {
            "gateway": "DUMMY",
            "token": "sample-token",
            # One unit more than the actual total — must be refused.
            "amount": str(
                calculations.checkout_total(checkout).gross.amount + Decimal(1)
            ),
            "billingAddress": graphql_address_data,
        },
    }
    response = user_api_client.post_graphql(CREATE_QUERY, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutPaymentCreate"]
    assert data["errors"]
def test_use_checkout_billing_address_as_payment_billing(
    user_api_client, checkout_with_item, address
):
    """Without an input billing address, payment creation fails until the
    checkout itself has one — which is then copied onto the payment."""
    checkout = checkout_with_item
    checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
    total = calculations.checkout_total(checkout)
    variables = {
        "checkoutId": checkout_id,
        "input": {
            "gateway": "Dummy",
            "token": "sample-token",
            "amount": total.gross.amount,
        },
    }
    response = user_api_client.post_graphql(CREATE_QUERY, variables)
    content = get_graphql_content(response)
    data = content["data"]["checkoutPaymentCreate"]
    # check if proper error is returned if address is missing
    assert data["errors"][0]["field"] == "billingAddress"
    assert (
        data["errors"][0]["message"]
        == "No billing address associated with this checkout."
    )
    # assign the address and try again
    address.street_address_1 = "spanish-inqusition"
    address.save()
    checkout.billing_address = address
    checkout.save()
    response = user_api_client.post_graphql(CREATE_QUERY, variables)
    get_graphql_content(response)
    checkout.refresh_from_db()
    assert checkout.payments.count() == 1
    payment = checkout.payments.first()
    # The checkout's billing address was copied onto the payment.
    assert payment.billing_address_1 == address.street_address_1
CAPTURE_QUERY = """
mutation PaymentCapture($paymentId: ID!, $amount: Decimal!) {
paymentCapture(paymentId: $paymentId, amount: $amount) {
payment {
id,
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_payment_capture_success(
    staff_api_client, permission_manage_orders, payment_txn_preauth
):
    """Capturing the full pre-authorized amount fully charges the payment
    and records a CAPTURE transaction."""
    payment = payment_txn_preauth
    assert payment.charge_status == ChargeStatus.NOT_CHARGED
    payment_id = graphene.Node.to_global_id("Payment", payment.pk)
    variables = {"paymentId": payment_id, "amount": str(payment_txn_preauth.total)}
    response = staff_api_client.post_graphql(
        CAPTURE_QUERY, variables, permissions=[permission_manage_orders]
    )
    content = get_graphql_content(response)
    data = content["data"]["paymentCapture"]
    assert not data["errors"]
    # ``payment`` aliases ``payment_txn_preauth``, so this refresh updates
    # the object asserted on below.
    payment_txn_preauth.refresh_from_db()
    assert payment.charge_status == ChargeStatus.FULLY_CHARGED
    assert payment.transactions.count() == 2
    txn = payment.transactions.last()
    assert txn.kind == TransactionKind.CAPTURE
def test_payment_capture_with_invalid_argument(
    staff_api_client, permission_manage_orders, payment_txn_preauth
):
    """Capturing a zero amount is rejected with a validation error."""
    assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
    variables = {
        "paymentId": graphene.Node.to_global_id("Payment", payment_txn_preauth.pk),
        "amount": 0,
    }
    response = staff_api_client.post_graphql(
        CAPTURE_QUERY, variables, permissions=[permission_manage_orders]
    )
    errors = get_graphql_content(response)["data"]["paymentCapture"]["errors"]
    assert len(errors) == 1
    assert errors[0]["message"] == "Amount should be a positive number."
def test_payment_capture_with_payment_non_authorized_yet(
    staff_api_client, permission_manage_orders, payment_dummy
):
    """Capturing a payment that was never authorized fails with
    the proper error message.
    """
    assert payment_dummy.charge_status == ChargeStatus.NOT_CHARGED
    variables = {
        "paymentId": graphene.Node.to_global_id("Payment", payment_dummy.pk),
        "amount": 1,
    }
    response = staff_api_client.post_graphql(
        CAPTURE_QUERY, variables, permissions=[permission_manage_orders]
    )
    payload = get_graphql_content(response)["data"]["paymentCapture"]
    assert payload["errors"] == [
        {"field": None, "message": "Cannot find successful auth transaction"}
    ]
def test_payment_capture_gateway_error(
    staff_api_client, permission_manage_orders, payment_txn_preauth, monkeypatch
):
    """A gateway failure surfaces as an error and logs a failed CAPTURE txn."""
    assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
    variables = {
        "paymentId": graphene.Node.to_global_id("Payment", payment_txn_preauth.pk),
        "amount": str(payment_txn_preauth.total),
    }
    # Force the dummy gateway to report a failure.
    monkeypatch.setattr("saleor.payment.gateways.dummy.dummy_success", lambda: False)
    response = staff_api_client.post_graphql(
        CAPTURE_QUERY, variables, permissions=[permission_manage_orders]
    )
    payload = get_graphql_content(response)["data"]["paymentCapture"]
    assert payload["errors"] == [{"field": None, "message": "Unable to process capture"}]
    payment_txn_preauth.refresh_from_db()
    # Charge status is unchanged, but the failed attempt is still recorded.
    assert payment_txn_preauth.charge_status == ChargeStatus.NOT_CHARGED
    assert payment_txn_preauth.transactions.count() == 2
    failed_txn = payment_txn_preauth.transactions.last()
    assert failed_txn.kind == TransactionKind.CAPTURE
    assert not failed_txn.is_success
REFUND_QUERY = """
mutation PaymentRefund($paymentId: ID!, $amount: Decimal!) {
paymentRefund(paymentId: $paymentId, amount: $amount) {
payment {
id,
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_payment_refund_success(
    staff_api_client, permission_manage_orders, payment_txn_captured
):
    """A fully charged payment can be refunded in full."""
    payment = payment_txn_captured
    payment.charge_status = ChargeStatus.FULLY_CHARGED
    payment.captured_amount = payment.total
    payment.save()
    variables = {
        "paymentId": graphene.Node.to_global_id("Payment", payment.pk),
        "amount": str(payment.total),
    }
    response = staff_api_client.post_graphql(
        REFUND_QUERY, variables, permissions=[permission_manage_orders]
    )
    payload = get_graphql_content(response)["data"]["paymentRefund"]
    assert not payload["errors"]
    payment.refresh_from_db()
    # A full refund flips the status and records a REFUND transaction.
    assert payment.charge_status == ChargeStatus.FULLY_REFUNDED
    assert payment.transactions.count() == 2
    assert payment.transactions.last().kind == TransactionKind.REFUND
def test_payment_refund_with_invalid_argument(
    staff_api_client, permission_manage_orders, payment_txn_captured
):
    """Refunding a zero amount is rejected with a validation error."""
    payment = payment_txn_captured
    payment.charge_status = ChargeStatus.FULLY_CHARGED
    payment.captured_amount = payment.total
    payment.save()
    variables = {
        "paymentId": graphene.Node.to_global_id("Payment", payment.pk),
        "amount": 0,
    }
    response = staff_api_client.post_graphql(
        REFUND_QUERY, variables, permissions=[permission_manage_orders]
    )
    errors = get_graphql_content(response)["data"]["paymentRefund"]["errors"]
    assert len(errors) == 1
    assert errors[0]["message"] == "Amount should be a positive number."
def test_payment_refund_error(
    staff_api_client, permission_manage_orders, payment_txn_captured, monkeypatch
):
    """A gateway failure leaves the payment charged and logs a failed REFUND txn."""
    payment = payment_txn_captured
    payment.charge_status = ChargeStatus.FULLY_CHARGED
    payment.captured_amount = payment.total
    payment.save()
    variables = {
        "paymentId": graphene.Node.to_global_id("Payment", payment.pk),
        "amount": str(payment.total),
    }
    # Force the dummy gateway to report a failure.
    monkeypatch.setattr("saleor.payment.gateways.dummy.dummy_success", lambda: False)
    response = staff_api_client.post_graphql(
        REFUND_QUERY, variables, permissions=[permission_manage_orders]
    )
    payload = get_graphql_content(response)["data"]["paymentRefund"]
    assert payload["errors"] == [{"field": None, "message": "Unable to process refund"}]
    payment.refresh_from_db()
    assert payment.charge_status == ChargeStatus.FULLY_CHARGED
    assert payment.transactions.count() == 2
    failed_txn = payment.transactions.last()
    assert failed_txn.kind == TransactionKind.REFUND
    assert not failed_txn.is_success
CONFIRM_QUERY = """
mutation PaymentConfirm($paymentId: ID!) {
paymentSecureConfirm(paymentId: $paymentId) {
payment {
id,
chargeStatus
}
errors {
field
message
}
}
}
"""
def test_payment_confirmation_success(
    user_api_client, payment_txn_preauth, graphql_address_data
):
    """Confirming a preauthorized payment captures it."""
    variables = {
        "paymentId": graphene.Node.to_global_id("Payment", payment_txn_preauth.pk)
    }
    response = user_api_client.post_graphql(CONFIRM_QUERY, variables)
    payload = get_graphql_content(response)["data"]["paymentSecureConfirm"]
    assert not payload["errors"]
    payment_txn_preauth.refresh_from_db()
    assert payment_txn_preauth.charge_status == ChargeStatus.FULLY_CHARGED
    assert payment_txn_preauth.transactions.count() == 2
    assert payment_txn_preauth.transactions.last().kind == TransactionKind.CAPTURE
def test_payments_query(
    payment_txn_captured, permission_manage_orders, staff_api_client
):
    """Every serialized field of a captured payment matches the model values."""
    query = """ {
        payments(first: 20) {
            edges {
                node {
                    id
                    gateway
                    capturedAmount {
                        amount
                        currency
                    }
                    total {
                        amount
                        currency
                    }
                    actions
                    chargeStatus
                    billingAddress {
                        country {
                            code
                            country
                        }
                        firstName
                        lastName
                        cityArea
                        countryArea
                        city
                        companyName
                        streetAddress1
                        streetAddress2
                        postalCode
                    }
                    transactions {
                        amount {
                            currency
                            amount
                        }
                    }
                    creditCard {
                        expMonth
                        expYear
                        brand
                        firstDigits
                        lastDigits
                    }
                }
            }
        }
    }
    """
    response = staff_api_client.post_graphql(
        query, permissions=[permission_manage_orders]
    )
    content = get_graphql_content(response)
    data = content["data"]["payments"]["edges"][0]["node"]
    pay = payment_txn_captured
    assert data["gateway"] == pay.gateway
    # Money amounts come back as GraphQL floats; compare via Decimal(str(...))
    # to avoid binary-float precision artifacts.
    amount = str(data["capturedAmount"]["amount"])
    assert Decimal(amount) == pay.captured_amount
    assert data["capturedAmount"]["currency"] == pay.currency
    total = str(data["total"]["amount"])
    assert Decimal(total) == pay.total
    assert data["total"]["currency"] == pay.currency
    assert data["chargeStatus"] == PaymentChargeStatusEnum.FULLY_CHARGED.name
    assert data["billingAddress"] == {
        "firstName": pay.billing_first_name,
        "lastName": pay.billing_last_name,
        "city": pay.billing_city,
        "cityArea": pay.billing_city_area,
        "countryArea": pay.billing_country_area,
        "companyName": pay.billing_company_name,
        "streetAddress1": pay.billing_address_1,
        "streetAddress2": pay.billing_address_2,
        "postalCode": pay.billing_postal_code,
        "country": {
            "code": pay.billing_country_code,
            "country": Country(pay.billing_country_code).name,
        },
    }
    # A captured payment only offers a refund action.
    assert data["actions"] == [OrderAction.REFUND.name]
    txn = pay.transactions.get()
    assert data["transactions"] == [
        {"amount": {"currency": pay.currency, "amount": float(str(txn.amount))}}
    ]
    assert data["creditCard"] == {
        "expMonth": pay.cc_exp_month,
        "expYear": pay.cc_exp_year,
        "brand": pay.cc_brand,
        "firstDigits": pay.cc_first_digits,
        "lastDigits": pay.cc_last_digits,
    }
def test_query_payment(payment_dummy, user_api_client, permission_manage_orders):
    """A single payment can be fetched by its global ID."""
    query = """
    query payment($id: ID!) {
        payment(id: $id) {
            id
        }
    }
    """
    global_id = graphene.Node.to_global_id("Payment", payment_dummy.pk)
    response = user_api_client.post_graphql(
        query, {"id": global_id}, permissions=[permission_manage_orders]
    )
    content = get_graphql_content(response)
    assert content["data"]["payment"]["id"] == global_id
def test_query_payments(payment_dummy, permission_manage_orders, staff_api_client):
    """The payments connection lists all existing payments."""
    query = """
    {
        payments(first: 20) {
            edges {
                node {
                    id
                }
            }
        }
    }
    """
    expected_id = graphene.Node.to_global_id("Payment", payment_dummy.pk)
    response = staff_api_client.post_graphql(
        query, {}, permissions=[permission_manage_orders]
    )
    content = get_graphql_content(response)
    found_ids = [
        edge["node"]["id"] for edge in content["data"]["payments"]["edges"]
    ]
    assert found_ids == [expected_id]
@pytest.fixture
def braintree_customer_id():
    # Sample customer id stored under the "braintree" gateway in these tests.
    return "1234"
@pytest.fixture
def dummy_customer_id():
    # Sample customer id stored under the "dummy" gateway in these tests.
    return "4321"
def test_store_payment_gateway_meta(customer_user, braintree_customer_id):
    """Storing a gateway customer id persists it in the user's private meta."""
    gateway = "braintree"
    expected_meta = {
        "payment-gateways": {gateway.upper(): {"customer_id": braintree_customer_id}}
    }
    store_customer_id(customer_user, gateway, braintree_customer_id)
    assert customer_user.private_meta == expected_meta
    # Reload to prove the value survived the round trip to the database.
    customer_user.refresh_from_db()
    assert fetch_customer_id(customer_user, gateway) == braintree_customer_id
@pytest.fixture
def token_config_with_customer(braintree_customer_id):
    # TokenConfig preloaded with the sample braintree customer id.
    return TokenConfig(customer_id=braintree_customer_id)
@pytest.fixture
def set_braintree_customer_id(customer_user, braintree_customer_id):
    # Persist the sample customer id on the user under the "braintree" gateway.
    gateway_name = "braintree"
    store_customer_id(customer_user, gateway_name, braintree_customer_id)
    return customer_user
@pytest.fixture
def set_dummy_customer_id(customer_user, dummy_customer_id):
    # Persist the sample customer id on the user under the "dummy" gateway.
    gateway_name = "dummy"
    store_customer_id(customer_user, gateway_name, dummy_customer_id)
    return customer_user
def test_list_payment_sources(
    mocker, dummy_customer_id, set_dummy_customer_id, user_api_client
):
    """Stored payment sources of the current user are exposed via `me`."""
    query = """
    {
        me {
            storedPaymentSources {
                gateway
                creditCardInfo {
                    lastDigits
                }
            }
        }
    }
    """
    card = CreditCardInfo(
        last_4="5678", exp_year=2020, exp_month=12, name_on_card="JohnDoe"
    )
    source = CustomerSource(id="test1", gateway="dummy", credit_card_info=card)
    # Replace the gateway lookup so no real payment provider is contacted.
    mock_get_source_list = mocker.patch(
        "saleor.graphql.account.resolvers.gateway.list_payment_sources",
        return_value=[source],
        autospec=True,
    )
    response = user_api_client.post_graphql(query)
    mock_get_source_list.assert_called_once_with("Dummy", dummy_customer_id)
    sources = get_graphql_content(response)["data"]["me"]["storedPaymentSources"]
    assert sources is not None and len(sources) == 1
    assert sources[0] == {"gateway": "dummy", "creditCardInfo": {"lastDigits": "5678"}}
|
import os
import argparse
from .. lib import (
eb,
s3,
parameters
)
import boto
def get_argument_parser():
    """Build the argument parser for the ``ebzl delete`` sub-command."""
    argument_parser = argparse.ArgumentParser("ebzl delete")
    # Register shared CLI parameters in the same order the original listed them.
    for add_argument, kwargs in (
        (parameters.add_profile, {"required": False}),
        (parameters.add_app_name, {}),
        (parameters.add_version_label, {"required": True}),
        (parameters.add_region, {"required": False}),
    ):
        add_argument(argument_parser, **kwargs)
    return argument_parser
def delete_eb_version(args):
    """Delete a single Elastic Beanstalk application version.

    :param args: parsed CLI namespace providing ``profile``, ``region``,
        ``app_name`` and ``version`` attributes.

    On a Beanstalk API error the server's message is printed and the process
    exits with a non-zero status instead of dumping a traceback at the user.
    """
    import sys  # local import: only needed for the error-exit path

    layer1 = eb.get_layer1(profile=args.profile, region=args.region)
    kwargs = {
        "application_name": args.app_name,
        "version_label": args.version,
        # Keep the uploaded bundle in S3; only drop the EB version record.
        "delete_source_bundle": False
    }
    try:
        layer1.delete_application_version(**kwargs)
    except boto.exception.BotoServerError as exc:
        print(exc.message)
        # Bug fix: the bare exit() here reported success (status 0) even
        # though the deletion failed; exit non-zero so scripts can detect it.
        sys.exit(1)
def run(argv):
    """Entry point for ``ebzl delete``: parse *argv* and delete the version."""
    parsed = parameters.parse(
        parser=get_argument_parser(),
        argv=argv,
        postprocessors=[parameters.add_default_region],
    )
    delete_eb_version(parsed)
|
class SenderNotCallable(Exception):
    """Raised when an object expected to act as a sender is not callable."""
    pass
|
import io
import qi
import time
import vision_definitions
from django.conf import settings
from django.core.files.images import ImageFile
from django.utils.functional import cached_property
from django.utils.six import BytesIO
from mock import MagicMock
from PIL import Image
from wagtail.wagtailimages.models import Image as WagtailImage
# Behavior names installed on the robot, grouped by posture (Stand/Sit) and
# mood. Each entry can be passed to NaoConnection.play(), which runs it via
# ALBehaviorManager.runBehavior().
ANIMATIONS = [
    "animations/Stand/Emotions/Negative/Angry_1",
    "animations/Stand/Emotions/Negative/Anxious_1",
    "animations/Stand/Emotions/Negative/Bored_2",
    "animations/Stand/Emotions/Negative/Disappointed_1",
    "animations/Stand/Emotions/Negative/Exhausted_2",
    "animations/Stand/Emotions/Negative/Fearful_1",
    "animations/Stand/Emotions/Negative/Sad_1",
    "animations/Stand/Emotions/Negative/Sorry_1",
    "animations/Stand/Emotions/Negative/Surprise_2",
    "animations/Stand/Emotions/Neutral/AskForAttention_1",
    "animations/Stand/Emotions/Neutral/Confused_1",
    "animations/Stand/Emotions/Neutral/Determined_1",
    "animations/Stand/Emotions/Neutral/Hello_1",
    "animations/Stand/Emotions/Positive/Amused_1",
    "animations/Stand/Emotions/Positive/Enthusiastic_1",
    "animations/Stand/Emotions/Positive/Excited_2",
    "animations/Stand/Emotions/Positive/Happy_2",
    "animations/Stand/Emotions/Positive/Hysterical_1",
    "animations/Stand/Emotions/Positive/Interested_1",
    "animations/Stand/Emotions/Positive/Laugh_1",
    "animations/Stand/Emotions/Positive/Proud_1",
    "animations/Stand/Emotions/Positive/Relieved_1",
    "animations/Stand/Emotions/Positive/Shy_1",
    "animations/Stand/Emotions/Positive/Sure_1",
    "animations/Stand/Emotions/Positive/Winner_1",
    "animations/Stand/Gestures/Angry_1",
    "animations/Stand/Gestures/Applause_1",
    "animations/Stand/Gestures/No_1",
    "animations/Stand/Gestures/Yes_2",
    "animations/Stand/Reactions/SeeSomething_4",
    "animations/Stand/Reactions/TouchHead_1",
    "animations/Stand/Waiting/LoveYou_1",
    "animations/Stand/Waiting/Think_1",
    "animations/Sit/BodyTalk/Listening/Listening_1",
    "animations/Sit/BodyTalk/Thinking/Remember_1",
    "animations/Sit/Emotions/Negative/Angry_1",
    "animations/Sit/Emotions/Negative/Fear_1",
    "animations/Sit/Emotions/Negative/Sad_1",
    "animations/Sit/Emotions/Negative/Surprise_1",
    "animations/Sit/Emotions/Neutral/AskForAttention_1",
    "animations/Sit/Emotions/Neutral/Sneeze_1",
    "animations/Sit/Emotions/Positive/Happy_1",
    "animations/Sit/Emotions/Positive/Hungry_1",
    "animations/Sit/Emotions/Positive/Laugh_1",
    "animations/Sit/Emotions/Positive/Shy_1",
    "animations/Sit/Emotions/Positive/Winner_1",
    "animations/Sit/Gestures/ComeOn_1",
    "animations/Sit/Reactions/Heat_1",
    "animations/Sit/Reactions/LightShine_1",
    "animations/Sit/Reactions/TouchHead_3",
    "animations/Sit/Waiting/Bored_1"
]
class NaoConnection(object):
    """Wrapper around a NAOqi session to a NAO robot.

    Service proxies (voice, motion, camera, ...) are created lazily via
    ``cached_property`` so only the services actually used get instantiated.
    With ``settings.NAO_MOCK`` set, the whole qi application is replaced by a
    MagicMock so the code can run without a robot.
    """

    def __init__(self):
        # NAO_MOCK short-circuits the hardware connection for development.
        if getattr(settings, "NAO_MOCK", False):
            self.app = MagicMock()
        else:
            # NOTE(review): robot address is hard-coded; presumably the LAN IP
            # of the target NAO — confirm before deploying elsewhere.
            self.app = qi.Application(url="tcp://172.16.0.107:9559")
        self.app.start()
        self.session = self.app.session

    @cached_property
    def voice(self):
        # Text-to-speech service, configured with neutral pitch/speed.
        voice = self.session.service("ALTextToSpeech")
        voice.setParameter("pitchShift", 1.0)
        voice.setParameter("speed", 100)
        return voice

    @cached_property
    def postureProxy(self):
        # Posture control (Stand/Sit transitions).
        return self.session.service("ALRobotPosture")

    @cached_property
    def alive(self):
        # Autonomous background movement; configured to drift back to neutral.
        alive = self.session.service("ALAutonomousMoves")
        alive.setBackgroundStrategy('backToNeutral')
        return alive

    @cached_property
    def selfaware(self):
        # Basic awareness (stimulus tracking, e.g. faces/sounds).
        return self.session.service("ALBasicAwareness")

    @cached_property
    def motion(self):
        return self.session.service("ALMotion")

    def findFaces(self):
        """Wake the robot and enable awareness + expressive listening."""
        self.motion.wakeUp()
        if not self.selfaware.isAwarenessRunning():
            self.selfaware.startAwareness()
        if not self.alive.getExpressiveListeningEnabled():
            self.alive.setExpressiveListeningEnabled(True)

    def stop(self):
        """Stop any running behaviors and disable awareness."""
        if self.animation.getRunningBehaviors():
            self.animation.stopAllBehaviors()
        self.stopFindingFaces()

    def stopFindingFaces(self):
        """Reverse of findFaces(): disable awareness and listening."""
        if self.selfaware.isAwarenessRunning():
            self.selfaware.stopAwareness()
        if self.alive.getExpressiveListeningEnabled():
            self.alive.setExpressiveListeningEnabled(False)

    @cached_property
    def camera(self):
        return self.session.service("ALVideoDevice")

    def asyncTakePicturePNG(self, name):
        # Fire-and-forget variant of takePicturePNG via qi's async helper.
        qi.async(self.takePicturePNG, name)

    def takePicturePNG(self, name):
        """Grab one camera frame and store it as a Wagtail image named *name*."""
        resolution = vision_definitions.k4VGA
        colorSpace = vision_definitions.kRGBColorSpace
        fps = 5
        # Subscribe just long enough to pull a single frame, then unsubscribe.
        nameId = self.camera.subscribe("python_GVM", resolution, colorSpace, fps)
        pic = self.camera.getImageRemote(nameId)
        self.camera.unsubscribe(nameId)
        # getImageRemote returns a tuple: width/height up front, pixel data
        # at index 6 (per the ALVideoDevice image container layout).
        imageWidth = pic[0]
        imageHeight = pic[1]
        array = pic[6]
        f = BytesIO()
        # Create a PIL Image from our pixel array.
        im = Image.frombytes("RGB", (imageWidth, imageHeight), str(array))
        # Save the image.
        im.save(f, "PNG")
        img = ImageFile(f, name='{}.png'.format(name))
        image = WagtailImage(file=img, title=name)
        image.save()

    @cached_property
    def animation(self):
        # Behavior playback service; used by play()/playAsync()/stop().
        return self.session.service("ALBehaviorManager")

    def playAsync(self, name):
        qi.async(self.play, name)

    def play(self, name):
        """Run the named behavior, switching posture first if required."""
        posture = self.postureProxy.getPosture()
        if name.startswith("animations/Stand/") and not "Stand" in posture:
            self.postureProxy.goToPosture("Stand", 1.0)
        elif name.startswith("animations/Sit/") and not "Sit" in posture:
            self.postureProxy.goToPosture("Sit", 1.0)
        self.animation.runBehavior(name)

    def say(self, message):
        # Non-blocking speech; returns the qi future for the utterance.
        return self.voice.say(message, _async=True)
def main():
    """Connect to the robot, start face tracking and idle until Ctrl-C."""
    conn = None
    try:
        conn = NaoConnection()
        conn.findFaces()
        print("jjkh")  # debug marker kept from the original script
        while True:
            time.sleep(10)
    except KeyboardInterrupt:
        # Bug fix: previously `conn` was only bound inside the try block, so
        # an interrupt arriving while NaoConnection() was still constructing
        # turned Ctrl-C into a NameError here. Guard before shutting down.
        if conn is not None:
            conn.stop()
if __name__ == '__main__':
    main()
|
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize, SimpleEventSerializer
from sentry.api.serializers.models.event import SharedEventSerializer
from sentry.models import EventError
from sentry.testutils import TestCase
from sentry.utils.samples import load_data
from sentry.testutils.helpers.datetime import iso_format, before_now
class EventSerializerTest(TestCase):
    """Tests for the default event serializer (``serialize(event)``)."""

    def test_simple(self):
        """Basic fields (`id`, `eventID`) mirror the stored event id."""
        event_id = "a" * 32
        event = self.store_event(
            data={"event_id": event_id, "timestamp": iso_format(before_now(minutes=1))},
            project_id=self.project.id,
        )
        result = serialize(event)
        assert result["id"] == event_id
        assert result["eventID"] == event_id

    def test_eventerror(self):
        """Invalid interface data is reported through the `errors` list."""
        event = self.store_event(
            data={
                "event_id": "a" * 32,
                "timestamp": iso_format(before_now(minutes=1)),
                "stacktrace": [u"ü"],
            },
            project_id=self.project.id,
            assert_no_errors=False,
        )
        result = serialize(event)
        assert len(result["errors"]) == 1
        assert "data" in result["errors"][0]
        assert result["errors"][0]["type"] == EventError.INVALID_DATA
        assert result["errors"][0]["data"] == {
            u"name": u"stacktrace",
            u"reason": u"expected rawstacktrace",
            u"value": [u"\xfc"],
        }
        assert "startTimestamp" not in result
        assert "timestamp" not in result

    def test_hidden_eventerror(self):
        """Invalid breadcrumb data does not surface in the `errors` list."""
        event = self.store_event(
            data={
                "event_id": "a" * 32,
                "timestamp": iso_format(before_now(minutes=1)),
                "breadcrumbs": [u"ü"],
            },
            project_id=self.project.id,
            assert_no_errors=False,
        )
        result = serialize(event)
        assert result["errors"] == []

    def test_renamed_attributes(self):
        """`extra`/`modules` are exposed as `context`/`packages` with meta."""
        # Only includes meta for simple top-level attributes
        event = self.store_event(
            data={
                "event_id": "a" * 32,
                "timestamp": iso_format(before_now(minutes=1)),
                "extra": {"extra": True},
                "modules": {"modules": "foobar"},
                "_meta": {
                    "extra": {"": {"err": ["extra error"]}},
                    "modules": {"": {"err": ["modules error"]}},
                },
            },
            project_id=self.project.id,
            assert_no_errors=False,
        )
        result = serialize(event)
        assert result["context"] == {"extra": True}
        assert result["_meta"]["context"] == {"": {"err": ["extra error"]}}
        assert result["packages"] == {"modules": "foobar"}
        assert result["_meta"]["packages"] == {"": {"err": ["modules error"]}}

    def test_message_interface(self):
        """The logentry interface surfaces as `message`, meta included."""
        event = self.store_event(
            data={
                "event_id": "a" * 32,
                "timestamp": iso_format(before_now(minutes=1)),
                "logentry": {"formatted": "bar"},
                "_meta": {"logentry": {"formatted": {"": {"err": ["some error"]}}}},
            },
            project_id=self.project.id,
            assert_no_errors=False,
        )
        result = serialize(event)
        assert result["message"] == "bar"
        assert result["_meta"]["message"] == {"": {"err": ["some error"]}}

    def test_message_formatted(self):
        """The formatted logentry value becomes the serialized `message`."""
        event = self.store_event(
            data={
                "event_id": "a" * 32,
                "timestamp": iso_format(before_now(minutes=1)),
                "logentry": {"formatted": "baz"},
                "_meta": {"logentry": {"formatted": {"": {"err": ["some error"]}}}},
            },
            project_id=self.project.id,
            assert_no_errors=False,
        )
        result = serialize(event)
        assert result["message"] == "baz"
        assert result["_meta"]["message"] == {"": {"err": ["some error"]}}

    def test_tags_tuples(self):
        """Tag tuples are sorted by key; meta indices track the new order."""
        event = self.store_event(
            data={
                "event_id": "a" * 32,
                "level": "error",  # creates a derived tag.
                "timestamp": iso_format(before_now(minutes=1)),
                "tags": [["foo", "foo"], ["bar", "bar"], ["last", "tag"], None],
                "_meta": {
                    "tags": {
                        "0": {"1": {"": {"err": ["foo error"]}}},
                        "1": {"0": {"": {"err": ["bar error"]}}},
                        "3": {"": {"err": ["full error"]}},
                    }
                },
            },
            project_id=self.project.id,
            assert_no_errors=False,
        )
        result = serialize(event)
        # Expect 3 custom tags + derived "level". The ``None`` entry is removed
        # by the serializer as it cannot be rendered. Such entries are generated
        # by Relay normalization.
        assert len(result["tags"]) == 4
        assert result["tags"][0]["value"] == "bar"
        assert result["tags"][1]["value"] == "foo"
        assert result["_meta"]["tags"]["0"]["key"] == {"": {"err": ["bar error"]}}
        assert result["_meta"]["tags"]["1"]["value"] == {"": {"err": ["foo error"]}}
        assert result["_meta"]["tags"].get("2") is None

    def test_tags_dict(self):
        """Tag dicts are sorted by key; meta is re-keyed by result index."""
        event = self.store_event(
            data={
                "event_id": "a" * 32,
                "timestamp": iso_format(before_now(minutes=1)),
                "tags": {"foo": "foo", "bar": "bar", "last": "tag"},
                "_meta": {
                    "tags": {
                        "foo": {"": {"err": ["foo error"]}},
                        "bar": {"": {"err": ["bar error"]}},
                    }
                },
            },
            project_id=self.project.id,
            assert_no_errors=False,
        )
        result = serialize(event)
        assert result["tags"][0]["value"] == "bar"
        assert result["tags"][1]["value"] == "foo"
        assert result["_meta"]["tags"]["0"]["value"] == {"": {"err": ["bar error"]}}
        assert result["_meta"]["tags"]["1"]["value"] == {"": {"err": ["foo error"]}}
        assert result["_meta"]["tags"].get("2") is None

    def test_none_interfaces(self):
        """Interfaces set to ``None`` serialize to empty or absent values."""
        event = self.store_event(
            data={
                "event_id": "a" * 32,
                "timestamp": iso_format(before_now(minutes=1)),
                "breadcrumbs": None,
                "exception": None,
                "logentry": None,
                "request": None,
                "user": None,
                "contexts": None,
                "sdk": None,
                "_meta": None,
            },
            project_id=self.project.id,
        )
        result = serialize(event)
        assert not any(e["type"] == "breadcrumbs" for e in result["entries"])
        assert not any(e["type"] == "exception" for e in result["entries"])
        assert not any(e["type"] == "message" for e in result["entries"])
        assert not any(e["type"] == "request" for e in result["entries"])
        assert result["user"] is None
        assert result["sdk"] is None
        assert result["contexts"] == {}
        assert "startTimestamp" not in result

    def test_transaction_event(self):
        """Transactions expose start/end timestamps and measurements."""
        event_data = load_data("transaction")
        event = self.store_event(data=event_data, project_id=self.project.id)
        result = serialize(event)
        assert isinstance(result["endTimestamp"], float)
        assert result["endTimestamp"] == event.data.get("timestamp")
        assert isinstance(result["startTimestamp"], float)
        assert result["startTimestamp"] == event.data.get("start_timestamp")
        assert "dateCreated" not in result
        assert "crashFile" not in result
        assert "fingerprints" not in result
        assert "measurements" in result
        assert result["measurements"] == event_data["measurements"]

    def test_transaction_event_empty_spans(self):
        """A transaction with no spans still produces a `spans` entry."""
        event_data = load_data("transaction")
        event_data["spans"] = []
        event = self.store_event(data=event_data, project_id=self.project.id)
        result = serialize(event)
        assert result["entries"][0]["type"] == "spans"
class SharedEventSerializerTest(TestCase):
    def test_simple(self):
        """Shared serialization strips private/contextual data from events."""
        event_id = "a" * 32
        event = self.store_event(
            data={"event_id": event_id, "timestamp": iso_format(before_now(minutes=1))},
            project_id=self.project.id,
        )
        result = serialize(event, None, SharedEventSerializer())
        assert result["id"] == event_id
        assert result["eventID"] == event_id
        # Sensitive fields must be absent or nulled out in the shared view.
        for hidden_key in ("context", "contexts", "user", "tags"):
            assert result.get(hidden_key) is None
        assert "sdk" not in result
        assert "errors" not in result
        assert all(entry["type"] != "breadcrumbs" for entry in result["entries"])
class SimpleEventSerializerTest(TestCase):
    """Tests for the lightweight SimpleEventSerializer output."""

    def test_user(self):
        """
        Use the SimpleEventSerializer to serialize an event
        """
        event = self.store_event(
            data={
                "event_id": "a" * 32,
                "timestamp": iso_format(before_now(minutes=1)),
                "user": {"email": "test@test.com"},
            },
            project_id=self.project.id,
        )
        result = serialize(event, None, SimpleEventSerializer())
        assert result["eventID"] == event.event_id
        assert result["projectID"] == six.text_type(event.project_id)
        assert result["groupID"] == six.text_type(event.group.id)
        assert result["message"] == event.message
        assert result["title"] == event.title
        assert result["location"] == event.location
        assert result["culprit"] == event.culprit
        assert result["dateCreated"] == event.datetime
        # The serialized user mirrors the event's minimal user interface.
        assert result["user"]["id"] == event.get_minimal_user().id
        assert result["user"]["email"] == event.get_minimal_user().email
        assert result["user"]["username"] == event.get_minimal_user().username
        assert result["user"]["ip_address"] == event.get_minimal_user().ip_address
        assert result["tags"] == [
            {"key": "level", "value": "error"},
            {"key": "user", "value": "email:test@test.com", "query": 'user.email:"test@test.com"'},
        ]

    def test_no_group(self):
        """
        Use the SimpleEventSerializer to serialize an event without group
        """
        event = self.store_event(
            data={
                "event_id": "a" * 32,
                "start_timestamp": iso_format(before_now(minutes=1)),
                "timestamp": iso_format(before_now(minutes=1)),
                "user": {"email": "test@test.com"},
                "type": "transaction",
                "transaction": "api.issue.delete",
                "spans": [],
                "contexts": {"trace": {"op": "foobar", "trace_id": "a" * 32, "span_id": "a" * 16}},
            },
            project_id=self.project.id,
        )
        result = serialize(event, None, SimpleEventSerializer())
        # Transaction events carry no group, so groupID serializes to None.
        assert result["groupID"] is None
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the ``R_Old_Navy`` retailer item table."""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='R_Old_Navy',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('item_name', models.CharField(max_length=120)),
                ('item_title', models.CharField(max_length=120)),
                ('item_id', models.CharField(max_length=200)),
                ('item_url', models.URLField(max_length=500)),
                # pub_date is set once on insert; updated changes on every save.
                ('pub_date', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('item_image', models.ImageField(upload_to='retailer/r_old_navy', default='retailer/r_old_navy/no-img.jpg')),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Make the listed CharFields non-nullable, defaulting to ''."""
        # Changing field 'Source.reference'
        db.alter_column('payment_source', 'reference', self.gf('django.db.models.fields.CharField')(default='', max_length=128))

        # Changing field 'Source.label'
        db.alter_column('payment_source', 'label', self.gf('django.db.models.fields.CharField')(default='', max_length=128))

        # Changing field 'Transaction.status'
        db.alter_column('payment_transaction', 'status', self.gf('django.db.models.fields.CharField')(default='', max_length=128))

        # Changing field 'Transaction.reference'
        db.alter_column('payment_transaction', 'reference', self.gf('django.db.models.fields.CharField')(default='', max_length=128))

        # Changing field 'Bankcard.partner_reference'
        db.alter_column('payment_bankcard', 'partner_reference', self.gf('django.db.models.fields.CharField')(default='', max_length=255))
def backwards(self, orm):
# Changing field 'Source.reference'
db.alter_column('payment_source', 'reference', self.gf('django.db.models.fields.CharField')(max_length=128, null=True))
# Changing field 'Source.label'
db.alter_column('payment_source', 'label', self.gf('django.db.models.fields.CharField')(max_length=128, null=True))
# Changing field 'Transaction.status'
db.alter_column('payment_transaction', 'status', self.gf('django.db.models.fields.CharField')(max_length=128, null=True))
# Changing field 'Transaction.reference'
db.alter_column('payment_transaction', 'reference', self.gf('django.db.models.fields.CharField')(max_length=128, null=True))
# Changing field 'Bankcard.partner_reference'
db.alter_column('payment_bankcard', 'partner_reference', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
models = {
'address.country': {
'Meta': {'ordering': "('-is_highlighted', 'name')", 'object_name': 'Country'},
'is_highlighted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_shipping_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'iso_3166_1_a2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso_3166_1_a3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'db_index': 'True'}),
'iso_3166_1_numeric': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'order.billingaddress': {
'Meta': {'object_name': 'BillingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'order.order': {
'Meta': {'ordering': "['-date_placed']", 'object_name': 'Order'},
'basket_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'billing_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.BillingAddress']", 'null': 'True', 'blank': 'True'}),
'date_placed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'guest_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'shipping_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.ShippingAddress']", 'null': 'True', 'blank': 'True'}),
'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'total_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'total_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': "orm['auth.User']"})
},
'order.shippingaddress': {
'Meta': {'object_name': 'ShippingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'payment.bankcard': {
'Meta': {'object_name': 'Bankcard'},
'card_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'expiry_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'partner_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bankcards'", 'to': "orm['auth.User']"})
},
'payment.source': {
'Meta': {'object_name': 'Source'},
'amount_allocated': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}),
'amount_debited': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}),
'amount_refunded': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'GBP'", 'max_length': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sources'", 'to': "orm['order.Order']"}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'source_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['payment.SourceType']"})
},
'payment.sourcetype': {
'Meta': {'object_name': 'SourceType'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'payment.transaction': {
'Meta': {'object_name': 'Transaction'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transactions'", 'to': "orm['payment.Source']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'txn_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['payment']
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 30, transform = "BoxCox", sigma = 0.0, exog_count = 20, ar_order = 12);
|
"""
Dependencies lists the functions and types required by a function
"""
from pythran.passmanager import ModuleAnalysis
from pythran.tables import MODULES
import ast
import math
class Dependencies(ModuleAnalysis):
    """Collect the (module, symbol) pairs the visited module depends on.

    ``self.result`` accumulates tuples naming each builtin, operator or
    runtime helper that code generation must make available for the AST
    constructs encountered.
    """

    def __init__(self):
        self.result = set()
        super(Dependencies, self).__init__()

    def visit_List(self, node):
        self.result.add(('__builtin__', 'list'))
        self.generic_visit(node)

    def visit_Tuple(self, node):
        self.result.add(('__builtin__', 'tuple'))
        self.generic_visit(node)

    def visit_Set(self, node):
        self.result.add(('__builtin__', 'set'))
        self.generic_visit(node)

    def visit_Dict(self, node):
        self.result.add(('__builtin__', 'dict'))
        self.generic_visit(node)

    def visit_Str(self, node):
        self.result.add(('__builtin__', 'str'))
        self.generic_visit(node)

    def visit_Pow(self, node):
        self.result.add(('__builtin__', 'pow'))
        self.generic_visit(node)

    def visit_In(self, node):
        self.result.add(('__builtin__', 'in'))
        self.generic_visit(node)

    visit_NotIn = visit_In

    def visit_Is(self, node):
        self.result.add(('__builtin__', 'id'))
        self.generic_visit(node)

    def visit_IfExp(self, node):
        self.result.add(('__builtin__', 'bool_'))
        self.generic_visit(node)

    visit_And = visit_Or = visit_IfExp
    visit_IsNot = visit_Is

    def visit_Print(self, node):
        self.result.add(('__builtin__', 'print'))
        self.generic_visit(node)

    def visit_Assert(self, node):
        self.result.add(('__builtin__', 'assert'))
        self.generic_visit(node)

    def visit_Yield(self, node):
        self.result.add(('utils', 'yield'))
        self.generic_visit(node)

    def visit_Mod(self, node):
        # Operator nodes have no children worth visiting.
        self.result.add(('operator_', 'mod'))

    def visit_FloorDiv(self, node):
        self.result.add(('operator_', 'floordiv'))

    def visit_Num(self, node):
        # Compare by type name so the Python 2-only ``long`` type is
        # still detected without raising NameError under Python 3.
        if type(node.n) is complex:
            self.result.add(('types', 'complex'))
        elif type(node.n).__name__ == 'long':
            self.result.add(('types', 'long'))
        elif math.isnan(node.n):
            self.result.add(('numpy', 'nan'))
        elif math.isinf(node.n):
            self.result.add(('numpy', 'inf'))
        self.generic_visit(node)

    def visit_Attribute(self, node):
        def rec(w, n):
            # Flatten a dotted path (``a.b.c``) into a tuple of names.
            # Return None when the base is not a plain Name/Attribute
            # (e.g. a call or subscript) instead of letting the original
            # ``None + tuple`` TypeError escape.
            if isinstance(n, ast.Name):
                return (n.id,)
            elif isinstance(n, ast.Attribute):
                base = rec(w, n.value)
                return base + (n.attr,) if base else None
        attr = rec(MODULES, node)
        attr and self.result.add(attr)
|
from pysal.model.spvcm._constants import TEST_SEED, CLASSTYPES
from pysal.model.spvcm.tests.utils import run_with_seed
from pysal.model.spvcm import lower_level as M
from pysal.model.spvcm.abstracts import Sampler_Mixin
from pysal.model.spvcm.utils import south
import pandas as pd
import os
FULL_PATH = os.path.dirname(os.path.abspath(__file__))
def build():
    """Run every ``Sampler_Mixin`` model found in ``M`` against the south
    dataset and return the file names produced under ``data/``.
    """
    # Every class exported by M that participates in the sampler protocol.
    samplers = [cand for cand in M.__dict__.values()
                if isinstance(cand, CLASSTYPES)
                and issubclass(cand, Sampler_Mixin)]
    out_prefix = FULL_PATH + '/data/'
    for sampler in samplers:
        print('starting {}'.format(sampler))
        environment = south()
        # Drop the 'M' entry so it is not passed through to the run.
        del environment['M']
        run_with_seed(sampler, env=environment, seed=TEST_SEED,
                      fprefix=out_prefix)
    return os.listdir(out_prefix)
|
"""
Verifies that msvs_prebuild and msvs_postbuild can be specified in both
VS 2008 and 2010.
"""
import TestGyp
test = TestGyp.TestGyp(formats=['msvs'], workdir='workarea_all')
# VS 2008 projects (*.vcproj) express build events as named tool entries.
test.run_gyp('buildevents.gyp', '-G', 'msvs_version=2008')
test.must_contain('main.vcproj', 'Name="VCPreBuildEventTool"')
test.must_contain('main.vcproj', 'Name="VCPostBuildEventTool"')
# VS 2010 projects (*.vcxproj) use MSBuild XML elements instead.
test.run_gyp('buildevents.gyp', '-G', 'msvs_version=2010')
test.must_contain('main.vcxproj', '<PreBuildEvent>')
test.must_contain('main.vcxproj', '<PostBuildEvent>')
test.pass_test()
|
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
class EmailModelBackend(ModelBackend):
    """Backend that authenticates users by an email/password pair."""

    def authenticate(self, email=None, password=None):
        """Return the matching user when *password* is valid, else None."""
        user_model = get_user_model()
        try:
            candidate = user_model.objects.get_by_natural_key(email)
        except user_model.DoesNotExist:
            return None
        if candidate.check_password(password):
            return candidate
|
from mustaine import protocol
from mustaine.client import HessianProxy
# Proxy to the public Caucho Hessian echo service — performs network
# access at import time.
test = HessianProxy("http://hessian.caucho.com/test/test")
# assert test.argDouble_0_001(0.001) is True
def test_encode_double_127_0():
    # Round-trip 127.0 through the Hessian double encoding; the service
    # is expected to reply True when the argument decoded correctly.
    assert test.argDouble_127_0(127.0) is True
import os
PROJECT_DIR = os.path.dirname(__file__)
# Build an absolute path relative to this settings file's directory.
location = lambda x: os.path.join(
    os.path.dirname(os.path.realpath(__file__)), x)
USE_TZ = True

# Sandbox/development flags: verbose errors and debugging everywhere.
DEBUG = True
TEMPLATE_DEBUG = True
SQL_DEBUG = True
SEND_BROKEN_LINK_EMAILS = False
ADMINS = (
    ('David Winterbottom', 'david.winterbottom@tangentlabs.co.uk'),
)
EMAIL_SUBJECT_PREFIX = '[Oscar sandbox] '
# Print outgoing mail to the console instead of sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
MANAGERS = ADMINS
# Local SQLite database stored next to this settings file.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(os.path.dirname(__file__), 'db.sqlite'),
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}
CACHES = {
    'default': {
        'BACKEND':
        'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': '127.0.0.1:11211',
    }
}
TIME_ZONE = 'Europe/London'
LANGUAGE_CODE = 'en-gb'
LANGUAGES = (
    ('en-gb', 'English'),
    ('da', 'Danish'),
    ('de', 'German'),
    ('el', 'Greek'),
    ('en', 'English'),
    ('es', 'Spanish'),
    ('fr', 'French'),
    ('it', 'Italian'),
    ('ja', 'Japanese'),
    ('pl', 'Polish'),
    ('pt', 'Portugese'),
    ('ru', 'Russian'),
    ('sk', 'Slovakian'),
)
# django-rosetta translation UI, left open for sandbox i18n testing.
ROSETTA_STORAGE_CLASS = 'rosetta.storage.SessionRosettaStorage'
ROSETTA_ENABLE_TRANSLATION_SUGGESTIONS = True
ROSETTA_REQUIRES_AUTH = False
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = location("public/media")
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATIC_ROOT = location('public/static')
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
)
# Sandbox-only key; never reuse outside this demo project.
SECRET_KEY = '$)a7n&o80u!6y5t-+jrd3)3!%vh&shg$wqpjpxc!ar&p#!)n1a'
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.request",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.contrib.messages.context_processors.messages",
    # Oscar specific
    'oscar.apps.search.context_processors.search_form',
    'oscar.apps.promotions.context_processors.promotions',
    'oscar.apps.checkout.context_processors.checkout',
    'oscar.core.context_processors.metadata',
    'oscar.apps.customer.notifications.context_processors.notifications',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.transaction.TransactionMiddleware',
    'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    # Allow languages to be selected
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    # Ensure a valid basket is added to the request instance for every request
    'oscar.apps.basket.middleware.BasketMiddleware',
    # Enable the ProfileMiddleware, then add ?cprofile to any
    # URL path to print out profile details
    #'oscar.profiling.middleware.ProfileMiddleware',
)
ROOT_URLCONF = 'urls'
from oscar import OSCAR_MAIN_TEMPLATE_DIR
TEMPLATE_DIRS = (
    location('templates'),
    OSCAR_MAIN_TEMPLATE_DIR,
)
# Logging: console + per-concern files (via Oscar's EnvFileHandler),
# errors additionally mailed to ADMINS when DEBUG is off.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(message)s',
        },
        'simple': {
            'format': '[%(asctime)s] %(message)s'
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        },
        'checkout_file': {
            'level': 'INFO',
            'class': 'oscar.core.logging.handlers.EnvFileHandler',
            'filename': 'checkout.log',
            'formatter': 'verbose'
        },
        'gateway_file': {
            'level': 'INFO',
            'class': 'oscar.core.logging.handlers.EnvFileHandler',
            'filename': 'gateway.log',
            'formatter': 'simple'
        },
        'error_file': {
            'level': 'INFO',
            'class': 'oscar.core.logging.handlers.EnvFileHandler',
            'filename': 'errors.log',
            'formatter': 'verbose'
        },
        'sorl_file': {
            'level': 'INFO',
            'class': 'oscar.core.logging.handlers.EnvFileHandler',
            'filename': 'sorl.log',
            'formatter': 'verbose'
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
            'filters': ['require_debug_false'],
        },
    },
    'loggers': {
        'django': {
            'handlers': ['null'],
            'propagate': True,
            'level': 'INFO',
        },
        'django.request': {
            'handlers': ['mail_admins', 'error_file'],
            'level': 'ERROR',
            'propagate': False,
        },
        'oscar.checkout': {
            'handlers': ['console', 'checkout_file'],
            'propagate': True,
            'level': 'INFO',
        },
        'gateway': {
            'handlers': ['gateway_file'],
            'propagate': True,
            'level': 'INFO',
        },
        'sorl.thumbnail': {
            'handlers': ['sorl_file'],
            'propagate': True,
            'level': 'INFO',
        },
        'django.db.backends': {
            'handlers': ['null'],
            'propagate': False,
            'level': 'DEBUG',
        },
    }
}
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.flatpages',
    'django.contrib.staticfiles',
    'django_extensions',
    'debug_toolbar',
    'south',
    'rosetta', # For i18n testing
    'compressor',
    'apps.user', # For profile testing
    'apps.gateway', # For allowing dashboard access
]
# Append Oscar's core apps to the Django/third-party apps above.
from oscar import get_core_apps
INSTALLED_APPS = INSTALLED_APPS + get_core_apps()
AUTHENTICATION_BACKENDS = (
    'oscar.apps.customer.auth_backends.Emailbackend',
    'django.contrib.auth.backends.ModelBackend',
)
LOGIN_REDIRECT_URL = '/accounts/'
APPEND_SLASH = True
# Local Whoosh index for Haystack-powered search.
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': os.path.join(os.path.dirname(__file__), 'whoosh_index'),
    },
}
AUTH_PROFILE_MODULE = 'user.Profile'
# Addresses treated as internal for debug-toolbar visibility.
INTERNAL_IPS = ('127.0.0.1',)

def is_internal(request):
    """Return True when the request comes from an internal address."""
    addr = request.META['REMOTE_ADDR']
    if addr in INTERNAL_IPS:
        return True
    return addr.startswith('192.168')

DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
    'SHOW_TOOLBAR_CALLBACK': is_internal,
}
DEBUG_TOOLBAR_PANELS = (
    'debug_toolbar.panels.version.VersionDebugPanel',
    'debug_toolbar.panels.timer.TimerDebugPanel',
    'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
    'debug_toolbar.panels.headers.HeaderDebugPanel',
    'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
    'debug_toolbar.panels.template.TemplateDebugPanel',
    #'debug_toolbar.panels.sql.SQLDebugPanel',
    'debug_toolbar.panels.signals.SignalDebugPanel',
    'debug_toolbar.panels.logger.LoggingPanel',
)
# Pull in Oscar's defaults, then override the sandbox-specific values.
from oscar.defaults import *
OSCAR_SHOP_NAME = 'Oscar Sandbox'
OSCAR_SHOP_TAGLINE = 'e-Commerce for Django'
OSCAR_RECENTLY_VIEWED_PRODUCTS = 20
OSCAR_ALLOW_ANON_CHECKOUT = True
DISPLAY_VERSION = False
OSCAR_INITIAL_ORDER_STATUS = 'Pending'
OSCAR_INITIAL_LINE_STATUS = 'Pending'
# Allowed order status transitions.
OSCAR_ORDER_STATUS_PIPELINE = {
    'Pending': ('Being processed', 'Cancelled',),
    'Being processed': ('Processed', 'Cancelled',),
    'Cancelled': (),
}
USE_LESS = False
COMPRESS_ENABLED = True
COMPRESS_PRECOMPILERS = (
    ('text/less', 'lessc {infile} {outfile}'),
)
COMPRESS_OFFLINE_CONTEXT = {
    'STATIC_URL': 'STATIC_URL',
    'use_less': USE_LESS,
}
COMPRESS_OUTPUT_DIR = 'oscar'
# Make sure the log directory used by EnvFileHandler exists.
LOG_ROOT = location('logs')
if not os.path.exists(LOG_ROOT):
    os.mkdir(LOG_ROOT)
THUMBNAIL_DEBUG = True
THUMBNAIL_KEY_PREFIX = 'oscar-sandbox'
# Optional machine-specific overrides.
try:
    from settings_local import *
except ImportError:
    pass
|
'''misc routines for dealing with binary data'''
def hex2bin(str):
    '''
    Take a hexadecimal number as a string and convert it to a binary string.

    Each hex digit expands to exactly four bits, so leading zeros are
    preserved (e.g. '0f' -> '00001111').  Raises ValueError if the input
    contains a non-hexadecimal character.

    The parameter name shadows the builtin ``str``; it is kept unchanged
    for backward compatibility with keyword callers.
    '''
    # format(..., '04b') zero-pads each digit to four bits; ''.join
    # replaces the original lookup table and quadratic ``+=`` loop.
    return ''.join(format(int(digit, base=16), '04b') for digit in str)
def checksum(str):
    '''Twos complement checksum as used by the ox wagon PLC.

    The first character and the trailing four characters of the frame are
    excluded; the remaining hex digits are summed two at a time as byte
    values, and the two's complement of the low byte of that sum is
    returned.  Malformed input (odd-length or non-hex command section)
    raises IndexError/ValueError, as before.

    The parameter name shadows the builtin ``str``; it is kept unchanged
    for backward compatibility with keyword callers.
    '''
    command = str[1:len(str) - 4]
    # Accumulate byte values; ``total`` avoids shadowing builtin ``sum``.
    total = 0
    for i in range(0, len(command), 2):
        total += int(command[i] + command[i + 1], base=16)
    # Two's complement of the low byte: invert, mask, add one.
    return (~total & 0xFF) + 1
|
import nbformat
def empty_notebook(fname):
    """Read notebook *fname* and strip code cells down to their comments.

    Code cells whose source contains a ``# preserve`` (or ``#preserve``)
    marker are left untouched; every other code cell loses its outputs,
    its execution count, and all non-comment source lines.
    """
    with open(fname, 'r') as fp:
        notebook = nbformat.read(fp, as_version=4)
    for cell in notebook.cells:
        if cell['cell_type'] != 'code':
            continue
        text = cell['source']
        if '# preserve' in text or '#preserve' in text:
            # Explicitly marked: keep the cell exactly as-is.
            continue
        # Blank the cell, keeping only its comment lines.
        cell['outputs'].clear()
        cell['execution_count'] = None
        comment_lines = [line for line in text.splitlines()
                         if line.startswith('#')]
        cell['source'] = '\n'.join(comment_lines)
    return notebook
if __name__ == '__main__':
    import glob
    import os.path
    # Mirror every notebook into notebooks_vacios/ with outputs stripped.
    if not os.path.exists("notebooks_vacios"):
        os.mkdir("notebooks_vacios")
    for fname in glob.glob("notebooks/*.ipynb"):
        new_fname = os.path.join("notebooks_vacios", os.path.basename(fname))
        with open(new_fname, 'w') as fp:
            nbformat.write(empty_notebook(fname), fp)
|
import subprocess
import sys
import os
import hashlib
import Alfred
import romkan
# Alfred workflow plumbing: wrap argv in a query handler.
handler = Alfred.Handler(sys.argv, use_no_query_string=False)
anything_matched = False
playlist_name = ""
# Marker file; while present, artwork is served from cache only.
recache_identifier = ".disallow-recache"
user_home = os.path.expanduser("~")
# Album art cache directory, created on first run.
artwork_cache_path = os.path.join(user_home, ".maestro-cache/")
if not os.path.exists(artwork_cache_path):
    os.makedirs(artwork_cache_path)
def get_artwork(track_name):
    """Return a path to cached album art for *track_name*.

    Artwork files live under the cache directory, keyed by the MD5 hex
    digest of the track name (Python 2: hashing a byte string).  Falls
    back to the bundled placeholder image when nothing is cached.
    """
    file_name = hashlib.md5(track_name).hexdigest()
    items = os.listdir(artwork_cache_path)
    if not os.path.exists(recache_identifier):
        # NOTE(review): the AppleScript fetch is started but never
        # awaited, and the directory was listed above — fresh artwork
        # only appears on a later invocation.
        sprocess = subprocess.Popen(["osascript", "get_artwork.scpt", track_name, artwork_cache_path, file_name], stdout=subprocess.PIPE)
    for item in items:
        if file_name in item:
            return os.path.join(artwork_cache_path, item)
    return "no_album_art.jpg"
def disallow_recache():
    """Create the marker file that suppresses artwork re-caching.

    Uses direct file creation instead of shelling out to ``touch``, so no
    subprocess is spawned and the call cannot break on shell
    metacharacters.  Only file existence matters to the callers that
    check the marker.
    """
    with open(recache_identifier, 'a'):
        pass
try:
    # get playlist name
    sprocess = subprocess.Popen(["osascript", "get_playlist_name.scpt"], stdout=subprocess.PIPE)
    playlist_name = sprocess.stdout.read()
    # remove empty line
    playlist_name = playlist_name[:-1]
    # get all playlist tracks
    sprocess = subprocess.Popen(["osascript", "get_playlist_tracks.scpt"], stdout=subprocess.PIPE)
    playlist_track_infos_raw = sprocess.stdout.read()
    # remove empty line
    playlist_track_infos_raw = playlist_track_infos_raw[:-1]
    # Each line is one track: artist/name/album/id joined by '!MAESTRO!'.
    playlist_track_infos = playlist_track_infos_raw.split('\n')
    playlist_tracks = []
    for raw_info in playlist_track_infos:
        info = raw_info.split('!MAESTRO!')
        track_artist = info[0]
        track_name = info[1]
        album_name = info[2]
        track_id = info[3]
        playlist_tracks.append({"artist": track_artist, "name": track_name, "album": album_name, "track_id": track_id})
    # Case-insensitive substring match on name/artist/album, both raw and
    # romaji-transliterated (romkan; Python 2 ``unicode``).
    for track in playlist_tracks:
        match_in_name = handler.query.lower() in track["name"].lower()
        match_in_artist = handler.query.lower() in track["artist"].lower()
        match_in_album = handler.query.lower() in track["album"].lower()
        match_in_name_kana = handler.query.lower() in romkan.to_roma(unicode(track["name"], "utf-8"))
        match_in_artist_kana = handler.query.lower() in romkan.to_roma(unicode(track["artist"], "utf-8"))
        match_in_album_kana = handler.query.lower() in romkan.to_roma(unicode(track["album"], "utf-8"))
        if match_in_name or match_in_artist or match_in_album or match_in_name_kana or match_in_artist_kana or match_in_album_kana:
            subtitle = None
            if len(track["album"]) > 0:
                subtitle = "%s [%s]" % (track["artist"], track["album"])
            else:
                subtitle = track["artist"]
            handler.add_new_item(title=track["name"], subtitle=subtitle, arg=track["track_id"], icon=get_artwork(track["name"]))
            anything_matched = True
except:
    # NOTE(review): bare except silently swallows AppleScript/parse
    # failures; the fallback below then shows "no match" instead of
    # surfacing the error.
    pass
if not anything_matched:
    if playlist_name == "":
        # No playlist selected: offer matching playlists instead.
        # get playlists
        sprocess = subprocess.Popen(["osascript", "get_playlists.scpt"], stdout=subprocess.PIPE)
        raw_playlists = sprocess.stdout.read()
        # remove empty line
        raw_playlists = raw_playlists[:-1]
        playlists = raw_playlists.split('\n')
        playlist_list = []
        if len(playlists) > 0:
            for playlist in playlists:
                stuff = playlist.split('!MAESTRO!')
                playlist_name = stuff[0]
                playlist_count = int(stuff[1])
                # don't show empty playlists
                if playlist_count > 0:
                    playlist_list.append({"name": playlist_name, "count": playlist_count})
            for playlist in playlist_list:
                if handler.query.lower() in playlist["name"].lower():
                    handler.add_new_item(title=playlist["name"], subtitle="%s songs in this playlist." % playlist["count"], icon="default.png", arg=playlist["name"])
        else:
            handler.add_new_item(title="No playlists found", icon="default.png")
    else:
        handler.add_new_item(title="Couldn't find a song which match '%s'" % handler.query, icon="default.png")
        disallow_recache()
# Emit the accumulated items back to Alfred.
handler.push()
|
"""
Facilities for diffing two FITS files. Includes objects for diffing entire
FITS files, individual HDUs, FITS headers, or just FITS data.
Used to implement the fitsdiff program.
"""
import fnmatch
import glob
import io
import operator
import os.path
import textwrap
import warnings
from collections import defaultdict
from inspect import signature
from itertools import islice
import numpy as np
from astropy import __version__
from .card import Card, BLANK_CARD
from .header import Header
from astropy.utils.decorators import deprecated_renamed_argument
from .hdu.hdulist import fitsopen, HDUList # pylint: disable=W0611
from .hdu.table import _TableLikeHDU
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.diff import (report_diff_values, fixed_width_indent,
where_not_allclose, diff_values)
# Public diff classes exported by this module.
__all__ = ['FITSDiff', 'HDUDiff', 'HeaderDiff', 'ImageDataDiff', 'RawDataDiff',
           'TableDataDiff']
# (attribute name, human-readable plural) pairs of table column
# attributes.  NOTE(review): presumably consumed by TableDataDiff when
# comparing column metadata — defined later in the file; confirm there.
_COL_ATTRS = [('unit', 'units'), ('null', 'null values'),
              ('bscale', 'bscales'), ('bzero', 'bzeros'),
              ('disp', 'display formats'), ('dim', 'dimensions')]
class _BaseDiff:
    """
    Base class for all FITS diff objects.

    When instantiating a FITS diff object, the first two arguments are always
    the two objects to diff (two FITS files, two FITS headers, etc.).
    Instantiating a ``_BaseDiff`` also causes the diff itself to be executed.
    The returned ``_BaseDiff`` instance has a number of attributes that
    describe the results of the diff operation.

    The most basic attribute, present on all ``_BaseDiff`` instances, is
    ``.identical`` which is `True` if the two objects being compared are
    identical according to the diff method for objects of that type.
    """
    def __init__(self, a, b):
        """
        The ``_BaseDiff`` class does not implement a ``_diff`` method and
        should not be instantiated directly. Instead instantiate the
        appropriate subclass of ``_BaseDiff`` for the objects being compared
        (for example, use `HeaderDiff` to compare two `Header` objects).
        """
        self.a = a
        self.b = b
        # For internal use in report output
        self._fileobj = None
        self._indent = 0
        # Run the comparison eagerly; subclasses are expected to have set up
        # their ``diff_*`` attributes before delegating to this initializer.
        self._diff()
    def __bool__(self):
        """
        A ``_BaseDiff`` object acts as `True` in a boolean context if the two
        objects compared are *different* (i.e. ``not self.identical``).
        Otherwise it acts as `False`, so ``if diff:`` reads as "if differences
        were found".
        """
        return not self.identical
    @classmethod
    def fromdiff(cls, other, a, b):
        """
        Returns a new Diff object of a specific subclass from an existing diff
        object, passing on the values for any arguments they share in common
        (such as ignore_keywords).
        For example::
            >>> from astropy.io import fits
            >>> hdul1, hdul2 = fits.HDUList(), fits.HDUList()
            >>> headera, headerb = fits.Header(), fits.Header()
            >>> fd = fits.FITSDiff(hdul1, hdul2, ignore_keywords=['*'])
            >>> hd = fits.HeaderDiff.fromdiff(fd, headera, headerb)
            >>> list(hd.ignore_keywords)
            ['*']
        """
        sig = signature(cls.__init__)
        # The first 3 arguments of any Diff initializer are self, a, and b.
        kwargs = {}
        for arg in list(sig.parameters.keys())[3:]:
            # Only forward options that ``other`` actually carries; this is
            # what lets Diff classes with different signatures interoperate.
            if hasattr(other, arg):
                kwargs[arg] = getattr(other, arg)
        return cls(a, b, **kwargs)
    @property
    def identical(self):
        """
        `True` if all the ``.diff_*`` attributes on this diff instance are
        empty, implying that no differences were found.
        Any subclass of ``_BaseDiff`` must have at least one ``.diff_*``
        attribute, which contains a non-empty value if and only if some
        difference was found between the two objects being compared.
        """
        return not any(getattr(self, attr) for attr in self.__dict__
                       if attr.startswith('diff_'))
    @deprecated_renamed_argument('clobber', 'overwrite', '2.0')
    def report(self, fileobj=None, indent=0, overwrite=False):
        """
        Generates a text report on the differences (if any) between two
        objects, and either returns it as a string or writes it to a file-like
        object.
        Parameters
        ----------
        fileobj : file-like object, string, or None (optional)
            If `None`, this method returns the report as a string. Otherwise it
            returns `None` and writes the report to the given file-like object
            (which must have a ``.write()`` method at a minimum), or to a new
            file at the path specified.
        indent : int
            The number of 4 space tabs to indent the report.
        overwrite : bool, optional
            If ``True``, overwrite the output file if it exists. Raises an
            ``OSError`` if ``False`` and the output file exists. Default is
            ``False``.
            .. versionchanged:: 1.3
               ``overwrite`` replaces the deprecated ``clobber`` argument.
        Returns
        -------
        report : str or None
        """
        return_string = False
        filepath = None
        if isinstance(fileobj, str):
            if os.path.exists(fileobj) and not overwrite:
                raise OSError("File {} exists, aborting (pass in "
                              "overwrite=True to overwrite)".format(fileobj))
            else:
                filepath = fileobj
                fileobj = open(filepath, 'w')
        elif fileobj is None:
            # No destination given: buffer the report and return it as a str.
            fileobj = io.StringIO()
            return_string = True
        self._fileobj = fileobj
        self._indent = indent  # This is used internally by _writeln
        try:
            self._report()
        finally:
            # Only close the file if *we* opened it from a path; a caller's
            # own file-like object is left open for them to manage.
            if filepath:
                fileobj.close()
        if return_string:
            return fileobj.getvalue()
    def _writeln(self, text):
        # Write one indented line of report output to the current file object.
        self._fileobj.write(fixed_width_indent(text, self._indent) + '\n')
    def _diff(self):
        # Subclasses must implement the actual comparison here.
        raise NotImplementedError
    def _report(self):
        # Subclasses must implement the report formatting here.
        raise NotImplementedError
class FITSDiff(_BaseDiff):
    """Diff two FITS files by filename, or two `HDUList` objects.

    `FITSDiff` objects have the following diff attributes:
    - ``diff_hdu_count``: If the FITS files being compared have different
      numbers of HDUs, this contains a 2-tuple of the number of HDUs in each
      file.
    - ``diff_hdus``: If any HDUs with the same index are different, this
      contains a list of 2-tuples of the HDU index and the `HDUDiff` object
      representing the differences between the two HDUs.
    """
    def __init__(self, a, b, ignore_hdus=[], ignore_keywords=[],
                 ignore_comments=[], ignore_fields=[],
                 numdiffs=10, rtol=0.0, atol=0.0,
                 ignore_blanks=True, ignore_blank_cards=True, tolerance=None):
        """
        Parameters
        ----------
        a : str or `HDUList`
            The filename of a FITS file on disk, or an `HDUList` object.
        b : str or `HDUList`
            The filename of a FITS file on disk, or an `HDUList` object to
            compare to the first file.
        ignore_hdus : sequence, optional
            HDU names to ignore when comparing two FITS files or HDU lists; the
            presence of these HDUs and their contents are ignored. Wildcard
            strings may also be included in the list.
        ignore_keywords : sequence, optional
            Header keywords to ignore when comparing two headers; the presence
            of these keywords and their values are ignored. Wildcard strings
            may also be included in the list.
        ignore_comments : sequence, optional
            A list of header keywords whose comments should be ignored in the
            comparison. May contain wildcard strings as with ignore_keywords.
        ignore_fields : sequence, optional
            The (case-insensitive) names of any table columns to ignore if any
            table data is to be compared.
        numdiffs : int, optional
            The number of pixel/table values to output when reporting HDU data
            differences. Though the count of differences is the same either
            way, this allows controlling the number of different values that
            are kept in memory or output. If a negative value is given, then
            numdiffs is treated as unlimited (default: 10).
        rtol : float, optional
            The relative difference to allow when comparing two float values
            either in header values, image arrays, or table columns
            (default: 0.0). Values which satisfy the expression
            .. math::
                \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
            are considered to be different.
            The underlying function used for comparison is `numpy.allclose`.
            .. versionchanged:: 2.0
               ``rtol`` replaces the deprecated ``tolerance`` argument.
        atol : float, optional
            The allowed absolute difference. See also ``rtol`` parameter.
            .. versionadded:: 2.0
        ignore_blanks : bool, optional
            Ignore extra whitespace at the end of string values either in
            headers or data. Extra leading whitespace is not ignored
            (default: True).
        ignore_blank_cards : bool, optional
            Ignore all cards that are blank, i.e. they only contain
            whitespace (default: True).
        """
        if isinstance(a, str):
            try:
                a = fitsopen(a)
            except Exception as exc:
                raise OSError("error opening file a ({}): {}: {}".format(
                    a, exc.__class__.__name__, exc.args[0])) from exc
            close_a = True
        else:
            close_a = False
        if isinstance(b, str):
            try:
                b = fitsopen(b)
            except Exception as exc:
                # BUGFIX: if ``a`` was opened from a filename above, close it
                # before propagating the error; previously the handle leaked
                # because the try/finally below was never reached.
                if close_a:
                    a.close()
                raise OSError("error opening file b ({}): {}: {}".format(
                    b, exc.__class__.__name__, exc.args[0])) from exc
            close_b = True
        else:
            close_b = False
        # Normalize keywords/fields to ignore to upper case
        self.ignore_hdus = {k.upper() for k in ignore_hdus}
        self.ignore_keywords = {k.upper() for k in ignore_keywords}
        self.ignore_comments = {k.upper() for k in ignore_comments}
        self.ignore_fields = {k.upper() for k in ignore_fields}
        self.numdiffs = numdiffs
        self.rtol = rtol
        self.atol = atol
        if tolerance is not None:  # This should be removed in the next astropy version
            warnings.warn(
                '"tolerance" was deprecated in version 2.0 and will be removed in '
                'a future version. Use argument "rtol" instead.',
                AstropyDeprecationWarning)
            # When tolerance is provided *always* override `rtol` during the
            # transition/deprecation period.
            self.rtol = tolerance
        self.ignore_blanks = ignore_blanks
        self.ignore_blank_cards = ignore_blank_cards
        # Some hdu names may be pattern wildcards. Find them.
        self.ignore_hdu_patterns = set()
        for name in list(self.ignore_hdus):
            if name != '*' and glob.has_magic(name):
                self.ignore_hdus.remove(name)
                self.ignore_hdu_patterns.add(name)
        self.diff_hdu_count = ()
        self.diff_hdus = []
        try:
            super().__init__(a, b)
        finally:
            # Close only the HDULists that were opened here from filenames;
            # caller-supplied HDULists remain the caller's responsibility.
            if close_a:
                a.close()
            if close_b:
                b.close()
    def _diff(self):
        # Compare the HDU counts first; common HDUs are still compared below.
        if len(self.a) != len(self.b):
            self.diff_hdu_count = (len(self.a), len(self.b))
        # Record filenames for use later in _report
        self.filenamea = self.a.filename()
        if not self.filenamea:
            self.filenamea = '<{} object at {:#x}>'.format(
                self.a.__class__.__name__, id(self.a))
        self.filenameb = self.b.filename()
        if not self.filenameb:
            self.filenameb = '<{} object at {:#x}>'.format(
                self.b.__class__.__name__, id(self.b))
        # Drop ignored HDUs (exact names first, then wildcard patterns)
        # before the index-by-index comparison.
        if self.ignore_hdus:
            self.a = HDUList([h for h in self.a if h.name not in self.ignore_hdus])
            self.b = HDUList([h for h in self.b if h.name not in self.ignore_hdus])
        if self.ignore_hdu_patterns:
            a_names = [hdu.name for hdu in self.a]
            b_names = [hdu.name for hdu in self.b]
            for pattern in self.ignore_hdu_patterns:
                self.a = HDUList([h for h in self.a if h.name not in fnmatch.filter(
                    a_names, pattern)])
                self.b = HDUList([h for h in self.b if h.name not in fnmatch.filter(
                    b_names, pattern)])
        # For now, just compare the extensions one by one in order.
        # Might allow some more sophisticated types of diffing later.
        # TODO: Somehow or another simplify the passing around of diff
        # options--this will become important as the number of options grows
        for idx in range(min(len(self.a), len(self.b))):
            hdu_diff = HDUDiff.fromdiff(self, self.a[idx], self.b[idx])
            if not hdu_diff.identical:
                self.diff_hdus.append((idx, hdu_diff))
    def _report(self):
        # Header section of the report: tool version, inputs, and the
        # comparison options that were in effect.
        wrapper = textwrap.TextWrapper(initial_indent=' ',
                                       subsequent_indent=' ')
        self._fileobj.write('\n')
        self._writeln(f' fitsdiff: {__version__}')
        self._writeln(f' a: {self.filenamea}\n b: {self.filenameb}')
        if self.ignore_hdus:
            ignore_hdus = ' '.join(sorted(self.ignore_hdus))
            self._writeln(' HDU(s) not to be compared:\n{}'
                          .format(wrapper.fill(ignore_hdus)))
        if self.ignore_hdu_patterns:
            ignore_hdu_patterns = ' '.join(sorted(self.ignore_hdu_patterns))
            self._writeln(' HDU(s) not to be compared:\n{}'
                          .format(wrapper.fill(ignore_hdu_patterns)))
        if self.ignore_keywords:
            ignore_keywords = ' '.join(sorted(self.ignore_keywords))
            self._writeln(' Keyword(s) not to be compared:\n{}'
                          .format(wrapper.fill(ignore_keywords)))
        if self.ignore_comments:
            ignore_comments = ' '.join(sorted(self.ignore_comments))
            self._writeln(' Keyword(s) whose comments are not to be compared'
                          ':\n{}'.format(wrapper.fill(ignore_comments)))
        if self.ignore_fields:
            ignore_fields = ' '.join(sorted(self.ignore_fields))
            self._writeln(' Table column(s) not to be compared:\n{}'
                          .format(wrapper.fill(ignore_fields)))
        self._writeln(' Maximum number of different data values to be '
                      'reported: {}'.format(self.numdiffs))
        self._writeln(' Relative tolerance: {}, Absolute tolerance: {}'
                      .format(self.rtol, self.atol))
        if self.diff_hdu_count:
            self._fileobj.write('\n')
            self._writeln('Files contain different numbers of HDUs:')
            self._writeln(' a: {}'.format(self.diff_hdu_count[0]))
            self._writeln(' b: {}'.format(self.diff_hdu_count[1]))
            if not self.diff_hdus:
                self._writeln('No differences found between common HDUs.')
                return
        elif not self.diff_hdus:
            self._fileobj.write('\n')
            self._writeln('No differences found.')
            return
        for idx, hdu_diff in self.diff_hdus:
            # print out the extension heading
            if idx == 0:
                self._fileobj.write('\n')
                self._writeln('Primary HDU:')
            else:
                self._fileobj.write('\n')
                self._writeln(f'Extension HDU {idx}:')
            hdu_diff.report(self._fileobj, indent=self._indent + 1)
class HDUDiff(_BaseDiff):
    """
    Diff two HDU objects, including their headers and their data (but only if
    both HDUs contain the same type of data (image, table, or unknown).

    `HDUDiff` objects have the following diff attributes:
    - ``diff_extnames``: If the two HDUs have different EXTNAME values, this
      contains a 2-tuple of the different extension names.
    - ``diff_extvers``: If the two HDUS have different EXTVER values, this
      contains a 2-tuple of the different extension versions.
    - ``diff_extlevels``: If the two HDUs have different EXTLEVEL values, this
      contains a 2-tuple of the different extension levels.
    - ``diff_extension_types``: If the two HDUs have different XTENSION values,
      this contains a 2-tuple of the different extension types.
    - ``diff_headers``: Contains a `HeaderDiff` object for the headers of the
      two HDUs. This will always contain an object--it may be determined
      whether the headers are different through ``diff_headers.identical``.
    - ``diff_data``: Contains either a `ImageDataDiff`, `TableDataDiff`, or
      `RawDataDiff` as appropriate for the data in the HDUs, and only if the
      two HDUs have non-empty data of the same type (`RawDataDiff` is used for
      HDUs containing non-empty data of an indeterminate type).
    """
    def __init__(self, a, b, ignore_keywords=[], ignore_comments=[],
                 ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0,
                 ignore_blanks=True, ignore_blank_cards=True, tolerance=None):
        """
        Parameters
        ----------
        a : HDU object
            An HDU object.
        b : HDU object
            An HDU object to compare to the first HDU object.
        ignore_keywords : sequence, optional
            Header keywords to ignore when comparing two headers; the presence
            of these keywords and their values are ignored. Wildcard strings
            may also be included in the list.
        ignore_comments : sequence, optional
            A list of header keywords whose comments should be ignored in the
            comparison. May contain wildcard strings as with ignore_keywords.
        ignore_fields : sequence, optional
            The (case-insensitive) names of any table columns to ignore if any
            table data is to be compared.
        numdiffs : int, optional
            The number of pixel/table values to output when reporting HDU data
            differences. Though the count of differences is the same either
            way, this allows controlling the number of different values that
            are kept in memory or output. If a negative value is given, then
            numdiffs is treated as unlimited (default: 10).
        rtol : float, optional
            The relative difference to allow when comparing two float values
            either in header values, image arrays, or table columns
            (default: 0.0). Values which satisfy the expression
            .. math::
                \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
            are considered to be different.
            The underlying function used for comparison is `numpy.allclose`.
            .. versionchanged:: 2.0
               ``rtol`` replaces the deprecated ``tolerance`` argument.
        atol : float, optional
            The allowed absolute difference. See also ``rtol`` parameter.
            .. versionadded:: 2.0
        ignore_blanks : bool, optional
            Ignore extra whitespace at the end of string values either in
            headers or data. Extra leading whitespace is not ignored
            (default: True).
        ignore_blank_cards : bool, optional
            Ignore all cards that are blank, i.e. they only contain
            whitespace (default: True).
        """
        self.ignore_keywords = {k.upper() for k in ignore_keywords}
        self.ignore_comments = {k.upper() for k in ignore_comments}
        self.ignore_fields = {k.upper() for k in ignore_fields}
        self.rtol = rtol
        self.atol = atol
        if tolerance is not None:  # This should be removed in the next astropy version
            warnings.warn(
                '"tolerance" was deprecated in version 2.0 and will be removed in '
                'a future version. Use argument "rtol" instead.',
                AstropyDeprecationWarning)
            # When tolerance is provided *always* override `rtol` during the
            # transition/deprecation period.
            self.rtol = tolerance
        self.numdiffs = numdiffs
        self.ignore_blanks = ignore_blanks
        # BUGFIX: this option was previously accepted but never stored, so
        # ``fromdiff``-based construction (e.g. HeaderDiff.fromdiff(self, ...))
        # silently dropped a caller's ignore_blank_cards=False.
        self.ignore_blank_cards = ignore_blank_cards
        self.diff_extnames = ()
        self.diff_extvers = ()
        self.diff_extlevels = ()
        self.diff_extension_types = ()
        self.diff_headers = None
        self.diff_data = None
        super().__init__(a, b)
    def _diff(self):
        # Compare basic extension identity (name/version/level/type) first.
        if self.a.name != self.b.name:
            self.diff_extnames = (self.a.name, self.b.name)
        if self.a.ver != self.b.ver:
            self.diff_extvers = (self.a.ver, self.b.ver)
        if self.a.level != self.b.level:
            self.diff_extlevels = (self.a.level, self.b.level)
        if self.a.header.get('XTENSION') != self.b.header.get('XTENSION'):
            self.diff_extension_types = (self.a.header.get('XTENSION'),
                                         self.b.header.get('XTENSION'))
        # Headers are always diffed; copies are used so the diff cannot
        # mutate the originals.
        self.diff_headers = HeaderDiff.fromdiff(self, self.a.header.copy(),
                                                self.b.header.copy())
        if self.a.data is None or self.b.data is None:
            # TODO: Perhaps have some means of marking this case
            pass
        elif self.a.is_image and self.b.is_image:
            self.diff_data = ImageDataDiff.fromdiff(self, self.a.data,
                                                    self.b.data)
        elif (isinstance(self.a, _TableLikeHDU) and
              isinstance(self.b, _TableLikeHDU)):
            # TODO: Replace this if/when _BaseHDU grows a .is_table property
            self.diff_data = TableDataDiff.fromdiff(self, self.a.data,
                                                    self.b.data)
        elif not self.diff_extension_types:
            # Don't diff the data for unequal extension types that are not
            # recognized image or table types
            self.diff_data = RawDataDiff.fromdiff(self, self.a.data,
                                                  self.b.data)
    def _report(self):
        if self.identical:
            self._writeln(" No differences found.")
        if self.diff_extension_types:
            self._writeln(" Extension types differ:\n a: {}\n "
                          "b: {}".format(*self.diff_extension_types))
        if self.diff_extnames:
            self._writeln(" Extension names differ:\n a: {}\n "
                          "b: {}".format(*self.diff_extnames))
        if self.diff_extvers:
            self._writeln(" Extension versions differ:\n a: {}\n "
                          "b: {}".format(*self.diff_extvers))
        if self.diff_extlevels:
            self._writeln(" Extension levels differ:\n a: {}\n "
                          "b: {}".format(*self.diff_extlevels))
        if not self.diff_headers.identical:
            self._fileobj.write('\n')
            self._writeln(" Headers contain differences:")
            self.diff_headers.report(self._fileobj, indent=self._indent + 1)
        if self.diff_data is not None and not self.diff_data.identical:
            self._fileobj.write('\n')
            self._writeln(" Data contains differences:")
            self.diff_data.report(self._fileobj, indent=self._indent + 1)
class HeaderDiff(_BaseDiff):
    """
    Diff two `Header` objects.

    `HeaderDiff` objects have the following diff attributes:
    - ``diff_keyword_count``: If the two headers contain a different number of
      keywords, this contains a 2-tuple of the keyword count for each header.
    - ``diff_keywords``: If either header contains one or more keywords that
      don't appear at all in the other header, this contains a 2-tuple
      consisting of a list of the keywords only appearing in header a, and a
      list of the keywords only appearing in header b.
    - ``diff_duplicate_keywords``: If a keyword appears in both headers at
      least once, but contains a different number of duplicates (for example, a
      different number of HISTORY cards in each header), an item is added to
      this dict with the keyword as the key, and a 2-tuple of the different
      counts of that keyword as the value. For example::
          {'HISTORY': (20, 19)}
      means that header a contains 20 HISTORY cards, while header b contains
      only 19 HISTORY cards.
    - ``diff_keyword_values``: If any of the common keyword between the two
      headers have different values, they appear in this dict. It has a
      structure similar to ``diff_duplicate_keywords``, with the keyword as the
      key, and a 2-tuple of the different values as the value. For example::
          {'NAXIS': (2, 3)}
      means that the NAXIS keyword has a value of 2 in header a, and a value of
      3 in header b. This excludes any keywords matched by the
      ``ignore_keywords`` list.
    - ``diff_keyword_comments``: Like ``diff_keyword_values``, but contains
      differences between keyword comments.

    `HeaderDiff` objects also have a ``common_keywords`` attribute that lists
    all keywords that appear in both headers.
    """
    def __init__(self, a, b, ignore_keywords=[], ignore_comments=[],
                 rtol=0.0, atol=0.0, ignore_blanks=True, ignore_blank_cards=True,
                 tolerance=None):
        """
        Parameters
        ----------
        a : `Header` or str
            A header, or a string containing a serialized FITS header.
        b : `Header` or str
            A header to compare to the first header.
        ignore_keywords : sequence, optional
            Header keywords to ignore when comparing two headers; the presence
            of these keywords and their values are ignored. Wildcard strings
            may also be included in the list.
        ignore_comments : sequence, optional
            A list of header keywords whose comments should be ignored in the
            comparison. May contain wildcard strings as with ignore_keywords.
        rtol : float, optional
            The relative difference to allow when comparing two float values
            either in header values, image arrays, or table columns
            (default: 0.0). Values which satisfy the expression
            .. math::
                \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
            are considered to be different.
            The underlying function used for comparison is `numpy.allclose`.
            .. versionchanged:: 2.0
               ``rtol`` replaces the deprecated ``tolerance`` argument.
        atol : float, optional
            The allowed absolute difference. See also ``rtol`` parameter.
            .. versionadded:: 2.0
        ignore_blanks : bool, optional
            Ignore extra whitespace at the end of string values either in
            headers or data. Extra leading whitespace is not ignored
            (default: True).
        ignore_blank_cards : bool, optional
            Ignore all cards that are blank, i.e. they only contain
            whitespace (default: True).
        """
        self.ignore_keywords = {k.upper() for k in ignore_keywords}
        self.ignore_comments = {k.upper() for k in ignore_comments}
        self.rtol = rtol
        self.atol = atol
        if tolerance is not None:  # This should be removed in the next astropy version
            warnings.warn(
                '"tolerance" was deprecated in version 2.0 and will be removed in '
                'a future version. Use argument "rtol" instead.',
                AstropyDeprecationWarning)
            self.rtol = tolerance  # when tolerance is provided *always* ignore `rtol`
            # during the transition/deprecation period
        self.ignore_blanks = ignore_blanks
        self.ignore_blank_cards = ignore_blank_cards
        # Split wildcard patterns out of the plain-keyword ignore sets; they
        # are matched with fnmatch during the diff instead of set lookup.
        self.ignore_keyword_patterns = set()
        self.ignore_comment_patterns = set()
        for keyword in list(self.ignore_keywords):
            keyword = keyword.upper()
            if keyword != '*' and glob.has_magic(keyword):
                self.ignore_keywords.remove(keyword)
                self.ignore_keyword_patterns.add(keyword)
        for keyword in list(self.ignore_comments):
            keyword = keyword.upper()
            if keyword != '*' and glob.has_magic(keyword):
                self.ignore_comments.remove(keyword)
                self.ignore_comment_patterns.add(keyword)
        # Keywords appearing in each header
        self.common_keywords = []
        # Set to the number of keywords in each header if the counts differ
        self.diff_keyword_count = ()
        # Set if the keywords common to each header (excluding ignore_keywords)
        # appear in different positions within the header
        # TODO: Implement this
        self.diff_keyword_positions = ()
        # Keywords unique to each header (excluding keywords in
        # ignore_keywords)
        self.diff_keywords = ()
        # Keywords that have different numbers of duplicates in each header
        # (excluding keywords in ignore_keywords)
        self.diff_duplicate_keywords = {}
        # Keywords common to each header but having different values (excluding
        # keywords in ignore_keywords)
        self.diff_keyword_values = defaultdict(list)
        # Keywords common to each header but having different comments
        # (excluding keywords in ignore_keywords or in ignore_comments)
        self.diff_keyword_comments = defaultdict(list)
        if isinstance(a, str):
            a = Header.fromstring(a)
        if isinstance(b, str):
            b = Header.fromstring(b)
        if not (isinstance(a, Header) and isinstance(b, Header)):
            raise TypeError('HeaderDiff can only diff astropy.io.fits.Header '
                            'objects or strings containing FITS headers.')
        super().__init__(a, b)
    # TODO: This doesn't pay much attention to the *order* of the keywords,
    # except in the case of duplicate keywords. The order should be checked
    # too, or at least it should be an option.
    def _diff(self):
        if self.ignore_blank_cards:
            cardsa = [c for c in self.a.cards if str(c) != BLANK_CARD]
            cardsb = [c for c in self.b.cards if str(c) != BLANK_CARD]
        else:
            cardsa = list(self.a.cards)
            cardsb = list(self.b.cards)
        # build dictionaries of keyword values and comments; duplicates
        # (e.g. HISTORY) accumulate as lists in card order
        def get_header_values_comments(cards):
            values = {}
            comments = {}
            for card in cards:
                value = card.value
                if self.ignore_blanks and isinstance(value, str):
                    value = value.rstrip()
                values.setdefault(card.keyword, []).append(value)
                comments.setdefault(card.keyword, []).append(card.comment)
            return values, comments
        valuesa, commentsa = get_header_values_comments(cardsa)
        valuesb, commentsb = get_header_values_comments(cardsb)
        # Normalize all keyword to upper-case for comparison's sake;
        # TODO: HIERARCH keywords should be handled case-sensitively I think
        keywordsa = {k.upper() for k in valuesa}
        keywordsb = {k.upper() for k in valuesb}
        self.common_keywords = sorted(keywordsa.intersection(keywordsb))
        if len(cardsa) != len(cardsb):
            self.diff_keyword_count = (len(cardsa), len(cardsb))
        # Any other diff attributes should exclude ignored keywords
        keywordsa = keywordsa.difference(self.ignore_keywords)
        keywordsb = keywordsb.difference(self.ignore_keywords)
        if self.ignore_keyword_patterns:
            for pattern in self.ignore_keyword_patterns:
                keywordsa = keywordsa.difference(fnmatch.filter(keywordsa,
                                                                pattern))
                keywordsb = keywordsb.difference(fnmatch.filter(keywordsb,
                                                                pattern))
        if '*' in self.ignore_keywords:
            # Any other differences between keywords are to be ignored
            return
        left_only_keywords = sorted(keywordsa.difference(keywordsb))
        right_only_keywords = sorted(keywordsb.difference(keywordsa))
        if left_only_keywords or right_only_keywords:
            self.diff_keywords = (left_only_keywords, right_only_keywords)
        # Compare count of each common keyword
        for keyword in self.common_keywords:
            if keyword in self.ignore_keywords:
                continue
            if self.ignore_keyword_patterns:
                skip = False
                for pattern in self.ignore_keyword_patterns:
                    if fnmatch.fnmatch(keyword, pattern):
                        skip = True
                        break
                if skip:
                    continue
            counta = len(valuesa[keyword])
            countb = len(valuesb[keyword])
            if counta != countb:
                self.diff_duplicate_keywords[keyword] = (counta, countb)
            # Compare keywords' values and comments
            # NOTE: zip truncates to the shorter duplicate list, so only the
            # first min(counta, countb) occurrences are value-compared.
            for a, b in zip(valuesa[keyword], valuesb[keyword]):
                if diff_values(a, b, rtol=self.rtol, atol=self.atol):
                    self.diff_keyword_values[keyword].append((a, b))
                else:
                    # If there are duplicate keywords we need to be able to
                    # index each duplicate; if the values of a duplicate
                    # are identical use None here
                    self.diff_keyword_values[keyword].append(None)
            if not any(self.diff_keyword_values[keyword]):
                # No differences found; delete the array of Nones
                del self.diff_keyword_values[keyword]
            if '*' in self.ignore_comments or keyword in self.ignore_comments:
                continue
            if self.ignore_comment_patterns:
                skip = False
                for pattern in self.ignore_comment_patterns:
                    if fnmatch.fnmatch(keyword, pattern):
                        skip = True
                        break
                if skip:
                    continue
            for a, b in zip(commentsa[keyword], commentsb[keyword]):
                if diff_values(a, b):
                    self.diff_keyword_comments[keyword].append((a, b))
                else:
                    self.diff_keyword_comments[keyword].append(None)
            if not any(self.diff_keyword_comments[keyword]):
                del self.diff_keyword_comments[keyword]
    def _report(self):
        if self.diff_keyword_count:
            self._writeln(' Headers have different number of cards:')
            self._writeln(' a: {}'.format(self.diff_keyword_count[0]))
            self._writeln(' b: {}'.format(self.diff_keyword_count[1]))
        if self.diff_keywords:
            for keyword in self.diff_keywords[0]:
                # Commentary keywords (HISTORY/COMMENT/...) index to a list;
                # show only the first occurrence in the report.
                if keyword in Card._commentary_keywords:
                    val = self.a[keyword][0]
                else:
                    val = self.a[keyword]
                self._writeln(' Extra keyword {!r:8} in a: {!r}'.format(
                    keyword, val))
            for keyword in self.diff_keywords[1]:
                if keyword in Card._commentary_keywords:
                    val = self.b[keyword][0]
                else:
                    val = self.b[keyword]
                self._writeln(' Extra keyword {!r:8} in b: {!r}'.format(
                    keyword, val))
        if self.diff_duplicate_keywords:
            for keyword, count in sorted(self.diff_duplicate_keywords.items()):
                self._writeln(' Inconsistent duplicates of keyword {!r:8}:'
                              .format(keyword))
                self._writeln(' Occurs {} time(s) in a, {} times in (b)'
                              .format(*count))
        if self.diff_keyword_values or self.diff_keyword_comments:
            for keyword in self.common_keywords:
                report_diff_keyword_attr(self._fileobj, 'values',
                                         self.diff_keyword_values, keyword,
                                         ind=self._indent)
                report_diff_keyword_attr(self._fileobj, 'comments',
                                         self.diff_keyword_comments, keyword,
                                         ind=self._indent)
class ImageDataDiff(_BaseDiff):
    """
    Diff two image data arrays (really any array from a PRIMARY HDU or an IMAGE
    extension HDU, though the data unit is assumed to be "pixels").

    `ImageDataDiff` objects have the following diff attributes:
    - ``diff_dimensions``: If the two arrays contain either a different number
      of dimensions or different sizes in any dimension, this contains a
      2-tuple of the shapes of each array. Currently no further comparison is
      performed on images that don't have the exact same dimensions.
    - ``diff_pixels``: If the two images contain any different pixels, this
      contains a list of 2-tuples of the array index where the difference was
      found, and another 2-tuple containing the different values. For example,
      if the pixel at (0, 0) contains different values this would look like::
          [(0, 0), (1.1, 2.2)]
      where 1.1 and 2.2 are the values of that pixel in each array. This
      array only contains up to ``self.numdiffs`` differences, for storage
      efficiency.
    - ``diff_total``: The total number of different pixels found between the
      arrays. Although ``diff_pixels`` does not necessarily contain all the
      different pixel values, this can be used to get a count of the total
      number of differences found.
    - ``diff_ratio``: Contains the ratio of ``diff_total`` to the total number
      of pixels in the arrays.
    """
    def __init__(self, a, b, numdiffs=10, rtol=0.0, atol=0.0, tolerance=None):
        """
        Parameters
        ----------
        a : ndarray
            An image data array.
        b : ndarray
            An image data array to compare to the first array.
        numdiffs : int, optional
            The number of pixel/table values to output when reporting HDU data
            differences. Though the count of differences is the same either
            way, this allows controlling the number of different values that
            are kept in memory or output. If a negative value is given, then
            numdiffs is treated as unlimited (default: 10).
        rtol : float, optional
            The relative difference to allow when comparing two float values
            either in header values, image arrays, or table columns
            (default: 0.0). Values which satisfy the expression
            .. math::
                \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
            are considered to be different.
            The underlying function used for comparison is `numpy.allclose`.
            .. versionchanged:: 2.0
               ``rtol`` replaces the deprecated ``tolerance`` argument.
        atol : float, optional
            The allowed absolute difference. See also ``rtol`` parameter.
            .. versionadded:: 2.0
        """
        self.numdiffs = numdiffs
        self.rtol = rtol
        self.atol = atol
        if tolerance is not None:  # This should be removed in the next astropy version
            warnings.warn(
                '"tolerance" was deprecated in version 2.0 and will be removed in '
                'a future version. Use argument "rtol" instead.',
                AstropyDeprecationWarning)
            # When tolerance is provided *always* override `rtol` during the
            # transition/deprecation period.
            self.rtol = tolerance
        self.diff_dimensions = ()
        self.diff_pixels = []
        self.diff_ratio = 0
        # self.diff_pixels only holds up to numdiffs differing pixels, but this
        # self.diff_total stores the total count of differences between
        # the images, but not the different values
        self.diff_total = 0
        super().__init__(a, b)
    def _diff(self):
        if self.a.shape != self.b.shape:
            self.diff_dimensions = (self.a.shape, self.b.shape)
            # Don't do any further comparison if the dimensions differ
            # TODO: Perhaps we could, however, diff just the intersection
            # between the two images
            return
        # Find the indices where the values are not equal
        # If neither a nor b are floating point (or complex), ignore rtol and
        # atol
        if not (np.issubdtype(self.a.dtype, np.inexact) or
                np.issubdtype(self.b.dtype, np.inexact)):
            rtol = 0
            atol = 0
        else:
            rtol = self.rtol
            atol = self.atol
        diffs = where_not_allclose(self.a, self.b, atol=atol, rtol=rtol)
        self.diff_total = len(diffs[0])
        if self.diff_total == 0:
            # Then we're done
            return
        if self.numdiffs < 0:
            numdiffs = self.diff_total
        else:
            numdiffs = self.numdiffs
        self.diff_pixels = [(idx, (self.a[idx], self.b[idx]))
                            for idx in islice(zip(*diffs), 0, numdiffs)]
        self.diff_ratio = float(self.diff_total) / float(len(self.a.flat))
    def _report(self):
        if self.diff_dimensions:
            # FITS stores axes in reverse of the numpy shape order, hence the
            # reversed() when formatting dimensions for the report.
            dimsa = ' x '.join(str(d) for d in
                               reversed(self.diff_dimensions[0]))
            dimsb = ' x '.join(str(d) for d in
                               reversed(self.diff_dimensions[1]))
            self._writeln(' Data dimensions differ:')
            self._writeln(f' a: {dimsa}')
            self._writeln(f' b: {dimsb}')
            # For now we don't do any further comparison if the dimensions
            # differ; though in the future it might be nice to be able to
            # compare at least where the images intersect
            self._writeln(' No further data comparison performed.')
            return
        if not self.diff_pixels:
            return
        for index, values in self.diff_pixels:
            # Convert 0-based numpy indices to 1-based FITS convention.
            index = [x + 1 for x in reversed(index)]
            self._writeln(f' Data differs at {index}:')
            report_diff_values(values[0], values[1], fileobj=self._fileobj,
                               indent_width=self._indent + 1)
        # BUGFIX: only print an ellipsis when the listing was actually
        # truncated. Previously a negative (unlimited) numdiffs always
        # printed ' ...' because diff_total > numdiffs was trivially true.
        if 0 <= self.numdiffs < self.diff_total:
            self._writeln(' ...')
        self._writeln(' {} different pixels found ({:.2%} different).'
                      .format(self.diff_total, self.diff_ratio))
class RawDataDiff(ImageDataDiff):
    """
    `RawDataDiff` is just a special case of `ImageDataDiff` where the images
    are one-dimensional, and the data is treated as a 1-dimensional array of
    bytes instead of pixel values. This is used to compare the data of two
    non-standard extension HDUs that were not recognized as containing image or
    table data.

    `RawDataDiff` objects have the following diff attributes:
    - ``diff_dimensions``: Same as the ``diff_dimensions`` attribute of
      `ImageDataDiff` objects. Though the "dimension" of each array is just an
      integer representing the number of bytes in the data.
    - ``diff_bytes``: Like the ``diff_pixels`` attribute of `ImageDataDiff`
      objects, but renamed to reflect the minor semantic difference that these
      are raw bytes and not pixel values. Also the indices are integers
      instead of tuples.
    - ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
    """
    def __init__(self, a, b, numdiffs=10):
        """
        Parameters
        ----------
        a : ndarray
            A 1-dimensional array of raw data bytes.
        b : ndarray
            A 1-dimensional array of raw data bytes to compare to the first
            array.
        numdiffs : int, optional
            The number of pixel/table values to output when reporting HDU data
            differences. Though the count of differences is the same either
            way, this allows controlling the number of different values that
            are kept in memory or output. If a negative value is given, then
            numdiffs is treated as unlimited (default: 10).
        """
        self.diff_dimensions = ()
        self.diff_bytes = []
        super().__init__(a, b, numdiffs=numdiffs)
    def _diff(self):
        super()._diff()
        if self.diff_dimensions:
            # The arrays are 1-D, so report just the byte counts rather than
            # the 1-tuples of shapes stored by ImageDataDiff.
            self.diff_dimensions = (self.diff_dimensions[0][0],
                                    self.diff_dimensions[1][0])
        # Re-label pixel diffs as byte diffs, flattening each 1-tuple index
        # to a plain integer; drop the now-redundant diff_pixels attribute.
        self.diff_bytes = [(x[0], y) for x, y in self.diff_pixels]
        del self.diff_pixels
    def _report(self):
        if self.diff_dimensions:
            self._writeln(' Data sizes differ:')
            self._writeln(' a: {} bytes'.format(self.diff_dimensions[0]))
            self._writeln(' b: {} bytes'.format(self.diff_dimensions[1]))
            # For now we don't do any further comparison if the dimensions
            # differ; though in the future it might be nice to be able to
            # compare at least where the images intersect
            self._writeln(' No further data comparison performed.')
            return
        if not self.diff_bytes:
            return
        for index, values in self.diff_bytes:
            self._writeln(f' Data differs at byte {index}:')
            report_diff_values(values[0], values[1], fileobj=self._fileobj,
                               indent_width=self._indent + 1)
        # BUGFIX: previously ' ...' was printed unconditionally; match the
        # sibling ImageDataDiff report and only print it when the listing
        # was actually truncated by numdiffs.
        if 0 <= self.numdiffs < self.diff_total:
            self._writeln(' ...')
        self._writeln(' {} different bytes found ({:.2%} different).'
                      .format(self.diff_total, self.diff_ratio))
class TableDataDiff(_BaseDiff):
    """
    Diff two table data arrays. It doesn't matter whether the data originally
    came from a binary or ASCII table--the data should be passed in as a
    recarray.

    `TableDataDiff` objects have the following diff attributes:

    - ``diff_column_count``: If the tables being compared have different
      numbers of columns, this contains a 2-tuple of the column count in each
      table. Even if the tables have different column counts, an attempt is
      still made to compare any columns they have in common.

    - ``diff_columns``: If either table contains columns unique to that table,
      either in name or format, this contains a 2-tuple of lists. The first
      element is a list of columns (these are full `Column` objects) that
      appear only in table a. The second element is a list of columns that
      appear only in table b. This only lists columns with different column
      definitions, and has nothing to do with the data in those columns.

    - ``diff_column_names``: This is like ``diff_columns``, but lists only the
      names of columns unique to either table, rather than the full `Column`
      objects.

    - ``diff_column_attributes``: Lists columns that are in both tables but
      have different secondary attributes, such as TUNIT or TDISP. The format
      is a list of 2-tuples: The first a tuple of the column name and the
      attribute, the second a tuple of the different values.

    - ``diff_values``: `TableDataDiff` compares the data in each table on a
      column-by-column basis. If any different data is found, it is added to
      this list. The format of this list is similar to the ``diff_pixels``
      attribute on `ImageDataDiff` objects, though the "index" consists of a
      (column_name, row) tuple. For example::

          [('TARGET', 0), ('NGC1001', 'NGC1002')]

      shows that the tables contain different values in the 0-th row of the
      'TARGET' column.

    - ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.

    `TableDataDiff` objects also have a ``common_columns`` attribute that lists
    the `Column` objects for columns that are identical in both tables, and a
    ``common_column_names`` attribute which contains a set of the names of
    those columns.
    """

    # NOTE: the default for ignore_fields is an (immutable) tuple rather
    # than a mutable list literal; it is only ever read into a set below.
    def __init__(self, a, b, ignore_fields=(), numdiffs=10, rtol=0.0, atol=0.0,
                 tolerance=None):
        """
        Parameters
        ----------
        a : `HDUList`
            An `HDUList` object.

        b : `HDUList`
            An `HDUList` object to compare to the first `HDUList` object.

        ignore_fields : sequence, optional
            The (case-insensitive) names of any table columns to ignore if any
            table data is to be compared.

        numdiffs : int, optional
            The number of pixel/table values to output when reporting HDU data
            differences. Though the count of differences is the same either
            way, this allows controlling the number of different values that
            are kept in memory or output. If a negative value is given, then
            numdiffs is treated as unlimited (default: 10).

        rtol : float, optional
            The relative difference to allow when comparing two float values
            either in header values, image arrays, or table columns
            (default: 0.0). Values which satisfy the expression

            .. math::

                \\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|

            are considered to be different.
            The underlying function used for comparison is `numpy.allclose`.

            .. versionchanged:: 2.0
               ``rtol`` replaces the deprecated ``tolerance`` argument.

        atol : float, optional
            The allowed absolute difference. See also ``rtol`` parameter.

            .. versionadded:: 2.0
        """
        self.ignore_fields = set(ignore_fields)
        self.numdiffs = numdiffs
        self.rtol = rtol
        self.atol = atol

        if tolerance is not None:  # This should be removed in the next astropy version
            warnings.warn(
                '"tolerance" was deprecated in version 2.0 and will be removed in '
                'a future version. Use argument "rtol" instead.',
                AstropyDeprecationWarning)
            self.rtol = tolerance  # when tolerance is provided *always* ignore `rtol`
            # during the transition/deprecation period

        self.common_columns = []
        self.common_column_names = set()

        # self.diff_columns contains columns with different column definitions,
        # but not different column data. Column data is only compared in
        # columns that have the same definitions
        self.diff_rows = ()
        self.diff_column_count = ()
        self.diff_columns = ()

        # If two columns have the same name+format, but other attributes are
        # different (such as TUNIT or such) they are listed here
        self.diff_column_attributes = []

        # Like self.diff_columns, but just contains a list of the column names
        # unique to each table, and in the order they appear in the tables
        self.diff_column_names = ()
        self.diff_values = []

        self.diff_ratio = 0
        self.diff_total = 0

        super().__init__(a, b)

    def _diff(self):
        # Much of the code for comparing columns is similar to the code for
        # comparing headers--consider refactoring
        colsa = self.a.columns
        colsb = self.b.columns

        if len(colsa) != len(colsb):
            self.diff_column_count = (len(colsa), len(colsb))

        # Even if the number of columns are unequal, we still do comparison of
        # any common columns
        colsa = {c.name.lower(): c for c in colsa}
        colsb = {c.name.lower(): c for c in colsb}

        if '*' in self.ignore_fields:
            # If all columns are to be ignored, ignore any further differences
            # between the columns
            return

        # Keep the user's original ignore_fields list for reporting purposes,
        # but internally use a case-insensitive version
        ignore_fields = {f.lower() for f in self.ignore_fields}

        # It might be nice if there were a cleaner way to do this, but for now
        # it'll do.  (Entries in ignore_fields are already lower-cased above.)
        for fieldname in ignore_fields:
            if fieldname in colsa:
                del colsa[fieldname]
            if fieldname in colsb:
                del colsb[fieldname]

        colsa_set = set(colsa.values())
        colsb_set = set(colsb.values())
        self.common_columns = sorted(colsa_set.intersection(colsb_set),
                                     key=operator.attrgetter('name'))

        self.common_column_names = {col.name.lower()
                                    for col in self.common_columns}

        left_only_columns = {col.name.lower(): col
                             for col in colsa_set.difference(colsb_set)}
        right_only_columns = {col.name.lower(): col
                              for col in colsb_set.difference(colsa_set)}

        if left_only_columns or right_only_columns:
            self.diff_columns = (left_only_columns, right_only_columns)
            self.diff_column_names = ([], [])

        # Report unique column names in the order they appear in each table.
        if left_only_columns:
            for col in self.a.columns:
                if col.name.lower() in left_only_columns:
                    self.diff_column_names[0].append(col.name)

        if right_only_columns:
            for col in self.b.columns:
                if col.name.lower() in right_only_columns:
                    self.diff_column_names[1].append(col.name)

        # If the tables have a different number of rows, we don't compare the
        # columns right now.
        # TODO: It might be nice to optionally compare the first n rows where n
        # is the minimum of the row counts between the two tables.
        if len(self.a) != len(self.b):
            self.diff_rows = (len(self.a), len(self.b))
            return

        # If the tables contain no rows there's no data to compare, so we're
        # done at this point. (See ticket #178)
        if len(self.a) == len(self.b) == 0:
            return

        # Like in the old fitsdiff, compare tables on a column by column basis
        # The difficulty here is that, while FITS column names are meant to be
        # case-insensitive, Astropy still allows, for the sake of flexibility,
        # two columns with the same name but different case. When columns are
        # accessed in FITS tables, a case-sensitive is tried first, and failing
        # that a case-insensitive match is made.
        # It's conceivable that the same column could appear in both tables
        # being compared, but with different case.
        # Though it *may* lead to inconsistencies in these rare cases, this
        # just assumes that there are no duplicated column names in either
        # table, and that the column names can be treated case-insensitively.
        for col in self.common_columns:
            name_lower = col.name.lower()
            if name_lower in ignore_fields:
                continue

            cola = colsa[name_lower]
            colb = colsb[name_lower]

            # Compare secondary column attributes (TUNIT, TDISP, ...).
            for attr, _ in _COL_ATTRS:
                vala = getattr(cola, attr, None)
                valb = getattr(colb, attr, None)
                if diff_values(vala, valb):
                    self.diff_column_attributes.append(
                        ((col.name.upper(), attr), (vala, valb)))

            arra = self.a[col.name]
            arrb = self.b[col.name]

            if (np.issubdtype(arra.dtype, np.floating) and
                    np.issubdtype(arrb.dtype, np.floating)):
                # Floats are compared within the configured tolerances.
                diffs = where_not_allclose(arra, arrb,
                                           rtol=self.rtol,
                                           atol=self.atol)
            elif 'P' in col.format:
                # Variable-length arrays must be compared row by row.
                diffs = ([idx for idx in range(len(arra))
                          if not np.allclose(arra[idx], arrb[idx],
                                             rtol=self.rtol,
                                             atol=self.atol)],)
            else:
                diffs = np.where(arra != arrb)

            self.diff_total += len(set(diffs[0]))

            if self.numdiffs >= 0:
                if len(self.diff_values) >= self.numdiffs:
                    # Don't save any more diff values
                    continue

                # Add no more diff'd values than this
                max_diffs = self.numdiffs - len(self.diff_values)
            else:
                max_diffs = len(diffs[0])

            last_seen_idx = None
            for idx in islice(diffs[0], 0, max_diffs):
                if idx == last_seen_idx:
                    # Skip duplicate indices, which may occur when the column
                    # data contains multi-dimensional values; we're only
                    # interested in storing row-by-row differences
                    continue
                last_seen_idx = idx
                self.diff_values.append(((col.name, idx),
                                         (arra[idx], arrb[idx])))

        total_values = len(self.a) * len(self.a.dtype.fields)
        self.diff_ratio = float(self.diff_total) / float(total_values)

    def _report(self):
        if self.diff_column_count:
            self._writeln(' Tables have different number of columns:')
            self._writeln(' a: {}'.format(self.diff_column_count[0]))
            self._writeln(' b: {}'.format(self.diff_column_count[1]))

        if self.diff_column_names:
            # Show columns with names unique to either table
            # (local renamed from `format` to avoid shadowing the builtin)
            for name in self.diff_column_names[0]:
                fmt = self.diff_columns[0][name.lower()].format
                self._writeln(' Extra column {} of format {} in a'.format(
                    name, fmt))
            for name in self.diff_column_names[1]:
                fmt = self.diff_columns[1][name.lower()].format
                self._writeln(' Extra column {} of format {} in b'.format(
                    name, fmt))

        col_attrs = dict(_COL_ATTRS)
        # Now go through each table again and show columns with common
        # names but other property differences...
        for col_attr, vals in self.diff_column_attributes:
            name, attr = col_attr
            self._writeln(' Column {} has different {}:'.format(
                name, col_attrs[attr]))
            report_diff_values(vals[0], vals[1], fileobj=self._fileobj,
                               indent_width=self._indent + 1)

        if self.diff_rows:
            self._writeln(' Table rows differ:')
            self._writeln(' a: {}'.format(self.diff_rows[0]))
            self._writeln(' b: {}'.format(self.diff_rows[1]))
            self._writeln(' No further data comparison performed.')
            return

        if not self.diff_values:
            return

        # Finally, let's go through and report column data differences:
        for indx, values in self.diff_values:
            self._writeln(' Column {} data differs in row {}:'.format(*indx))
            report_diff_values(values[0], values[1], fileobj=self._fileobj,
                               indent_width=self._indent + 1)

        if self.diff_values and self.numdiffs < self.diff_total:
            self._writeln(' ...{} additional difference(s) found.'.format(
                self.diff_total - self.numdiffs))

        if self.diff_total > self.numdiffs:
            self._writeln(' ...')

        self._writeln(' {} different table data element(s) found '
                      '({:.2%} different).'
                      .format(self.diff_total, self.diff_ratio))
def report_diff_keyword_attr(fileobj, attr, diffs, keyword, ind=0):
    """
    Write a diff between two header keyword values or comments to the specified
    file-like object.
    """
    if keyword not in diffs:
        return

    for idx, val in enumerate(diffs[keyword]):
        if val is None:
            continue
        # Duplicate occurrences of the same keyword after the first are
        # labelled "[2]", "[3]", ...
        dup = '' if idx == 0 else '[{}]'.format(idx + 1)
        fileobj.write(
            fixed_width_indent(' Keyword {:8}{} has different {}:\n'
                               .format(keyword, dup, attr), ind))
        report_diff_values(val[0], val[1], fileobj=fileobj,
                           indent_width=ind + 1)
|
"""
Copyright (C) 2014, Alex Izvorski
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of scikit-video nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function
from __future__ import division
import numpy
import subprocess
import json
class VideoCapture:
    """
    Read video using avconv or ffmpeg in a subprocess.
    The API is modelled after cv2.VideoCapture, and in many cases is a drop-in replacement.
    """

    def __init__(self, filename=None, frameSize=None):
        """
        Parameters
        ----------
        filename : str, optional
            Path of the video to open. If given, the stream is probed and
            decoding starts immediately via ``open()``.
        frameSize : tuple of (width, height), optional
            If given, frames are rescaled to this size by the decoder.
        """
        self.filename = filename
        # TODO find either avconv or ffmpeg, remember which one we found
        self.convert_command = "avconv"
        self.probe_command = "avprobe"
        self.proc = None
        # Raw-frame byte buffer; (re)initialized by open().  Set here too so
        # the attribute always exists even before open() is called.
        self.buf = None
        if frameSize:
            self.do_resize = True
            self.width, self.height = frameSize
        else:
            self.do_resize = False
        if self.filename:
            self.info = self.get_info()
            if len(self.info["streams"]) == 0:
                raise ValueError("No streams found")
            if self.info["streams"][0]["codec_type"] != "video":
                raise ValueError("No video stream found")
            self.src_width = self.info["streams"][0]["width"]
            self.src_height = self.info["streams"][0]["height"]
            if not self.do_resize:
                self.width = self.src_width
                self.height = self.src_height
            self.depth = 3  # TODO other depths
            # print "Found video: %d x %d" %(self.width, self.height)
            self.open()

    def open(self):
        """Start (or restart) the decoder subprocess producing raw RGB24 frames."""
        # TODO decide what is best behavior, reopen or leave as it if previously opened
        if self.isOpened():
            self.release()
        cmd = [self.convert_command, '-loglevel', 'error', '-i', self.filename]
        if self.do_resize:
            cmd += ['-vf', 'scale=%d:%d' % (self.width, self.height)]
        cmd += ['-f', 'rawvideo', '-pix_fmt', 'rgb24', '-']
        self.proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        self.buf = b''

    def isOpened(self):
        """Return True if a decoder subprocess is currently attached."""
        # Idiom fix: identity comparison with None instead of `!= None`.
        return self.proc is not None

    def read(self):
        """Read one frame.

        Returns
        -------
        (retval, image)
            ``(True, ndarray)`` on success, where the array has shape
            (height, width, depth) and dtype uint8; ``(False, None)`` at
            end of stream.

        Raises
        ------
        ValueError
            If the decoder exited with an error code, or the stream ended
            with a partial frame.
        """
        retval = True
        nbytes = self.width * self.height * self.depth
        while len(self.buf) < nbytes:
            # Could poll here, but return code never seems to be set before we fail at reading anyway
            # self.proc.poll()
            if self.proc.returncode is not None:
                if self.proc.returncode < 0:
                    raise ValueError("Command exited with return code %d" % (self.proc.returncode))  # TODO subprocess.CalledProcessError?
                else:
                    return False, None
            buf = self.proc.stdout.read(nbytes - len(self.buf))
            # print "Read %d" % (len(buf))
            # Reading no data seems to be a reliable end-of-file indicator; return code is not.
            if len(buf) == 0:
                break
            self.buf += buf
        if len(self.buf) < nbytes:
            # We didn't get any data, assume end-of-file
            if len(self.buf) == 0:
                return False, None
            # We got some data but not enough, this is an error
            else:
                raise ValueError("Not enough data at end of file, expected %d bytes, read %d" % (nbytes, len(self.buf)))
        # numpy.fromstring is deprecated; frombuffer is the modern equivalent.
        # frombuffer returns a read-only view, so copy() preserves the
        # writable-array behavior callers got from fromstring.
        image = numpy.frombuffer(self.buf[:nbytes], dtype=numpy.uint8).reshape(
            (self.height, self.width, self.depth)).copy()
        # If there is data left over, move it to beginning of buffer for next frame
        if len(self.buf) > nbytes:
            self.buf = self.buf[nbytes:]  # TODO this is a relatively slow operation, optimize
        # Otherwise just forget the buffer
        else:
            self.buf = b''
        return retval, image

    def seek(self, time):
        raise NotImplementedError()

    def release(self):
        """Kill the decoder subprocess (if any) and drop buffered data."""
        # Robustness fix: tolerate release() on a capture that was never
        # opened or was already released, instead of raising AttributeError.
        if self.proc is not None:
            self.proc.kill()
        self.proc = None
        self.buf = None

    def get(self, propId):
        # CV_CAP_PROP_FRAME_COUNT
        raise NotImplementedError()

    def set(self, propId, value):
        raise NotImplementedError()

    def get_info(self):
        """Probe the file with avprobe/ffprobe and return the parsed JSON info."""
        # NOTE requires a fairly recent avprobe/ffprobe, older versions don't have -of json and only produce INI-like output
        # TODO parse old INI-like output
        cmd = [self.probe_command] + "-loglevel error -of json -show_format -show_streams".split() + [self.filename]
        output = subprocess.check_output(cmd, universal_newlines=True)
        info = json.loads(output)
        return info
class VideoWriter:
    """Write video by piping raw RGB24 frames to an avconv/ffmpeg subprocess.

    The API is modelled after cv2.VideoWriter.
    """

    def __init__(self, filename, fourcc='XVID', fps=30, frameSize=(640, 480), isColor=True):
        """
        Parameters
        ----------
        filename : str
            Output file path.
        fourcc : str, optional
            Four-character codec code; mapped to an avconv codec name in open().
        fps : int, optional
            Output frame rate.
        frameSize : tuple of (width, height), optional
            Dimensions of the frames that will be written.
        isColor : bool, optional
            Only color (3-channel) output is supported.
        """
        self.filename = filename
        self.convert_command = "avconv"
        self.fourcc = fourcc
        self.fps = fps
        self.width, self.height = frameSize
        self.depth = 3  # TODO other depths
        # BUGFIX: initialize proc so isOpened() works before open() is
        # called instead of raising AttributeError.
        self.proc = None
        if not isColor:
            raise NotImplementedError()

    def open(self):
        """Start the encoder subprocess reading raw frames from stdin."""
        cmd = [self.convert_command, '-loglevel', 'error', '-f', 'rawvideo', '-pix_fmt', 'rgb24', '-s', '%dx%d' % (self.width, self.height), '-r', str(self.fps), '-i', '-']
        # Map common FOURCC codes to avconv codec names; unknown codes are
        # passed through unchanged.
        codecs_map = {
            'XVID': 'mpeg4',
            'DIVX': 'mpeg4',
            'H264': 'libx264',
            'MJPG': 'mjpeg',
        }
        if self.fourcc in codecs_map:
            vcodec = codecs_map[self.fourcc]
        else:
            vcodec = self.fourcc
        cmd += ['-vcodec', vcodec]
        cmd += [self.filename]
        self.proc = subprocess.Popen(cmd, stdin=subprocess.PIPE)

    def isOpened(self):
        """Return True if an encoder subprocess is currently attached."""
        return self.proc is not None

    def write(self, image):
        """Write one (height, width, depth) frame; raises ValueError on shape mismatch."""
        if image.shape[0] != self.height or image.shape[1] != self.width or image.shape[2] != self.depth:
            raise ValueError('Image dimensions do not match')
        # ndarray.tostring() is deprecated; tobytes() is the identical
        # modern spelling.
        self.proc.stdin.write(image.astype(numpy.uint8).tobytes())

    def release(self):
        """Close stdin (flushing the encoder), wait for it to exit, and detach."""
        self.proc.stdin.close()
        self.proc.wait()
        self.proc = None
|
from .providers import METRICS_PROVIDERS
def get_provider_choices():
    """Returns a list of currently available metrics providers
    suitable for use as model fields choices.
    """
    # Each choice is an (alias, title) pair, as Django expects.
    return [(provider.alias, provider.title) for provider in METRICS_PROVIDERS]
def get_providers_by_alias():
    """Returns a dictionary with currently available metrics providers
    classes indexed by their aliases.
    """
    return {provider.alias: provider for provider in METRICS_PROVIDERS}
|
import pandas as pd
def concurrent_cagetreatment(df, cagestays,
    protect_duplicates=None,
    ):
    """
    Return a `pandas.DataFrame` object containing only `Cage_Treatment*` entries which are concurrent with the animal stay in the cage to which they were administered.

    Parameters
    ----------

    df : pandas.DataFrame
        Pandas Dataframe, with columns containing:
        `Animal_id`,
        `Animal_death_date`,
        `CageStay_start_date`,
        `Cage_Treatment_start_date`,
        `Cage_TreatmentProtocol_code`.
    cagestays : pandas.DataFrame
        Pandas Dataframe, with columns containing:
        `Animal_id`,
        `CageStay_end_date`,
        `CageStay_start_date`,
    protect_duplicates : list of str, optional
        Column names sheltered from duplicate-dropping (currently only
        relevant to the commented-out deduplication step at the end).
        Defaults to the treatment/cage identifier columns.

    Notes
    -----

    This function checks whether cage-level treatment onsets indeed happened during the period in which the animal was housed in the cage.
    We do not check for the treatment end dates, as an animal which has received a partial treatment has received a treatment.
    Checks for treatment discontinuation due to e.g. death should be performed elsewhere.
    """
    if protect_duplicates is None:
        # BUGFIX: previously a mutable default argument (shared between
        # calls) which also contained a duplicated 'Treatment_end_date'
        # entry.
        protect_duplicates = [
            'Animal_id',
            'Cage_id',
            'Cage_Treatment_start_date',
            'Cage_Treatment_end_date',
            'Cage_TreatmentProtocol_code',
            'Treatment_end_date',
            'TreatmentProtocol_code',
        ]
    drop_idx = []
    for subject in list(df['Animal_id'].unique()):
        stay_starts = df[df['Animal_id'] == subject]['CageStay_start_date'].tolist()
        # The per-animal treatment info is recorded in each table row, but if the animal only has one cage stay without a cage treatment, it will be deleted, taking the animal treatment information with it.
        # We avoid this here:
        blank_cells_only = False
        if len(stay_starts) == 1:
            # Idiom fix: identity comparison with None instead of `!= None`.
            if df.loc[df['Animal_id'] == subject, 'TreatmentProtocol_code'].item() is not None:
                blank_cells_only = True
        for stay_start in stay_starts:
            stay_end = cagestays[(cagestays['Animal_id'] == subject) & (cagestays['CageStay_start_date'] == stay_start)]['CageStay_end_date'].tolist()[0]
            treatment_start = df[(df['Animal_id'] == subject) & (df['CageStay_start_date'] == stay_start)]['Cage_Treatment_start_date'].tolist()[0]
            death_date = df[df['Animal_id'] == subject]['Animal_death_date'].tolist()[0]
            # We do not check for treatment end dates, because often you may want to include recipients of incomplete treatments (e.g. due to death) when filtering based on cagestays.
            # Filtering based on death should be done elsewhere.
            if treatment_start <= stay_start or treatment_start >= stay_end or treatment_start >= death_date:
                if blank_cells_only:
                    # Keep the row but blank out the cage-treatment fields.
                    df.loc[df['Animal_id'] == subject, ['Cage_TreatmentProtocol_code', 'Cage_Treatment_start_date', 'Cage_Treatment_end_date', 'Cage_Treatment_protocol_id']] = None
                else:
                    drop_idx.extend(df[(df['Animal_id'] == subject) & (df['CageStay_start_date'] == stay_start)].index.tolist())
    df = df.drop(drop_idx)
    #df = df.drop_duplicates(subset=protect_duplicates)
    return df
def make_identifier_short_form(df,
    index_name="Animal_id"):
    """
    Convert the long form `AnimalExternalIdentifier_identifier` column of a `pandas.DataFrame` to short-form identifier columns named after the corresponding values on the `AnimalExternalIdentifier_database` column.

    Parameters
    ----------

    df : pandas.DataFrame
        A `pandas.DataFrame` object containing a long-form `AnimalExternalIdentifier_identifier` column and a dedicated `AnimalExternalIdentifier_database` column.
    index_name : str, optonal
        The name of a column from `df`, the values of which can be rendered unique. This column will serve as the index o the resulting dataframe.
    """
    renamed = df.rename(columns={'AnimalExternalIdentifier_animal_id': 'Animal_id'})
    # Long-to-wide pivot: one row per index value, one column per database.
    identifiers = renamed.set_index([index_name, 'AnimalExternalIdentifier_database'])['AnimalExternalIdentifier_identifier']
    return identifiers.unstack(1)
def collapse_rename(df, groupby, collapse,
    rename=False,
    ):
    """
    Collapse long form columns according to a lambda function, so that groupby column values are rendered unique

    Parameters
    ----------

    df : pandas.DataFrame
        A `pandas.DataFrame` object which you want to collapse.
    groupby : string
        The name of a column from `df`, the values of which you want to render unique.
    collapse : dict
        A dictionary the keys of which are columns you want to collapse, and the values of which are lambda functions instructing how to collapse (e.g. concatenate) the values.
    rename : dict, optional
        A dictionary the keys of which are names of columns from `df`, and the values of which are new names for these columns.
    """
    collapsed = df.groupby(groupby).agg(collapse)
    # Only rename when a mapping was actually supplied.
    return collapsed.rename(columns=rename) if rename else collapsed
def relativize_dates(df,
    date_suffix='_date',
    rounding='D',
    rounding_type='round',
    reference_date=True,
    ):
    """
    Express dates on each row of a Pandas Dataframe as datetime objects relative to the row value on the 'reference_date' column.

    Parameters
    ----------

    df : pandas.DataFrame
        Pandas Dataframe, with columns containing 'reference_date' and strings ending in `date_suffix`.
    date_suffix : str, optional
        String sufix via which to identify date columns needing manipulation.
    rounding : str, optional
        Datetime increment for date rounding.
    rounding_type : {'round','floor','ceil'}, optional
        Whether to round the dates (splits e.g. days apart at noon, hours at 30 minutes, etc.) or to take the floor or the ceiling.
    reference_date : bool or str, optional
        If True, use the 'Cage_Treatment_start_date' column as the reference;
        if a string, use the column of that name.
    """
    # Establish the per-row reference column.
    if isinstance(reference_date, str):
        df['reference_date'] = df[reference_date]
    elif isinstance(reference_date, bool) and reference_date:
        df['reference_date'] = df['Cage_Treatment_start_date']

    candidate_columns = [col for col in df.columns.tolist() if col.endswith(date_suffix)]
    for col in candidate_columns:
        try:
            relative = df[col] - df['reference_date']
        except TypeError:
            # Column merely matches the suffix but is not date-like; skip it.
            continue
        df[col] = relative
        if rounding:
            # Anchor the timedeltas at the Unix epoch so the .dt rounding
            # accessors for datetimes can be applied.
            shifted = df[col] + pd.to_datetime('1970-01-01')
            if rounding_type == 'round':
                shifted = shifted.dt.round(rounding)
            elif rounding_type == 'floor':
                shifted = shifted.dt.floor(rounding)
            elif rounding_type == 'ceil':
                shifted = shifted.dt.ceil(rounding)
            df[col] = shifted
    return df
|
"""A utility for generating classes for structured metrics events.
It takes as input a structured.xml file describing all events and produces a
c++ header and implementation file exposing builders for those events.
"""
import argparse
import sys
import model
import events_template
import compile_time_validation
# Command-line interface: path to the structured.xml description and the
# directory where the generated C++ files will be written.
parser = argparse.ArgumentParser(
    description='Generate structured metrics events')
parser.add_argument('--input', help='Path to structured.xml')
parser.add_argument('--output', help='Path to generated files.')
def main(argv):
    """Generate structured-metrics event builders from structured.xml.

    Args:
      argv: process argument vector; argv[1:] is parsed with the module-level
        parser.

    Returns:
      0 on success; exceptions propagate on failure.
    """
    # BUGFIX: honour the argv parameter instead of implicitly reading
    # sys.argv, so callers and tests can supply their own argument vector.
    args = parser.parse_args(argv[1:])
    # Use a context manager so the input file handle is closed promptly
    # (previously open(...).read() leaked the handle).
    with open(args.input) as f:
        data = model.XML_TYPE.Parse(f.read())
    relpath = 'components/metrics/structured'
    compile_time_validation.validate(data)
    events_template.WriteFiles(args.output, relpath, data)
    return 0
# Script entry point.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
from django.utils.translation import ugettext_noop
from django.utils.translation import ugettext as _
import pytz
from corehq.apps.hqcase.dbaccessors import get_cases_in_domain
from corehq.apps.reports.standard import CustomProjectReport
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.sms.models import ExpectedCallback, CALLBACK_PENDING, CALLBACK_RECEIVED, CALLBACK_MISSED
from datetime import datetime, timedelta, time
from corehq.util.timezones.conversions import ServerTime
from dimagi.utils.parsing import json_format_date
class MissedCallbackReport(CustomProjectReport, GenericTabularReport):
    """Tabular report: two weeks of SMS/callback interactions per participant.

    One row per open 'participant' case; one column per local calendar day of
    the previous 14 days, showing that day's callback status, plus summary
    count columns.
    """
    name = ugettext_noop("Missed Callbacks")
    slug = "missed_callbacks"
    description = ugettext_noop("Summarizes two weeks of SMS / Callback interactions for all participants.")
    flush_layout = True

    def get_past_two_weeks(self):
        """Return the previous 14 calendar days (as ``date`` objects) in the
        report viewer's timezone, oldest first."""
        now = datetime.utcnow()
        local_datetime = ServerTime(now).user_time(self.timezone).done()
        return [(local_datetime + timedelta(days = x)).date() for x in range(-14, 0)]

    @property
    def headers(self):
        # Summary columns first, then one column per day of the window.
        args = [
            DataTablesColumn(_("Participant ID")),
            DataTablesColumn(_("Total No Response")),
            DataTablesColumn(_("Total Indicated")),
            DataTablesColumn(_("Total Pending")),
        ]
        args += [DataTablesColumn(date.strftime("%b %d")) for date in self.get_past_two_weeks()]
        return DataTablesHeader(*args)

    @property
    def rows(self):
        # Site coordinators (CommCare users) only see their first group's
        # participants; other users see every participant in the domain.
        group_id = None
        if self.request.couch_user.is_commcare_user():
            group_ids = self.request.couch_user.get_group_ids()
            if len(group_ids) > 0:
                group_id = group_ids[0]

        # Collect open participant cases, keyed by case id, with a
        # 14-slot per-day status list initialized to None ("not indicated").
        data = {}
        for case in get_cases_in_domain(self.domain, type='participant'):
            if case.closed:
                continue
            # If a site coordinator is viewing the report, only show participants from that site (group)
            if group_id is None or group_id == case.owner_id:
                timezone = pytz.timezone(case.get_case_property("time_zone"))
                data[case._id] = {
                    "name": case.name,
                    "time_zone": timezone,
                    "dates": [None] * 14,
                }

        dates = self.get_past_two_weeks()
        date_strings = [json_format_date(date) for date in dates]

        # Pad the query window by a day on each side so events near the
        # boundary survive the timezone conversion below.
        start_date = dates[0] - timedelta(days=1)
        end_date = dates[-1] + timedelta(days=2)

        expected_callback_events = ExpectedCallback.by_domain(
            self.domain,
            start_date=datetime.combine(start_date, time(0, 0)),
            end_date=datetime.combine(end_date, time(0, 0))
        ).order_by('date')

        # Slot each event's status into the participant's per-day list,
        # using the participant's own timezone to pick the day.
        for event in expected_callback_events:
            if event.couch_recipient in data:
                timezone = data[event.couch_recipient]["time_zone"]
                event_date = (ServerTime(event.date).user_time(timezone)
                              .ui_string("%Y-%m-%d"))
                if event_date in date_strings:
                    data[event.couch_recipient]["dates"][date_strings.index(event_date)] = event.status

        # Render one table row per participant; summary cells (indices 1-3)
        # are filled in after the per-day cells are tallied.
        result = []
        for case_id, data_dict in data.items():
            row = [
                self._fmt(data_dict["name"]),
                None,
                None,
                None,
            ]

            total_no_response = 0
            total_indicated = 0
            total_pending = 0

            for date_status in data_dict["dates"]:
                if date_status == CALLBACK_PENDING:
                    total_indicated += 1
                    total_pending += 1
                    row.append(self._fmt(_("pending")))
                elif date_status == CALLBACK_RECEIVED:
                    total_indicated += 1
                    row.append(self._fmt(_("OK")))
                elif date_status == CALLBACK_MISSED:
                    total_indicated += 1
                    total_no_response += 1
                    row.append(self._fmt_highlight(_("No Response")))
                else:
                    row.append(self._fmt(_("not indicated")))

            # Highlight the no-response total whenever it is non-zero.
            if total_no_response > 0:
                row[1] = self._fmt_highlight(total_no_response)
            else:
                row[1] = self._fmt(total_no_response)

            row[2] = self._fmt(total_indicated)
            row[3] = self._fmt(total_pending)

            result.append(row)

        return result

    def _fmt(self, value):
        # Plain centered table cell.
        return self.table_cell(value, '<div style="text-align:center">%s</div>' % value)

    def _fmt_highlight(self, value):
        # Red, bold, centered cell used to flag missed callbacks.
        return self.table_cell(value, '<div style="background-color:#f33; font-weight:bold; text-align:center">%s</div>' % value)
# Report registry consumed by the HQ reports framework:
# (section title, (report classes, ...)) pairs.
CUSTOM_REPORTS = (
    ('Custom Reports', (
        MissedCallbackReport,
    )),
)
|
"""
Class for reading data in CED spike2 files (.smr).
This code is based on:
- sonpy, written by Antonio Gonzalez <Antonio.Gonzalez@cantab.net>
Available here ::
http://www.neuro.ki.se/broberger/
and sonpy come from :
- SON Library 2.0 for MATLAB, written by Malcolm Lidierth at
King's College London.
See http://www.kcl.ac.uk/depsta/biomedical/cfnr/lidierth.html
This IO support old (<v6) and new files (>v7) of spike2
Author: Samuel Garcia
"""
from __future__ import print_function, division, absolute_import
from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
_event_channel_dtype)
import numpy as np
from collections import OrderedDict
class Spike2RawIO(BaseRawIO):
"""
"""
extensions = ['smr']
rawmode = 'one-file'
def __init__(self, filename='', take_ideal_sampling_rate=False, ced_units=True,
             try_signal_grouping=True):
    """Store reader configuration; the file is parsed later in _parse_header().

    Parameters
    ----------
    filename : str
        Path to the .smr file to read.
    take_ideal_sampling_rate : bool
        NOTE(review): consumed during header parsing elsewhere in this
        class — presumably selects the nominal rate over the stored
        per-channel rate; confirm in _parse_header.
    ced_units : bool
        NOTE(review): presumably controls whether CED's own unit sorting
        is used for spikes; confirm in the parsing code.
    try_signal_grouping : bool
        NOTE(review): presumably attempts to group analog channels into
        shared signal streams; confirm in the parsing code.
    """
    BaseRawIO.__init__(self)
    self.filename = filename
    self.take_ideal_sampling_rate = take_ideal_sampling_rate
    self.ced_units = ced_units
    self.try_signal_grouping = try_signal_grouping
def _parse_header(self):
# get header info and channel_info
with open(self.filename, 'rb') as fid:
self._global_info = read_as_dict(fid, headerDescription)
info = self._global_info
if info['system_id'] < 6:
info['dtime_base'] = 1e-6
info['datetime_detail'] = 0
info['datetime_year'] = 0
self._time_factor = info['us_per_time'] * info['dtime_base']
self._channel_infos = []
for chan_id in range(info['channels']):
fid.seek(512 + 140 * chan_id)
chan_info = read_as_dict(fid, channelHeaderDesciption1)
if chan_info['kind'] in [1, 6]:
dt = [('scale', 'f4'), ('offset', 'f4'), ('unit', 'S6'), ]
chan_info.update(read_as_dict(fid, dt))
elif chan_info['kind'] in [7, 9]:
dt = [('min', 'f4'), ('max', 'f4'), ('unit', 'S6'), ]
chan_info.update(read_as_dict(fid, dt))
elif chan_info['kind'] in [4]:
dt = [('init_low', 'u1'), ('next_low', 'u1'), ]
chan_info.update(read_as_dict(fid, dt))
if chan_info['kind'] in [1, 6, 7, 9]:
if info['system_id'] < 6:
chan_info.update(read_as_dict(fid, [('divide', 'i2')]))
else:
chan_info.update(read_as_dict(fid, [('interleave', 'i2')]))
chan_info['type'] = dict_kind[chan_info['kind']]
if chan_info['blocks'] == 0:
chan_info['t_start'] = 0. # this means empty signals
else:
fid.seek(chan_info['firstblock'])
block_info = read_as_dict(fid, blockHeaderDesciption)
chan_info['t_start'] = float(block_info['start_time']) * \
float(info['us_per_time']) * float(info['dtime_base'])
self._channel_infos.append(chan_info)
# get data blocks index for all channel
# run through all data block of of channel to prepare chan to block maps
self._memmap = np.memmap(self.filename, dtype='u1', offset=0, mode='r')
self._all_data_blocks = {}
self._by_seg_data_blocks = {}
for chan_id, chan_info in enumerate(self._channel_infos):
data_blocks = []
ind = chan_info['firstblock']
for b in range(chan_info['blocks']):
block_info = self._memmap[ind:ind + 20].view(blockHeaderDesciption)[0]
data_blocks.append((ind, block_info['items'], 0,
block_info['start_time'], block_info['end_time']))
ind = block_info['succ_block']
data_blocks = np.array(data_blocks, dtype=[(
'pos', 'int32'), ('size', 'int32'), ('cumsum', 'int32'),
('start_time', 'int32'), ('end_time', 'int32')])
data_blocks['pos'] += 20 # 20 is ths header size
self._all_data_blocks[chan_id] = data_blocks
self._by_seg_data_blocks[chan_id] = []
# For all signal channel detect gaps between data block (pause in rec) so new Segment.
# then check that all channel have the same gaps.
# this part is tricky because we need to check that all channel have same pause.
all_gaps_block_ind = {}
for chan_id, chan_info in enumerate(self._channel_infos):
if chan_info['kind'] in [1, 9]:
data_blocks = self._all_data_blocks[chan_id]
sig_size = np.sum(self._all_data_blocks[chan_id]['size'])
if sig_size > 0:
interval = get_sample_interval(info, chan_info) / self._time_factor
# detect gaps
inter_block_sizes = data_blocks['start_time'][1:] - \
data_blocks['end_time'][:-1]
gaps_block_ind, = np.nonzero(inter_block_sizes > interval)
all_gaps_block_ind[chan_id] = gaps_block_ind
# find t_start/t_stop for each seg based on gaps indexe
self._sig_t_starts = {}
self._sig_t_stops = {}
if len(all_gaps_block_ind) == 0:
# this means no signal channels
nb_segment = 1
# loop over event/spike channel to get the min/max time
t_start, t_stop = None, None
for chan_id, chan_info in enumerate(self._channel_infos):
data_blocks = self._all_data_blocks[chan_id]
if data_blocks.size > 0:
# if t_start is None or data_blocks[0]['start_time']<t_start:
# t_start = data_blocks[0]['start_time']
if t_stop is None or data_blocks[-1]['end_time'] > t_stop:
t_stop = data_blocks[-1]['end_time']
# self._seg_t_starts = [t_start]
self._seg_t_starts = [0]
self._seg_t_stops = [t_stop]
else:
all_nb_seg = np.array([v.size + 1 for v in all_gaps_block_ind.values()])
assert np.all(all_nb_seg[0] == all_nb_seg), \
'Signal channel have differents pause so diffrents nb_segment'
nb_segment = int(all_nb_seg[0])
for chan_id, gaps_block_ind in all_gaps_block_ind.items():
data_blocks = self._all_data_blocks[chan_id]
self._sig_t_starts[chan_id] = []
self._sig_t_stops[chan_id] = []
for seg_ind in range(nb_segment):
if seg_ind == 0:
fisrt_bl = 0
else:
fisrt_bl = gaps_block_ind[seg_ind - 1] + 1
self._sig_t_starts[chan_id].append(data_blocks[fisrt_bl]['start_time'])
if seg_ind < nb_segment - 1:
last_bl = gaps_block_ind[seg_ind]
else:
last_bl = data_blocks.size - 1
self._sig_t_stops[chan_id].append(data_blocks[last_bl]['end_time'])
in_seg_data_block = data_blocks[fisrt_bl:last_bl + 1]
in_seg_data_block['cumsum'][1:] = np.cumsum(in_seg_data_block['size'][:-1])
self._by_seg_data_blocks[chan_id].append(in_seg_data_block)
self._seg_t_starts = []
self._seg_t_stops = []
for seg_ind in range(nb_segment):
# there is a small delay between all channel so take the max/min for t_start/t_stop
t_start = min(
self._sig_t_starts[chan_id][seg_ind] for chan_id in self._sig_t_starts)
t_stop = max(self._sig_t_stops[chan_id][seg_ind] for chan_id in self._sig_t_stops)
self._seg_t_starts.append(t_start)
self._seg_t_stops.append(t_stop)
# create typed channels
sig_channels = []
unit_channels = []
event_channels = []
self.internal_unit_ids = {}
for chan_id, chan_info in enumerate(self._channel_infos):
if chan_info['kind'] in [1, 6, 7, 9]:
if self.take_ideal_sampling_rate:
sampling_rate = info['ideal_rate']
else:
sample_interval = get_sample_interval(info, chan_info)
sampling_rate = (1. / sample_interval)
name = chan_info['title']
if chan_info['kind'] in [1, 9]:
# AnalogSignal
if chan_id not in self._sig_t_starts:
continue
units = chan_info['unit']
if chan_info['kind'] == 1: # int16
gain = chan_info['scale'] / 6553.6
offset = chan_info['offset']
sig_dtype = 'int16'
elif chan_info['kind'] == 9: # float32
gain = 1.
offset = 0.
sig_dtype = 'int32'
group_id = 0
sig_channels.append((name, chan_id, sampling_rate, sig_dtype,
units, gain, offset, group_id))
elif chan_info['kind'] in [2, 3, 4, 5, 8]:
# Event
event_channels.append((name, chan_id, 'event'))
elif chan_info['kind'] in [6, 7]: # SpikeTrain with waveforms
wf_units = chan_info['unit']
if chan_info['kind'] == 6:
wf_gain = chan_info['scale'] / 6553.6
wf_offset = chan_info['offset']
wf_left_sweep = chan_info['n_extra'] // 4
elif chan_info['kind'] == 7:
wf_gain = 1.
wf_offset = 0.
wf_left_sweep = chan_info['n_extra'] // 8
wf_sampling_rate = sampling_rate
if self.ced_units:
# this is a hudge pain because need
# to jump over all blocks
data_blocks = self._all_data_blocks[chan_id]
dt = get_channel_dtype(chan_info)
unit_ids = set()
for bl in range(data_blocks.size):
ind0 = data_blocks[bl]['pos']
ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
raw_data = self._memmap[ind0:ind1].view(dt)
marker = raw_data['marker'] & 255
unit_ids.update(np.unique(marker))
unit_ids = sorted(list(unit_ids))
else:
# All spike from one channel are group in one SpikeTrain
unit_ids = ['all']
for unit_id in unit_ids:
unit_index = len(unit_channels)
self.internal_unit_ids[unit_index] = (chan_id, unit_id)
_id = "ch{}#{}".format(chan_id, unit_id)
unit_channels.append((name, _id, wf_units, wf_gain, wf_offset,
wf_left_sweep, wf_sampling_rate))
sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
event_channels = np.array(event_channels, dtype=_event_channel_dtype)
if len(sig_channels) > 0:
if self.try_signal_grouping:
# try to group signals channel if same sampling_rate/dtype/...
# it can raise error for some files (when they do not have signal length)
common_keys = ['sampling_rate', 'dtype', 'units', 'gain', 'offset']
characteristics = sig_channels[common_keys]
unique_characteristics = np.unique(characteristics)
self._sig_dtypes = {}
for group_id, charact in enumerate(unique_characteristics):
chan_grp_indexes, = np.nonzero(characteristics == charact)
sig_channels['group_id'][chan_grp_indexes] = group_id
# check same size for channel in groups
for seg_index in range(nb_segment):
sig_sizes = []
for ind in chan_grp_indexes:
chan_id = sig_channels[ind]['id']
sig_size = np.sum(self._by_seg_data_blocks[chan_id][seg_index]['size'])
sig_sizes.append(sig_size)
sig_sizes = np.array(sig_sizes)
assert np.all(sig_sizes == sig_sizes[0]),\
'Signal channel in groups do not have same size'\
', use try_signal_grouping=False'
self._sig_dtypes[group_id] = np.dtype(charact['dtype'])
else:
# if try_signal_grouping fail the user can try to split each channel in
# separate group
sig_channels['group_id'] = np.arange(sig_channels.size)
self._sig_dtypes = {s['group_id']: np.dtype(s['dtype']) for s in sig_channels}
# fille into header dict
self.header = {}
self.header['nb_block'] = 1
self.header['nb_segment'] = [nb_segment]
self.header['signal_channels'] = sig_channels
self.header['unit_channels'] = unit_channels
self.header['event_channels'] = event_channels
# Annotations
self._generate_minimal_annotations()
bl_ann = self.raw_annotations['blocks'][0]
bl_ann['system_id'] = info['system_id']
seg_ann = bl_ann['segments'][0]
seg_ann['system_id'] = info['system_id']
for c, sig_channel in enumerate(sig_channels):
chan_id = sig_channel['id']
anasig_an = seg_ann['signals'][c]
anasig_an['physical_channel_index'] = self._channel_infos[chan_id]['phy_chan']
anasig_an['comment'] = self._channel_infos[chan_id]['comment']
for c, unit_channel in enumerate(unit_channels):
chan_id, unit_id = self.internal_unit_ids[c]
unit_an = seg_ann['units'][c]
unit_an['physical_channel_index'] = self._channel_infos[chan_id]['phy_chan']
unit_an['comment'] = self._channel_infos[chan_id]['comment']
for c, event_channel in enumerate(event_channels):
chan_id = int(event_channel['id'])
ev_an = seg_ann['events'][c]
ev_an['physical_channel_index'] = self._channel_infos[chan_id]['phy_chan']
ev_an['comment'] = self._channel_infos[chan_id]['comment']
def _source_name(self):
return self.filename
def _segment_t_start(self, block_index, seg_index):
return self._seg_t_starts[seg_index] * self._time_factor
def _segment_t_stop(self, block_index, seg_index):
return self._seg_t_stops[seg_index] * self._time_factor
def _check_channel_indexes(self, channel_indexes):
if channel_indexes is None:
channel_indexes = slice(None)
channel_indexes = np.arange(self.header['signal_channels'].size)[channel_indexes]
return channel_indexes
def _get_signal_size(self, block_index, seg_index, channel_indexes):
channel_indexes = self._check_channel_indexes(channel_indexes)
chan_id = self.header['signal_channels'][channel_indexes[0]]['id']
sig_size = np.sum(self._by_seg_data_blocks[chan_id][seg_index]['size'])
return sig_size
def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
channel_indexes = self._check_channel_indexes(channel_indexes)
chan_id = self.header['signal_channels'][channel_indexes[0]]['id']
return self._sig_t_starts[chan_id][seg_index] * self._time_factor
def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
if i_start is None:
i_start = 0
if i_stop is None:
i_stop = self._get_signal_size(block_index, seg_index, channel_indexes)
channel_indexes = self._check_channel_indexes(channel_indexes)
chan_index = channel_indexes[0]
chan_id = self.header['signal_channels'][chan_index]['id']
group_id = self.header['signal_channels'][channel_indexes[0]]['group_id']
dt = self._sig_dtypes[group_id]
raw_signals = np.zeros((i_stop - i_start, len(channel_indexes)), dtype=dt)
for c, channel_index in enumerate(channel_indexes):
# NOTE: this actual way is slow because we run throught
# the file for each channel. The loop should be reversed.
# But there is no garanty that channels shared the same data block
# indexes. So this make the job too difficult.
chan_header = self.header['signal_channels'][channel_index]
chan_id = chan_header['id']
data_blocks = self._by_seg_data_blocks[chan_id][seg_index]
# loop over data blocks and get chunks
bl0 = np.searchsorted(data_blocks['cumsum'], i_start, side='left')
bl1 = np.searchsorted(data_blocks['cumsum'], i_stop, side='left')
ind = 0
for bl in range(bl0, bl1):
ind0 = data_blocks[bl]['pos']
ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
data = self._memmap[ind0:ind1].view(dt)
if bl == bl1 - 1:
# right border
# be carfull that bl could be both bl0 and bl1!!
border = data.size - (i_stop - data_blocks[bl]['cumsum'])
if border > 0:
data = data[:-border]
if bl == bl0:
# left border
border = i_start - data_blocks[bl]['cumsum']
data = data[border:]
raw_signals[ind:data.size + ind, c] = data
ind += data.size
return raw_signals
def _count_in_time_slice(self, seg_index, chan_id, lim0, lim1, marker_filter=None):
# count event or spike in time slice
data_blocks = self._all_data_blocks[chan_id]
chan_info = self._channel_infos[chan_id]
dt = get_channel_dtype(chan_info)
nb = 0
for bl in range(data_blocks.size):
ind0 = data_blocks[bl]['pos']
ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
raw_data = self._memmap[ind0:ind1].view(dt)
ts = raw_data['tick']
keep = (ts >= lim0) & (ts <= lim1)
if marker_filter is not None:
keep2 = (raw_data['marker'] & 255) == marker_filter
keep = keep & keep2
nb += np.sum(keep)
if ts[-1] > lim1:
break
return nb
def _get_internal_timestamp_(self, seg_index, chan_id,
t_start, t_stop, other_field=None, marker_filter=None):
chan_info = self._channel_infos[chan_id]
# data_blocks = self._by_seg_data_blocks[chan_id][seg_index]
data_blocks = self._all_data_blocks[chan_id]
dt = get_channel_dtype(chan_info)
if t_start is None:
# lim0 = 0
lim0 = self._seg_t_starts[seg_index]
else:
lim0 = int(t_start / self._time_factor)
if t_stop is None:
# lim1 = 2**32
lim1 = self._seg_t_stops[seg_index]
else:
lim1 = int(t_stop / self._time_factor)
timestamps = []
othervalues = []
for bl in range(data_blocks.size):
ind0 = data_blocks[bl]['pos']
ind1 = data_blocks[bl]['size'] * dt.itemsize + ind0
raw_data = self._memmap[ind0:ind1].view(dt)
ts = raw_data['tick']
keep = (ts >= lim0) & (ts <= lim1)
if marker_filter is not None:
keep2 = (raw_data['marker'] & 255) == marker_filter
keep = keep & keep2
timestamps.append(ts[keep])
if other_field is not None:
othervalues.append(raw_data[other_field][keep])
if ts[-1] > lim1:
break
if len(timestamps) > 0:
timestamps = np.concatenate(timestamps)
else:
timestamps = np.zeros(0, dtype='int16')
if other_field is None:
return timestamps
else:
if len(timestamps) > 0:
othervalues = np.concatenate(othervalues)
else:
othervalues = np.zeros(0, dtype=dt.fields[other_field][0])
return timestamps, othervalues
def _spike_count(self, block_index, seg_index, unit_index):
chan_id, unit_id = self.internal_unit_ids[unit_index]
if self.ced_units:
marker_filter = unit_id
else:
marker_filter = None
lim0 = self._seg_t_starts[seg_index]
lim1 = self._seg_t_stops[seg_index]
return self._count_in_time_slice(seg_index, chan_id,
lim0, lim1, marker_filter=marker_filter)
def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
unit_header = self.header['unit_channels'][unit_index]
chan_id, unit_id = self.internal_unit_ids[unit_index]
if self.ced_units:
marker_filter = unit_id
else:
marker_filter = None
spike_timestamps = self._get_internal_timestamp_(seg_index,
chan_id, t_start, t_stop,
marker_filter=marker_filter)
return spike_timestamps
def _rescale_spike_timestamp(self, spike_timestamps, dtype):
spike_times = spike_timestamps.astype(dtype)
spike_times *= self._time_factor
return spike_times
def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index, t_start, t_stop):
unit_header = self.header['unit_channels'][unit_index]
chan_id, unit_id = self.internal_unit_ids[unit_index]
if self.ced_units:
marker_filter = unit_id
else:
marker_filter = None
timestamps, waveforms = self._get_internal_timestamp_(seg_index, chan_id,
t_start, t_stop,
other_field='waveform',
marker_filter=marker_filter)
waveforms = waveforms.reshape(timestamps.size, 1, -1)
return waveforms
def _event_count(self, block_index, seg_index, event_channel_index):
event_header = self.header['event_channels'][event_channel_index]
chan_id = int(event_header['id']) # because set to string in header
lim0 = self._seg_t_starts[seg_index]
lim1 = self._seg_t_stops[seg_index]
return self._count_in_time_slice(seg_index, chan_id, lim0, lim1, marker_filter=None)
def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
event_header = self.header['event_channels'][event_channel_index]
chan_id = int(event_header['id']) # because set to string in header
chan_info = self._channel_infos[chan_id]
if chan_info['kind'] == 5:
timestamps, labels = self._get_internal_timestamp_(seg_index,
chan_id, t_start, t_stop,
other_field='marker')
elif chan_info['kind'] == 8:
timestamps, labels = self._get_internal_timestamp_(seg_index,
chan_id, t_start, t_stop,
other_field='label')
else:
timestamps = self._get_internal_timestamp_(seg_index,
chan_id, t_start, t_stop, other_field=None)
labels = np.zeros(timestamps.size, dtype='U')
labels = labels.astype('U')
durations = None
return timestamps, durations, labels
def _rescale_event_timestamp(self, event_timestamps, dtype):
event_times = event_timestamps.astype(dtype)
event_times *= self._time_factor
return event_times
def read_as_dict(fid, dtype):
    """Read one binary struct from *fid* (already seeked to the right place).

    *dtype* is a numpy dtype spec describing the struct. Byte-string fields
    are decoded (iso-8859-1) and treated as Pascal strings: the first char
    holds the length of the payload that follows.

    Returns an OrderedDict mapping field name to value.
    """
    struct_dtype = np.dtype(dtype)
    record = np.frombuffer(fid.read(struct_dtype.itemsize), struct_dtype)[0]
    info = OrderedDict()
    for field in struct_dtype.names:
        value = record[field]
        if struct_dtype[field].kind == 'S':
            value = value.decode('iso-8859-1')
            if value:
                # Pascal string: first char encodes the payload length.
                length = ord(value[0])
                value = value[1:length + 1]
        info[field] = value
    return info
def get_channel_dtype(chan_info):
    """Return the numpy dtype of one stored data item for a channel.

    The layout depends on the channel 'kind'; waveform and text kinds also
    use 'n_extra' (extra bytes per item) to size the variable part.
    """
    kind = chan_info['kind']
    if kind == 1:  # Adc: raw int16 samples
        spec = 'int16'
    elif kind in (2, 3, 4):  # Event data: bare timestamps
        spec = [('tick', 'i4')]
    elif kind == 5:  # Marker data
        spec = [('tick', 'i4'), ('marker', 'i4')]
    elif kind == 6:  # AdcMark data: marker + int16 waveform
        spec = [('tick', 'i4'), ('marker', 'i4'),
                ('waveform', 'int16', chan_info['n_extra'] // 2)]
    elif kind == 7:  # RealMark data: marker + float32 waveform
        spec = [('tick', 'i4'), ('marker', 'i4'),
                ('waveform', 'float32', chan_info['n_extra'] // 4)]
    elif kind == 8:  # TextMark data: marker + fixed-size text label
        spec = [('tick', 'i4'), ('marker', 'i4'),
                ('label', 'S%d' % chan_info['n_extra'])]
    elif kind == 9:  # RealWave: raw float32 samples
        spec = 'float32'
    return np.dtype(spec)
def get_sample_interval(info, chan_info):
    """Return the sampling interval, in seconds, for one channel.

    Files with system_id <= 5 derive it from the per-channel ADC divider;
    newer files store a direct divider against the file time base.
    """
    if info['system_id'] in (1, 2, 3, 4, 5):  # file versions up to 5
        ticks = chan_info['divide'] * info['us_per_time'] * info['time_per_adc']
        return ticks * 1e-6
    return chan_info['l_chan_dvd'] * info['us_per_time'] * info['dtime_base']
# Binary layout (numpy field list) of the global SMR file header at offset 0.
# Fields marked "# i8" are presumably widened to 64 bits in the newer
# file variant -- TODO confirm against the CED SON specification.
headerDescription = [
    ('system_id', 'i2'),
    ('copyright', 'S10'),
    ('creator', 'S8'),
    ('us_per_time', 'i2'),
    ('time_per_adc', 'i2'),
    ('filestate', 'i2'),
    ('first_data', 'i4'),  # i8
    ('channels', 'i2'),
    ('chan_size', 'i2'),
    ('extra_data', 'i2'),
    ('buffersize', 'i2'),
    ('os_format', 'i2'),
    ('max_ftime', 'i4'),  # i8
    ('dtime_base', 'f8'),
    ('datetime_detail', 'u1'),
    ('datetime_year', 'i2'),
    ('pad', 'S52'),
    ('comment1', 'S80'),
    ('comment2', 'S80'),
    ('comment3', 'S80'),
    ('comment4', 'S80'),
    ('comment5', 'S80'),
]
# Common part of each per-channel header: one 140-byte record per channel,
# starting at file offset 512 (see Spike2RawIO._parse_header). Kind-specific
# fields follow this block and are read separately.
channelHeaderDesciption1 = [
    ('del_size', 'i2'),
    ('next_del_block', 'i4'),  # i8
    ('firstblock', 'i4'),  # i8
    ('lastblock', 'i4'),  # i8
    ('blocks', 'i2'),
    ('n_extra', 'i2'),
    ('pre_trig', 'i2'),
    ('free0', 'i2'),
    ('py_sz', 'i2'),
    ('max_data', 'i2'),
    ('comment', 'S72'),
    ('max_chan_time', 'i4'),  # i8
    ('l_chan_dvd', 'i4'),  # i8
    ('phy_chan', 'i2'),
    ('title', 'S10'),
    ('ideal_rate', 'f4'),
    ('kind', 'u1'),
    ('unused1', 'i1'),
]
# 20-byte header at the start of every data block; succ_block links to the
# next block of the same channel (used to walk the block chain).
blockHeaderDesciption = [
    ('pred_block', 'i4'),  # i8
    ('succ_block', 'i4'),  # i8
    ('start_time', 'i4'),  # i8
    ('end_time', 'i4'),  # i8
    ('channel_num', 'i2'),
    ('items', 'i2'),
]
# Map from the numeric channel 'kind' to a human-readable type name.
dict_kind = {
    0: 'empty',
    1: 'Adc',
    2: 'EventFall',
    3: 'EventRise',
    4: 'EventBoth',
    5: 'Marker',
    6: 'AdcMark',
    7: 'RealMark',
    8: 'TextMark',
    9: 'RealWave',
}
|
import numpy as np
from holoviews.element import Bivariate
from .testplot import TestPlotlyPlot
class TestBivariatePlot(TestPlotlyPlot):
    """Plotly backend tests for the Bivariate element."""
    def test_bivariate_state(self):
        """A Bivariate renders as a line-colored histogram2dcontour trace
        with axis ranges spanning the data extents."""
        bivariate = Bivariate(([3, 2, 1], [0, 1, 2]))
        state = self._get_plot_state(bivariate)
        self.assertEqual(state['data'][0]['type'], 'histogram2dcontour')
        self.assertEqual(state['data'][0]['x'], np.array([3, 2, 1]))
        self.assertEqual(state['data'][0]['y'], np.array([0, 1, 2]))
        self.assertEqual(state['layout']['xaxis']['range'], [1, 3])
        self.assertEqual(state['layout']['yaxis']['range'], [0, 2])
        self.assertEqual(state['data'][0]['contours']['coloring'], 'lines')
    def test_bivariate_filled(self):
        """The filled=True option switches contour coloring to 'fill'."""
        bivariate = Bivariate(([3, 2, 1], [0, 1, 2])).options(
            filled=True)
        state = self._get_plot_state(bivariate)
        self.assertEqual(state['data'][0]['contours']['coloring'], 'fill')
    def test_bivariate_ncontours(self):
        """Setting ncontours fixes the contour count and disables
        autocontouring."""
        bivariate = Bivariate(([3, 2, 1], [0, 1, 2])).options(ncontours=5)
        state = self._get_plot_state(bivariate)
        self.assertEqual(state['data'][0]['ncontours'], 5)
        self.assertEqual(state['data'][0]['autocontour'], False)
|
"""Auto-generated file, do not edit by hand. IQ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# NOTE(review): this file is auto-generated (see module docstring) --
# presumably from upstream phone-number metadata. Regenerate it rather than
# hand-editing the patterns below.
PHONE_METADATA_IQ = PhoneMetadata(id='IQ', country_code=964, international_prefix='00',
    general_desc=PhoneNumberDesc(national_number_pattern='[1-7]\\d{7,9}', possible_number_pattern='\\d{6,10}', possible_length=(8, 9, 10), possible_length_local_only=(6, 7)),
    fixed_line=PhoneNumberDesc(national_number_pattern='1\\d{7}|(?:2[13-5]|3[02367]|4[023]|5[03]|6[026])\\d{6,7}', possible_number_pattern='\\d{6,9}', example_number='12345678', possible_length=(8, 9), possible_length_local_only=(6, 7)),
    mobile=PhoneNumberDesc(national_number_pattern='7[3-9]\\d{8}', possible_number_pattern='\\d{10}', example_number='7912345678', possible_length=(10,)),
    toll_free=PhoneNumberDesc(),
    premium_rate=PhoneNumberDesc(),
    shared_cost=PhoneNumberDesc(),
    personal_number=PhoneNumberDesc(),
    voip=PhoneNumberDesc(),
    pager=PhoneNumberDesc(),
    uan=PhoneNumberDesc(),
    voicemail=PhoneNumberDesc(),
    no_international_dialling=PhoneNumberDesc(),
    national_prefix='0',
    national_prefix_for_parsing='0',
    number_format=[NumberFormat(pattern='(1)(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['1'], national_prefix_formatting_rule='0\\1'),
        NumberFormat(pattern='([2-6]\\d)(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['[2-6]'], national_prefix_formatting_rule='0\\1'),
        NumberFormat(pattern='(7\\d{2})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['7'], national_prefix_formatting_rule='0\\1')])
|
"""Test segzufy."""
from segzify import __version__
def test_version():
    """The package must report the expected semantic version."""
    expected = "0.1.0"
    assert __version__ == expected
|
import os
from flask.ext.script import Manager, Shell
from flask.ext.migrate import MigrateCommand
from widelanguagedemo.app import create_app
from widelanguagedemo.settings import DevConfig, ProdConfig
from widelanguagedemo.database import db
# Pick the configuration class from the deployment environment variable.
_config_cls = (ProdConfig
               if os.environ.get("WIDELANGUAGEDEMO_ENV") == 'prod'
               else DevConfig)
app = create_app(_config_cls)
manager = Manager(app)
TEST_CMD = "py.test tests"
def _make_context():
    """Build the default namespace for ``manage.py shell``: the Flask app
    and the SQLAlchemy ``db`` object."""
    return dict(app=app, db=db)
@manager.command
def test():
    """Run the test suite and return pytest's exit code."""
    import pytest
    return pytest.main(['tests', '--verbose'])
# Register the interactive shell and database-migration sub-commands, then
# dispatch to Flask-Script's CLI when run as a script.
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
|
from gevent import monkey; monkey.patch_all()
import argparse
import beaker.middleware
from contextlib import closing
from datetime import datetime, timedelta
import functools
import getpass
import json
import logging
import os
import re
import sys
import tempfile
import bottle
import gevent
import geventwebsocket
import psycopg2
import deco
from deco_webui import __version__
# Beaker session configuration: client-side, cookie-backed sessions.
session_opts = {
    'session.type': 'cookie',
    'session.key': 'deco.session',
    # NOTE(review): a hard-coded validate_key means session cookies can be
    # forged by anyone who reads the source -- confirm and move to config.
    'session.validate_key': 'abcdef',
}
# Wrap the bottle WSGI app so every request carries a beaker session.
app = beaker.middleware.SessionMiddleware(bottle.app(), session_opts)
# Filled in by main() with the backend connection parameters.
connect_kwargs = None
# Active incremental cursors keyed by session id (used by /stopexecution).
_cursors = {}
def _get_session():
    """Return the beaker session attached to the current request."""
    environ = bottle.request.environ
    return environ.get('beaker.session')
def signed_in(func):
    """Route decorator: redirect to /signin unless the session has a user."""
    @functools.wraps(func)
    def wrap(*args, **kwargs):
        if 'user' not in _get_session():
            # bottle.redirect raises, so execution never reaches the call.
            bottle.redirect('/signin')
        return func(*args, **kwargs)
    return wrap
def _connect():
    """Open a deco connection using the credentials stored in the session.

    Raises RuntimeError when the session is incomplete or the backend
    rejects the connection.
    """
    session = _get_session()
    try:
        return deco.connect(database=session['user'],
                            user=session['user'],
                            password=session['password'])
    except (KeyError, psycopg2.Error):
        raise RuntimeError('database connection failed')
def _wrap_value(x):
if x is None:
y = {'v': x, 'f': 'NULL'}
elif hasattr(x, 'isoformat'):
y = {'v': x.isoformat()}
else:
y = {'v': unicode(x)}
return y
def execute(ws, query):
    """Run semicolon-separated SQL through the incremental (deco) cursor,
    streaming row updates to the websocket *ws* as compact JSON actions."""
    # Split the input into statements on ';', ignoring semicolons that
    # appear inside single-quoted string literals.
    sqls = [x[:-1].strip() for x in re.findall(
        r"(?:[^';]|'(?:\\'|[^'])*')+[^;]*;",
        bottle.touni(query) + ';')]
    # Incremental streaming only makes sense for a lone SELECT.
    if len(sqls) > 1 and [1 for x in sqls if x[:6].upper() == 'SELECT']:
        ws.send(json.dumps(
            {'error': 'SELECT statements must be executed alone'}))
        return
    try:
        with _connect() as conn, closing(conn.cursor()) as cursor:
            def ws_send(action, row):
                # Actions are abbreviated to their first letter on the wire.
                if action == 'shift' or action == 'terminate':
                    ws.send(json.dumps({'a': action[0]}))
                else:  # populate, add, remove
                    wrapped_row = [_wrap_value(x) for x in row]
                    ws.send(json.dumps({'a': action[0], 'r': wrapped_row}))
            for sql in sqls:
                cursor.execute(sql, callback=ws_send)
            if cursor.description:
                # A result set exists: register the cursor so /stopexecution
                # can cancel it, announce the column names, then block until
                # the incremental execution finishes.
                session_id = _get_session()['_id']
                _cursors[session_id] = cursor
                ws.send(json.dumps(
                    {'a': 'd', 'c': [x[0] for x in cursor.description]}))
                cursor.fetchone()
                del _cursors[session_id]
            else:
                ws.send(json.dumps({'error': None}))
    # pylint: disable=W0703
    except (RuntimeError, deco.Error, psycopg2.Error, Exception) as e:
        ws.send(json.dumps({'error': unicode(e)}))
def executebackend(ws, query):
    """Run semicolon-separated SQL directly on the backend, sending the full
    result set (not incrementally) to the websocket *ws*."""
    # Split the input into statements on ';', ignoring semicolons that
    # appear inside single-quoted string literals.
    sqls = [x[:-1].strip() for x in re.findall(
        r"(?:[^';]|'(?:\\'|[^'])*')+[^;]*;",
        bottle.touni(query) + ';')]
    # Only a lone SELECT may produce a result set.
    if len(sqls) > 1 and [1 for x in sqls if x[:6].upper() == 'SELECT']:
        ws.send(json.dumps(
            {'error': 'SELECT statements must be executed alone'}))
        return
    try:
        with _connect() as conn, closing(conn.cursor()) as cursor:
            for sql in sqls:
                cursor._executebackend(sql)
            if cursor.description:
                # Announce columns, stream every row, then shift/terminate.
                ws.send(json.dumps(
                    {'a': 'd', 'c': [x[0] for x in cursor.description]}))
                for row in cursor:
                    wrapped_row = [_wrap_value(x) for x in row]
                    ws.send(json.dumps({'a': 'p', 'r': wrapped_row}))
                ws.send(json.dumps({'a': 's'}))
                ws.send(json.dumps({'a': 't'}))
            else:
                ws.send(json.dumps({'error': None}))
    except (RuntimeError, psycopg2.Error) as e:
        ws.send(json.dumps({'error': unicode(e)}))
@bottle.route('/websocket')
def websocket():
    """Main websocket endpoint: dispatch each incoming message by its first
    character ('b' = backend execution, 'd' = incremental/deco execution)."""
    ws = bottle.request.environ.get('wsgi.websocket')
    if ws:
        try:
            while True:
                message = ws.receive()
                if message is None:
                    # client closed the connection
                    break
                elif message[0] == 'b':
                    executebackend(ws, message[1:])
                elif message[0] == 'd':
                    execute(ws, message[1:])
        except geventwebsocket.WebSocketError as e:
            sys.stderr.write(unicode(e) + '\n')
        finally:
            ws.close()
    else:
        # not a websocket upgrade request
        bottle.abort(400, 'Bad Request')
class _WebSocketHandler(logging.Handler):
def __init__(self, ws):
super(_WebSocketHandler, self).__init__()
self.ws = ws
def emit(self, record):
self.ws.send(json.dumps(
[record.asctime, record.levelname, record.message]))
@bottle.route('/log')
def log():
    """Websocket endpoint that streams server log records to the client for
    the lifetime of the connection."""
    ws = bottle.request.environ.get('wsgi.websocket')
    if ws:
        # Attach a per-connection handler; it is removed when the socket
        # closes so records never go to a dead websocket.
        handler = _WebSocketHandler(ws)
        handler.setLevel(logging.INFO)
        logging.root.addHandler(handler)
        try:
            while True:
                message = ws.receive()
                if message is None:
                    # client closed the connection
                    break
        except geventwebsocket.WebSocketError as e:
            sys.stderr.write(unicode(e) + '\n')
        finally:
            logging.root.removeHandler(handler)
            ws.close()
    else:
        # not a websocket upgrade request
        bottle.abort(400, 'Bad Request')
@bottle.get('/stopexecution')
def stopexecution():
    """Cancel the running incremental query of the caller's session, if any
    (cursors are registered in _cursors by execute())."""
    session_id = _get_session()['_id']
    cursor = _cursors.get(session_id)
    if cursor:
        cursor._stopexecution()
@bottle.post('/explain')
def explain():
    """Return the query plan of a single submitted statement as JSON
    ``{'error': ..., 'plan': ...}``."""
    # Same statement-splitting regex as execute()/executebackend().
    sqls = [x[:-1].strip() for x in re.findall(
        r"(?:[^';]|'(?:\\'|[^'])*')+[^;]*;",
        bottle.touni(bottle.request.forms.get('query')) + ';')]
    if len(sqls) > 1:
        return {'error': 'only one SELECT statement can be explained'}
    elif len(sqls) == 0:
        return {'error': None, 'plan': None}
    try:
        with _connect() as conn, closing(conn.cursor()) as cursor:
            plan = cursor._explain(sqls[0], True)
            error = None
    except (RuntimeError, deco.Error, psycopg2.Error) as e:
        error = unicode(e)
        plan = None
    return {'error': error, 'plan': plan}
@bottle.get('/static/:path#.+#')
def static(path):
    """Serve a file from the package's static directory with long-lived
    caching; for css/js, prefer a precompressed ``.gz`` sibling when the
    client accepts gzip."""
    static_dir = os.path.join(os.path.dirname(__file__), 'static')
    bottle.response.headers['Cache-Control'] = 'public, max-age=31536000'
    if path.endswith(('.css', '.js')):
        bottle.response.headers['Vary'] = 'Accept-Encoding'
        # BUG fix: the header may be absent, in which case .get() returns
        # None and `'gzip' in None` raised TypeError. Default to ''.
        accept_encoding = bottle.request.headers.get('Accept-Encoding', '')
        if 'gzip' in accept_encoding:
            gzipped = bottle.static_file(path + '.gz', root=static_dir)
            # Fall through to the plain file when no .gz variant exists.
            if isinstance(gzipped, bottle.HTTPResponse):
                return gzipped
    return bottle.static_file(path, root=static_dir)
@bottle.get('/')
@signed_in
@bottle.jinja2_view(
    os.path.join(os.path.dirname(__file__), 'templates', 'index.html'))
def index():
    """Render the main query page for the signed-in user."""
    session = _get_session()
    # The database name always equals the username (see do_signup).
    database = session.get('user')
    return dict(database=database, version=__version__)
@bottle.get('/signin')
@bottle.jinja2_view(
    os.path.join(os.path.dirname(__file__), 'templates', 'signin.html'))
def signin():
    """Render the sign-in page, showing any pending flash error."""
    session = _get_session()
    error_message = session.get('error', '')
    # Drop the session so the error message is displayed only once.
    session.delete()
    session.save()
    return dict(error=error_message)
@bottle.post('/signin')
def do_signin():
    """Validate the submitted credentials against the database; on success
    store them in the session and go to the main page, otherwise flash an
    error and return to /signin."""
    session = _get_session()
    user = bottle.request.forms.get('user')
    password = bottle.request.forms.get('pass')
    if not user or not password:
        session['error'] = 'Both username and password are required.'
        session.save()
        bottle.redirect('/signin')
    try:
        # Probe the credentials by opening a connection.
        conn = deco.connect(database=user, user=user, password=password)
    except psycopg2.Error as e:
        # Map the common backend failures to friendlier messages.
        if 'authentication failed' in unicode(e):
            session['error'] = 'Password authentication failed.'
        elif 'does not exist' in unicode(e):
            session['error'] = 'Username does not exist.'
        else:
            session['error'] = unicode(e)
        session.save()
        bottle.redirect('/signin')
    else:
        conn.close()
        # NOTE(review): the password is stored in a cookie-backed session
        # (see session_opts) -- confirm this is acceptable.
        session['user'] = user
        session['password'] = password
        if bottle.request.forms.get('remember'):
            session['_expires'] = datetime.utcnow() + timedelta(days=7)
        session.save()
        bottle.redirect('/')
@bottle.get('/signout')
@signed_in
def signout():
    """Destroy the current session and return to the sign-in page."""
    session = _get_session()
    session.invalidate()
    session.save()
    bottle.redirect('/signin')
@bottle.get('/signup')
@bottle.jinja2_view(
    os.path.join(os.path.dirname(__file__), 'templates', 'signup.html'))
def signup():
    """Render the sign-up page, showing any pending flash error."""
    session = _get_session()
    error_message = session.get('error', '')
    # Drop the session so the error message is displayed only once.
    session.delete()
    session.save()
    return dict(error=error_message)
@bottle.post('/signup')
def do_signup():
    """Handle the sign-up form: validate the fields, then create a database
    and an owning role named after the user. On any failure, flash an error
    and return to /signup; on success, sign the user in."""
    session = _get_session()
    user = bottle.request.forms.get('user')
    password = bottle.request.forms.get('pass')
    password2 = bottle.request.forms.get('pass2')
    if not user or not password or not password2:
        session['error'] = 'All fields are required.'
    elif password != password2:
        session['error'] = 'Passwords do not match.'
    elif len(user) > 16:
        session['error'] = 'Username is too long.'
    elif len(password) > 32:
        session['error'] = 'Password is too long.'
    elif not ('a' <= user[0] <= 'z') and not ('A' <= user[0] <= 'Z'):
        session['error'] = 'Username must begin with a letter.'
    # raw string fixes the invalid '\w' escape; the full match ensures the
    # username is word characters only (so it is safe inside "..." below).
    elif re.match(r'\w+', user).end() != len(user):
        session['error'] = 'Username is invalid.'
    if 'error' in session:
        session.save()
        bottle.redirect('/signup')
    try:
        with closing(psycopg2.connect(**connect_kwargs)) as dbconn:
            with closing(dbconn.cursor()) as dbcur:
                # CREATE DATABASE cannot run inside a transaction block.
                dbcur.execute('COMMIT')
                dbcur.execute('CREATE DATABASE "{}"'.format(user))
                # SECURITY fix: double embedded single quotes so a crafted
                # password cannot break out of the SQL string literal
                # (utility statements cannot take bind parameters).
                dbcur.execute('CREATE ROLE "{}" LOGIN PASSWORD \'{}\''.format(
                    user, password.replace("'", "''")))
                dbcur.execute('ALTER DATABASE "{}" OWNER TO "{}"'.format(
                    user, user))
            dbconn.commit()
    except psycopg2.Error as e:
        if 'already exists' in unicode(e):
            session['error'] = 'Username already exists.'
        else:
            session['error'] = unicode(e)
        session.save()
        bottle.redirect('/signup')
    # Account created: sign the new user in immediately.
    session = _get_session()
    session['user'] = user
    session['password'] = password
    session.save()
    bottle.redirect('/')
class GeventWebSocketServer(bottle.ServerAdapter):
    """Bottle server adapter backed by gevent's WSGI server with WebSocket support."""
    def run(self, handler):
        # NOTE(review): serves the module-level `app` (as in the original),
        # not the `handler` bottle passes in.
        ws_server = gevent.pywsgi.WSGIServer(
            (self.host, self.port), app,
            handler_class=geventwebsocket.handler.WebSocketHandler)
        ws_server.serve_forever()
def main():
    """Parse CLI options, sanity-check database connectivity, then serve
    the web UI on port 8080 with the gevent/WebSocket server."""
    parser = argparse.ArgumentParser(
        add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # -h is reused below for the database host, so help lives on --help only.
    parser.add_argument(
        '--help', action='help', default=argparse.SUPPRESS,
        help='show this help message and exit')
    parser.add_argument(
        '--version', action='version',
        version='Deco-{}, deco-webui-{}'.format(deco.__version__, __version__))
    # The "psycopg2" name may actually be a sqlite3 shim; offer matching options.
    if psycopg2.__name__ == 'sqlite3':
        parser.add_argument(
            '-d', dest='database',
            default=os.path.join(tempfile.gettempdir(), 'deco.db'),
            help='database filename')
    else:
        parser.add_argument(
            '-u', dest='user', default=getpass.getuser(),
            help='database username')
        # store_true + SUPPRESS: the 'password' key exists only when -p is given.
        parser.add_argument(
            '-p', dest='password', action='store_true',
            default=argparse.SUPPRESS,
            help='prompt for password')
        parser.add_argument(
            '-h', dest='host', default=argparse.SUPPRESS,
            help='database host address')
        parser.add_argument(
            '-d', dest='database', default=getpass.getuser(),
            help='database name')
    args = vars(parser.parse_args())
    if 'password' in args:
        # replace the boolean -p flag with an interactively prompted password
        args['password'] = getpass.getpass()
    # the parsed options double as keyword arguments for connect()
    global connect_kwargs
    connect_kwargs = args
    # make sure we can connect to the backend database
    try:
        dbconn = psycopg2.connect(**connect_kwargs)
    except psycopg2.Error as e:
        sys.exit(unicode(e))
    else:
        dbconn.close()
    # start the server
    bottle.debug(True)
    bottle.run(app, host='0.0.0.0', port=8080,
               server=GeventWebSocketServer)
if __name__ == '__main__':
    main()
|
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
    """Run *cmd* through a shell, echoing its output to stdout line by line.

    Returns (return_code, output_lines); output lines have CR/LF stripped.
    """
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # stop once the process has exited and the pipe is drained
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def uninstPKGs():
    """Uninstall both embedded-API sample packages from the selected device.

    Returns False as soon as any adb uninstall reports Failure, else True.
    """
    cmd = "%s -s %s uninstall org.xwalk.embedded.api.sample" % (
        ADB_CMD, PARAMETERS.device)
    (return_code, output) = doCMD(cmd)
    for line in output:
        if "Failure" in line:
            return False
            # NOTE(review): break is unreachable after return (kept as-is)
            break
    cmd = "%s -s %s uninstall org.xwalk.embedded.api.asyncsample" % (
        ADB_CMD, PARAMETERS.device)
    (return_code, output) = doCMD(cmd)
    for line in output:
        if "Failure" in line:
            return False
            break
    return True
def instPKGs():
    """Install every .apk found under the script directory onto the device.

    Returns False if any install reports Failure; remaining APKs are still
    attempted (the break only exits the per-line output scan).
    """
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if file.endswith(".apk"):
                cmd = "%s -s %s install %s" % (ADB_CMD,
                    PARAMETERS.device, os.path.join(root, file))
                (return_code, output) = doCMD(cmd)
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    return action_status
def main():
    """Parse -s/-i/-u options, auto-detect a device when none is given,
    then install (default/-i) or uninstall (-u) the sample packages."""
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    if not PARAMETERS.device:
        # no -s given: use the first device reported by "adb devices"
        (return_code, output) = doCMD("adb devices")
        for line in output:
            if str.find(line, "\tdevice") != -1:
                PARAMETERS.device = line.split("\t")[0]
                break
    if not PARAMETERS.device:
        print "No device found"
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        # install is the default action when -u is not given
        if not instPKGs():
            sys.exit(1)
if __name__ == "__main__":
    main()
    sys.exit(0)
|
# Sikuli (Jython) UI-automation script.  Assumes the target application is
# already frontmost: opens its file dialog with Ctrl+O, pastes the path
# given as the first CLI argument, confirms, then closes the window.
sleep(5)
type('o', KeyModifier.CTRL)
sleep(1)
# first CLI argument: path to the file to open
path2VanessaBehavoir = sys.argv[1]
paste(path2VanessaBehavoir)
sleep(2)
type(Key.ENTER)
sleep(3)
# Alt+F4 closes the application window
type(Key.F4, KeyModifier.ALT)
exit(0)
|
"""
custom QToolButton icon
Tested environment:
Mac OS X 10.6.8
"""
import sys
try:
from PySide import QtCore
from PySide import QtGui
except ImportError:
from PyQt4 import QtCore
from PyQt4 import QtGui
class Demo(QtGui.QWidget):
    """Demonstrate a QToolButton carrying a custom icon, next to a QLabel preview."""
    def __init__(self):
        super(Demo, self).__init__()
        # window placement and size: x, y, width, height
        self.setGeometry(500, 200, 300, 400)
        pixmap = QtGui.QPixmap("mic-64x64.png")
        preview = QtGui.QLabel(self)
        preview.move(10, 10)
        preview.setPixmap(pixmap)
        button = QtGui.QToolButton(self)
        button.move(10, 100)
        button.setIconSize(QtCore.QSize(64, 64))
        # Attach the icon through a default QAction rather than setIcon().
        action = QtGui.QAction(QtGui.QIcon(pixmap), "Send", self)
        button.setDefaultAction(action)
        # red outline so both widgets' bounds are visible
        self.setStyleSheet("QLabel, QToolButton { border : 1px solid red; }")
    def show_and_raise(self):
        """Show the window and bring it to the front."""
        self.show()
        self.raise_()
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the demo window,
    # then hand control to the event loop until the window closes.
    app = QtGui.QApplication(sys.argv)
    demo = Demo()
    demo.show_and_raise()
    sys.exit(app.exec_())
|
"""Main entry point
"""
import os
import logging
import pkg_resources
__version__ = pkg_resources.get_distribution(__package__).version
logger = logging.getLogger(__name__)
import six
from cornice import Service
from pyramid import httpexceptions
from pyramid.config import Configurator
from pyramid.events import NewRequest
from pyramid.renderers import JSONP
from pyramid.authentication import BasicAuthAuthenticationPolicy
from pyramid_hawkauth import HawkAuthenticationPolicy
from pyramid_multiauth import MultiAuthenticationPolicy
from daybed.permissions import (
RootFactory, DaybedAuthorizationPolicy, get_credentials, check_credentials
)
from daybed.views.errors import forbidden_view
from daybed.renderers import GeoJSON
from daybed import indexer, events
API_VERSION = 'v%s' % __version__.split('.')[0]
def settings_expandvars(settings):
    """Expands all environment variables in a settings dictionary.

    Returns a new dict where each value has been passed through
    ``os.path.expandvars``, so ``$VAR`` / ``${VAR}`` references are
    substituted from the environment (unknown variables are left as-is).
    """
    # dict.items() behaves identically for this purpose on Python 2 and 3,
    # so the six.iteritems indirection is unnecessary; a dict comprehension
    # replaces the generator-fed dict() call.
    return {key: os.path.expandvars(value)
            for key, value in settings.items()}
def build_list(variable):
    """Turn a newline- or comma-separated string into a list of stripped items.

    Falsy input (None, empty string) yields an empty list.  A newline
    anywhere in the string takes precedence over commas as the separator.
    """
    if not variable:
        return []
    separator = "\n" if "\n" in variable else ","
    return [part.strip() for part in variable.split(separator)]
def redirect_to_version(request):
    """Redirect to the current version of the API."""
    destination = '/%s/%s' % (API_VERSION, request.matchdict['path'])
    raise httpexceptions.HTTPTemporaryRedirect(destination)
def main(global_config, **settings):
    """Pyramid/Cornice application factory for the Daybed API.

    Wires CORS, versioned routing, multi-scheme authentication/authorization,
    the storage backend, the ElasticSearch indexer, renderers and event
    subscribers, then returns the WSGI application.
    """
    Service.cors_origins = ('*',)
    settings = settings_expandvars(settings)
    config = Configurator(settings=settings, root_factory=RootFactory)
    config.include("cornice")
    # Redirect to the current version of the API if the prefix isn't used.
    config.add_route(name='redirect_to_version',
                     pattern='/{path:(?!%s).*}' % API_VERSION)
    config.add_view(view=redirect_to_version, route_name='redirect_to_version')
    config.route_prefix = '/%s' % API_VERSION
    # Permission management
    policies = [
        BasicAuthAuthenticationPolicy(check_credentials),
        HawkAuthenticationPolicy(decode_hawk_id=get_credentials),
    ]
    authn_policy = MultiAuthenticationPolicy(policies)
    # Unauthorized view
    config.add_forbidden_view(forbidden_view)
    # Global permissions
    model_creators = settings.get("daybed.can_create_model", "Everyone")
    token_creators = settings.get("daybed.can_create_token", "Everyone")
    token_managers = settings.get("daybed.can_manage_token", None)
    authz_policy = DaybedAuthorizationPolicy(
        model_creators=build_list(model_creators),
        token_creators=build_list(token_creators),
        token_managers=build_list(token_managers),
    )
    config.set_authentication_policy(authn_policy)
    config.set_authorization_policy(authz_policy)
    # We need to scan AFTER setting the authn / authz policies
    config.scan("daybed.views")
    # Attach the token to the request, coming from Pyramid as userid
    def get_credentials_id(request):
        try:
            credentials_id, _ = get_credentials(request,
                                                request.authenticated_userid)
            return credentials_id
        except ValueError:
            # unknown/invalid userid: expose no credentials id
            return None
    config.add_request_method(get_credentials_id, 'credentials_id', reify=True)
    # Events
    # Helper for notifying events
    def notify(request, event, *args):
        klass = config.maybe_dotted('daybed.events.' + event)
        event = klass(*(args + (request,)))
        request.registry.notify(event)
    config.add_request_method(notify, 'notify')
    # Backend
    config.registry.tokenHmacKey = settings['daybed.tokenHmacKey']
    # backend initialisation
    backend_class = config.maybe_dotted(settings['daybed.backend'])
    config.registry.backend = backend_class.load_from_config(config)
    # Indexing
    # Connect client to hosts in conf
    index_hosts = build_list(settings.get('elasticsearch.hosts',
                                          "localhost:9200"))
    indices_prefix = settings.get('elasticsearch.indices_prefix', 'daybed_')
    config.registry.index = index = indexer.ElasticSearchIndexer(
        index_hosts, indices_prefix
    )
    # Suscribe index methods to API events
    config.add_subscriber(index.on_model_created, events.ModelCreated)
    config.add_subscriber(index.on_model_updated, events.ModelUpdated)
    config.add_subscriber(index.on_model_deleted, events.ModelDeleted)
    config.add_subscriber(index.on_record_created, events.RecordCreated)
    config.add_subscriber(index.on_record_updated, events.RecordUpdated)
    config.add_subscriber(index.on_record_deleted, events.RecordDeleted)
    # Renderers
    # Force default accept header to JSON
    def add_default_accept(event):
        json_mime = 'application/json'
        accept = event.request.headers.get('Accept', json_mime)
        if json_mime in accept:
            accept = json_mime
        event.request.headers["Accept"] = accept
    config.add_subscriber(add_default_accept, NewRequest)
    # JSONP
    config.add_renderer('jsonp', JSONP(param_name='callback'))
    # Geographic data renderer
    config.add_renderer('geojson', GeoJSON())
    # Requests attachments
    def attach_objects_to_request(event):
        event.request.db = config.registry.backend
        event.request.index = config.registry.index
        http_scheme = event.request.registry.settings.get('daybed.http_scheme')
        if http_scheme:
            event.request.scheme = http_scheme
    config.add_subscriber(attach_objects_to_request, NewRequest)
    # Plugins
    try:
        config.include("daybed_browserid")
    except ImportError:
        # optional plugin: ignore when not installed
        pass
    return config.make_wsgi_app()
|
import errno
import json
import os
import socket
import stat
import sys
import time
from contextlib import contextmanager
from datetime import datetime, timezone, timedelta
from functools import partial
from getpass import getuser
from io import BytesIO
from itertools import groupby
from shutil import get_terminal_size
import msgpack
from .logger import create_logger
logger = create_logger()
from . import xattr
from .algorithms.chunker import Chunker
from .cache import ChunkListEntry
from .crypto.key import key_factory
from .compress import Compressor, CompressionSpec
from .constants import * # NOQA
from .hashindex import ChunkIndex, ChunkIndexEntry
from .helpers import Manifest
from .helpers import hardlinkable
from .helpers import ChunkIteratorFileWrapper, open_item
from .helpers import Error, IntegrityError, set_ec
from .helpers import uid2user, user2uid, gid2group, group2gid
from .helpers import parse_timestamp, to_localtime
from .helpers import format_time, format_timedelta, format_file_size, file_status, FileSize
from .helpers import safe_encode, safe_decode, make_path_safe, remove_surrogates
from .helpers import StableDict
from .helpers import bin_to_hex
from .helpers import safe_ns
from .helpers import ellipsis_truncate, ProgressIndicatorPercent, log_multi
from .patterns import PathPrefixPattern, FnmatchPattern, IECommand
from .item import Item, ArchiveItem
from .platform import acl_get, acl_set, set_flags, get_flags, swidth
from .remote import cache_if_remote
from .repository import Repository, LIST_SCAN_LIMIT
has_lchmod = hasattr(os, 'lchmod')
flags_normal = os.O_RDONLY | getattr(os, 'O_BINARY', 0)
flags_noatime = flags_normal | getattr(os, 'O_NOATIME', 0)
class Statistics:
    """Accumulates original/compressed/deduplicated sizes and a file count
    for one archive operation, and renders progress output."""
    def __init__(self, output_json=False):
        # output_json: emit progress as JSON lines instead of a terminal status line
        self.output_json = output_json
        self.osize = self.csize = self.usize = self.nfiles = 0
        self.last_progress = 0 # timestamp when last progress was shown
    def update(self, size, csize, unique):
        """Account one chunk; csize counts towards the deduplicated total
        only when the chunk is unique."""
        self.osize += size
        self.csize += csize
        if unique:
            self.usize += csize
    # Row template used by __str__; columns match the *_fmt properties below.
    summary = "{label:15} {stats.osize_fmt:>20s} {stats.csize_fmt:>20s} {stats.usize_fmt:>20s}"
    def __str__(self):
        return self.summary.format(stats=self, label='This archive:')
    def __repr__(self):
        return "<{cls} object at {hash:#x} ({self.osize}, {self.csize}, {self.usize})>".format(
            cls=type(self).__name__, hash=id(self), self=self)
    def as_dict(self):
        """Return the totals as a dict (sizes wrapped in FileSize)."""
        return {
            'original_size': FileSize(self.osize),
            'compressed_size': FileSize(self.csize),
            'deduplicated_size': FileSize(self.usize),
            'nfiles': self.nfiles,
        }
    @property
    def osize_fmt(self):
        # human-readable original size
        return format_file_size(self.osize)
    @property
    def usize_fmt(self):
        # human-readable deduplicated size
        return format_file_size(self.usize)
    @property
    def csize_fmt(self):
        # human-readable compressed size
        return format_file_size(self.csize)
    def show_progress(self, item=None, final=False, stream=None, dt=None):
        """Print a progress update, rate-limited to one update per *dt* seconds.

        JSON mode emits one object per line; terminal mode rewrites a single
        status line fitted to the terminal width (blanked when *final*).
        """
        now = time.monotonic()
        if dt is None or now - self.last_progress > dt:
            self.last_progress = now
            if self.output_json:
                data = self.as_dict()
                data.update({
                    'time': time.time(),
                    'type': 'archive_progress',
                    'path': remove_surrogates(item.path if item else ''),
                })
                msg = json.dumps(data)
                end = '\n'
            else:
                columns, lines = get_terminal_size()
                if not final:
                    msg = '{0.osize_fmt} O {0.csize_fmt} C {0.usize_fmt} D {0.nfiles} N '.format(self)
                    path = remove_surrogates(item.path) if item else ''
                    # fit the path into the remaining terminal width
                    space = columns - swidth(msg)
                    if space < 12:
                        msg = ''
                        space = columns - swidth(msg)
                    if space >= 8:
                        msg += ellipsis_truncate(path, space)
                else:
                    msg = ' ' * columns
                end = '\r'
            print(msg, end=end, file=stream or sys.stderr, flush=True)
def is_special(mode):
    """Return True if *mode* denotes a block device, char device or FIFO.

    These file types get special treatment in --read-special mode.
    """
    for predicate in (stat.S_ISBLK, stat.S_ISCHR, stat.S_ISFIFO):
        if predicate(mode):
            return True
    return False
class BackupOSError(Exception):
    """
    Wrapper for OSError raised while accessing backup files.

    Borg does different kinds of IO, and IO failures have different consequences.
    This wrapper represents failures of input file or extraction IO.
    These are non-critical and are only reported (exit code = 1, warning).

    Any unwrapped IO error is critical and aborts execution (for example repository IO failure).
    """
    def __init__(self, op, os_error):
        self.op = op
        self.os_error = os_error
        # mirror the common OSError attributes for convenient access
        self.errno = os_error.errno
        self.strerror = os_error.strerror
        self.filename = os_error.filename
    def __str__(self):
        prefix = '%s: ' % self.op if self.op else ''
        return prefix + str(self.os_error)
class BackupIO:
    """Reusable context manager that translates OSError into BackupOSError.

    Call the instance with an operation label first, e.g.
    ``with backup_io('read'): ...`` -- the label is carried into the wrapper.
    """
    op = ''
    def __call__(self, op=''):
        # record the label and return self so the call works in a with-statement
        self.op = op
        return self
    def __enter__(self):
        pass
    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type and issubclass(exc_type, OSError):
            # non-critical backup IO failure; see BackupOSError
            raise BackupOSError(self.op, exc_val) from exc_val
# module-wide reusable instance: ``with backup_io('op'): ...``
backup_io = BackupIO()
def backup_io_iter(iterator):
    """Iterate *iterator*, wrapping OSError raised by next() into
    BackupOSError with op='read'; ends cleanly on StopIteration."""
    backup_io.op = 'read'
    while True:
        try:
            with backup_io:
                item = next(iterator)
        except StopIteration:
            return
        yield item
class DownloadPipeline:
    """Fetches, decrypts and unpacks item metadata chunks from a repository."""
    def __init__(self, repository, key):
        self.repository = repository
        self.key = key
    def unpack_many(self, ids, filter=None, preload=False):
        """
        Return iterator of items.

        *ids* is a chunk ID list of an item stream. *filter* is a callable
        to decide whether an item will be yielded. *preload* preloads the data chunks of every yielded item.

        Warning: if *preload* is True then all data chunks of every yielded item have to be retrieved,
        otherwise preloaded chunks will accumulate in RemoteRepository and create a memory leak.
        """
        unpacker = msgpack.Unpacker(use_list=False)
        for data in self.fetch_many(ids):
            unpacker.feed(data)
            # materialize this metadata chunk's items first, so preload
            # requests can be issued for the whole batch before yielding
            items = [Item(internal_dict=item) for item in unpacker]
            for item in items:
                if 'chunks' in item:
                    item.chunks = [ChunkListEntry(*e) for e in item.chunks]
            if filter:
                items = [item for item in items if filter(item)]
            if preload:
                for item in items:
                    if 'chunks' in item:
                        self.repository.preload([c.id for c in item.chunks])
            for item in items:
                yield item
    def fetch_many(self, ids, is_preloaded=False):
        """Yield decrypted chunk payloads for *ids*, in order."""
        for id_, data in zip(ids, self.repository.get_many(ids, is_preloaded=is_preloaded)):
            yield self.key.decrypt(id_, data)
class ChunkBuffer:
    """Buffers msgpacked items and cuts the byte stream into chunks.

    Subclasses implement write_chunk() to decide where chunks are stored.
    """
    BUFFER_SIZE = 8 * 1024 * 1024  # flush threshold (soft limit)
    def __init__(self, key, chunker_params=ITEMS_CHUNKER_PARAMS):
        self.buffer = BytesIO()
        self.packer = msgpack.Packer(unicode_errors='surrogateescape')
        self.chunks = []  # ids returned by write_chunk(), in stream order
        self.key = key
        self.chunker = Chunker(self.key.chunk_seed, *chunker_params)
    def add(self, item):
        """Append one item's packed bytes; flush when the buffer is full."""
        self.buffer.write(self.packer.pack(item.as_dict()))
        if self.is_full():
            self.flush()
    def write_chunk(self, chunk):
        # subclass responsibility: persist *chunk* and return its id
        raise NotImplementedError
    def flush(self, flush=False):
        """Chunk the buffered bytes and write them out.

        With flush=False the trailing partial chunk is kept in the buffer
        for the next round; flush=True writes everything out.
        """
        if self.buffer.tell() == 0:
            return
        self.buffer.seek(0)
        # The chunker returns a memoryview to its internal buffer,
        # thus a copy is needed before resuming the chunker iterator.
        chunks = list(bytes(s) for s in self.chunker.chunkify(self.buffer))
        self.buffer.seek(0)
        self.buffer.truncate(0)
        # Leave the last partial chunk in the buffer unless flush is True
        end = None if flush or len(chunks) == 1 else -1
        for chunk in chunks[:end]:
            self.chunks.append(self.write_chunk(chunk))
        if end == -1:
            self.buffer.write(chunks[-1])
    def is_full(self):
        return self.buffer.tell() > self.BUFFER_SIZE
class CacheChunkBuffer(ChunkBuffer):
    """ChunkBuffer that stores item chunks through the cache/repository."""
    def __init__(self, cache, key, stats, chunker_params=ITEMS_CHUNKER_PARAMS):
        super().__init__(key, chunker_params)
        self.cache = cache
        self.stats = stats
    def write_chunk(self, chunk):
        # async write; returns the chunk id for the items list
        id_, _, _ = self.cache.add_chunk(self.key.id_hash(chunk), chunk, self.stats, wait=False)
        # opportunistically reap finished async repo responses without blocking
        self.cache.repository.async_response(wait=False)
        return id_
class Archive:
    # These nested exceptions use their docstrings as user-facing message
    # templates (formatted with the archive name / filename), so the
    # docstring text is part of runtime behavior -- do not edit casually.
    class DoesNotExist(Error):
        """Archive {} does not exist"""
    class AlreadyExists(Error):
        """Archive {} already exists"""
    class IncompatibleFilesystemEncodingError(Error):
        """Failed to encode filename "{}" into file system encoding "{}". Consider configuring the LANG environment variable."""
    def __init__(self, repository, key, manifest, name, cache=None, create=False,
                 checkpoint_interval=300, numeric_owner=False, noatime=False, noctime=False, progress=False,
                 chunker_params=CHUNKER_PARAMS, start=None, start_monotonic=None, end=None,
                 consider_part_files=False, log_json=False):
        """Open an existing archive (create=False) or start a new one.

        In create mode, *name* must not exist yet; an items buffer and
        chunker are set up and a free checkpoint name is chosen.  Otherwise
        the archive metadata is loaded via the manifest entry for *name*.
        """
        self.cwd = os.getcwd()
        self.key = key
        self.repository = repository
        self.cache = cache
        self.manifest = manifest
        self.hard_links = {}
        self.stats = Statistics(output_json=log_json)
        self.show_progress = progress
        self.name = name
        self.checkpoint_interval = checkpoint_interval
        self.numeric_owner = numeric_owner
        self.noatime = noatime
        self.noctime = noctime
        assert (start is None) == (start_monotonic is None), 'Logic error: if start is given, start_monotonic must be given as well and vice versa.'
        if start is None:
            start = datetime.utcnow()
            start_monotonic = time.monotonic()
        self.chunker_params = chunker_params
        self.start = start
        self.start_monotonic = start_monotonic
        if end is None:
            end = datetime.utcnow()
        self.end = end
        self.consider_part_files = consider_part_files
        self.pipeline = DownloadPipeline(self.repository, self.key)
        self.create = create
        if self.create:
            self.items_buffer = CacheChunkBuffer(self.cache, self.key, self.stats)
            self.chunker = Chunker(self.key.chunk_seed, *chunker_params)
            if name in manifest.archives:
                raise self.AlreadyExists(name)
            self.last_checkpoint = time.monotonic()
            i = 0
            # find a free checkpoint name: name.checkpoint, name.checkpoint.1, ...
            while True:
                self.checkpoint_name = '%s.checkpoint%s' % (name, i and ('.%d' % i) or '')
                if self.checkpoint_name not in manifest.archives:
                    break
                i += 1
        else:
            info = self.manifest.archives.get(name)
            if info is None:
                raise self.DoesNotExist(name)
            self.load(info.id)
        # lazily-built zero block used for sparse extraction (see extract_item)
        self.zeros = None
def _load_meta(self, id):
data = self.key.decrypt(id, self.repository.get(id))
metadata = ArchiveItem(internal_dict=msgpack.unpackb(data, unicode_errors='surrogateescape'))
if metadata.version != 1:
raise Exception('Unknown archive metadata version')
return metadata
    def load(self, id):
        """Populate self.id / self.metadata / self.name from stored metadata."""
        self.id = id
        self.metadata = self._load_meta(self.id)
        # cmdline entries were stored as bytes (surrogateescape); decode them
        self.metadata.cmdline = [safe_decode(arg) for arg in self.metadata.cmdline]
        self.name = self.metadata.name
    @property
    def ts(self):
        """Timestamp of archive creation (start) in UTC"""
        ts = self.metadata.time
        return parse_timestamp(ts)
    @property
    def ts_end(self):
        """Timestamp of archive creation (end) in UTC"""
        # fall back to time if there is no time_end present in metadata
        ts = self.metadata.get('time_end') or self.metadata.time
        return parse_timestamp(ts)
    @property
    def fpr(self):
        """Archive fingerprint: the archive id as a hex string."""
        return bin_to_hex(self.id)
    @property
    def duration(self):
        """Formatted duration from the live start/end attributes."""
        return format_timedelta(self.end - self.start)
    @property
    def duration_from_meta(self):
        """Formatted duration computed from stored metadata timestamps."""
        return format_timedelta(self.ts_end - self.ts)
    def info(self):
        """Return a dict describing this archive (suitable for JSON output).

        In create mode uses live stats and wall-clock start/end; otherwise
        stats are derived from the cache and timestamps from metadata.
        """
        if self.create:
            stats = self.stats
            start = self.start.replace(tzinfo=timezone.utc)
            end = self.end.replace(tzinfo=timezone.utc)
        else:
            stats = self.calc_stats(self.cache)
            start = self.ts
            end = self.ts_end
        info = {
            'name': self.name,
            'id': self.fpr,
            'start': format_time(to_localtime(start)),
            'end': format_time(to_localtime(end)),
            'duration': (end - start).total_seconds(),
            'stats': stats.as_dict(),
            'limits': {
                # fraction of the maximum metadata chunk size already used
                'max_archive_size': self.cache.chunks[self.id].csize / MAX_DATA_SIZE,
            },
        }
        if self.create:
            info['command_line'] = sys.argv
        else:
            info.update({
                'command_line': self.metadata.cmdline,
                'hostname': self.metadata.hostname,
                'username': self.metadata.username,
                'comment': self.metadata.get('comment', ''),
            })
        return info
    def __str__(self):
        # NOTE: the triple-quoted template is emitted verbatim, hence not indented.
        return '''\
Archive name: {0.name}
Archive fingerprint: {0.fpr}
Time (start): {start}
Time (end): {end}
Duration: {0.duration}
Number of files: {0.stats.nfiles}
Utilization of max. archive size: {csize_max:.0%}
'''.format(
            self,
            start=format_time(to_localtime(self.start.replace(tzinfo=timezone.utc))),
            end=format_time(to_localtime(self.end.replace(tzinfo=timezone.utc))),
            csize_max=self.cache.chunks[self.id].csize / MAX_DATA_SIZE)
    def __repr__(self):
        return 'Archive(%r)' % self.name
def item_filter(self, item, filter=None):
if not self.consider_part_files and 'part' in item:
# this is a part(ial) file, we usually don't want to consider it.
return False
return filter(item) if filter else True
def iter_items(self, filter=None, preload=False):
for item in self.pipeline.unpack_many(self.metadata.items, preload=preload,
filter=lambda item: self.item_filter(item, filter)):
yield item
def add_item(self, item, show_progress=True):
if show_progress and self.show_progress:
self.stats.show_progress(item=item, dt=0.2)
self.items_buffer.add(item)
    def write_checkpoint(self):
        """Persist the current state under the checkpoint name, then remove
        the manifest entry and the reference to its metadata chunk (the
        data chunk references written so far are kept)."""
        self.save(self.checkpoint_name)
        del self.manifest.archives[self.checkpoint_name]
        self.cache.chunk_decref(self.id, self.stats)
    def save(self, name=None, comment=None, timestamp=None, additional_metadata=None):
        """Finalize the archive: flush items, write the metadata chunk,
        register it in the manifest, then commit repository and cache.

        Raises AlreadyExists when *name* is already in the manifest.
        """
        name = name or self.name
        if name in self.manifest.archives:
            raise self.AlreadyExists(name)
        self.items_buffer.flush(flush=True)
        duration = timedelta(seconds=time.monotonic() - self.start_monotonic)
        if timestamp is None:
            # derive start from the measured monotonic duration
            self.end = datetime.utcnow()
            self.start = self.end - duration
            start = self.start
            end = self.end
        else:
            self.end = timestamp
            self.start = timestamp - duration
            end = timestamp
            start = self.start
        metadata = {
            'version': 1,
            'name': name,
            'comment': comment or '',
            'items': self.items_buffer.chunks,
            'cmdline': sys.argv,
            'hostname': socket.gethostname(),
            'username': getuser(),
            'time': start.isoformat(),
            'time_end': end.isoformat(),
            'chunker_params': self.chunker_params,
        }
        metadata.update(additional_metadata or {})
        metadata = ArchiveItem(metadata)
        data = self.key.pack_and_authenticate_metadata(metadata.as_dict(), context=b'archive')
        self.id = self.key.id_hash(data)
        self.cache.add_chunk(self.id, data, self.stats)
        # wait for all outstanding async repository writes before committing
        while self.repository.async_response(wait=True) is not None:
            pass
        self.manifest.archives[name] = (self.id, metadata.time)
        self.manifest.write()
        self.repository.commit()
        self.cache.commit()
    def calc_stats(self, cache):
        """Compute Statistics for this archive by walking its item stream.

        Uniqueness is detected by decrementing refcounts inside a cache
        transaction, which is rolled back before returning.
        """
        def add(id):
            count, size, csize = cache.chunks[id]
            # count == 1 means this archive holds the only reference
            stats.update(size, csize, count == 1)
            cache.chunks[id] = count - 1, size, csize
        def add_file_chunks(chunks):
            for id, _, _ in chunks:
                add(id)
        # This function is a bit evil since it abuses the cache to calculate
        # the stats. The cache transaction must be rolled back afterwards
        unpacker = msgpack.Unpacker(use_list=False)
        cache.begin_txn()
        stats = Statistics()
        add(self.id)
        for id, chunk in zip(self.metadata.items, self.repository.get_many(self.metadata.items)):
            add(id)
            data = self.key.decrypt(id, chunk)
            unpacker.feed(data)
            for item in unpacker:
                chunks = item.get(b'chunks')
                if chunks is not None:
                    stats.nfiles += 1
                    add_file_chunks(chunks)
        cache.rollback()
        return stats
    @contextmanager
    def extract_helper(self, dest, item, path, stripped_components, original_path, hardlink_masters):
        """Context manager handling hard-link bookkeeping around extraction.

        Yields True when the item was satisfied by creating a hard link to a
        previously extracted master (the caller must then skip content
        extraction), False otherwise.
        """
        hardlink_set = False
        # Hard link?
        if 'source' in item:
            source = os.path.join(dest, *item.source.split(os.sep)[stripped_components:])
            chunks, link_target = hardlink_masters.get(item.source, (None, source))
            if link_target:
                # Hard link was extracted previously, just link
                with backup_io('link'):
                    os.link(link_target, path)
                hardlink_set = True
            elif chunks is not None:
                # assign chunks to this item, since the item which had the chunks was not extracted
                item.chunks = chunks
        yield hardlink_set
        if not hardlink_set and hardlink_masters:
            # Update master entry with extracted item path, so that following hardlinks don't extract twice.
            hardlink_masters[item.get('source') or original_path] = (None, path)
    def extract_item(self, item, restore_attrs=True, dry_run=False, stdout=False, sparse=False,
                     hardlink_masters=None, stripped_components=0, original_path=None, pi=None):
        """
        Extract archive item.

        :param item: the item to extract
        :param restore_attrs: restore file attributes
        :param dry_run: do not write any data
        :param stdout: write extracted data to stdout
        :param sparse: write sparse files (chunk-granularity, independent of the original being sparse)
        :param hardlink_masters: maps paths to (chunks, link_target) for extracting subtrees with hardlinks correctly
        :param stripped_components: stripped leading path components to correct hard link extraction
        :param original_path: 'path' key as stored in archive
        :param pi: ProgressIndicatorPercent (or similar) for file extraction progress (in bytes)
        """
        hardlink_masters = hardlink_masters or {}
        has_damaged_chunks = 'chunks_healthy' in item
        if dry_run or stdout:
            # no filesystem changes: just stream the content (and verify sizes)
            if 'chunks' in item:
                item_chunks_size = 0
                for data in self.pipeline.fetch_many([c.id for c in item.chunks], is_preloaded=True):
                    if pi:
                        pi.show(increase=len(data), info=[remove_surrogates(item.path)])
                    if stdout:
                        sys.stdout.buffer.write(data)
                    item_chunks_size += len(data)
                if stdout:
                    sys.stdout.buffer.flush()
                if 'size' in item:
                    item_size = item.size
                    if item_size != item_chunks_size:
                        logger.warning('{}: size inconsistency detected: size {}, chunks size {}'.format(
                            item.path, item_size, item_chunks_size))
                if has_damaged_chunks:
                    logger.warning('File %s has damaged (all-zero) chunks. Try running borg check --repair.' %
                                   remove_surrogates(item.path))
            return
        original_path = original_path or item.path
        dest = self.cwd
        # refuse paths that could escape the destination directory
        if item.path.startswith(('/', '..')):
            raise Exception('Path should be relative and local')
        path = os.path.join(dest, item.path)
        # Attempt to remove existing files, ignore errors on failure
        try:
            st = os.stat(path, follow_symlinks=False)
            if stat.S_ISDIR(st.st_mode):
                os.rmdir(path)
            else:
                os.unlink(path)
        except UnicodeEncodeError:
            raise self.IncompatibleFilesystemEncodingError(path, sys.getfilesystemencoding()) from None
        except OSError:
            pass
        def make_parent(path):
            # create missing parent directories on demand
            parent_dir = os.path.dirname(path)
            if not os.path.exists(parent_dir):
                os.makedirs(parent_dir)
        mode = item.mode
        if stat.S_ISREG(mode):
            with backup_io('makedirs'):
                make_parent(path)
            with self.extract_helper(dest, item, path, stripped_components, original_path,
                                     hardlink_masters) as hardlink_set:
                if hardlink_set:
                    return
                if sparse and self.zeros is None:
                    # reference block of zeros, sized to the largest possible chunk
                    self.zeros = b'\0' * (1 << self.chunker_params[1])
                with backup_io('open'):
                    fd = open(path, 'wb')
                with fd:
                    ids = [c.id for c in item.chunks]
                    for data in self.pipeline.fetch_many(ids, is_preloaded=True):
                        if pi:
                            pi.show(increase=len(data), info=[remove_surrogates(item.path)])
                        with backup_io('write'):
                            if sparse and self.zeros.startswith(data):
                                # all-zero chunk: create a hole in a sparse file
                                fd.seek(len(data), 1)
                            else:
                                fd.write(data)
                    with backup_io('truncate_and_attrs'):
                        pos = item_chunks_size = fd.tell()
                        fd.truncate(pos)
                        fd.flush()
                        self.restore_attrs(path, item, fd=fd.fileno())
                if 'size' in item:
                    item_size = item.size
                    if item_size != item_chunks_size:
                        logger.warning('{}: size inconsistency detected: size {}, chunks size {}'.format(
                            item.path, item_size, item_chunks_size))
                if has_damaged_chunks:
                    logger.warning('File %s has damaged (all-zero) chunks. Try running borg check --repair.' %
                                   remove_surrogates(item.path))
            return
        with backup_io:
            # No repository access beyond this point.
            if stat.S_ISDIR(mode):
                make_parent(path)
                if not os.path.exists(path):
                    os.mkdir(path)
                if restore_attrs:
                    self.restore_attrs(path, item)
            elif stat.S_ISLNK(mode):
                make_parent(path)
                source = item.source
                try:
                    os.symlink(source, path)
                except UnicodeEncodeError:
                    raise self.IncompatibleFilesystemEncodingError(source, sys.getfilesystemencoding()) from None
                self.restore_attrs(path, item, symlink=True)
            elif stat.S_ISFIFO(mode):
                make_parent(path)
                with self.extract_helper(dest, item, path, stripped_components, original_path,
                                         hardlink_masters) as hardlink_set:
                    if hardlink_set:
                        return
                    os.mkfifo(path)
                    self.restore_attrs(path, item)
            elif stat.S_ISCHR(mode) or stat.S_ISBLK(mode):
                make_parent(path)
                with self.extract_helper(dest, item, path, stripped_components, original_path,
                                         hardlink_masters) as hardlink_set:
                    if hardlink_set:
                        return
                    os.mknod(path, item.mode, item.rdev)
                    self.restore_attrs(path, item)
            else:
                raise Exception('Unknown archive item type %r' % item.mode)
    def restore_attrs(self, path, item, symlink=False, fd=None):
        """
        Restore filesystem attributes on *path* (*fd*) from *item*.

        Does not access the repository.
        """
        backup_io.op = 'attrs'
        uid = gid = None
        if not self.numeric_owner:
            # prefer name-based mapping; fall back to stored numeric ids below
            uid = user2uid(item.user)
            gid = group2gid(item.group)
        uid = item.uid if uid is None else uid
        gid = item.gid if gid is None else gid
        # This code is a bit of a mess due to os specific differences
        try:
            if fd:
                os.fchown(fd, uid, gid)
            else:
                os.chown(path, uid, gid, follow_symlinks=False)
        except OSError:
            # best-effort: chown commonly fails for non-root users
            pass
        if fd:
            os.fchmod(fd, item.mode)
        elif not symlink:
            os.chmod(path, item.mode)
        elif has_lchmod: # Not available on Linux
            os.lchmod(path, item.mode)
        mtime = item.mtime
        if 'atime' in item:
            atime = item.atime
        else:
            # old archives only had mtime in item metadata
            atime = mtime
        try:
            if fd:
                os.utime(fd, None, ns=(atime, mtime))
            else:
                os.utime(path, None, ns=(atime, mtime), follow_symlinks=False)
        except OSError:
            # some systems don't support calling utime on a symlink
            pass
        acl_set(path, item, self.numeric_owner)
        if 'bsdflags' in item:
            try:
                set_flags(path, item.bsdflags, fd=fd)
            except OSError:
                # best-effort: flags may not be supported on this filesystem
                pass
        # chown removes Linux capabilities, so set the extended attributes at the end, after chown, since they include
        # the Linux capabilities in the "security.capability" attribute.
        xattrs = item.get('xattrs', {})
        for k, v in xattrs.items():
            try:
                xattr.setxattr(fd or path, k, v, follow_symlinks=False)
            except OSError as e:
                if e.errno == errno.E2BIG:
                    # xattr is too big
                    logger.warning('%s: Value or key of extended attribute %s is too big for this filesystem' %
                                   (path, k.decode()))
                    set_ec(EXIT_WARNING)
                elif e.errno == errno.ENOTSUP:
                    # xattrs not supported here
                    logger.warning('%s: Extended attributes are not supported on this filesystem' % path)
                    set_ec(EXIT_WARNING)
                elif e.errno == errno.EACCES:
                    # permission denied to set this specific xattr (this may happen related to security.* keys)
                    logger.warning('%s: Permission denied when setting extended attribute %s' % (path, k.decode()))
                    set_ec(EXIT_WARNING)
                else:
                    raise
    def set_meta(self, key, value):
        """Rewrite the archive metadata with *key* set to *value*.

        Stores a new metadata chunk, repoints the manifest entry, then
        drops the reference to the old metadata chunk.
        """
        metadata = self._load_meta(self.id)
        setattr(metadata, key, value)
        data = msgpack.packb(metadata.as_dict(), unicode_errors='surrogateescape')
        new_id = self.key.id_hash(data)
        self.cache.add_chunk(new_id, data, self.stats)
        self.manifest.archives[self.name] = (new_id, metadata.time)
        self.cache.chunk_decref(self.id, self.stats)
        self.id = new_id
def rename(self, name):
if name in self.manifest.archives:
raise self.AlreadyExists(name)
oldname = self.name
self.name = name
self.set_meta('name', name)
del self.manifest.archives[oldname]
    def delete(self, stats, progress=False, forced=False):
        """Delete this archive: decref all chunks it references and drop it from the manifest.

        :param stats: statistics object updated by the decrefs
        :param progress: show a percentage progress indicator
        :param forced: if true-ish, tolerate errors (missing objects, corrupt item
                       metadata) and still try to remove the manifest entry and
                       the archive metadata block
        """
        class ChunksIndexError(Error):
            """Chunk ID {} missing from chunks index, corrupted chunks index - aborting transaction."""
        # sentinel returned instead of None when a missing object was deliberately ignored
        exception_ignored = object()
        def fetch_async_response(wait=True):
            # collect results of earlier async repository operations
            try:
                return self.repository.async_response(wait=wait)
            except Repository.ObjectNotFound as e:
                nonlocal error
                # object not in repo - strange, but we wanted to delete it anyway.
                if forced == 0:
                    raise
                error = True
                return exception_ignored  # must not return None here
        def chunk_decref(id, stats):
            try:
                self.cache.chunk_decref(id, stats, wait=False)
            except KeyError:
                cid = bin_to_hex(id)
                raise ChunksIndexError(cid)
            else:
                # non-blocking poll for results/exceptions of async deletes
                fetch_async_response(wait=False)
        error = False
        try:
            unpacker = msgpack.Unpacker(use_list=False)
            items_ids = self.metadata.items
            pi = ProgressIndicatorPercent(total=len(items_ids), msg="Decrementing references %3.0f%%", msgid='archive.delete')
            for (i, (items_id, data)) in enumerate(zip(items_ids, self.repository.get_many(items_ids))):
                if progress:
                    pi.show(i)
                data = self.key.decrypt(items_id, data)
                unpacker.feed(data)
                # decref the item metadata chunk itself, then the content chunks it lists
                chunk_decref(items_id, stats)
                try:
                    for item in unpacker:
                        item = Item(internal_dict=item)
                        if 'chunks' in item:
                            for chunk_id, size, csize in item.chunks:
                                chunk_decref(chunk_id, stats)
                except (TypeError, ValueError):
                    # if items metadata spans multiple chunks and one chunk got dropped somehow,
                    # it could be that unpacker yields bad types
                    if forced == 0:
                        raise
                    error = True
            if progress:
                pi.finish()
        except (msgpack.UnpackException, Repository.ObjectNotFound):
            # items metadata corrupted
            if forced == 0:
                raise
            error = True
        # in forced delete mode, we try hard to delete at least the manifest entry,
        # if possible also the archive superblock, even if processing the items raises
        # some harmless exception.
        chunk_decref(self.id, stats)
        del self.manifest.archives[self.name]
        while fetch_async_response(wait=True) is not None:
            # we did async deletes, process outstanding results (== exceptions),
            # so there is nothing pending when we return and our caller wants to commit.
            pass
        if error:
            logger.warning('forced deletion succeeded, but the deleted archive was corrupted.')
            logger.warning('borg check --repair is required to free all space.')
def stat_simple_attrs(self, st):
attrs = dict(
mode=st.st_mode,
uid=st.st_uid,
gid=st.st_gid,
mtime=safe_ns(st.st_mtime_ns),
)
# borg can work with archives only having mtime (older attic archives do not have
# atime/ctime). it can be useful to omit atime/ctime, if they change without the
# file content changing - e.g. to get better metadata deduplication.
if not self.noatime:
attrs['atime'] = safe_ns(st.st_atime_ns)
if not self.noctime:
attrs['ctime'] = safe_ns(st.st_ctime_ns)
if self.numeric_owner:
attrs['user'] = attrs['group'] = None
else:
attrs['user'] = uid2user(st.st_uid)
attrs['group'] = gid2group(st.st_gid)
return attrs
def stat_ext_attrs(self, st, path):
attrs = {}
with backup_io('extended stat'):
xattrs = xattr.get_all(path, follow_symlinks=False)
bsdflags = get_flags(path, st)
acl_get(path, attrs, st, self.numeric_owner)
if xattrs:
attrs['xattrs'] = StableDict(xattrs)
if bsdflags:
attrs['bsdflags'] = bsdflags
return attrs
def stat_attrs(self, st, path):
attrs = self.stat_simple_attrs(st)
attrs.update(self.stat_ext_attrs(st, path))
return attrs
    @contextmanager
    def create_helper(self, path, st, status=None, hardlinkable=True):
        """Common scaffolding for the process_* methods.

        Builds the Item, resolves hardlinks (status 'h' for an already-seen
        inode), and yields (item, status, hardlinked, hardlink_master).  The
        item is added to the archive only if the with-block completes without
        an exception; a hardlink master is then remembered so later links to
        the same inode can reference it.
        """
        safe_path = make_path_safe(path)
        item = Item(path=safe_path)
        hardlink_master = False
        hardlinked = hardlinkable and st.st_nlink > 1
        if hardlinked:
            source = self.hard_links.get((st.st_ino, st.st_dev))
            if source is not None:
                item.source = source
                status = 'h'  # hardlink (to already seen inodes)
            else:
                hardlink_master = True
        yield item, status, hardlinked, hardlink_master
        # if we get here, "with"-block worked ok without error/exception, the item was processed ok...
        self.add_item(item)
        # ... and added to the archive, so we can remember it to refer to it later in the archive:
        if hardlink_master:
            self.hard_links[(st.st_ino, st.st_dev)] = safe_path
def process_dir(self, path, st):
with self.create_helper(path, st, 'd', hardlinkable=False) as (item, status, hardlinked, hardlink_master):
item.update(self.stat_attrs(st, path))
return status
def process_fifo(self, path, st):
with self.create_helper(path, st, 'f') as (item, status, hardlinked, hardlink_master): # fifo
item.update(self.stat_attrs(st, path))
return status
def process_dev(self, path, st, dev_type):
with self.create_helper(path, st, dev_type) as (item, status, hardlinked, hardlink_master): # char/block device
item.rdev = st.st_rdev
item.update(self.stat_attrs(st, path))
return status
def process_symlink(self, path, st):
# note: using hardlinkable=False because we can not support hardlinked symlinks,
# due to the dual-use of item.source, see issue #2343:
with self.create_helper(path, st, 's', hardlinkable=False) as (item, status, hardlinked, hardlink_master):
with backup_io('readlink'):
source = os.readlink(path)
item.source = source
if st.st_nlink > 1:
logger.warning('hardlinked symlinks will be archived as non-hardlinked symlinks!')
item.update(self.stat_attrs(st, path))
return status
def write_part_file(self, item, from_chunk, number):
item = Item(internal_dict=item.as_dict())
length = len(item.chunks)
# the item should only have the *additional* chunks we processed after the last partial item:
item.chunks = item.chunks[from_chunk:]
item.get_size(memorize=True)
item.path += '.borg_part_%d' % number
item.part = number
number += 1
self.add_item(item, show_progress=False)
self.write_checkpoint()
return length, number
    def chunk_file(self, item, cache, stats, chunk_iter, chunk_processor=None):
        """Store the content chunks from *chunk_iter* into item.chunks, writing
        checkpoint part files whenever the checkpoint interval elapses.

        :param chunk_processor: callable storing one data chunk and returning its
                                chunk list entry; defaults to adding the chunk via
                                the cache (async, non-blocking).
        """
        if not chunk_processor:
            def chunk_processor(data):
                chunk_entry = cache.add_chunk(self.key.id_hash(data), data, stats, wait=False)
                self.cache.repository.async_response(wait=False)
                return chunk_entry
        item.chunks = []
        from_chunk = 0
        part_number = 1
        for data in chunk_iter:
            item.chunks.append(chunk_processor(data))
            if self.show_progress:
                self.stats.show_progress(item=item, dt=0.2)
            if self.checkpoint_interval and time.monotonic() - self.last_checkpoint > self.checkpoint_interval:
                from_chunk, part_number = self.write_part_file(item, from_chunk, part_number)
                self.last_checkpoint = time.monotonic()
        else:
            # for-else: runs after the loop completed normally (all chunks processed)
            if part_number > 1:
                if item.chunks[from_chunk:]:
                    # if we already have created a part item inside this file, we want to put the final
                    # chunks (if any) into a part item also (so all parts can be concatenated to get
                    # the complete file):
                    from_chunk, part_number = self.write_part_file(item, from_chunk, part_number)
                    self.last_checkpoint = time.monotonic()
                # if we created part files, we have referenced all chunks from the part files,
                # but we also will reference the same chunks also from the final, complete file:
                for chunk in item.chunks:
                    cache.chunk_incref(chunk.id, stats)
def process_stdin(self, path, cache):
uid, gid = 0, 0
t = int(time.time()) * 1000000000
item = Item(
path=path,
mode=0o100660, # regular file, ug=rw
uid=uid, user=uid2user(uid),
gid=gid, group=gid2group(gid),
mtime=t, atime=t, ctime=t,
)
fd = sys.stdin.buffer # binary
self.chunk_file(item, cache, self.stats, backup_io_iter(self.chunker.chunkify(fd)))
item.get_size(memorize=True)
self.stats.nfiles += 1
self.add_item(item)
return 'i' # stdin
    def process_file(self, path, st, cache, ignore_inode=False):
        """Archive the regular file (or, with --read-special, special file) *path*.

        Uses the files cache to detect unchanged files, whose chunks are then
        just incref'd instead of re-reading and re-chunking the content.
        Returns the status char ('A' added, 'M' modified, 'U' unchanged, or 'h'
        for a hardlink to an already seen inode, set by create_helper).
        """
        with self.create_helper(path, st, None) as (item, status, hardlinked, hardlink_master):  # no status yet
            is_special_file = is_special(st.st_mode)
            if not hardlinked or hardlink_master:
                if not is_special_file:
                    path_hash = self.key.id_hash(safe_encode(os.path.join(self.cwd, path)))
                    ids = cache.file_known_and_unchanged(path_hash, st, ignore_inode)
                else:
                    # in --read-special mode, we may be called for special files.
                    # there should be no information in the cache about special files processed in
                    # read-special mode, but we better play safe as this was wrong in the past:
                    path_hash = ids = None
                first_run = not cache.files and cache.do_files
                if first_run:
                    logger.debug('Processing files ...')
                chunks = None
                if ids is not None:
                    # Make sure all ids are available
                    for id_ in ids:
                        if not cache.seen_chunk(id_):
                            break
                    else:
                        # all chunks still present -> file is unchanged, reuse them
                        chunks = [cache.chunk_incref(id_, self.stats) for id_ in ids]
                        status = 'U'  # regular file, unchanged
                else:
                    status = 'A'  # regular file, added
                item.hardlink_master = hardlinked
                item.update(self.stat_simple_attrs(st))
                # Only chunkify the file if needed
                if chunks is not None:
                    item.chunks = chunks
                else:
                    with backup_io('open'):
                        fh = Archive._open_rb(path)
                    with os.fdopen(fh, 'rb') as fd:
                        self.chunk_file(item, cache, self.stats, backup_io_iter(self.chunker.chunkify(fd, fh)))
                    if not is_special_file:
                        # we must not memorize special files, because the contents of e.g. a
                        # block or char device will change without its mtime/size/inode changing.
                        cache.memorize_file(path_hash, st, [c.id for c in item.chunks])
                    status = status or 'M'  # regular file, modified (if not 'A' already)
            self.stats.nfiles += 1
            item.update(self.stat_attrs(st, path))
            item.get_size(memorize=True)
            if is_special_file:
                # we processed a special file like a regular file. reflect that in mode,
                # so it can be extracted / accessed in FUSE mount like a regular file:
                item.mode = stat.S_IFREG | stat.S_IMODE(item.mode)
            return status
@staticmethod
def list_archives(repository, key, manifest, cache=None):
# expensive! see also Manifest.list_archive_infos.
for name in manifest.archives:
yield Archive(repository, key, manifest, name, cache=cache)
@staticmethod
def _open_rb(path):
try:
# if we have O_NOATIME, this likely will succeed if we are root or owner of file:
return os.open(path, flags_noatime)
except PermissionError:
if flags_noatime == flags_normal:
# we do not have O_NOATIME, no need to try again:
raise
# Was this EPERM due to the O_NOATIME flag? Try again without it:
return os.open(path, flags_normal)
def valid_msgpacked_dict(d, keys_serialized):
    """Check whether the bytes <d> plausibly start with a msgpacked dict whose
    first key is one of the serialized key names in *keys_serialized*."""
    if not d:
        return False
    first = d[0]
    if first & 0xf0 == 0x80:  # fixmap (up to 15 elements)
        offs = 1
    elif first == 0xde:  # map16 (up to 2^16-1 elements)
        offs = 3
    else:
        # object is not a map (dict)
        # note: we must not have dicts with > 2^16-1 elements
        return False
    if len(d) <= offs:
        return False
    # the first dict key must be serialized as a bytestring:
    key_byte = d[offs]
    key_is_str = key_byte & 0xe0 == 0xa0 or key_byte in (0xd9, 0xda, 0xdb)  # fixstr / str8 / str16 / str32
    if not key_is_str:
        return False
    # ... and the bytestring must be one of the expected key names:
    remainder = d[offs:]
    return any(remainder.startswith(pattern) for pattern in keys_serialized)
class RobustUnpacker:
    """A restartable/robust version of the streaming msgpack unpacker

    After corruption is detected, call resync(): subsequently fed data is
    buffered and scanned byte-by-byte for something that looks like the start
    of a valid item dict before normal unpacking resumes.
    """
    class UnpackerCrashed(Exception):
        """raise if unpacker crashed"""

    def __init__(self, validator, item_keys):
        super().__init__()
        # serialized key names used to recognize a plausible item dict start
        self.item_keys = [msgpack.packb(name.encode()) for name in item_keys]
        self.validator = validator
        self._buffered_data = []
        self._resync = False
        self._unpacker = msgpack.Unpacker(object_hook=StableDict)

    def resync(self):
        # enter resync mode: drop the buffer; feed() now buffers instead of
        # passing data straight to the unpacker
        self._buffered_data = []
        self._resync = True

    def feed(self, data):
        if self._resync:
            self._buffered_data.append(data)
        else:
            self._unpacker.feed(data)

    def __iter__(self):
        return self

    def __next__(self):
        def unpack_next():
            try:
                return next(self._unpacker)
            except (TypeError, ValueError) as err:
                # transform exceptions that might be raised when feeding
                # msgpack with invalid data to a more specific exception
                raise self.UnpackerCrashed(str(err))
        if self._resync:
            data = b''.join(self._buffered_data)
            while self._resync:
                if not data:
                    raise StopIteration
                # Abort early if the data does not look like a serialized item dict
                if not valid_msgpacked_dict(data, self.item_keys):
                    data = data[1:]
                    continue
                # try a fresh unpacker on the remaining buffered data
                self._unpacker = msgpack.Unpacker(object_hook=StableDict)
                self._unpacker.feed(data)
                try:
                    item = unpack_next()
                except (self.UnpackerCrashed, StopIteration):
                    # as long as we are resyncing, we also ignore StopIteration
                    pass
                else:
                    if self.validator(item):
                        self._resync = False
                        return item
                # no valid item at this offset - advance one byte and retry
                data = data[1:]
        else:
            return unpack_next()
class ArchiveChecker:
    """Check (and optionally repair) a repository's chunks, manifest and archives."""

    def __init__(self):
        self.error_found = False
        self.possibly_superseded = set()

    def check(self, repository, repair=False, archive=None, first=0, last=0, sort_by='', prefix='',
              verify_data=False, save_space=False):
        """Perform a set of checks on 'repository'

        :param repair: enable repair mode, write updated or corrected data into repository
        :param archive: only check this archive
        :param first/last/sort_by: only check this number of first/last archives ordered by sort_by
        :param prefix: only check archives with this prefix
        :param verify_data: integrity verification of data referenced by archives
        :param save_space: Repository.commit(save_space)
        """
        logger.info('Starting archive consistency check...')
        self.check_all = archive is None and not any((first, last, prefix))
        self.repair = repair
        self.repository = repository
        self.init_chunks()
        if not self.chunks:
            logger.error('Repository contains no apparent data at all, cannot continue check/repair.')
            return False
        self.key = self.identify_key(repository)
        if verify_data:
            self.verify_data()
        if Manifest.MANIFEST_ID not in self.chunks:
            logger.error("Repository manifest not found!")
            self.error_found = True
            self.manifest = self.rebuild_manifest()
        else:
            try:
                self.manifest, _ = Manifest.load(repository, key=self.key)
            except IntegrityError as exc:
                logger.error('Repository manifest is corrupted: %s', exc)
                self.error_found = True
                del self.chunks[Manifest.MANIFEST_ID]
                self.manifest = self.rebuild_manifest()
        self.rebuild_refcounts(archive=archive, first=first, last=last, sort_by=sort_by, prefix=prefix)
        self.orphan_chunks_check()
        self.finish(save_space=save_space)
        if self.error_found:
            logger.error('Archive consistency check complete, problems found.')
        else:
            logger.info('Archive consistency check complete, no problems found.')
        return self.repair or not self.error_found

    def init_chunks(self):
        """Fetch a list of all object keys from repository
        """
        # Explicitly set the initial hash table capacity to avoid performance issues
        # due to hash table "resonance".
        # Since reconstruction of archive items can add some new chunks, add 10 % headroom
        capacity = int(len(self.repository) / ChunkIndex.MAX_LOAD_FACTOR * 1.1)
        self.chunks = ChunkIndex(capacity)
        marker = None
        while True:
            result = self.repository.list(limit=LIST_SCAN_LIMIT, marker=marker)
            if not result:
                break
            marker = result[-1]
            init_entry = ChunkIndexEntry(refcount=0, size=0, csize=0)
            for id_ in result:
                self.chunks[id_] = init_entry

    def identify_key(self, repository):
        """Derive the encryption key from an arbitrary chunk (or return None for an empty repo)."""
        try:
            some_chunkid, _ = next(self.chunks.iteritems())
        except StopIteration:
            # repo is completely empty, no chunks
            return None
        cdata = repository.get(some_chunkid)
        return key_factory(repository, cdata)

    def verify_data(self):
        """Decrypt/authenticate every chunk; in repair mode, delete chunks that consistently fail."""
        logger.info('Starting cryptographic data integrity verification...')
        chunks_count_index = len(self.chunks)
        chunks_count_segments = 0
        errors = 0
        defect_chunks = []
        pi = ProgressIndicatorPercent(total=chunks_count_index, msg="Verifying data %6.2f%%", step=0.01,
                                      msgid='check.verify_data')
        marker = None
        while True:
            chunk_ids = self.repository.scan(limit=100, marker=marker)
            if not chunk_ids:
                break
            chunks_count_segments += len(chunk_ids)
            marker = chunk_ids[-1]
            chunk_data_iter = self.repository.get_many(chunk_ids)
            chunk_ids_revd = list(reversed(chunk_ids))
            while chunk_ids_revd:
                pi.show()
                chunk_id = chunk_ids_revd.pop(-1)  # better efficiency
                try:
                    encrypted_data = next(chunk_data_iter)
                except (Repository.ObjectNotFound, IntegrityError) as err:
                    self.error_found = True
                    errors += 1
                    logger.error('chunk %s: %s', bin_to_hex(chunk_id), err)
                    if isinstance(err, IntegrityError):
                        defect_chunks.append(chunk_id)
                    # as the exception killed our generator, make a new one for remaining chunks:
                    if chunk_ids_revd:
                        chunk_ids = list(reversed(chunk_ids_revd))
                        chunk_data_iter = self.repository.get_many(chunk_ids)
                else:
                    _chunk_id = None if chunk_id == Manifest.MANIFEST_ID else chunk_id
                    try:
                        self.key.decrypt(_chunk_id, encrypted_data)
                    except IntegrityError as integrity_error:
                        self.error_found = True
                        errors += 1
                        logger.error('chunk %s, integrity error: %s', bin_to_hex(chunk_id), integrity_error)
                        defect_chunks.append(chunk_id)
        pi.finish()
        if chunks_count_index != chunks_count_segments:
            logger.error('Repo/Chunks index object count vs. segment files object count mismatch.')
            logger.error('Repo/Chunks index: %d objects != segment files: %d objects',
                         chunks_count_index, chunks_count_segments)
        if defect_chunks:
            if self.repair:
                # if we kill the defect chunk here, subsequent actions within this "borg check"
                # run will find missing chunks and replace them with all-zero replacement
                # chunks and flag the files as "repaired".
                # if another backup is done later and the missing chunks get backupped again,
                # a "borg check" afterwards can heal all files where this chunk was missing.
                logger.warning('Found defect chunks. They will be deleted now, so affected files can '
                               'get repaired now and maybe healed later.')
                for defect_chunk in defect_chunks:
                    # remote repo (ssh): retry might help for strange network / NIC / RAM errors
                    # as the chunk will be retransmitted from remote server.
                    # local repo (fs): as chunks.iteritems loop usually pumps a lot of data through,
                    # a defect chunk is likely not in the fs cache any more and really gets re-read
                    # from the underlying media.
                    try:
                        encrypted_data = self.repository.get(defect_chunk)
                        _chunk_id = None if defect_chunk == Manifest.MANIFEST_ID else defect_chunk
                        self.key.decrypt(_chunk_id, encrypted_data)
                    except IntegrityError:
                        # failed twice -> get rid of this chunk
                        del self.chunks[defect_chunk]
                        self.repository.delete(defect_chunk)
                        logger.debug('chunk %s deleted.', bin_to_hex(defect_chunk))
                    else:
                        # bugfix: the %s placeholder previously had no argument
                        logger.warning('chunk %s not deleted, did not consistently fail.',
                                       bin_to_hex(defect_chunk))
            else:
                logger.warning('Found defect chunks. With --repair, they would get deleted, so affected '
                               'files could get repaired then and maybe healed later.')
                for defect_chunk in defect_chunks:
                    logger.debug('chunk %s is defect.', bin_to_hex(defect_chunk))
        log = logger.error if errors else logger.info
        log('Finished cryptographic data integrity verification, verified %d chunks with %d integrity errors.',
            chunks_count_segments, errors)

    def rebuild_manifest(self):
        """Rebuild the manifest object if it is missing

        Iterates through all objects in the repository looking for archive metadata blocks.
        """
        required_archive_keys = frozenset(key.encode() for key in REQUIRED_ARCHIVE_KEYS)

        def valid_archive(obj):
            if not isinstance(obj, dict):
                return False
            keys = set(obj)
            return required_archive_keys.issubset(keys)

        logger.info('Rebuilding missing manifest, this might take some time...')
        # as we have lost the manifest, we do not know any more what valid item keys we had.
        # collecting any key we encounter in a damaged repo seems unwise, thus we just use
        # the hardcoded list from the source code. thus, it is not recommended to rebuild a
        # lost manifest on a older borg version than the most recent one that was ever used
        # within this repository (assuming that newer borg versions support more item keys).
        manifest = Manifest(self.key, self.repository)
        archive_keys_serialized = [msgpack.packb(name.encode()) for name in ARCHIVE_KEYS]
        for chunk_id, _ in self.chunks.iteritems():
            cdata = self.repository.get(chunk_id)
            try:
                data = self.key.decrypt(chunk_id, cdata)
            except IntegrityError as exc:
                logger.error('Skipping corrupted chunk: %s', exc)
                self.error_found = True
                continue
            if not valid_msgpacked_dict(data, archive_keys_serialized):
                continue
            # cheap pre-filter before a full unpack attempt
            if b'cmdline' not in data or b'\xa7version\x01' not in data:
                continue
            try:
                archive = msgpack.unpackb(data)
            # Ignore exceptions that might be raised when feeding
            # msgpack with invalid data
            except (TypeError, ValueError, StopIteration):
                continue
            if valid_archive(archive):
                archive = ArchiveItem(internal_dict=archive)
                name = archive.name
                logger.info('Found archive %s', name)
                if name in manifest.archives:
                    # avoid clobbering: store the duplicate under a numbered name
                    i = 1
                    while True:
                        new_name = '%s.%d' % (name, i)
                        if new_name not in manifest.archives:
                            break
                        i += 1
                    logger.warning('Duplicate archive name %s, storing as %s', name, new_name)
                    name = new_name
                manifest.archives[name] = (chunk_id, archive.time)
        logger.info('Manifest rebuild complete.')
        return manifest

    def rebuild_refcounts(self, archive=None, first=0, last=0, sort_by='', prefix=''):
        """Rebuild object reference counts by walking the metadata

        Missing and/or incorrect data is repaired when detected
        """
        # Exclude the manifest from chunks
        del self.chunks[Manifest.MANIFEST_ID]

        def mark_as_possibly_superseded(id_):
            if self.chunks.get(id_, ChunkIndexEntry(0, 0, 0)).refcount == 0:
                self.possibly_superseded.add(id_)

        def add_callback(chunk):
            id_ = self.key.id_hash(chunk)
            cdata = self.key.encrypt(chunk)
            add_reference(id_, len(chunk), len(cdata), cdata)
            return id_

        def add_reference(id_, size, csize, cdata=None):
            # incref an existing chunk, or (re)create it when missing
            try:
                self.chunks.incref(id_)
            except KeyError:
                assert cdata is not None
                self.chunks[id_] = ChunkIndexEntry(refcount=1, size=size, csize=csize)
                if self.repair:
                    self.repository.put(id_, cdata)

        def verify_file_chunks(item):
            """Verifies that all file chunks are present.

            Missing file chunks will be replaced with new chunks of the same length containing all zeros.
            If a previously missing file chunk re-appears, the replacement chunk is replaced by the correct one.
            """
            def replacement_chunk(size):
                data = bytes(size)
                chunk_id = self.key.id_hash(data)
                cdata = self.key.encrypt(data)
                csize = len(cdata)
                return chunk_id, size, csize, cdata

            offset = 0
            chunk_list = []
            chunks_replaced = False
            has_chunks_healthy = 'chunks_healthy' in item
            chunks_current = item.chunks
            chunks_healthy = item.chunks_healthy if has_chunks_healthy else chunks_current
            assert len(chunks_current) == len(chunks_healthy)
            for chunk_current, chunk_healthy in zip(chunks_current, chunks_healthy):
                chunk_id, size, csize = chunk_healthy
                if chunk_id not in self.chunks:
                    # a chunk of the healthy list is missing
                    if chunk_current == chunk_healthy:
                        logger.error('{}: New missing file chunk detected (Byte {}-{}). '
                                     'Replacing with all-zero chunk.'.format(item.path, offset, offset + size))
                        self.error_found = chunks_replaced = True
                        chunk_id, size, csize, cdata = replacement_chunk(size)
                        add_reference(chunk_id, size, csize, cdata)
                    else:
                        logger.info('{}: Previously missing file chunk is still missing (Byte {}-{}). It has a '
                                    'all-zero replacement chunk already.'.format(item.path, offset, offset + size))
                        chunk_id, size, csize = chunk_current
                        if chunk_id in self.chunks:
                            add_reference(chunk_id, size, csize)
                        else:
                            logger.warning('{}: Missing all-zero replacement chunk detected (Byte {}-{}). '
                                           'Generating new replacement chunk.'.format(item.path, offset, offset + size))
                            self.error_found = chunks_replaced = True
                            chunk_id, size, csize, cdata = replacement_chunk(size)
                            add_reference(chunk_id, size, csize, cdata)
                else:
                    if chunk_current == chunk_healthy:
                        # normal case, all fine.
                        add_reference(chunk_id, size, csize)
                    else:
                        logger.info('{}: Healed previously missing file chunk! '
                                    '(Byte {}-{}).'.format(item.path, offset, offset + size))
                        add_reference(chunk_id, size, csize)
                        mark_as_possibly_superseded(chunk_current[0])  # maybe orphaned the all-zero replacement chunk
                chunk_list.append([chunk_id, size, csize])  # list-typed element as chunks_healthy is list-of-lists
                offset += size
            if chunks_replaced and not has_chunks_healthy:
                # if this is first repair, remember the correct chunk IDs, so we can maybe heal the file later
                item.chunks_healthy = item.chunks
            if has_chunks_healthy and chunk_list == chunks_healthy:
                logger.info('{}: Completely healed previously damaged file!'.format(item.path))
                del item.chunks_healthy
            item.chunks = chunk_list
            if 'size' in item:
                item_size = item.size
                item_chunks_size = item.get_size(compressed=False, from_chunks=True)
                if item_size != item_chunks_size:
                    # just warn, but keep the inconsistency, so that borg extract can warn about it.
                    logger.warning('{}: size inconsistency detected: size {}, chunks size {}'.format(
                        item.path, item_size, item_chunks_size))

        def robust_iterator(archive):
            """Iterates through all archive items

            Missing item chunks will be skipped and the msgpack stream will be restarted.
            Note: closes over `repository` which is bound later by the with-statement below.
            """
            item_keys = frozenset(key.encode() for key in self.manifest.item_keys)
            required_item_keys = frozenset(key.encode() for key in REQUIRED_ITEM_KEYS)
            unpacker = RobustUnpacker(lambda item: isinstance(item, dict) and 'path' in item,
                                      self.manifest.item_keys)
            _state = 0

            def missing_chunk_detector(chunk_id):
                # groupby key: parity of _state separates runs of present/missing chunks
                nonlocal _state
                if _state % 2 != int(chunk_id not in self.chunks):
                    _state += 1
                return _state

            def report(msg, chunk_id, chunk_no):
                cid = bin_to_hex(chunk_id)
                msg += ' [chunk: %06d_%s]' % (chunk_no, cid)  # see "debug dump-archive-items"
                self.error_found = True
                logger.error(msg)

            def list_keys_safe(keys):
                return ', '.join((k.decode() if isinstance(k, bytes) else str(k) for k in keys))

            def valid_item(obj):
                if not isinstance(obj, StableDict):
                    return False, 'not a dictionary'
                # A bug in Attic up to and including release 0.13 added a (meaningless) b'acl' key to every item.
                # We ignore it here, should it exist. See test_attic013_acl_bug for details.
                obj.pop(b'acl', None)
                keys = set(obj)
                if not required_item_keys.issubset(keys):
                    return False, 'missing required keys: ' + list_keys_safe(required_item_keys - keys)
                if not keys.issubset(item_keys):
                    return False, 'invalid keys: ' + list_keys_safe(keys - item_keys)
                return True, ''

            i = 0
            for state, items in groupby(archive.items, missing_chunk_detector):
                items = list(items)
                if state % 2:
                    for chunk_id in items:
                        report('item metadata chunk missing', chunk_id, i)
                        i += 1
                    continue
                if state > 0:
                    unpacker.resync()
                for chunk_id, cdata in zip(items, repository.get_many(items)):
                    data = self.key.decrypt(chunk_id, cdata)
                    unpacker.feed(data)
                    try:
                        for item in unpacker:
                            valid, reason = valid_item(item)
                            if valid:
                                yield Item(internal_dict=item)
                            else:
                                report('Did not get expected metadata dict when unpacking item metadata (%s)' % reason, chunk_id, i)
                    except RobustUnpacker.UnpackerCrashed:
                        report('Unpacker crashed while unpacking item metadata, trying to resync...', chunk_id, i)
                        unpacker.resync()
                    except Exception:
                        report('Exception while unpacking item metadata', chunk_id, i)
                        raise
                    i += 1

        if archive is None:
            sort_by = sort_by.split(',')
            if any((first, last, prefix)):
                archive_infos = self.manifest.archives.list(sort_by=sort_by, prefix=prefix, first=first, last=last)
                if prefix and not archive_infos:
                    logger.warning('--prefix %s does not match any archives', prefix)
                if first and len(archive_infos) < first:
                    logger.warning('--first %d archives: only found %d archives', first, len(archive_infos))
                if last and len(archive_infos) < last:
                    logger.warning('--last %d archives: only found %d archives', last, len(archive_infos))
            else:
                archive_infos = self.manifest.archives.list(sort_by=sort_by)
        else:
            # we only want one specific archive
            try:
                archive_infos = [self.manifest.archives[archive]]
            except KeyError:
                logger.error("Archive '%s' not found.", archive)
                self.error_found = True
                return
        num_archives = len(archive_infos)
        with cache_if_remote(self.repository) as repository:
            for i, info in enumerate(archive_infos):
                logger.info('Analyzing archive {} ({}/{})'.format(info.name, i + 1, num_archives))
                archive_id = info.id
                if archive_id not in self.chunks:
                    logger.error('Archive metadata block is missing!')
                    self.error_found = True
                    del self.manifest.archives[info.name]
                    continue
                mark_as_possibly_superseded(archive_id)
                cdata = self.repository.get(archive_id)
                data = self.key.decrypt(archive_id, cdata)
                archive = ArchiveItem(internal_dict=msgpack.unpackb(data))
                if archive.version != 1:
                    raise Exception('Unknown archive metadata version')
                archive.cmdline = [safe_decode(arg) for arg in archive.cmdline]
                items_buffer = ChunkBuffer(self.key)
                items_buffer.write_chunk = add_callback
                for item in robust_iterator(archive):
                    if 'chunks' in item:
                        verify_file_chunks(item)
                    items_buffer.add(item)
                items_buffer.flush(flush=True)
                # the rewritten item stream supersedes the old item metadata chunks
                for previous_item_id in archive.items:
                    mark_as_possibly_superseded(previous_item_id)
                archive.items = items_buffer.chunks
                data = msgpack.packb(archive.as_dict(), unicode_errors='surrogateescape')
                new_archive_id = self.key.id_hash(data)
                cdata = self.key.encrypt(data)
                add_reference(new_archive_id, len(data), len(cdata), cdata)
                self.manifest.archives[info.name] = (new_archive_id, info.ts)

    def orphan_chunks_check(self):
        """Report (and in repair mode delete) chunks referenced by no archive."""
        if self.check_all:
            unused = {id_ for id_, entry in self.chunks.iteritems() if entry.refcount == 0}
            orphaned = unused - self.possibly_superseded
            if orphaned:
                logger.error('{} orphaned objects found!'.format(len(orphaned)))
                self.error_found = True
            if self.repair:
                for id_ in unused:
                    self.repository.delete(id_)
        else:
            logger.info('Orphaned objects check skipped (needs all archives checked).')

    def finish(self, save_space=False):
        """In repair mode, write the (possibly updated) manifest and commit the repository."""
        if self.repair:
            self.manifest.write()
            self.repository.commit(save_space=save_space)
class ArchiveRecreater:
    class Interrupted(Exception):
        """Recreation was interrupted; *metadata* optionally preserves state on the exception."""
        def __init__(self, metadata=None):
            # default to a fresh empty dict (never a shared mutable default)
            self.metadata = metadata or {}
@staticmethod
def is_temporary_archive(archive_name):
return archive_name.endswith('.recreate')
    def __init__(self, repository, manifest, key, cache, matcher,
                 exclude_caches=False, exclude_if_present=None, keep_exclude_tags=False,
                 chunker_params=None, compression=None, recompress=False, always_recompress=False,
                 dry_run=False, stats=False, progress=False, file_status_printer=None,
                 checkpoint_interval=1800):
        """Store configuration for a recreate run; parameters mirror the CLI options."""
        self.repository = repository
        self.key = key
        self.manifest = manifest
        self.cache = cache
        self.matcher = matcher
        self.exclude_caches = exclude_caches
        self.exclude_if_present = exclude_if_present or []
        self.keep_exclude_tags = keep_exclude_tags
        # passing chunker_params means archives shall be rechunked
        self.rechunkify = chunker_params is not None
        if self.rechunkify:
            logger.debug('Rechunking archives to %s', chunker_params)
        self.chunker_params = chunker_params or CHUNKER_PARAMS
        self.recompress = recompress
        self.always_recompress = always_recompress
        self.compression = compression or CompressionSpec('none')
        # chunk ids already stored during this run, for deduplication in chunk_processor
        self.seen_chunks = set()
        self.dry_run = dry_run
        self.stats = stats
        self.progress = progress
        self.print_file_status = file_status_printer or (lambda *args: None)
        # no checkpointing in dry-run mode (nothing is written anyway)
        self.checkpoint_interval = None if dry_run else checkpoint_interval
    def recreate(self, archive_name, comment=None, target_name=None):
        """Re-create archive *archive_name*, optionally into *target_name*.

        Skips the work when there is nothing to do (no matcher entries, no
        recompression, no rechunking, no new comment).  When *target_name* is
        None the original archive is replaced by the recreated one.
        """
        assert not self.is_temporary_archive(archive_name)
        archive = self.open_archive(archive_name)
        target = self.create_target(archive, target_name)
        if self.exclude_if_present or self.exclude_caches:
            self.matcher_add_tagged_dirs(archive)
        if self.matcher.empty() and not self.recompress and not target.recreate_rechunkify and comment is None:
            logger.info("Skipping archive %s, nothing to do", archive_name)
            return
        self.process_items(archive, target)
        replace_original = target_name is None
        self.save(archive, target, comment, replace_original=replace_original)
    def process_items(self, archive, target):
        """Iterate *archive*'s items and copy the matching ones into *target*.

        When only a subset is recreated, hardlink masters that fall outside the
        subset are tracked so the first included link takes over their chunks
        and becomes the new master.
        """
        matcher = self.matcher
        target_is_subset = not matcher.empty()
        hardlink_masters = {} if target_is_subset else None
        def item_is_hardlink_master(item):
            return (target_is_subset and
                    hardlinkable(item.mode) and
                    item.get('hardlink_master', True) and
                    'source' not in item)
        for item in archive.iter_items():
            if not matcher.match(item.path):
                self.print_file_status('x', item.path)
                if item_is_hardlink_master(item):
                    # remember the excluded master's chunks; the first included
                    # link pointing at it takes them over below
                    hardlink_masters[item.path] = (item.get('chunks'), None)
                continue
            if target_is_subset and hardlinkable(item.mode) and item.get('source') in hardlink_masters:
                # master of this hard link is outside the target subset
                chunks, new_source = hardlink_masters[item.source]
                if new_source is None:
                    # First item to use this master, move the chunks
                    item.chunks = chunks
                    hardlink_masters[item.source] = (None, item.path)
                    del item.source
                else:
                    # Master was already moved, only update this item's source
                    item.source = new_source
            if self.dry_run:
                self.print_file_status('-', item.path)
            else:
                self.process_item(archive, target, item)
        if self.progress:
            target.stats.show_progress(final=True)
def process_item(self, archive, target, item):
if 'chunks' in item:
self.process_chunks(archive, target, item)
target.stats.nfiles += 1
target.add_item(item)
self.print_file_status(file_status(item.mode), item.path)
def process_chunks(self, archive, target, item):
if not self.recompress and not target.recreate_rechunkify:
for chunk_id, size, csize in item.chunks:
self.cache.chunk_incref(chunk_id, target.stats)
return item.chunks
chunk_iterator = self.iter_chunks(archive, target, list(item.chunks))
chunk_processor = partial(self.chunk_processor, target)
target.chunk_file(item, self.cache, target.stats, chunk_iterator, chunk_processor)
    def chunk_processor(self, target, data):
        """Store one chunk of *data* for *target* and return its cache chunk entry.

        Deduplicates against chunks already seen during this recreate run and,
        when recompressing, avoids rewriting chunks that are already stored
        with the desired compression.
        """
        chunk_id = self.key.id_hash(data)
        if chunk_id in self.seen_chunks:
            # Already stored during this run: just bump the reference count.
            return self.cache.chunk_incref(chunk_id, target.stats)
        overwrite = self.recompress
        if self.recompress and not self.always_recompress and chunk_id in self.cache.chunks:
            # Check if this chunk is already compressed the way we want it
            old_chunk = self.key.decrypt(None, self.repository.get(chunk_id), decompress=False)
            if Compressor.detect(old_chunk).name == self.key.compressor.decide(data).name:
                # Stored chunk has the same compression we wanted
                overwrite = False
        chunk_entry = self.cache.add_chunk(chunk_id, data, target.stats, overwrite=overwrite, wait=False)
        self.cache.repository.async_response(wait=False)
        self.seen_chunks.add(chunk_entry.id)
        return chunk_entry
def iter_chunks(self, archive, target, chunks):
chunk_iterator = archive.pipeline.fetch_many([chunk_id for chunk_id, _, _ in chunks])
if target.recreate_rechunkify:
# The target.chunker will read the file contents through ChunkIteratorFileWrapper chunk-by-chunk
# (does not load the entire file into memory)
file = ChunkIteratorFileWrapper(chunk_iterator)
yield from target.chunker.chunkify(file)
else:
for chunk in chunk_iterator:
yield chunk
    def save(self, archive, target, comment=None, replace_original=True):
        """Finalize *target*: persist its metadata and optionally replace *archive* with it."""
        if self.dry_run:
            return
        # Carry over the source archive's timestamp; tzinfo is dropped via replace().
        timestamp = archive.ts.replace(tzinfo=None)
        if comment is None:
            comment = archive.metadata.get('comment', '')
        target.save(timestamp=timestamp, comment=comment, additional_metadata={
            'cmdline': archive.metadata.cmdline,
            'recreate_cmdline': sys.argv,
        })
        if replace_original:
            # Delete the source first, then let the target take over its name.
            archive.delete(Statistics(), progress=self.progress)
            target.rename(archive.name)
        if self.stats:
            target.end = datetime.utcnow()
            log_multi(DASHES,
                      str(target),
                      DASHES,
                      str(target.stats),
                      str(self.cache),
                      DASHES)
    def matcher_add_tagged_dirs(self, archive):
        """Add excludes to the matcher created by exclude_caches and exclude_if_present."""
        def exclude(dir, tag_item):
            if self.keep_exclude_tags:
                # Keep the tag file itself, but exclude everything else in its directory.
                tag_files.append(PathPrefixPattern(tag_item.path, recurse_dir=False))
                tagged_dirs.append(FnmatchPattern(dir + '/', recurse_dir=False))
            else:
                tagged_dirs.append(PathPrefixPattern(dir, recurse_dir=False))
        matcher = self.matcher
        tag_files = []
        tagged_dirs = []
        # build hardlink masters, but only for paths ending in CACHE_TAG_NAME, so we can read hard-linked TAGs
        cachedir_masters = {}
        for item in archive.iter_items(
                filter=lambda item: item.path.endswith(CACHE_TAG_NAME) or matcher.match(item.path)):
            if item.path.endswith(CACHE_TAG_NAME):
                cachedir_masters[item.path] = item
            dir, tag_file = os.path.split(item.path)
            if tag_file in self.exclude_if_present:
                exclude(dir, item)
            if stat.S_ISREG(item.mode):
                if self.exclude_caches and tag_file == CACHE_TAG_NAME:
                    if 'chunks' in item:
                        file = open_item(archive, item)
                    else:
                        # Hard-linked tag file: read its contents via the master item.
                        file = open_item(archive, cachedir_masters[item.source])
                    # Only exclude when the file really carries the CACHEDIR.TAG signature.
                    if file.read(len(CACHE_TAG_CONTENTS)).startswith(CACHE_TAG_CONTENTS):
                        exclude(dir, item)
        matcher.add(tag_files, IECommand.Include)
        matcher.add(tagged_dirs, IECommand.ExcludeNoRecurse)
def create_target(self, archive, target_name=None):
"""Create target archive."""
target_name = target_name or archive.name + '.recreate'
target = self.create_target_archive(target_name)
# If the archives use the same chunker params, then don't rechunkify
source_chunker_params = tuple(archive.metadata.get('chunker_params', []))
target.recreate_rechunkify = self.rechunkify and source_chunker_params != target.chunker_params
if target.recreate_rechunkify:
logger.debug('Rechunking archive from %s to %s', source_chunker_params or '(unknown)', target.chunker_params)
return target
def create_target_archive(self, name):
target = Archive(self.repository, self.key, self.manifest, name, create=True,
progress=self.progress, chunker_params=self.chunker_params, cache=self.cache,
checkpoint_interval=self.checkpoint_interval)
return target
def open_archive(self, name, **kwargs):
return Archive(self.repository, self.key, self.manifest, name, cache=self.cache, **kwargs)
|
"""Tests for the pages app."""
from datetime import timedelta
from cms.externals import External
from django.contrib.contenttypes.models import ContentType
from django.core.management import call_command
from django.db import models
from django.test import TestCase
from django.utils.timezone import now
from .... import externals
from ....models.managers import publication_manager
from ..models import (ContentBase, Page, PageSearchAdapter, PageSitemap,
filter_indexable_pages)
class TestPageContent(ContentBase):
    # Minimal content model used as the pages' content type in these tests;
    # `urlconf` points page URL resolution at the test URL patterns.
    urlconf = 'cms.apps.pages.tests.urls'
class TestPageContentWithSections(ContentBase):
    # Content model variant used by TestSectionPage; adds no behavior of its own.
    pass
class Section(models.Model):
    # A titled section belonging to a page.
    # NOTE(review): positional ForeignKey without on_delete only works on
    # Django < 2.0 — confirm the targeted Django version.
    page = models.ForeignKey(Page)
    title = models.CharField(
        max_length=100,
    )
class TestPage(TestCase):
    """Tests for the Page model: tree prefetching, URLs, indexing and publication.

    Uses the modern ``assertEqual``/``assertNotEqual`` names; the deprecated
    ``assertEquals``/``assertNotEquals`` aliases were removed in Python 3.12.
    """

    def setUp(self):
        # Build a four-level chain: Homepage > Section > Subsection > Subsubsection.
        call_command('installwatson')
        with externals.watson.context_manager("update_index")():
            content_type = ContentType.objects.get_for_model(TestPageContent)
            self.homepage = Page.objects.create(
                title="Homepage",
                slug='homepage',
                content_type=content_type,
            )
            TestPageContent.objects.create(
                page=self.homepage,
            )
            self.section = Page.objects.create(
                parent=self.homepage,
                title="Section",
                content_type=content_type,
            )
            TestPageContent.objects.create(
                page=self.section,
            )
            self.subsection = Page.objects.create(
                parent=self.section,
                title="Subsection",
                content_type=content_type,
            )
            TestPageContent.objects.create(
                page=self.subsection,
            )
            self.subsubsection = Page.objects.create(
                parent=self.subsection,
                title="Subsubsection",
                content_type=content_type,
            )
            TestPageContent.objects.create(
                page=self.subsubsection,
            )

    def testChildPrefetching(self):
        # Make sure that prefetching works to two levels deep.
        with self.assertNumQueries(3):
            homepage = Page.objects.get_homepage()
        with self.assertNumQueries(2):
            subsection = homepage.children[0].children[0]
        self.assertEqual(subsection.title, "Subsection")
        with self.assertNumQueries(0):
            subsection = homepage.navigation[0].navigation[0]
        self.assertEqual(subsection.title, "Subsection")
        # Make sure that, beyond this, it doesn't go pathological.
        with self.assertNumQueries(1):
            subsubsection = subsection.children[0]
        self.assertEqual(subsubsection.title, "Subsubsection")
        with self.assertNumQueries(0):
            subsubsection = subsection.children[0]
        self.assertEqual(subsubsection.title, "Subsubsection")

    def test_page_reverse(self):
        url = self.homepage.reverse('detail', kwargs={
            'slug': self.homepage.slug
        })
        self.assertEqual(url, '/homepage/')
        url = self.homepage.reverse('index')
        self.assertEqual(url, '/')

    def test_filter_indexable_pages(self):
        pages = Page.objects.all()
        self.assertEqual(len(pages), 4)
        pages = filter_indexable_pages(Page.objects.all())
        self.assertEqual(len(pages), 4)
        # Turn off indexing on the homepage.
        self.homepage.robots_index = False
        self.homepage.save()
        pages = filter_indexable_pages(Page.objects.all())
        self.assertEqual(len(pages), 3)

    def test_pagesitemap_items(self):
        sitemap = PageSitemap()
        self.assertEqual(len(sitemap.items()), 4)
        # Turn off indexing on the homepage.
        self.homepage.robots_index = False
        self.homepage.save()
        self.assertEqual(len(sitemap.items()), 3)

    def test_contentbase_unicode(self):
        self.assertEqual(self.homepage.content.__str__(), 'Homepage')
        self.assertEqual(self.section.content.__str__(), 'Section')
        self.assertEqual(self.subsection.content.__str__(), 'Subsection')
        self.assertEqual(self.subsubsection.content.__str__(), 'Subsubsection')

    def test_pagesearchadapter_get_live_queryset(self):
        self.assertEqual(len(externals.watson.search("Homepage", models=(Page,))), 1)
        with publication_manager.select_published(True):
            self.assertEqual(len(externals.watson.search("Homepage", models=(Page,))), 1)
            self.homepage.is_online = False
            self.homepage.save()
            self.assertEqual(len(externals.watson.search("Homepage", models=(Page,))), 0)

    def test_page_get_absolute_url(self):
        with externals.watson.context_manager("update_index")():
            Page.objects.all().delete()
            content_type = ContentType.objects.get_for_model(TestPageContent)
            new_page = Page(
                content_type=content_type,
                parent=None,
                left=None,
                right=None,
            )
            self.assertIsNone(new_page.cached_url)
            new_page.save()
            self.assertEqual(new_page.cached_url, '/')
            TestPageContent.objects.create(
                page=new_page,
            )
            self.assertEqual(new_page.get_absolute_url(), '/')
            self.assertEqual(new_page.get_absolute_url(True), '/')
            new_page = Page.objects.get(pk=new_page.pk)
            self.assertEqual(new_page.cached_url, '/')
            self.assertEqual(new_page.get_absolute_url(), '/')

    def test_last_modified(self):
        # We have no versions
        self.assertEqual(self.homepage.last_modified(), '-')
        # Create an initial revision.
        with externals.reversion.create_revision():
            self.homepage.save()
        # We have reversion and a version in the db, last_modified should not be empty
        self.assertNotEqual(self.homepage.last_modified(), '-')
        # Remove reversion
        externals.reversion = None
        # We have no reversion
        self.assertEqual(self.homepage.last_modified(), '-')
        # Add back reversion
        externals.reversion = External("reversion")

    def test_publication(self):
        self.homepage.publication_date = now() + timedelta(days=10)
        self.homepage.save()
        self.section.publication_date = now() + timedelta(days=10)
        self.section.save()
        self.subsection.publication_date = now() + timedelta(days=10)
        self.subsection.save()
        self.subsubsection.publication_date = now() + timedelta(days=10)
        self.subsubsection.save()
        with publication_manager.select_published(True):
            self.assertEqual(Page.objects.count(), 0)
        with publication_manager.select_published(False):
            self.assertEqual(Page.objects.count(), 4)
        # We need to generate an exception within the published block.
        with self.assertRaises(TypeError), \
                publication_manager.select_published(True):
            assert 1 / 'a'
class TestSectionPage(TestCase):
    """Tests for pages whose content model declares sections."""

    def setUp(self):
        # A single homepage whose content type is the sectioned variant.
        with externals.watson.context_manager("update_index")():
            ct = ContentType.objects.get_for_model(TestPageContentWithSections)
            self.homepage = Page.objects.create(
                title="Homepage",
                slug='homepage',
                content_type=ct,
            )
            TestPageContentWithSections.objects.create(page=self.homepage)

    def test_pagesearchadapter_get_content(self):
        adapter = PageSearchAdapter(Page)
        self.assertEqual(adapter.get_content(self.homepage), " homepage Homepage / ")
class TestPageComplex(TestCase):
    """
    Page structure:

                              Homepage
                                 |
              +------------------+-----------------------+
              |                  |                       |
       Tree 1 - Page 1    Tree 2 - Page 1         Tree 3 - Page 1
              |                                          |
   +----------+----------+                    +----------+----------+
   |                     |                    |                     |
Tree 1 - Page 2   Tree 1 - Page 3      Tree 3 - Page 2       Tree 3 - Page 3
                                              |
                                   +----------+----------+
                                   |                     |
                            Tree 3 - Page 4       Tree 3 - Page 5

    The left/right values asserted below are the pages' tree bounds
    (nested-set style: a parent's interval encloses all of its children).
    """

    def setUp(self):
        # Declarative description of the page tree built below.
        structure = {
            'title': 'Homepage',
            'children': [
                {
                    'title': 'Tree 1 - Page 1',
                    'children': [
                        {
                            'title': 'Tree 1 - Page 2'
                        },
                        {
                            'title': 'Tree 1 - Page 3'
                        }
                    ]
                },
                {
                    'title': 'Tree 2 - Page 1'
                },
                {
                    'title': 'Tree 3 - Page 1',
                    'children': [
                        {
                            'title': 'Tree 3 - Page 2',
                            'children': [
                                {
                                    'title': 'Tree 3 - Page 4'
                                },
                                {
                                    'title': 'Tree 3 - Page 5'
                                }
                            ]
                        },
                        {
                            'title': 'Tree 3 - Page 3'
                        }
                    ]
                }
            ]
        }
        content_type = ContentType.objects.get_for_model(TestPageContent)
        self.page_ids = {}
        self.pages = {}

        def _add_page(page, parent=None):
            # The slug doubles as the lookup key in self.page_ids / self.pages,
            # e.g. 'Tree 1 - Page 1' -> 'Tree_1___Page_1'.
            slug = page['title'].replace(' ', '_').replace('-', '_')
            page_obj = Page.objects.create(
                title=page['title'],
                slug=slug,
                content_type=content_type,
                parent=parent,
            )
            TestPageContent.objects.create(
                page=page_obj,
            )
            self.page_ids[slug] = page_obj.pk
            if page.get('children', None):
                for child in page['children']:
                    _add_page(child, page_obj)
        with externals.watson.context_manager("update_index")():
            _add_page(structure)
        self._rebuild_page_dict()

    def _rebuild_page_dict(self):
        # Re-fetch every page from the DB so left/right values are current.
        self.pages = {}
        for page in self.page_ids:
            try:
                self.pages[page] = Page.objects.get(pk=self.page_ids[page])
            # Handle tests involving deletions.
            except Page.DoesNotExist:
                pass

    def test_page_excise_branch(self):
        # Excising a branch which hasn't been deleted should have no affect.
        self.assertEqual(self.pages['Homepage'].left, 1)
        self.assertEqual(self.pages['Homepage'].right, 20)
        self.assertEqual(self.pages['Tree_1___Page_1'].left, 2)
        self.assertEqual(self.pages['Tree_1___Page_1'].right, 7)
        self.assertEqual(self.pages['Tree_1___Page_2'].left, 3)
        self.assertEqual(self.pages['Tree_1___Page_2'].right, 4)
        self.assertEqual(self.pages['Tree_1___Page_3'].left, 5)
        self.assertEqual(self.pages['Tree_1___Page_3'].right, 6)
        self.assertEqual(self.pages['Tree_2___Page_1'].left, 8)
        self.assertEqual(self.pages['Tree_2___Page_1'].right, 9)
        self.assertEqual(self.pages['Tree_3___Page_1'].left, 10)
        self.assertEqual(self.pages['Tree_3___Page_1'].right, 19)
        self.assertEqual(self.pages['Tree_3___Page_2'].left, 11)
        self.assertEqual(self.pages['Tree_3___Page_2'].right, 16)
        self.assertEqual(self.pages['Tree_3___Page_3'].left, 17)
        self.assertEqual(self.pages['Tree_3___Page_3'].right, 18)
        self.assertEqual(self.pages['Tree_3___Page_4'].left, 12)
        self.assertEqual(self.pages['Tree_3___Page_4'].right, 13)
        self.assertEqual(self.pages['Tree_3___Page_5'].left, 14)
        self.assertEqual(self.pages['Tree_3___Page_5'].right, 15)
        self.pages['Homepage']._excise_branch()
        # Bounds must be unchanged after excising a live branch.
        self.assertEqual(self.pages['Homepage'].left, 1)
        self.assertEqual(self.pages['Homepage'].right, 20)
        self.assertEqual(self.pages['Tree_1___Page_1'].left, 2)
        self.assertEqual(self.pages['Tree_1___Page_1'].right, 7)
        self.assertEqual(self.pages['Tree_1___Page_2'].left, 3)
        self.assertEqual(self.pages['Tree_1___Page_2'].right, 4)
        self.assertEqual(self.pages['Tree_1___Page_3'].left, 5)
        self.assertEqual(self.pages['Tree_1___Page_3'].right, 6)
        self.assertEqual(self.pages['Tree_2___Page_1'].left, 8)
        self.assertEqual(self.pages['Tree_2___Page_1'].right, 9)
        self.assertEqual(self.pages['Tree_3___Page_1'].left, 10)
        self.assertEqual(self.pages['Tree_3___Page_1'].right, 19)
        self.assertEqual(self.pages['Tree_3___Page_2'].left, 11)
        self.assertEqual(self.pages['Tree_3___Page_2'].right, 16)
        self.assertEqual(self.pages['Tree_3___Page_3'].left, 17)
        self.assertEqual(self.pages['Tree_3___Page_3'].right, 18)
        self.assertEqual(self.pages['Tree_3___Page_4'].left, 12)
        self.assertEqual(self.pages['Tree_3___Page_4'].right, 13)
        self.assertEqual(self.pages['Tree_3___Page_5'].left, 14)
        self.assertEqual(self.pages['Tree_3___Page_5'].right, 15)

    def test_page_save__create_with_sides(self):
        with externals.watson.context_manager("update_index")():
            content_type = ContentType.objects.get_for_model(TestPageContent)
            # Create a page with a manual left and right defined.
            page_obj = Page.objects.create(
                title='Foo',
                content_type=content_type,
                parent=self.pages['Tree_1___Page_1'],
                left=7,
                right=8,
            )
            TestPageContent.objects.create(
                page=page_obj,
            )
            self.assertEqual(page_obj.title, 'Foo')

    def test_page_save__move_branch_left(self):
        # Initial layout (see class docstring).
        self.assertEqual(self.pages['Homepage'].left, 1)
        self.assertEqual(self.pages['Homepage'].right, 20)
        self.assertEqual(self.pages['Tree_1___Page_1'].left, 2)
        self.assertEqual(self.pages['Tree_1___Page_1'].right, 7)
        self.assertEqual(self.pages['Tree_1___Page_2'].left, 3)
        self.assertEqual(self.pages['Tree_1___Page_2'].right, 4)
        self.assertEqual(self.pages['Tree_1___Page_3'].left, 5)
        self.assertEqual(self.pages['Tree_1___Page_3'].right, 6)
        self.assertEqual(self.pages['Tree_2___Page_1'].left, 8)
        self.assertEqual(self.pages['Tree_2___Page_1'].right, 9)
        self.assertEqual(self.pages['Tree_3___Page_1'].left, 10)
        self.assertEqual(self.pages['Tree_3___Page_1'].right, 19)
        self.assertEqual(self.pages['Tree_3___Page_2'].left, 11)
        self.assertEqual(self.pages['Tree_3___Page_2'].right, 16)
        self.assertEqual(self.pages['Tree_3___Page_3'].left, 17)
        self.assertEqual(self.pages['Tree_3___Page_3'].right, 18)
        self.assertEqual(self.pages['Tree_3___Page_4'].left, 12)
        self.assertEqual(self.pages['Tree_3___Page_4'].right, 13)
        self.assertEqual(self.pages['Tree_3___Page_5'].left, 14)
        self.assertEqual(self.pages['Tree_3___Page_5'].right, 15)
        # Re-parent Tree 3 - Page 1 (rightmost subtree) under Tree 1 - Page 1.
        self.pages['Tree_3___Page_1'].parent = self.pages['Tree_1___Page_1']
        self.pages['Tree_3___Page_1'].save()
        # Rebuild page dict.
        self._rebuild_page_dict()
        self.assertEqual(self.pages['Homepage'].left, 1)
        self.assertEqual(self.pages['Homepage'].right, 20)
        self.assertEqual(self.pages['Tree_1___Page_1'].left, 2)
        self.assertEqual(self.pages['Tree_1___Page_1'].right, 17)
        self.assertEqual(self.pages['Tree_1___Page_2'].left, 3)
        self.assertEqual(self.pages['Tree_1___Page_2'].right, 4)
        self.assertEqual(self.pages['Tree_1___Page_3'].left, 5)
        self.assertEqual(self.pages['Tree_1___Page_3'].right, 6)
        self.assertEqual(self.pages['Tree_2___Page_1'].left, 18)
        self.assertEqual(self.pages['Tree_2___Page_1'].right, 19)
        self.assertEqual(self.pages['Tree_3___Page_1'].left, 7)
        self.assertEqual(self.pages['Tree_3___Page_1'].right, 16)
        self.assertEqual(self.pages['Tree_3___Page_2'].left, 8)
        self.assertEqual(self.pages['Tree_3___Page_2'].right, 13)
        self.assertEqual(self.pages['Tree_3___Page_3'].left, 14)
        self.assertEqual(self.pages['Tree_3___Page_3'].right, 15)
        self.assertEqual(self.pages['Tree_3___Page_4'].left, 9)
        self.assertEqual(self.pages['Tree_3___Page_4'].right, 10)
        self.assertEqual(self.pages['Tree_3___Page_5'].left, 11)
        self.assertEqual(self.pages['Tree_3___Page_5'].right, 12)

    def test_page_save__move_branch_right(self):
        # Initial layout (see class docstring).
        self.assertEqual(self.pages['Homepage'].left, 1)
        self.assertEqual(self.pages['Homepage'].right, 20)
        self.assertEqual(self.pages['Tree_1___Page_1'].left, 2)
        self.assertEqual(self.pages['Tree_1___Page_1'].right, 7)
        self.assertEqual(self.pages['Tree_1___Page_2'].left, 3)
        self.assertEqual(self.pages['Tree_1___Page_2'].right, 4)
        self.assertEqual(self.pages['Tree_1___Page_3'].left, 5)
        self.assertEqual(self.pages['Tree_1___Page_3'].right, 6)
        self.assertEqual(self.pages['Tree_2___Page_1'].left, 8)
        self.assertEqual(self.pages['Tree_2___Page_1'].right, 9)
        self.assertEqual(self.pages['Tree_3___Page_1'].left, 10)
        self.assertEqual(self.pages['Tree_3___Page_1'].right, 19)
        self.assertEqual(self.pages['Tree_3___Page_2'].left, 11)
        self.assertEqual(self.pages['Tree_3___Page_2'].right, 16)
        self.assertEqual(self.pages['Tree_3___Page_3'].left, 17)
        self.assertEqual(self.pages['Tree_3___Page_3'].right, 18)
        self.assertEqual(self.pages['Tree_3___Page_4'].left, 12)
        self.assertEqual(self.pages['Tree_3___Page_4'].right, 13)
        self.assertEqual(self.pages['Tree_3___Page_5'].left, 14)
        self.assertEqual(self.pages['Tree_3___Page_5'].right, 15)
        # Re-parent Tree 1 - Page 1 (leftmost subtree) under Tree 3 - Page 1.
        self.pages['Tree_1___Page_1'].parent = self.pages['Tree_3___Page_1']
        self.pages['Tree_1___Page_1'].save()
        # Rebuild page dict.
        self._rebuild_page_dict()
        self.assertEqual(self.pages['Homepage'].left, 1)
        self.assertEqual(self.pages['Homepage'].right, 20)
        self.assertEqual(self.pages['Tree_1___Page_1'].left, 13)
        self.assertEqual(self.pages['Tree_1___Page_1'].right, 18)
        self.assertEqual(self.pages['Tree_1___Page_2'].left, 14)
        self.assertEqual(self.pages['Tree_1___Page_2'].right, 15)
        self.assertEqual(self.pages['Tree_1___Page_3'].left, 16)
        self.assertEqual(self.pages['Tree_1___Page_3'].right, 17)
        self.assertEqual(self.pages['Tree_2___Page_1'].left, 2)
        self.assertEqual(self.pages['Tree_2___Page_1'].right, 3)
        self.assertEqual(self.pages['Tree_3___Page_1'].left, 4)
        self.assertEqual(self.pages['Tree_3___Page_1'].right, 19)
        self.assertEqual(self.pages['Tree_3___Page_2'].left, 5)
        self.assertEqual(self.pages['Tree_3___Page_2'].right, 10)
        self.assertEqual(self.pages['Tree_3___Page_3'].left, 11)
        self.assertEqual(self.pages['Tree_3___Page_3'].right, 12)
        self.assertEqual(self.pages['Tree_3___Page_4'].left, 6)
        self.assertEqual(self.pages['Tree_3___Page_4'].right, 7)
        self.assertEqual(self.pages['Tree_3___Page_5'].left, 8)
        self.assertEqual(self.pages['Tree_3___Page_5'].right, 9)

    def test_page_delete(self):
        # Deleting a leaf must close the gap in the remaining bounds.
        self.pages['Tree_3___Page_5'].content.delete()
        self.pages['Tree_3___Page_5'].delete()
        # Rebuild page dict.
        self._rebuild_page_dict()
        self.assertEqual(self.pages['Homepage'].left, 1)
        self.assertEqual(self.pages['Homepage'].right, 18)
        self.assertEqual(self.pages['Tree_1___Page_1'].left, 2)
        self.assertEqual(self.pages['Tree_1___Page_1'].right, 7)
        self.assertEqual(self.pages['Tree_1___Page_2'].left, 3)
        self.assertEqual(self.pages['Tree_1___Page_2'].right, 4)
        self.assertEqual(self.pages['Tree_1___Page_3'].left, 5)
        self.assertEqual(self.pages['Tree_1___Page_3'].right, 6)
        self.assertEqual(self.pages['Tree_2___Page_1'].left, 8)
        self.assertEqual(self.pages['Tree_2___Page_1'].right, 9)
        self.assertEqual(self.pages['Tree_3___Page_1'].left, 10)
        self.assertEqual(self.pages['Tree_3___Page_1'].right, 17)
        self.assertEqual(self.pages['Tree_3___Page_2'].left, 11)
        self.assertEqual(self.pages['Tree_3___Page_2'].right, 14)
        self.assertEqual(self.pages['Tree_3___Page_3'].left, 15)
        self.assertEqual(self.pages['Tree_3___Page_3'].right, 16)
        self.assertEqual(self.pages['Tree_3___Page_4'].left, 12)
        self.assertEqual(self.pages['Tree_3___Page_4'].right, 13)
        with self.assertRaises(KeyError):
            self.pages['Tree_3___Page_5']
|
from __future__ import unicode_literals
import os.path
import difflib
from common import GitChangelogTestCase, w, cmd
class TestEnvironmentCornerCases(GitChangelogTestCase):
    """Corner cases in the execution environment (e.g. a bogus config file)."""

    def test_config_file_is_not_a_file(self):
        """$tprog must fail cleanly when .gitchangelog.rc exists but is a directory."""
        # Create a *directory* with the config file's name.
        w("""
            mkdir .gitchangelog.rc
        """)
        out, err, errlvl = cmd('$tprog')
        self.assertEqual(
            errlvl, 1,
            msg="Should fail when bogus config file exists but is not a file")
        # Fixed message: the two concatenated fragments previously ran together
        # without a separating space; also "a error" -> "an error".
        self.assertContains(
            err, "not a file",
            msg="There should be an error message stating that config file is not a file. "
            "Current stderr:\n%r" % err)
        self.assertEqual(
            out, "",
            msg="There should be no standard output. "
            "Current stdout:\n%s" % out)
|
from .models import *
from .api import *
from . import grade_file
|
import numpy as np
import pandas as pd
from math import sqrt
from sklearn import ensemble
from sklearn.metrics import mean_squared_error
# Train a GradientBoostingRegressor on the numerical training data and write
# predictions for the test set to result.csv.  Column "42" is the target.
df_train = pd.read_csv("train_numerical_head.csv")

feats = df_train.drop(str(42), axis=1)
X_train = feats.values              # features
y_train = df_train[str(42)].values  # target

df_test = pd.read_csv("test_numerical_head.csv")
# BUG FIX: the original reused the *training* features as X_test, so the model
# "predicted" the training set.  Use the test set's own feature columns
# (dropping the target column if it happens to be present).
X_test = df_test.drop(str(42), axis=1, errors='ignore').values

# Cap extreme target values at 10M.  The original loop iterated
# range(0, len(y_train) - 1) and therefore skipped the last element;
# np.minimum clips every element in a single vectorized pass.
y_train = np.minimum(y_train, 10000000)

params = {'n_estimators': 500, 'max_depth': 3, 'min_samples_split': 1,
          'learning_rate': 0.001, 'loss': 'lad'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)

result = np.asarray(clf.predict(X_test))
np.savetxt("result.csv", result, delimiter=",")

# Training-set RMSE: an optimistic sanity check, not a validation score.
rmse = sqrt(mean_squared_error(y_train, clf.predict(X_train)))
print("GradientBoostingRegressor RMSE: %s" % rmse)
|
try:
import urllib2 as urllib
except ImportError:
# For Python 3+
import urllib.request as urllib
import numpy as np
import json
BASE_PATH = "http://starserver.thelangton.org.uk/lucid-data-browser/api/"
def get_data_files(run = None):
    """Fetch the list of available data files, optionally restricted to one *run*."""
    stream = urllib.urlopen(BASE_PATH + "get/data_files")
    if stream.getcode() != 200:
        raise Exception("An error occurred whilst processing the request")
    data_files = json.loads(stream.read())
    if run:
        return [entry for entry in data_files if entry['run'] == run]
    return data_files
def get_runs():
    """Return the distinct runs found in the data-file list (first-seen order)."""
    # Get a list of available data files and extract runs from this
    stream = urllib.urlopen(BASE_PATH + "get/data_files")
    if stream.getcode() != 200:
        raise Exception("An error occurred whilst processing the request")
    runs = []
    for data_file in json.loads(stream.read()):
        run = data_file['run']
        if run not in runs:
            runs.append(run)
    return runs
class Frame:
    # Plain attribute container: get_frames() replaces __dict__ wholesale with
    # the decoded JSON frame dict and then rewrites .channels as numpy arrays.
    pass
def get_frames(file_id, run = None):
    """Fetch the frames of a data file and decode each channel into a 256x256 array.

    Channels absent from a frame are represented as None in frame.channels.
    """
    stream = urllib.urlopen(BASE_PATH + "get/frames?data_file=" + str(int(file_id)))
    if stream.getcode() != 200:
        raise Exception("That data file could not be found")
    # .decode required for use in Python 3+
    raw_frames = json.loads(stream.read().decode('utf-8'))
    decoded_frames = []
    for raw in raw_frames:
        frame = Frame()
        frame.__dict__ = raw
        channels = []
        for channel_id in range(5):
            key = str(channel_id)
            if key not in frame.channels:
                channels.append(None)
                continue
            grid = np.zeros((256, 256))
            # Each non-blank line is "x<TAB>y<TAB>count"; the final line is blank.
            for line in frame.channels[key].split("\n")[:-1]:
                vals = line.split("\t")
                x = int(float(vals[0].strip()))
                y = int(float(vals[1].strip()))
                grid[x][y] = int(float(vals[2].strip()))
            channels.append(grid)
        frame.channels = channels
        decoded_frames.append(frame)
    return decoded_frames
|
from msrest.serialization import Model
class Operation(Model):
    """Storage REST API operation definition.

    :param name: Operation name: {provider}/{resource}/{operation}
    :type name: str
    :param display: Display metadata associated with the operation.
    :type display: ~azure.mgmt.storage.v2017_10_01.models.OperationDisplay
    :param origin: The origin of operations.
    :type origin: str
    :param service_specification: One property of operation, include metric
     specifications.
    :type service_specification:
     ~azure.mgmt.storage.v2017_10_01.models.ServiceSpecification
    """

    # msrest (de)serialization map: attribute name -> wire key and type.
    # The dotted key 'properties.serviceSpecification' addresses a value nested
    # under the JSON 'properties' object.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
        'origin': {'key': 'origin', 'type': 'str'},
        'service_specification': {'key': 'properties.serviceSpecification', 'type': 'ServiceSpecification'},
    }

    def __init__(self, name=None, display=None, origin=None, service_specification=None):
        super(Operation, self).__init__()
        self.name = name
        self.display = display
        self.origin = origin
        self.service_specification = service_specification
|
"""
Script para generar documento con la historia de los cambios.
"""
import os
import subprocess
SRC_DIR = os.path.join(os.path.dirname(__file__), os.pardir)
DOCS_DIR = os.path.abspath(os.path.join(SRC_DIR, 'docs'))
TMP_FILE = os.path.join(DOCS_DIR, 'changelog-tmp.tex')
GIT_CMD = ['git', 'log', '--no-merges', '--date-order', '--pretty=format:%ai & %an & %s%n%n%b%n%n \\\\ \\hline']
LATEX_CMD = ['pdflatex', os.path.join(DOCS_DIR, 'changelog.tex')]
def main():
    """
    Generate the changelog document: dump ``git log`` into a temporary LaTeX
    fragment, escape LaTeX-unsafe characters in place, compile the document
    twice and remove the auxiliary build files.
    """
    # Get the content of the file from the output of the git log command.
    os.chdir(SRC_DIR)
    with open(TMP_FILE, 'w') as tmp_file:
        subprocess.call(GIT_CMD, stdout=tmp_file)
    # Escape characters that LaTeX would otherwise misinterpret.
    with open(TMP_FILE, 'r+') as tmp_file:
        data = tmp_file.read()
        tmp_file.seek(0)
        data = data.replace(r'_', r'\_')
        data = data.replace(r'<', r'$<$')
        data = data.replace(r'>', r'$>$')
        tmp_file.write(data)
        # The replacements only grow the text, but truncate defensively so no
        # stale tail can survive if the substitutions ever change.
        tmp_file.truncate()
    # Compile twice so LaTeX cross-references settle, then clean up aux files.
    # (`range` replaces the Python-2-only `xrange`; it works on both 2 and 3.)
    os.chdir(DOCS_DIR)
    for _ in range(2):
        subprocess.call(LATEX_CMD)
    for f in os.listdir(DOCS_DIR):
        if f.startswith('changelog') and f not in ('changelog.tex', 'changelog.pdf'):
            os.remove(os.path.join(DOCS_DIR, f))
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
|
from __future__ import absolute_import, print_function
import traceback
from autobahn.websocket import protocol
from autobahn.websocket import http
from autobahn.wamp.interfaces import ITransport
from autobahn.wamp.exception import ProtocolError, SerializationError, TransportLost
__all__ = ('WampWebSocketServerProtocol',
'WampWebSocketClientProtocol',
'WampWebSocketServerFactory',
'WampWebSocketClientFactory')
class WampWebSocketProtocol(object):
    """
    Base class for WAMP-over-WebSocket transport mixins.

    Bridges the WebSocket channel callbacks (onOpen/onMessage/onClose) to a
    WAMP session object produced by ``self.factory._factory()``.
    """

    _session = None  # default; set to the WAMP session in onOpen

    def _bailout(self, code, reason=None):
        # Tear down the WebSocket connection after a fatal WAMP-level error.
        if self.factory.debug_wamp:
            print("Failing WAMP-over-WebSocket transport: code = {0}, reason = '{1}'".format(code, reason))
        self.failConnection(code, reason)

    def onOpen(self):
        """
        Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onOpen`
        """
        # WebSocket connection established. Now let the user WAMP session factory
        # create a new WAMP session and fire off session open callback.
        try:
            self._session = self.factory._factory()
            self._session.onOpen(self)
        except Exception as e:
            if self.factory.debug_wamp:
                traceback.print_exc()
            # Exceptions raised in onOpen are fatal ..
            reason = "WAMP Internal Error ({0})".format(e)
            self._bailout(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_INTERNAL_ERROR, reason=reason)

    def onClose(self, wasClean, code, reason):
        """
        Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onClose`
        """
        # WAMP session might never have been established in the first place .. guard this!
        if self._session is not None:
            # WebSocket connection lost - fire off the WAMP
            # session close callback
            # noinspection PyBroadException
            try:
                if self.factory.debug_wamp:
                    print("WAMP-over-WebSocket transport lost: wasClean = {0}, code = {1}, reason = '{2}'".format(wasClean, code, reason))
                self._session.onClose(wasClean)
            except Exception:
                # Don't let a failing user callback mask the close event.
                print("Error invoking onClose():")
                traceback.print_exc()
            # Drop the session so isOpen() reports False from now on.
            self._session = None

    def onMessage(self, payload, isBinary):
        """
        Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onMessage`
        """
        try:
            # One WebSocket message may carry several WAMP messages (batched serializers).
            for msg in self._serializer.unserialize(payload, isBinary):
                if self.factory.debug_wamp:
                    print("RX {0}".format(msg))
                self._session.onMessage(msg)
        except ProtocolError as e:
            print(e)
            if self.factory.debug_wamp:
                traceback.print_exc()
            reason = "WAMP Protocol Error ({0})".format(e)
            self._bailout(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_PROTOCOL_ERROR, reason=reason)
        except Exception as e:
            if self.factory.debug_wamp:
                traceback.print_exc()
            reason = "WAMP Internal Error ({0})".format(e)
            self._bailout(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_INTERNAL_ERROR, reason=reason)

    def send(self, msg):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.send`
        """
        if self.isOpen():
            try:
                if self.factory.debug_wamp:
                    print("TX {0}".format(msg))
                payload, isBinary = self._serializer.serialize(msg)
            except Exception as e:
                # all exceptions raised from above should be serialization errors ..
                raise SerializationError("WAMP serialization error ({0})".format(e))
            else:
                self.sendMessage(payload, isBinary)
        else:
            raise TransportLost()

    def isOpen(self):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.isOpen`
        """
        # The transport counts as open exactly while a WAMP session is attached.
        return self._session is not None

    def close(self):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.close`
        """
        if self.isOpen():
            self.sendClose(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_NORMAL)
        else:
            raise TransportLost()

    def abort(self):
        """
        Implements :func:`autobahn.wamp.interfaces.ITransport.abort`
        """
        if self.isOpen():
            self._bailout(protocol.WebSocketProtocol.CLOSE_STATUS_CODE_GOING_AWAY)
        else:
            raise TransportLost()

# Register this mixin as a (virtual) implementation of the ITransport ABC.
ITransport.register(WampWebSocketProtocol)
def parseSubprotocolIdentifier(subprotocol):
    """
    Parse a WAMP WebSocket subprotocol identifier of the form
    ``wamp.<version>.<serializer>`` (e.g. ``wamp.2.json`` or
    ``wamp.2.msgpack.batched``).

    :returns: ``(version, serializerId)`` on success, ``(None, None)`` if the
        identifier could not be parsed.
    """
    try:
        s = subprotocol.split('.')
        if s[0] != "wamp":
            raise Exception("invalid protocol %s" % s[0])
        version = int(s[1])
        serializerId = '.'.join(s[2:])
        return version, serializerId
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # SystemExit and KeyboardInterrupt.
        return None, None
class WampWebSocketServerProtocol(WampWebSocketProtocol):
    """
    Mixin for WAMP-over-WebSocket server transports.
    """

    STRICT_PROTOCOL_NEGOTIATION = True

    def onConnect(self, request):
        """
        Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onConnect`
        """
        headers = {}
        # Accept the first client-offered subprotocol we have a serializer for.
        for subprotocol in request.protocols:
            version, serializerId = parseSubprotocolIdentifier(subprotocol)
            supported = (version == 2 and
                         serializerId in self.factory._serializers.keys())
            if supported:
                self._serializer = self.factory._serializers[serializerId]
                return subprotocol, headers
        if not self.STRICT_PROTOCOL_NEGOTIATION:
            # assume wamp.2.json
            self._serializer = self.factory._serializers['json']
            return None, headers
        raise http.HttpException(http.BAD_REQUEST[0], "This server only speaks WebSocket subprotocols %s" % ', '.join(self.factory.protocols))
class WampWebSocketClientProtocol(WampWebSocketProtocol):
    """
    Mixin for WAMP-over-WebSocket client transports.
    """

    STRICT_PROTOCOL_NEGOTIATION = True

    def onConnect(self, response):
        """
        Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onConnect`
        """
        if response.protocol in self.factory.protocols:
            # Server picked one of the subprotocols we offered.
            version, serializerId = parseSubprotocolIdentifier(response.protocol)
        elif self.STRICT_PROTOCOL_NEGOTIATION:
            raise Exception("Server does not speak any of the WebSocket subprotocols we requested (%s)." % ', '.join(self.factory.protocols))
        else:
            # assume wamp.2.json
            serializerId = 'json'
        self._serializer = self.factory._serializers[serializerId]
class WampWebSocketFactory(object):
    """
    Base class for WAMP-over-WebSocket transport factory mixins.
    """

    def __init__(self, factory, serializers=None, debug_wamp=False):
        """
        Ctor.

        :param factory: A callable that produces instances that implement
           :class:`autobahn.wamp.interfaces.ITransportHandler`
        :type factory: callable
        :param serializers: A list of WAMP serializers to use (or None for default
           serializers). Serializers must implement
           :class:`autobahn.wamp.interfaces.ISerializer`.
        :type serializers: list
        """
        assert(callable(factory))
        self._factory = factory
        self.debug_wamp = debug_wamp

        if serializers is None:
            # Build the default serializer list from whatever optional
            # serializer modules are importable (batched variants first).
            serializers = []

            # try MsgPack WAMP serializer
            try:
                from autobahn.wamp.serializer import MsgPackSerializer
            except ImportError:
                pass
            else:
                serializers.extend([MsgPackSerializer(batched=True),
                                    MsgPackSerializer()])

            # try JSON WAMP serializer
            try:
                from autobahn.wamp.serializer import JsonSerializer
            except ImportError:
                pass
            else:
                serializers.extend([JsonSerializer(batched=True),
                                    JsonSerializer()])

        # Applies to an explicitly passed empty list as well.
        if not serializers:
            raise Exception("could not import any WAMP serializers")

        # Lookup table by serializer id, plus the announced subprotocols.
        self._serializers = dict((ser.SERIALIZER_ID, ser)
                                 for ser in serializers)
        self._protocols = ["wamp.2.%s" % ser.SERIALIZER_ID for ser in serializers]
class WampWebSocketServerFactory(WampWebSocketFactory):
    """
    Mixin for WAMP-over-WebSocket server transport factories.
    """
    # Marker subclass only: all behavior lives in WampWebSocketFactory.
class WampWebSocketClientFactory(WampWebSocketFactory):
    """
    Mixin for WAMP-over-WebSocket client transport factories.
    """
    # Marker subclass only: all behavior lives in WampWebSocketFactory.
|
from datetime import datetime
from itertools import groupby
from operator import itemgetter
import dateutil.parser
from flask import render_template
from pytz import timezone
from indico.web.http_api.metadata.serializer import Serializer
def _deserialize_date(date_dict):
    """Turn a fossil date dict (``date``/``time``/``tz`` keys) into a
    timezone-aware :class:`~datetime.datetime`.

    Values that are already datetimes are passed through unchanged.
    """
    if isinstance(date_dict, datetime):
        return date_dict
    day = dateutil.parser.parse(date_dict['date']).date()
    clock = dateutil.parser.parse(date_dict['time']).time()
    naive = datetime.combine(day, clock)
    # Localize in the fossil's own timezone.
    return timezone(date_dict['tz']).localize(naive)
class HTML4Serializer(Serializer):
    """Serializer that renders HTTP API event results as an HTML page."""

    schemaless = False
    _mime = 'text/html; charset=utf-8'

    def _execute(self, fossils):
        results = fossils['results']
        # XXX: is this actually needed?!
        if not isinstance(results, list):
            results = [results]

        events = []
        for entry in results:
            events.append({'id': int(entry['id']),
                           'start_dt': _deserialize_date(entry['startDate']),
                           'category': entry['category'],
                           'url': entry['url'],
                           'title': entry['title'],
                           'room': entry['room']})

        # groupby needs its input pre-sorted by the grouping key.
        by_start = sorted(events, key=itemgetter('start_dt'))
        events_by_date = groupby(by_start, key=lambda ev: ev['start_dt'].date())
        return render_template('api/event_list.html', events_by_date=events_by_date, ts=fossils['ts'])
|
import logging
import sys

LOGGER = logging.getLogger('PYWPS')

# True when running under a Python 2 interpreter; selects the matching
# set of compatibility imports below.
PY2 = sys.version_info[0] == 2

if PY2:
    LOGGER.debug('Python 2.x')
    # On Python 2, text is the `unicode` type.
    text_type = unicode
    from StringIO import StringIO
    # flufl.enum backports the Enum API to Python 2.
    from flufl.enum import Enum
    from urlparse import urlparse
    from urlparse import urljoin
    from urllib2 import urlopen
else:
    LOGGER.debug('Python 3.x')
    # On Python 3, `str` is already unicode text.
    text_type = str
    from io import StringIO
    from enum import Enum
    from urllib.parse import urlparse
    from urllib.parse import urljoin
    from urllib.request import urlopen
|
import pytest
from molecule.command import check
def test_execute_raises_when_instance_not_created(
        patched_check_main, patched_print_error, molecule_instance):
    """`check` must abort with SystemExit when no instances were created."""
    command = check.Check({}, {}, molecule_instance)

    with pytest.raises(SystemExit):
        command.execute()

    expected = ('Instance(s) not created, `check` should be run against '
                'created instance(s).')
    patched_print_error.assert_called_once_with(expected)
def test_execute(mocker, patched_check_main, patched_ansible_playbook,
                 patched_print_info, molecule_instance):
    """A dry run is announced, delegated to ansible-playbook, and its
    return value handed back to the caller."""
    molecule_instance.state.change_state('created', True)
    molecule_instance.state.change_state('converged', True)
    molecule_instance._driver = mocker.Mock(
        ansible_connection_params={'debug': True})
    patched_ansible_playbook.return_value = 'returned'

    command = check.Check({}, {}, molecule_instance)
    result = command.execute()

    patched_print_info.assert_called_once_with(
        "Performing a 'Dry Run' of playbook...")
    patched_ansible_playbook.assert_called_once_with(hide_errors=True)
    assert result == 'returned'
|
# Python 2 script: give every read in a BAM a fresh UUID name while
# keeping mates of a pair under the same generated name.
import pysam
import sys

from re import sub
# NOTE(review): `random` is imported but never used below.
from random import random
from uuid import uuid4

if len(sys.argv) == 2:
    assert sys.argv[1].endswith('.bam')

    inbamfn = sys.argv[1]
    # Output next to the input: foo.bam -> foo.renamereads.bam
    outbamfn = sub('.bam$', '.renamereads.bam', inbamfn)

    inbam = pysam.Samfile(inbamfn, 'rb')
    outbam = pysam.Samfile(outbamfn, 'wb', template=inbam)

    # Original read name -> generated UUID for pairs whose first mate
    # has been seen but whose second mate is still pending. Entries are
    # deleted when the mate arrives, so memory stays low for
    # name-sorted input.
    paired = {}

    # Counters: n total, p paired, u unpaired, w written, m mates matched.
    n = 0
    p = 0
    u = 0
    w = 0
    m = 0

    # until_eof=True also yields unmapped reads.
    for read in inbam.fetch(until_eof=True):
        n += 1
        if read.is_paired:
            p += 1
            if read.qname in paired:
                # Second mate: reuse the UUID assigned to the first.
                uuid = paired[read.qname]
                del paired[read.qname]
                read.qname = uuid
                outbam.write(read)
                w += 1
                m += 1
            else:
                # First mate: assign a fresh UUID and remember it.
                newname = str(uuid4())
                paired[read.qname] = newname
                read.qname = newname
                outbam.write(read)
                w += 1
        else:
            # Unpaired read: fresh UUID, nothing to remember.
            u += 1
            read.qname = str(uuid4())
            outbam.write(read)
            w += 1

        # Progress report every million reads.
        if n % 1000000 == 0:
            print "Processed", n, "reads:", p, "paired,", u, "unpaired,", w, "written,", m, "mates found."

    outbam.close()
    inbam.close()
else:
    print "usage:",sys.argv[0],"<bam (uses less memory if sorted by readname)>"
|
try:
    # Standard-library importlib (Python 2.7+ / 3.x).
    import importlib
except ImportError:
    # Very old Django versions shipped their own importlib backport.
    from django.utils import importlib

import sys

# Interpreter major-version flags.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

if PY3:
    string_types = str
    text_type = str
else:
    # Python 2: `basestring` covers both `str` and `unicode`.
    string_types = basestring
    text_type = unicode
def import_attribute(name):
    """Return an attribute from a dotted path name (e.g. "path.to.func")."""
    # Split on the last dot: everything before is the module path,
    # the final component is the attribute to fetch from it.
    module_path, attr_name = name.rsplit('.', 1)
    return getattr(importlib.import_module(module_path), attr_name)
|
"""Add rsa_license_plate field
Revision ID: 98ad75de45b2
Revises: caae04cdec5c
Create Date: 2020-01-15 03:25:27.864784
"""
revision = '98ad75de45b2'
down_revision = 'caae04cdec5c'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable ``rsa_license_plate`` text column to ``markers``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('markers', sa.Column('rsa_license_plate', sa.Text(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``rsa_license_plate`` column from ``markers`` (reverses upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('markers', 'rsa_license_plate')
    # ### end Alembic commands ###
|
import numpy as np
def get_distribution(GC_fraction):
    """Return a simdna base distribution with the given GC content.

    A/T share the AT mass equally; G/C share the GC mass equally.
    """
    from simdna.util import DiscreteDistribution
    at_weight = (1 - GC_fraction) / 2
    gc_weight = GC_fraction / 2
    return DiscreteDistribution(
        {'A': at_weight, 'C': gc_weight, 'G': gc_weight, 'T': at_weight})
def simple_motif_embedding(motif_name, seq_length, num_seqs, GC_fraction):
    """
    Simulates sequences with a motif embedded anywhere in the sequence.

    Parameters
    ----------
    motif_name : str
        encode motif name; None yields pure background sequences
    seq_length : int
        length of sequence
    num_seqs: int
        number of sequences
    GC_fraction : float
        GC basepair fraction in background sequence

    Returns
    -------
    sequence_arr : 1darray
        Array with sequence strings.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    import simdna
    from simdna import synthetic

    if motif_name is None:
        # No motif: generate background-only sequences.
        embedders = []
    else:
        loaded_motifs = synthetic.LoadedEncodeMotifs(
            simdna.ENCODE_MOTIFS_PATH, pseudocountProb=0.001)
        sampler = synthetic.PwmSamplerFromLoadedMotifs(
            loaded_motifs, motif_name)
        # Embed the PWM sample (either strand) anywhere in the sequence.
        embedders = [synthetic.SubstringEmbedder(
            synthetic.ReverseComplementWrapper(sampler))]

    background = synthetic.ZeroOrderBackgroundGenerator(
        seq_length, discreteDistribution=get_distribution(GC_fraction))
    generator = synthetic.GenerateSequenceNTimes(
        synthetic.EmbedInABackground(background, embedders), num_seqs)
    sims = tuple(generator.generateSequences())

    sequence_arr = np.array([sim.seq for sim in sims])
    embedding_arr = [sim.embeddings for sim in sims]
    return sequence_arr, embedding_arr
def motif_density(motif_name,
                  seq_length,
                  num_seqs,
                  min_counts,
                  max_counts,
                  GC_fraction,
                  central_bp=None):
    """
    Returns sequences with motif density, along with embeddings array.

    Each sequence receives a uniformly sampled number of motif instances
    in [min_counts, max_counts]; if ``central_bp`` is given, instances
    are restricted to the central ``central_bp`` bases.
    """
    import simdna
    from simdna import synthetic

    loaded_motifs = synthetic.LoadedEncodeMotifs(
        simdna.ENCODE_MOTIFS_PATH, pseudocountProb=0.001)
    sampler = synthetic.PwmSamplerFromLoadedMotifs(loaded_motifs, motif_name)

    # Restrict placement to the sequence center only when requested.
    if central_bp is None:
        position_generator = synthetic.UniformPositionGenerator()
    else:
        position_generator = synthetic.InsideCentralBp(central_bp)

    count_generator = synthetic.UniformIntegerGenerator(min_counts, max_counts)
    embedders = [synthetic.RepeatedEmbedder(
        synthetic.SubstringEmbedder(
            synthetic.ReverseComplementWrapper(sampler), position_generator),
        count_generator)]

    background = synthetic.ZeroOrderBackgroundGenerator(
        seq_length, discreteDistribution=get_distribution(GC_fraction))
    sims = tuple(
        synthetic.GenerateSequenceNTimes(
            synthetic.EmbedInABackground(background, embedders),
            num_seqs).generateSequences())

    sequence_arr = np.array([sim.seq for sim in sims])
    embedding_arr = [sim.embeddings for sim in sims]
    return sequence_arr, embedding_arr
def simulate_single_motif_detection(motif_name, seq_length, num_pos, num_neg,
                                    GC_fraction):
    """
    Simulates two classes of sequences:
        - Positive class sequence with a motif
          embedded anywhere in the sequence
        - Negative class sequence without the motif

    Parameters
    ----------
    motif_name : str
        encode motif name
    seq_length : int
        length of sequence
    num_pos : int
        number of positive class sequences
    num_neg : int
        number of negative class sequences
    GC_fraction : float
        GC fraction in background sequence

    Returns
    -------
    sequence_arr : 1darray
        Array with sequence strings.
    y : 1darray
        Array with positive/negative class labels.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    # Positives carry the motif; negatives are background only
    # (motif_name=None).
    pos_seqs, pos_embeddings = simple_motif_embedding(
        motif_name, seq_length, num_pos, GC_fraction)
    neg_seqs, neg_embeddings = simple_motif_embedding(
        None, seq_length, num_neg, GC_fraction)

    sequence_arr = np.concatenate((pos_seqs, neg_seqs))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = pos_embeddings + neg_embeddings
    return sequence_arr, y, embedding_arr
def simulate_motif_counting(motif_name, seq_length, pos_counts, neg_counts,
                            num_pos, num_neg, GC_fraction):
    """
    Generates data for motif counting task.

    Parameters
    ----------
    motif_name : str
    seq_length : int
    pos_counts : list
        (min_counts, max_counts) for positive set.
    neg_counts : list
        (min_counts, max_counts) for negative set.
    num_pos : int
    num_neg : int
    GC_fraction : float

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : 1darray
        Contains labels.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    pos_count_sequence_array, positive_embedding_arr = motif_density(
        motif_name, seq_length, num_pos, pos_counts[0], pos_counts[1],
        GC_fraction)
    # BUG FIX: the negative set was previously generated with num_pos,
    # so the number of negative sequences disagreed with the
    # [[False]] * num_neg labels below whenever num_pos != num_neg.
    neg_count_sequence_array, negative_embedding_arr = motif_density(
        motif_name, seq_length, num_neg, neg_counts[0], neg_counts[1],
        GC_fraction)
    sequence_arr = np.concatenate((pos_count_sequence_array,
                                   neg_count_sequence_array))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = positive_embedding_arr + negative_embedding_arr
    return sequence_arr, y, embedding_arr
def simulate_motif_density_localization(motif_name, seq_length, center_size,
                                        min_motif_counts, max_motif_counts,
                                        num_pos, num_neg, GC_fraction):
    """
    Simulates two classes of sequences:
        - Positive class sequences with multiple motif instances
          in center of the sequence.
        - Negative class sequences with multiple motif instances
          anywhere in the sequence.

    The number of motif instances is uniformly sampled
    between minimum and maximum motif counts.

    Parameters
    ----------
    motif_name : str
        encode motif name
    seq_length : int
        length of sequence
    center_size : int
        length of central part of the sequence where motifs can be positioned
    min_motif_counts : int
        minimum number of motif instances
    max_motif_counts : int
        maximum number of motif instances
    num_pos : int
        number of positive class sequences
    num_neg : int
        number of negative class sequences
    GC_fraction : float
        GC fraction in background sequence

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : 1darray
        Contains labels.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    # Positives restrict motif placement to the central window; negatives
    # allow placement anywhere (no central_bp argument).
    localized_seqs, pos_embeddings = motif_density(
        motif_name, seq_length, num_pos, min_motif_counts, max_motif_counts,
        GC_fraction, center_size)
    unlocalized_seqs, neg_embeddings = motif_density(
        motif_name, seq_length, num_neg, min_motif_counts, max_motif_counts,
        GC_fraction)

    sequence_arr = np.concatenate((localized_seqs, unlocalized_seqs))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = pos_embeddings + neg_embeddings
    return sequence_arr, y, embedding_arr
def simulate_multi_motif_embedding(motif_names, seq_length, min_num_motifs,
                                   max_num_motifs, num_seqs, GC_fraction):
    """
    Generates data for multi motif recognition task.

    Parameters
    ----------
    motif_names : list
        List of strings.
    seq_length : int
    min_num_motifs : int
    max_num_motifs : int
    num_seqs : int
    GC_fraction : float

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : ndarray
        Contains labels for each motif.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    import simdna
    from simdna import synthetic

    loaded_motifs = synthetic.LoadedEncodeMotifs(
        simdna.ENCODE_MOTIFS_PATH, pseudocountProb=0.001)

    def make_embedder(name):
        # One named embedder per motif so labels can be traced back to it.
        sampler = synthetic.PwmSamplerFromLoadedMotifs(loaded_motifs, name)
        return synthetic.SubstringEmbedder(
            synthetic.ReverseComplementWrapper(sampler), name=name)

    quantity_generator = synthetic.UniformIntegerGenerator(
        min_num_motifs, max_num_motifs)
    # Each sequence gets a random subset (of sampled size) of the motifs.
    combined_embedder = [synthetic.RandomSubsetOfEmbedders(
        quantity_generator,
        [make_embedder(name) for name in motif_names])]

    background = synthetic.ZeroOrderBackgroundGenerator(
        seq_length, discreteDistribution=get_distribution(GC_fraction))
    sims = tuple(
        synthetic.GenerateSequenceNTimes(
            synthetic.EmbedInABackground(background, combined_embedder),
            num_seqs).generateSequences())

    sequence_arr = np.array([sim.seq for sim in sims])
    # One boolean per motif per sequence: was that motif embedded?
    label_generator = synthetic.IsInTraceLabelGenerator(np.asarray(motif_names))
    y = np.array([label_generator.generateLabels(sim) for sim in sims],
                 dtype=bool)
    embedding_arr = [sim.embeddings for sim in sims]
    return sequence_arr, y, embedding_arr
def simulate_differential_accessibility(
        pos_motif_names, neg_motif_names, seq_length, min_num_motifs,
        max_num_motifs, num_pos, num_neg, GC_fraction):
    """
    Generates data for differential accessibility task.

    Parameters
    ----------
    pos_motif_names : list
        List of strings.
    neg_motif_names : list
        List of strings.
    seq_length : int
    min_num_motifs : int
    max_num_motifs : int
    num_pos : int
    num_neg : int
    GC_fraction : float

    Returns
    -------
    sequence_arr : 1darray
        Contains sequence strings.
    y : 1darray
        Contains labels.
    embedding_arr: 1darray
        Array of embedding objects.
    """
    # Multi-motif labels from the underlying simulation are discarded;
    # the class label here is simply positive vs. negative motif set.
    pos_seqs, _, pos_embeddings = simulate_multi_motif_embedding(
        pos_motif_names, seq_length, min_num_motifs, max_num_motifs, num_pos,
        GC_fraction)
    neg_seqs, _, neg_embeddings = simulate_multi_motif_embedding(
        neg_motif_names, seq_length, min_num_motifs, max_num_motifs, num_neg,
        GC_fraction)

    sequence_arr = np.concatenate((pos_seqs, neg_seqs))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = pos_embeddings + neg_embeddings
    return sequence_arr, y, embedding_arr
def simulate_heterodimer_grammar(motif1, motif2, seq_length, min_spacing,
                                 max_spacing, num_pos, num_neg, GC_fraction):
    """
    Simulates two classes of sequences with motif1 and motif2:
        - Positive class sequences with motif1 and motif2 positioned
          min_spacing and max_spacing apart
        - Negative class sequences with independent motif1 and motif2
          positioned anywhere in the sequence, not as a heterodimer grammar

    Parameters
    ----------
    seq_length : int, length of sequence
    GC_fraction : float, GC fraction in background sequence
    num_pos : int, number of positive class sequences
    num_neg : int, number of negative class sequences
    motif1 : str, encode motif name
    motif2 : str, encode motif name
    min_spacing : int, minimum inter motif spacing
    max_spacing : int, maximum inter motif spacing

    Returns
    -------
    sequence_arr : 1darray
        Array with sequence strings.
    y : 1darray
        Array with positive/negative class labels.
    embedding_arr: list
        List of embedding objects.
    """
    import simdna
    from simdna import synthetic

    loaded_motifs = synthetic.LoadedEncodeMotifs(
        simdna.ENCODE_MOTIFS_PATH, pseudocountProb=0.001)

    # PWM samplers for both motifs (either strand).
    sampler1 = synthetic.ReverseComplementWrapper(
        synthetic.PwmSamplerFromLoadedMotifs(loaded_motifs, motif1))
    sampler2 = synthetic.ReverseComplementWrapper(
        synthetic.PwmSamplerFromLoadedMotifs(loaded_motifs, motif2))

    # Positive class: embed the two motifs as a pair with uniformly
    # sampled spacing in [min_spacing, max_spacing].
    spacing_generator = synthetic.UniformIntegerGenerator(min_spacing,
                                                          max_spacing)
    pair_embedder = synthetic.EmbeddableEmbedder(
        synthetic.PairEmbeddableGenerator(sampler1, sampler2,
                                          spacing_generator))
    background = synthetic.ZeroOrderBackgroundGenerator(
        seq_length, discreteDistribution=get_distribution(GC_fraction))
    sims = tuple(
        synthetic.GenerateSequenceNTimes(
            synthetic.EmbedInABackground(background, [pair_embedder]),
            num_pos).generateSequences())
    grammar_sequence_arr = np.array([sim.seq for sim in sims])
    positive_embedding_arr = [sim.embeddings for sim in sims]

    # Negative class: both motifs embedded independently (no grammar).
    nongrammar_sequence_arr, _, negative_embedding_arr = simulate_multi_motif_embedding(
        [motif1, motif2], seq_length, 2, 2, num_neg, GC_fraction)

    sequence_arr = np.concatenate((grammar_sequence_arr, nongrammar_sequence_arr))
    y = np.array([[True]] * num_pos + [[False]] * num_neg)
    embedding_arr = positive_embedding_arr + negative_embedding_arr
    return sequence_arr, y, embedding_arr
|
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    help = u"Convert user passwords to use built-in Django bcrypt."

    def handle(self, *args, **options):
        """Rewrite legacy 'bc$' password hashes to Django's 'bcrypt$' prefix."""
        users = User.objects.all()
        self.stdout.write(u"Updating %s user passwords..." % users.count())
        for user in users:
            stored = user.password
            # Only legacy hashes carry the 'bc$' prefix; swap it for the
            # prefix Django's built-in bcrypt hasher expects.
            if stored.startswith('bc$'):
                user.password = 'bcrypt$' + stored[3:]
                user.save()
        self.stdout.write(u"User passwords migrated successfully.")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.